Merge remote-tracking branch 'upstream/master' into fix-user-feedback-json-error
.gitignore (vendored, 3 changes)
@@ -7,9 +7,10 @@ package-lock.json
 auto_gpt_workspace/*
 *.mpeg
 .env
-venv/*
+*venv/*
 outputs/*
 ai_settings.yaml
 .vscode
+.idea/*
 auto-gpt.json
 log.txt
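The broadened virtualenv pattern is worth a note. A minimal illustration, assuming standard gitignore glob semantics, of what *venv/* now matches that the old venv/* did not:

    venv/bin/python        (ignored before and after)
    .venv/bin/python       (newly ignored, since ".venv" ends in "venv")
    my-venv/lib/site.py    (newly ignored)

Contents of any top-level directory whose name ends in "venv" are now ignored, so common variants like .venv no longer need separate entries. The added .idea/* entry does the same for JetBrains IDE project files.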
CONTRIBUTING.md (1 change)
@@ -6,7 +6,7 @@ To contribute to this GitHub project, you can follow these steps:
 2. Clone the repository to your local machine using the following command:
 
 ```
-git clone https://github.com/Torantulino/Auto-GPT
+git clone https://github.com/<YOUR-GITHUB-USERNAME>/Auto-GPT
 ```
 3. Create a new branch for your changes using the following command:
 
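The hunk above points contributors at their own fork instead of the upstream repository. For context, a hedged sketch of the clone-and-branch flow this step sits in (the checkout command is illustrative and not shown in the diff; the branch name reuses this PR's own):

    git clone https://github.com/<YOUR-GITHUB-USERNAME>/Auto-GPT
    cd Auto-GPT
    git checkout -b fix-user-feedback-json-error

Cloning the fork rather than Torantulino/Auto-GPT means the contributor can push the branch and open a pull request from it.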
README.md (1 change)
@@ -58,7 +58,7 @@ Your support is greatly appreciated
 
 ## 📋 Requirements
 - [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
-- OpenAI API key
+- [OpenAI API key](https://platform.openai.com/account/api-keys)
 - [PINECONE API key](https://www.pinecone.io/)
 
 Optional:
scripts/main.py (3 hunks)
@@ -301,6 +301,7 @@ def prompt_user():
 def parse_arguments():
     """Parses the arguments passed to the script"""
     global cfg
+    cfg.set_debug_mode(False)
     cfg.set_continuous_mode(False)
     cfg.set_speak_mode(False)
 
@@ -332,6 +333,9 @@ def parse_arguments():
         print_to_console("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_fast_llm_model(cfg.smart_llm_model)
 
+    if args.debug:
+        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_debug_mode(True)
 
 
 # TODO: fill in llm values here
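Together, the two hunks above implement the standard argparse boolean-flag pattern: default the setting off, then flip it on when the flag is present. A self-contained sketch of that pattern, assuming a --debug store_true flag like the one the diff reads from args.debug (the parser here is illustrative, not Auto-GPT's full one):

import argparse

parser = argparse.ArgumentParser(description="illustrative flag parsing")
parser.add_argument("--debug", action="store_true", help="enable debug mode")

# Simulate invoking the script with --debug on the command line.
args = parser.parse_args(["--debug"])

debug_mode = False  # mirrors the cfg.set_debug_mode(False) default
if args.debug:
    print("Debug Mode: ENABLED")
    debug_mode = True  # mirrors cfg.set_debug_mode(True)

The next hunk, further down in scripts/main.py, addresses the crash this PR is named after.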
@@ -425,7 +429,7 @@ while True:
             f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
 
         # Execute command
-        if command_name.lower().startswith( "error" ):
+        if command_name is not None and command_name.lower().startswith( "error" ):
             result = f"Command {command_name} threw the following error: " + arguments
         elif command_name == "human_feedback":
             result = f"Human feedback: {user_input}"
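This None guard is the heart of the user-feedback JSON fix: when the assistant's reply cannot be parsed as a command, command_name can come back as None, and the old code crashed on .lower(). A minimal reproduction of the failure mode, with a hypothetical get_command standing in for Auto-GPT's real response parsing:

import json

def get_command(response):
    # Hypothetical stand-in: returns (None, error message) when the reply
    # is not the JSON command structure the agent expects.
    try:
        parsed = json.loads(response)
        return parsed["command"]["name"], parsed["command"]["args"]
    except (json.JSONDecodeError, KeyError) as e:
        return None, f"Invalid JSON: {e}"

command_name, arguments = get_command("plain human feedback, not JSON")

# Old check: command_name.lower() raises AttributeError on None.
# New check short-circuits before calling .lower():
if command_name is not None and command_name.lower().startswith("error"):
    print(f"Command {command_name} threw the following error: " + arguments)
else:
    print("No error command; execution continues")  # this branch runs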
scripts/memory/local.py (2 changes)
@@ -54,8 +54,8 @@ class LocalCache(MemoryProviderSingleton):
         vector = vector[np.newaxis, :]
         self.data.embeddings = np.concatenate(
             [
-                vector,
                 self.data.embeddings,
+                vector,
             ],
             axis=0,
         )
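The only change here is the argument order inside np.concatenate: the new embedding row is appended after the existing matrix rather than prepended before it, so row i of the matrix keeps lining up with the i-th text stored. A small plain-NumPy sketch of the difference (not the LocalCache class itself):

import numpy as np

embeddings = np.arange(8, dtype=float).reshape(2, 4)  # two existing rows
vector = np.full((1, 4), 9.0)                         # freshly added embedding

old_order = np.concatenate([vector, embeddings], axis=0)  # new row lands at index 0
new_order = np.concatenate([embeddings, vector], axis=0)  # new row lands last

print(old_order[0])   # [9. 9. 9. 9.] -> every existing row shifted down by one
print(new_order[-1])  # [9. 9. 9. 9.] -> existing row indices unchanged

Appending matters because the cache maps relevance scores back to stored texts by row index; prepending silently shifted every previously stored row.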
tests/integration/memory_tests.py (new file, 49 lines)
@@ -0,0 +1,49 @@
+import unittest
+import random
+import string
+import sys
+from pathlib import Path
+# Add the parent directory of the 'scripts' folder to the Python path
+sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts'))
+from config import Config
+from memory.local import LocalCache
+
+class TestLocalCache(unittest.TestCase):
+
+    def random_string(self, length):
+        return ''.join(random.choice(string.ascii_letters) for _ in range(length))
+
+    def setUp(self):
+        cfg = Config()
+        self.cache = LocalCache(cfg)
+        self.cache.clear()
+
+        # Add example texts to the cache
+        self.example_texts = [
+            'The quick brown fox jumps over the lazy dog',
+            'I love machine learning and natural language processing',
+            'The cake is a lie, but the pie is always true',
+            'ChatGPT is an advanced AI model for conversation'
+        ]
+
+        for text in self.example_texts:
+            self.cache.add(text)
+
+        # Add some random strings to test noise
+        for _ in range(5):
+            self.cache.add(self.random_string(10))
+
+    def test_get_relevant(self):
+        query = "I'm interested in artificial intelligence and NLP"
+        k = 3
+        relevant_texts = self.cache.get_relevant(query, k)
+
+        print(f"Top {k} relevant texts for the query '{query}':")
+        for i, text in enumerate(relevant_texts, start=1):
+            print(f"{i}. {text}")
+
+        self.assertEqual(len(relevant_texts), k)
+        self.assertIn(self.example_texts[1], relevant_texts)
+
+if __name__ == '__main__':
+    unittest.main()
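To try the new test locally, something like this should work from the repository root, assuming requirements are installed and a valid OpenAI key is configured (LocalCache embeds each added text through the API, so setUp makes live calls):

    python -m unittest tests/integration/memory_tests.py

The assertion at the end checks that the machine-learning/NLP sentence (example_texts[1]) ranks in the top 3 results for an AI-and-NLP query even with five random noise strings in the cache.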