Mirror of https://github.com/aljazceru/Auto-GPT.git
black
@@ -60,8 +60,10 @@ def get_memory(cfg, init=False):
             memory = RedisMemory(cfg)
     elif cfg.memory_backend == "weaviate":
         if not WeaviateMemory:
-            print("Error: Weaviate is not installed. Please install weaviate-client to"
-                  " use Weaviate as a memory backend.")
+            print(
+                "Error: Weaviate is not installed. Please install weaviate-client to"
+                " use Weaviate as a memory backend."
+            )
         else:
             memory = WeaviateMemory(cfg)
     elif cfg.memory_backend == "milvus":
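The reflowed print call above is the output of Black's line-length-driven formatting rather than a hand edit. As a rough illustration (a standalone sketch, assuming the black package is installed; it is not part of the commit), the same multi-line layout can be reproduced with Black's documented Python API:

# Sketch: reproduce Black's reflow of an over-long call via its Python API.
# Assumes `pip install black`; format_str is Black's documented entry point.
import black

src = (
    'print("Error: Weaviate is not installed. Please install weaviate-client to"\n'
    '      " use Weaviate as a memory backend.")\n'
)

# Black wraps the call in its own parentheses block once the logical line
# exceeds the default 88-column limit, matching the "+" lines in the hunk.
formatted = black.format_str(src, mode=black.FileMode())
print(formatted)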
@@ -93,5 +95,5 @@ __all__ = [
     "PineconeMemory",
     "NoMemory",
     "MilvusMemory",
-    "WeaviateMemory"
+    "WeaviateMemory",
 ]
@@ -53,7 +53,7 @@ class NoMemory(MemoryProviderSingleton):
         """
         return ""

-    def get_relevant(self, data: str, num_relevant: int = 5) ->list[Any] | None:
+    def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
         """
         Returns all the data in the memory that is relevant to the given data.
         NoMemory always returns None.
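The change here only normalizes spacing around the return annotation; the underlying pattern is a null-object memory backend whose get_relevant never yields matches. A minimal sketch of that pattern is below (illustrative names only; the project's real class inherits MemoryProviderSingleton and implements the full provider interface):

# Minimal sketch of a "no-op" memory provider in the spirit of NoMemory.
# Requires Python 3.10+ for the `list[Any] | None` union syntax.
from typing import Any


class NullMemory:
    def add(self, data: str) -> str:
        # Nothing is stored, so there is nothing to report back.
        return ""

    def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
        # A no-op backend has no stored items, so it never returns matches.
        return None


memory = NullMemory()
assert memory.get_relevant("some query") is None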
@@ -58,6 +58,8 @@ class Spinner:
             delay: Delay in seconds before updating the message
         """
         time.sleep(delay)
-        sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")  # Clear the current message
+        sys.stdout.write(
+            f"\r{' ' * (len(self.message) + 2)}\r"
+        )  # Clear the current message
         sys.stdout.flush()
         self.message = new_message
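The wrapped sys.stdout.write call is the usual carriage-return trick for overwriting a status line in place. A standalone sketch of the technique (not the project's Spinner class; the function name is illustrative):

# Standalone sketch of the carriage-return trick the Spinner uses to
# replace its status message in place.
import sys
import time


def update_status(old_message: str, new_message: str, delay: float = 0.1) -> None:
    time.sleep(delay)
    # "\r" returns the cursor to column 0; overwriting with spaces erases
    # whatever the previous, possibly longer, message left behind.
    sys.stdout.write(f"\r{' ' * (len(old_message) + 2)}\r")
    sys.stdout.flush()
    sys.stdout.write(new_message)
    sys.stdout.flush()


update_status("Thinking...", "Done.")
print()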
@@ -32,7 +32,7 @@ def readable_file_size(size, decimal_places=2):
         size: Size in bytes
         decimal_places (int): Number of decimal places to display
     """
-    for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
+    for unit in ["B", "KB", "MB", "GB", "TB"]:
         if size < 1024.0:
             break
         size /= 1024.0
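This hunk only swaps quote style inside the unit loop. For context, a standalone version of such a helper might look like the sketch below; the return expression is an assumption, since the diff does not show how the result is formatted:

# Minimal sketch of a human-readable file size helper in the style of
# readable_file_size. The loop mirrors the hunk; the return line is assumed.
def readable_file_size(size: float, decimal_places: int = 2) -> str:
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if size < 1024.0:
            break
        size /= 1024.0
    return f"{size:.{decimal_places}f} {unit}"


print(readable_file_size(3_500_000))  # e.g. "3.34 MB"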
@@ -36,6 +36,8 @@ def safe_path_join(base: Path, *paths: str | Path) -> Path:
     joined_path = base.joinpath(*paths).resolve()

     if not joined_path.is_relative_to(base):
-        raise ValueError(f"Attempted to access path '{joined_path}' outside of working directory '{base}'.")
+        raise ValueError(
+            f"Attempted to access path '{joined_path}' outside of working directory '{base}'."
+        )

     return joined_path
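The wrapped ValueError belongs to a path-traversal guard: the resolved join must stay under the base directory. The logic shown in the hunk can be exercised with a small standalone sketch (docstring omitted, imports and usage added; Python 3.10+ for the str | Path union, 3.9+ for Path.is_relative_to):

# Standalone sketch of the traversal guard shown in the hunk.
from pathlib import Path


def safe_path_join(base: Path, *paths: str | Path) -> Path:
    joined_path = base.joinpath(*paths).resolve()

    if not joined_path.is_relative_to(base):
        raise ValueError(
            f"Attempted to access path '{joined_path}' outside of working directory '{base}'."
        )

    return joined_path


base = Path("/tmp/workspace").resolve()
print(safe_path_join(base, "notes", "todo.txt"))  # stays inside the base
try:
    safe_path_join(base, "..", "etc", "passwd")   # escapes the base
except ValueError as err:
    print(err)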
@@ -9,12 +9,12 @@ def benchmark_entrepeneur_gpt_with_difficult_user():

     # Read the current ai_settings.yaml file and store its content.
     ai_settings = None
-    if os.path.exists('ai_settings.yaml'):
-        with open('ai_settings.yaml', 'r') as f:
+    if os.path.exists("ai_settings.yaml"):
+        with open("ai_settings.yaml", "r") as f:
             ai_settings = f.read()
-        os.remove('ai_settings.yaml')
+        os.remove("ai_settings.yaml")

-    input_data = '''Entrepreneur-GPT
+    input_data = """Entrepreneur-GPT
 an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.
 Increase net worth.
 Develop and manage multiple businesses autonomously.
@@ -72,27 +72,34 @@ Refocus, please.
 Disappointing suggestion.
 Not helpful.
 Needs improvement.
-Not what I need.'''
+Not what I need."""
     # TODO: add questions above, to distract it even more.

-    command = f'{sys.executable} -m autogpt'
+    command = f"{sys.executable} -m autogpt"

-    process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                               shell=True)
+    process = subprocess.Popen(
+        command,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        shell=True,
+    )

     stdout_output, stderr_output = process.communicate(input_data.encode())

     # Decode the output and print it
-    stdout_output = stdout_output.decode('utf-8')
-    stderr_output = stderr_output.decode('utf-8')
+    stdout_output = stdout_output.decode("utf-8")
+    stderr_output = stderr_output.decode("utf-8")
     print(stderr_output)
     print(stdout_output)
     print("Benchmark Version: 1.0.0")
     print("JSON ERROR COUNT:")
-    count_errors = stdout_output.count("Error: The following AI output couldn't be converted to a JSON:")
-    print(f'{count_errors}/50 Human feedbacks')
+    count_errors = stdout_output.count(
+        "Error: The following AI output couldn't be converted to a JSON:"
+    )
+    print(f"{count_errors}/50 Human feedbacks")


 # Run the test case.
-if __name__ == '__main__':
+if __name__ == "__main__":
     benchmark_entrepeneur_gpt_with_difficult_user()
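The benchmark drives the CLI by piping a scripted "difficult user" transcript into the child process and then counting an error marker in its captured output. A reduced, runnable sketch of that pattern is below; the child command is a stand-in that echoes stdin back, not the real `python -m autogpt` run, and the marker string is illustrative:

# Reduced sketch of the benchmark's driving pattern: feed scripted input to a
# child process over stdin, capture its output, and count a marker string.
import subprocess
import sys

scripted_input = "first reply\nsecond reply\n"

# Trivial stand-in child so the sketch runs anywhere: echo stdin to stdout.
command = f'{sys.executable} -c "import sys; sys.stdout.write(sys.stdin.read())"'

process = subprocess.Popen(
    command,
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    shell=True,
)
stdout_output, stderr_output = process.communicate(scripted_input.encode())

marker = "reply"
count = stdout_output.decode("utf-8").count(marker)
print(f"{count} occurrences of {marker!r}")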