black

@@ -60,8 +60,10 @@ def get_memory(cfg, init=False):
             memory = RedisMemory(cfg)
     elif cfg.memory_backend == "weaviate":
         if not WeaviateMemory:
-            print("Error: Weaviate is not installed. Please install weaviate-client to"
-                  " use Weaviate as a memory backend.")
+            print(
+                "Error: Weaviate is not installed. Please install weaviate-client to"
+                " use Weaviate as a memory backend."
+            )
         else:
             memory = WeaviateMemory(cfg)
     elif cfg.memory_backend == "milvus":
@@ -93,5 +95,5 @@ __all__ = [
     "PineconeMemory",
     "NoMemory",
     "MilvusMemory",
-    "WeaviateMemory"
+    "WeaviateMemory",
 ]
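
This hunk changes only formatting, but the `if not WeaviateMemory:` guard it wraps implies that the weaviate import is optional: the name must exist (and be falsy) even when weaviate-client is not installed. A minimal sketch of that import guard, assuming a module layout that this diff does not show:

try:
    # Assumed module path; the diff does not show where WeaviateMemory lives.
    from autogpt.memory.weaviate import WeaviateMemory
except ImportError:
    # Keep the name bound but falsy so get_memory() can detect the missing
    # dependency and print the error message formatted above.
    WeaviateMemory = None
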
@@ -53,7 +53,7 @@ class NoMemory(MemoryProviderSingleton):
         """
         return ""
 
-    def get_relevant(self, data: str, num_relevant: int = 5) ->list[Any] | None:
+    def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
         """
         Returns all the data in the memory that is relevant to the given data.
         NoMemory always returns None.
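
The annotation being fixed here belongs to a stub backend: per the docstring, NoMemory stores nothing and get_relevant always returns None. A self-contained sketch of such a no-op method (class name shortened for illustration, not taken from the diff):

from typing import Any


class NoMemorySketch:
    """Illustrative stand-in: a memory backend that never stores anything."""

    def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
        # Nothing is ever stored, so there is never anything relevant to return.
        return None
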
@@ -58,6 +58,8 @@ class Spinner:
             delay: Delay in seconds before updating the message
         """
         time.sleep(delay)
-        sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") # Clear the current message
+        sys.stdout.write(
+            f"\r{' ' * (len(self.message) + 2)}\r"
+        )  # Clear the current message
         sys.stdout.flush()
         self.message = new_message
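
The call being rewrapped here uses a standard terminal trick: "\r" moves the cursor back to column 0, writing spaces over the old text blanks it out, and the trailing "\r" leaves the cursor ready for the next message. A standalone sketch of the same idiom, with messages made up for illustration:

import sys
import time

message = "Thinking..."
sys.stdout.write(message)
sys.stdout.flush()
time.sleep(1)

# Return to column 0, overwrite the old message with spaces, return again.
sys.stdout.write(f"\r{' ' * (len(message) + 2)}\r")
sys.stdout.write("Done")
sys.stdout.flush()
print()
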
@@ -32,7 +32,7 @@ def readable_file_size(size, decimal_places=2):
         size: Size in bytes
         decimal_places (int): Number of decimal places to display
     """
-    for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
+    for unit in ["B", "KB", "MB", "GB", "TB"]:
         if size < 1024.0:
             break
         size /= 1024.0
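
The hunk shows only the unit loop of readable_file_size. A plausible complete version, reconstructed from the visible lines, is sketched below; the final return formatting is an assumption, since the diff cuts off before it:

def readable_file_size(size, decimal_places=2):
    """Convert a size in bytes into a human-readable string.

    Args:
        size: Size in bytes
        decimal_places (int): Number of decimal places to display
    """
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if size < 1024.0:
            break
        size /= 1024.0
    # Assumed: the diff does not show the return statement.
    return f"{size:.{decimal_places}f} {unit}"

Under that assumption, readable_file_size(1536) would yield "1.50 KB".
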
@@ -36,6 +36,8 @@ def safe_path_join(base: Path, *paths: str | Path) -> Path:
     joined_path = base.joinpath(*paths).resolve()
 
     if not joined_path.is_relative_to(base):
-        raise ValueError(f"Attempted to access path '{joined_path}' outside of working directory '{base}'.")
+        raise ValueError(
+            f"Attempted to access path '{joined_path}' outside of working directory '{base}'."
+        )
 
     return joined_path
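
The check being rewrapped is what blocks path traversal: the joined path is resolved and must remain inside base. A small, self-contained usage sketch of the function as it appears in this hunk (directory names chosen for illustration):

from pathlib import Path


def safe_path_join(base: Path, *paths: str | Path) -> Path:
    """Join paths onto base and refuse any result that escapes it."""
    joined_path = base.joinpath(*paths).resolve()

    if not joined_path.is_relative_to(base):
        raise ValueError(
            f"Attempted to access path '{joined_path}' outside of working directory '{base}'."
        )

    return joined_path


workspace = Path("/tmp/workspace").resolve()
print(safe_path_join(workspace, "notes", "todo.txt"))  # stays inside, returned

try:
    safe_path_join(workspace, "..", "secret.txt")  # resolves outside, rejected
except ValueError as err:
    print(err)
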
@@ -9,12 +9,12 @@ def benchmark_entrepeneur_gpt_with_difficult_user():
 
     # Read the current ai_settings.yaml file and store its content.
     ai_settings = None
-    if os.path.exists('ai_settings.yaml'):
-        with open('ai_settings.yaml', 'r') as f:
+    if os.path.exists("ai_settings.yaml"):
+        with open("ai_settings.yaml", "r") as f:
             ai_settings = f.read()
-        os.remove('ai_settings.yaml')
+        os.remove("ai_settings.yaml")
 
-    input_data = '''Entrepreneur-GPT
+    input_data = """Entrepreneur-GPT
 an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.
 Increase net worth.
 Develop and manage multiple businesses autonomously.
@@ -72,27 +72,34 @@ Refocus, please.
 Disappointing suggestion.
 Not helpful.
 Needs improvement.
-Not what I need.'''
+Not what I need."""
     # TODO: add questions above, to distract it even more.
 
-    command = f'{sys.executable} -m autogpt'
+    command = f"{sys.executable} -m autogpt"
 
-    process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                               shell=True)
+    process = subprocess.Popen(
+        command,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        shell=True,
+    )
 
     stdout_output, stderr_output = process.communicate(input_data.encode())
 
     # Decode the output and print it
-    stdout_output = stdout_output.decode('utf-8')
-    stderr_output = stderr_output.decode('utf-8')
+    stdout_output = stdout_output.decode("utf-8")
+    stderr_output = stderr_output.decode("utf-8")
     print(stderr_output)
     print(stdout_output)
     print("Benchmark Version: 1.0.0")
     print("JSON ERROR COUNT:")
-    count_errors = stdout_output.count("Error: The following AI output couldn't be converted to a JSON:")
-    print(f'{count_errors}/50 Human feedbacks')
+    count_errors = stdout_output.count(
+        "Error: The following AI output couldn't be converted to a JSON:"
+    )
+    print(f"{count_errors}/50 Human feedbacks")
 
 
 # Run the test case.
-if __name__ == '__main__':
+if __name__ == "__main__":
     benchmark_entrepeneur_gpt_with_difficult_user()
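
The benchmark's mechanic is simple: scripted "difficult user" replies are piped into the CLI over stdin, all output is captured, and the captured text is scanned for a marker string to count failures. A stripped-down sketch of that mechanic, with a trivial echo child process standing in for python -m autogpt so it runs anywhere:

import subprocess
import sys

scripted_input = """first reply
Error: marker
second reply
Error: marker
"""

# Feed the scripted replies over stdin and capture everything the child prints.
process = subprocess.Popen(
    [sys.executable, "-c", "import sys; sys.stdout.write(sys.stdin.read())"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
stdout_output, stderr_output = process.communicate(scripted_input.encode())

# Count occurrences of the marker, mirroring the JSON error count above.
print(stdout_output.decode("utf-8").count("Error: marker"))  # prints 2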