Merge pull request #837 from AlrikOlson/prompt-generator
Refactor seed prompt loading: replace text file with Python class for easier maintenance
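In practice the change swaps a file read for a function call: code that previously called data.load_prompt() now calls prompt.get_prompt(), which assembles the same text from a PromptGenerator instance. A minimal smoke check, hypothetical and assuming it is run from the repository root with scripts/ importable:

import sys
sys.path.append("scripts")  # assumption: run from the repo root

from prompt import get_prompt  # added in this PR

# Print the opening of the generated seed prompt to eyeball the output.
print(get_prompt()[:200])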
@@ -1,6 +1,6 @@
 import yaml
-import data
 import os
+from prompt import get_prompt


 class AIConfig:
@@ -91,5 +91,5 @@ class AIConfig:
         for i, goal in enumerate(self.ai_goals):
             full_prompt += f"{i+1}. {goal}\n"

-        full_prompt += f"\n\n{data.load_prompt()}"
+        full_prompt += f"\n\n{get_prompt()}"
         return full_prompt
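The second hunk above sits in AIConfig.construct_full_prompt(): the numbered goals are written first and the generated prompt is appended after them. A hedged sketch of the resulting layout, with stand-in goal values that are not from the diff:

# Illustration only: goals precede the PromptGenerator output.
goals = ["Goal one", "Goal two"]  # stand-in for self.ai_goals
full_prompt = "GOALS:\n\n"
for i, goal in enumerate(goals):
    full_prompt += f"{i+1}. {goal}\n"
full_prompt += "\n\nConstraints:\n1. ..."  # where get_prompt() output lands
print(full_prompt)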
@@ -1,19 +0,0 @@
-import os
-from pathlib import Path
-
-
-def load_prompt():
-    """Load the prompt from data/prompt.txt"""
-    try:
-        # get directory of this file:
-        file_dir = Path(__file__).parent
-        prompt_file_path = file_dir / "data" / "prompt.txt"
-
-        # Load the prompt from data/prompt.txt
-        with open(prompt_file_path, "r") as prompt_file:
-            prompt = prompt_file.read()
-
-        return prompt
-    except FileNotFoundError:
-        print("Error: Prompt file not found", flush=True)
-        return ""
@@ -1,63 +0,0 @@
-CONSTRAINTS:
-
-1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.
-2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
-3. No user assistance
-4. Exclusively use the commands listed in double quotes e.g. "command name"
-
-COMMANDS:
-
-1. Google Search: "google", args: "input": "<search>"
-5. Browse Website: "browse_website", args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"
-6. Start GPT Agent: "start_agent", args: "name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"
-7. Message GPT Agent: "message_agent", args: "key": "<key>", "message": "<message>"
-8. List GPT Agents: "list_agents", args: ""
-9. Delete GPT Agent: "delete_agent", args: "key": "<key>"
-10. Write to file: "write_to_file", args: "file": "<file>", "text": "<text>"
-11. Read file: "read_file", args: "file": "<file>"
-12. Append to file: "append_to_file", args: "file": "<file>", "text": "<text>"
-13. Delete file: "delete_file", args: "file": "<file>"
-14. Search Files: "search_files", args: "directory": "<directory>"
-15. Evaluate Code: "evaluate_code", args: "code": "<full_code_string>"
-16. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
-17. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
-18. Execute Python File: "execute_python_file", args: "file": "<file>"
-19. Execute Shell Command, non-interactive commands only: "execute_shell", args: "command_line": "<command_line>".
-20. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
-21. Generate Image: "generate_image", args: "prompt": "<prompt>"
-22. Do Nothing: "do_nothing", args: ""
-
-RESOURCES:
-
-1. Internet access for searches and information gathering.
-2. Long Term memory management.
-3. GPT-3.5 powered Agents for delegation of simple tasks.
-4. File output.
-
-PERFORMANCE EVALUATION:
-
-1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
-2. Constructively self-criticize your big-picture behavior constantly.
-3. Reflect on past decisions and strategies to refine your approach.
-4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.
-
-You should only respond in JSON format as described below
-
-RESPONSE FORMAT:
-{
-    "thoughts": {
-        "text": "thought",
-        "reasoning": "reasoning",
-        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
-        "criticism": "constructive self-criticism",
-        "speak": "thoughts summary to say to user"
-    },
-    "command": {
-        "name": "command name",
-        "args": {
-            "arg name": "value"
-        }
-    }
-}
-
-Ensure the response can be parsed by Python json.loads
@@ -3,7 +3,6 @@ import random
 import commands as cmd
 import utils
 from memory import get_memory, get_supported_memory_backends
-import data
 import chat
 from colorama import Fore, Style
 from spinner import Spinner
@@ -17,6 +16,7 @@ import yaml
 import argparse
 from logger import logger
 import logging
+from prompt import get_prompt

 cfg = Config()

@@ -171,8 +171,8 @@ def load_variables(config_file="config.yaml"):
     with open(config_file, "w") as file:
         documents = yaml.dump(config, file)

-    prompt = data.load_prompt()
-    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as a LLM and pursue simple strategies with no legal complications."""
+    prompt = get_prompt()
+    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

     # Construct full prompt
     full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
scripts/prompt.py (new file, 63 lines)
@@ -0,0 +1,63 @@
+from promptgenerator import PromptGenerator
+
+
+def get_prompt():
+    """
+    This function generates a prompt string that includes various constraints, commands, resources, and performance evaluations.
+
+    Returns:
+        str: The generated prompt string.
+    """
+
+    # Initialize the PromptGenerator object
+    prompt_generator = PromptGenerator()
+
+    # Add constraints to the PromptGenerator object
+    prompt_generator.add_constraint("~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.")
+    prompt_generator.add_constraint("If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.")
+    prompt_generator.add_constraint("No user assistance")
+    prompt_generator.add_constraint('Exclusively use the commands listed in double quotes e.g. "command name"')
+
+    # Define the command list
+    commands = [
+        ("Google Search", "google", {"input": "<search>"}),
+        ("Browse Website", "browse_website", {"url": "<url>", "question": "<what_you_want_to_find_on_website>"}),
+        ("Start GPT Agent", "start_agent", {"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"}),
+        ("Message GPT Agent", "message_agent", {"key": "<key>", "message": "<message>"}),
+        ("List GPT Agents", "list_agents", {}),
+        ("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
+        ("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
+        ("Read file", "read_file", {"file": "<file>"}),
+        ("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"}),
+        ("Delete file", "delete_file", {"file": "<file>"}),
+        ("Search Files", "search_files", {"directory": "<directory>"}),
+        ("Evaluate Code", "evaluate_code", {"code": "<full_code_string>"}),
+        ("Get Improved Code", "improve_code", {"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"}),
+        ("Write Tests", "write_tests", {"code": "<full_code_string>", "focus": "<list_of_focus_areas>"}),
+        ("Execute Python File", "execute_python_file", {"file": "<file>"}),
+        ("Execute Shell Command, non-interactive commands only", "execute_shell", {"command_line": "<command_line>"}),
+        ("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
+        ("Generate Image", "generate_image", {"prompt": "<prompt>"}),
+        ("Do Nothing", "do_nothing", {}),
+    ]
+
+    # Add commands to the PromptGenerator object
+    for command_label, command_name, args in commands:
+        prompt_generator.add_command(command_label, command_name, args)
+
+    # Add resources to the PromptGenerator object
+    prompt_generator.add_resource("Internet access for searches and information gathering.")
+    prompt_generator.add_resource("Long Term memory management.")
+    prompt_generator.add_resource("GPT-3.5 powered Agents for delegation of simple tasks.")
+    prompt_generator.add_resource("File output.")
+
+    # Add performance evaluations to the PromptGenerator object
+    prompt_generator.add_performance_evaluation("Continuously review and analyze your actions to ensure you are performing to the best of your abilities.")
+    prompt_generator.add_performance_evaluation("Constructively self-criticize your big-picture behavior constantly.")
+    prompt_generator.add_performance_evaluation("Reflect on past decisions and strategies to refine your approach.")
+    prompt_generator.add_performance_evaluation("Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.")
+
+    # Generate the prompt string
+    prompt_string = prompt_generator.generate_prompt_string()
+
+    return prompt_string
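Since get_prompt() is meant to replace data/prompt.txt one-for-one, a quick parity check is to confirm each section heading survives the refactor. A hypothetical snippet, run from inside the scripts/ directory:

from prompt import get_prompt

prompt = get_prompt()
# generate_prompt_string() emits these four headings plus the JSON format.
for section in ("Constraints:", "Commands:", "Resources:", "Performance Evaluation:"):
    assert section in prompt, f"missing section: {section}"
print("all sections present")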
scripts/promptgenerator.py (new file, 129 lines)
@@ -0,0 +1,129 @@
+import json
+
+
+class PromptGenerator:
+    """
+    A class for generating custom prompt strings based on constraints, commands, resources, and performance evaluations.
+    """
+
+    def __init__(self):
+        """
+        Initialize the PromptGenerator object with empty lists of constraints, commands, resources, and performance evaluations.
+        """
+        self.constraints = []
+        self.commands = []
+        self.resources = []
+        self.performance_evaluation = []
+        self.response_format = {
+            "thoughts": {
+                "text": "thought",
+                "reasoning": "reasoning",
+                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
+                "criticism": "constructive self-criticism",
+                "speak": "thoughts summary to say to user"
+            },
+            "command": {
+                "name": "command name",
+                "args": {
+                    "arg name": "value"
+                }
+            }
+        }
+
+    def add_constraint(self, constraint):
+        """
+        Add a constraint to the constraints list.
+
+        Args:
+            constraint (str): The constraint to be added.
+        """
+        self.constraints.append(constraint)
+
+    def add_command(self, command_label, command_name, args=None):
+        """
+        Add a command to the commands list with a label, name, and optional arguments.
+
+        Args:
+            command_label (str): The label of the command.
+            command_name (str): The name of the command.
+            args (dict, optional): A dictionary containing argument names and their values. Defaults to None.
+        """
+        if args is None:
+            args = {}
+
+        command_args = {arg_key: arg_value for arg_key,
+                        arg_value in args.items()}
+
+        command = {
+            "label": command_label,
+            "name": command_name,
+            "args": command_args,
+        }
+
+        self.commands.append(command)
+
+    def _generate_command_string(self, command):
+        """
+        Generate a formatted string representation of a command.
+
+        Args:
+            command (dict): A dictionary containing command information.
+
+        Returns:
+            str: The formatted command string.
+        """
+        args_string = ', '.join(
+            f'"{key}": "{value}"' for key, value in command['args'].items())
+        return f'{command["label"]}: "{command["name"]}", args: {args_string}'
+
+    def add_resource(self, resource):
+        """
+        Add a resource to the resources list.
+
+        Args:
+            resource (str): The resource to be added.
+        """
+        self.resources.append(resource)
+
+    def add_performance_evaluation(self, evaluation):
+        """
+        Add a performance evaluation item to the performance_evaluation list.
+
+        Args:
+            evaluation (str): The evaluation item to be added.
+        """
+        self.performance_evaluation.append(evaluation)
+
+    def _generate_numbered_list(self, items, item_type='list'):
+        """
+        Generate a numbered list from given items based on the item_type.
+
+        Args:
+            items (list): A list of items to be numbered.
+            item_type (str, optional): The type of items in the list. Defaults to 'list'.
+
+        Returns:
+            str: The formatted numbered list.
+        """
+        if item_type == 'command':
+            return "\n".join(f"{i+1}. {self._generate_command_string(item)}" for i, item in enumerate(items))
+        else:
+            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
+
+    def generate_prompt_string(self):
+        """
+        Generate a prompt string based on the constraints, commands, resources, and performance evaluations.
+
+        Returns:
+            str: The generated prompt string.
+        """
+        formatted_response_format = json.dumps(self.response_format, indent=4)
+        prompt_string = (
+            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
+            f"Commands:\n{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
+            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
+            f"Performance Evaluation:\n{self._generate_numbered_list(self.performance_evaluation)}\n\n"
+            f"You should only respond in JSON format as described below \nResponse Format: \n{formatted_response_format} \nEnsure the response can be parsed by Python json.loads"
+        )
+
+        return prompt_string
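PromptGenerator carries no Auto-GPT-specific state, so it can be exercised on its own. A minimal sketch with made-up entries:

from promptgenerator import PromptGenerator

# Example entries below are illustrative, not taken from the diff.
generator = PromptGenerator()
generator.add_constraint("No user assistance")
generator.add_command("Read file", "read_file", {"file": "<file>"})
generator.add_resource("File output.")
generator.add_performance_evaluation("Reflect on past decisions.")

# Prints numbered Constraints/Commands/Resources/Performance Evaluation
# sections followed by the JSON response format.
print(generator.generate_prompt_string())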
tests/promptgenerator_tests.py (new file, 101 lines)
@@ -0,0 +1,101 @@
+# Import the required libraries for unit testing
+import unittest
+import sys
+import os
+
+# Add the path to the "scripts" directory to import the PromptGenerator module
+sys.path.append(os.path.abspath("../scripts"))
+from promptgenerator import PromptGenerator
+
+
+# Create a test class for the PromptGenerator, subclassed from unittest.TestCase
+class promptgenerator_tests(unittest.TestCase):
+
+    # Set up the initial state for each test method by creating an instance of PromptGenerator
+    def setUp(self):
+        self.generator = PromptGenerator()
+
+    # Test whether the add_constraint() method adds a constraint to the generator's constraints list
+    def test_add_constraint(self):
+        constraint = "Constraint1"
+        self.generator.add_constraint(constraint)
+        self.assertIn(constraint, self.generator.constraints)
+
+    # Test whether the add_command() method adds a command to the generator's commands list
+    def test_add_command(self):
+        command_label = "Command Label"
+        command_name = "command_name"
+        args = {"arg1": "value1", "arg2": "value2"}
+        self.generator.add_command(command_label, command_name, args)
+        command = {
+            "label": command_label,
+            "name": command_name,
+            "args": args,
+        }
+        self.assertIn(command, self.generator.commands)
+
+    # Test whether the add_resource() method adds a resource to the generator's resources list
+    def test_add_resource(self):
+        resource = "Resource1"
+        self.generator.add_resource(resource)
+        self.assertIn(resource, self.generator.resources)
+
+    # Test whether the add_performance_evaluation() method adds an evaluation to the generator's performance_evaluation list
+    def test_add_performance_evaluation(self):
+        evaluation = "Evaluation1"
+        self.generator.add_performance_evaluation(evaluation)
+        self.assertIn(evaluation, self.generator.performance_evaluation)
+
+    # Test whether the generate_prompt_string() method generates a prompt string with all the added constraints, commands, resources and evaluations
+    def test_generate_prompt_string(self):
+        constraints = ["Constraint1", "Constraint2"]
+        commands = [
+            {
+                "label": "Command1",
+                "name": "command_name1",
+                "args": {"arg1": "value1"},
+            },
+            {
+                "label": "Command2",
+                "name": "command_name2",
+                "args": {},
+            },
+        ]
+        resources = ["Resource1", "Resource2"]
+        evaluations = ["Evaluation1", "Evaluation2"]
+
+        # Add all the constraints, commands, resources, and evaluations to the generator
+        for constraint in constraints:
+            self.generator.add_constraint(constraint)
+        for command in commands:
+            self.generator.add_command(
+                command["label"], command["name"], command["args"])
+        for resource in resources:
+            self.generator.add_resource(resource)
+        for evaluation in evaluations:
+            self.generator.add_performance_evaluation(evaluation)
+
+        # Generate the prompt string and verify its correctness
+        prompt_string = self.generator.generate_prompt_string()
+        self.assertIsNotNone(prompt_string)
+        for constraint in constraints:
+            self.assertIn(constraint, prompt_string)
+        for command in commands:
+            self.assertIn(command["name"], prompt_string)
+
+            # Check for each key-value pair in the command args dictionary
+            for key, value in command["args"].items():
+                self.assertIn(f'"{key}": "{value}"', prompt_string)
+        for resource in resources:
+            self.assertIn(resource, prompt_string)
+        for evaluation in evaluations:
+            self.assertIn(evaluation, prompt_string)
+        self.assertIn("constraints", prompt_string.lower())
+        self.assertIn("commands", prompt_string.lower())
+        self.assertIn("resources", prompt_string.lower())
+        self.assertIn("performance evaluation", prompt_string.lower())
+
+
+# Run the tests when this script is executed
+if __name__ == '__main__':
+    unittest.main()
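The suite appends ../scripts to sys.path, so it assumes tests/ is the working directory; python promptgenerator_tests.py from inside tests/ runs it. An equivalent programmatic invocation, assuming a standard CPython setup:

import unittest

# Discover and run the module in the current directory (run from tests/).
suite = unittest.defaultTestLoader.discover(".", pattern="promptgenerator_tests.py")
unittest.TextTestRunner(verbosity=2).run(suite)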