Generate system prompt based on agent state

* Rename PromptConfig to AIDirectives

* Make PromptGenerator primarily responsible for generating prompt instead of AIConfig

* Refactor PromptGenerator

* Add `available` attribute to `Command` class, to determine availability based on `Agent` state
This commit is contained in:
Reinier van der Leer
2023-08-22 22:57:14 +02:00
parent 4e64519a26
commit 2520ec6e08
15 changed files with 266 additions and 371 deletions

View File

@@ -10,11 +10,13 @@ if TYPE_CHECKING:
from autogpt.models.command_registry import CommandRegistry
from autogpt.agents.utils.exceptions import InvalidAgentResponseError
from autogpt.config.ai_directives import AIDirectives
from autogpt.llm.base import ChatModelResponse, ChatSequence, Message
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs
from autogpt.llm.utils import count_message_tokens, create_chat_completion
from autogpt.memory.message_history import MessageHistory
from autogpt.models.agent_actions import ActionResult
from autogpt.prompts.generator import PromptGenerator
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
logger = logging.getLogger(__name__)
@@ -47,6 +49,13 @@ class BaseAgent(metaclass=ABCMeta):
self.command_registry = command_registry
"""The registry containing all commands available to the agent."""
self.prompt_generator = PromptGenerator(
ai_config=ai_config,
ai_directives=AIDirectives.from_file(config.prompt_settings_file),
command_registry=command_registry,
)
"""The prompt generator used for generating the system prompt."""
self.config = config
"""The applicable application configuration."""
@@ -74,12 +83,6 @@ class BaseAgent(metaclass=ABCMeta):
self.cycle_count = 0
"""The number of cycles that the agent has run since its initialization."""
self.system_prompt = ai_config.construct_full_prompt(config)
"""
The system prompt sets up the AI's personality and explains its goals,
available resources, and restrictions.
"""
llm_name = self.config.smart_llm if self.big_brain else self.config.fast_llm
self.llm = OPEN_AI_CHAT_MODELS[llm_name]
"""The LLM that the agent uses to think."""
@@ -95,9 +98,17 @@ class BaseAgent(metaclass=ABCMeta):
max_summary_tlength=summary_max_tlength or self.send_token_limit // 6,
)
# Support multi-inheritance
# Support multi-inheritance and mixins for subclasses
super(BaseAgent, self).__init__()
@property
def system_prompt(self) -> str:
    """
    The system prompt sets up the AI's personality and explains its goals,
    available resources, and restrictions.

    Recomputed on every access by delegating to the agent's PromptGenerator,
    so it always reflects the agent's current state.
    """
    return self.prompt_generator.construct_system_prompt(self)
def think(
self,
instruction: Optional[str] = None,

View File

@@ -155,7 +155,6 @@ def run_auto_gpt(
role=ai_role,
goals=ai_goals,
)
ai_config.command_registry = command_registry
# print(prompt)
# Initialize memory and make sure it is empty.

View File

@@ -1,9 +1,10 @@
from __future__ import annotations
import functools
from typing import TYPE_CHECKING, Any, Callable, Optional, TypedDict
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, TypedDict
if TYPE_CHECKING:
from autogpt.agents.base import BaseAgent
from autogpt.config import Config
from autogpt.models.command import Command, CommandOutput, CommandParameter
@@ -22,9 +23,10 @@ def command(
name: str,
description: str,
parameters: dict[str, CommandParameterSpec],
enabled: bool | Callable[[Config], bool] = True,
enabled: Literal[True] | Callable[[Config], bool] = True,
disabled_reason: Optional[str] = None,
aliases: list[str] = [],
available: Literal[True] | Callable[[BaseAgent], bool] = True,
) -> Callable[..., CommandOutput]:
"""The command decorator is used to create Command objects from ordinary functions."""
@@ -46,6 +48,7 @@ def command(
enabled=enabled,
disabled_reason=disabled_reason,
aliases=aliases,
available=available,
)
@functools.wraps(func)

View File

@@ -11,7 +11,6 @@ import yaml
from bs4 import BeautifulSoup
from pylatexenc.latex2text import LatexNodes2Text
logger = logging.getLogger(__name__)

View File

@@ -2,11 +2,13 @@
This module contains the configuration classes for AutoGPT.
"""
from .ai_config import AIConfig
from .ai_directives import AIDirectives
from .config import Config, ConfigBuilder, check_openai_api_key
__all__ = [
"check_openai_api_key",
"AIConfig",
"AIDirectives",
"Config",
"ConfigBuilder",
]

View File

@@ -1,20 +1,13 @@
"""A module that contains the AIConfig class object that contains the configuration"""
from __future__ import annotations
import platform
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Optional
import distro
import yaml
if TYPE_CHECKING:
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.generator import PromptGenerator
from .config import Config
@dataclass
class AIConfig:
"""
A class object that contains the configuration information for the AI
@@ -26,30 +19,10 @@ class AIConfig:
api_budget (float): The maximum dollar value for API calls (0.0 means infinite)
"""
def __init__(
self,
ai_name: str = "",
ai_role: str = "",
ai_goals: list[str] = [],
api_budget: float = 0.0,
) -> None:
"""
Initialize a class instance
Parameters:
ai_name (str): The name of the AI.
ai_role (str): The description of the AI's role.
ai_goals (list): The list of objectives the AI is supposed to complete.
api_budget (float): The maximum dollar value for API calls (0.0 means infinite)
Returns:
None
"""
self.ai_name = ai_name
self.ai_role = ai_role
self.ai_goals = ai_goals
self.api_budget = api_budget
self.prompt_generator: PromptGenerator | None = None
self.command_registry: CommandRegistry | None = None
ai_name: str = ""
ai_role: str = ""
ai_goals: list[str] = field(default_factory=list[str])
api_budget: float = 0.0
@staticmethod
def load(ai_settings_file: str | Path) -> "AIConfig":
@@ -101,75 +74,3 @@ class AIConfig:
}
with open(ai_settings_file, "w", encoding="utf-8") as file:
yaml.dump(config, file, allow_unicode=True)
def construct_full_prompt(
    self, config: Config, prompt_generator: Optional[PromptGenerator] = None
) -> str:
    """
    Construct the full initial system prompt for the AI.

    Params:
        config (Config): The application configuration.
        prompt_generator (PromptGenerator, optional): Generator to use for the
            prompt body; falls back to `self.prompt_generator`, and failing
            that a freshly built default generator.

    Returns:
        full_prompt (str): A string containing the initial prompt for the user
        including the ai_name, ai_role, ai_goals, and api_budget.
    """
    # Imported locally to avoid a circular import with autogpt.prompts.prompt.
    from autogpt.prompts.prompt import build_default_prompt_generator

    # Resolve the generator to use and remember it on the instance.
    prompt_generator = prompt_generator or self.prompt_generator
    if prompt_generator is None:
        prompt_generator = build_default_prompt_generator(config)
    prompt_generator.command_registry = self.command_registry
    self.prompt_generator = prompt_generator

    # Give plugins a chance to amend the prompt generator.
    for plugin in config.plugins:
        if not plugin.can_handle_post_prompt():
            continue
        prompt_generator = plugin.post_prompt(prompt_generator)

    # Construct full prompt: identity/role intro first.
    full_prompt_parts = [
        f"You are {self.ai_name}, {self.ai_role.rstrip('.')}.",
        "Your decisions must always be made independently without seeking "
        "user assistance. Play to your strengths as an LLM and pursue "
        "simple strategies with no legal complications.",
    ]

    if config.execute_local_commands:
        # add OS info to prompt
        os_name = platform.system()
        os_info = (
            platform.platform(terse=True)
            if os_name != "Linux"
            else distro.name(pretty=True)
        )
        full_prompt_parts.append(f"The OS you are running on is: {os_info}")

    # A positive budget becomes an extra constraint in the generated body.
    additional_constraints: list[str] = []
    if self.api_budget > 0.0:
        additional_constraints.append(
            f"It takes money to let you run. "
            f"Your API budget is ${self.api_budget:.3f}"
        )

    full_prompt_parts.append(
        prompt_generator.generate_prompt_string(
            additional_constraints=additional_constraints
        )
    )

    # Append the numbered goals section, if any goals are configured.
    if self.ai_goals:
        full_prompt_parts.append(
            "\n".join(
                [
                    "## Goals",
                    "For your task, you must fulfill the following goals:",
                    *[f"{i+1}. {goal}" for i, goal in enumerate(self.ai_goals)],
                ]
            )
        )

    return "\n\n".join(full_prompt_parts).strip("\n")

View File

@@ -0,0 +1,43 @@
from __future__ import annotations
import logging
from dataclasses import dataclass
import yaml
from autogpt.logs.helpers import request_user_double_check
from autogpt.utils import validate_yaml_file
logger = logging.getLogger(__name__)
@dataclass
class AIDirectives:
    """Basic directives that shape the AI's prompt.

    Attributes:
        constraints (list): A list of constraints that the AI should adhere to.
        resources (list): A list of resources that the AI can utilize.
        best_practices (list): A list of best practices that the AI should follow.
    """

    constraints: list[str]
    resources: list[str]
    best_practices: list[str]

    @staticmethod
    def from_file(prompt_settings_file: str) -> AIDirectives:
        """Load directives from the given YAML settings file.

        Exits the application if the file fails validation.
        """
        validated, message = validate_yaml_file(prompt_settings_file)
        if not validated:
            logger.error(message, extra={"title": "FAILED FILE VALIDATION"})
            request_user_double_check()
            exit(1)

        with open(prompt_settings_file, encoding="utf-8") as file:
            settings = yaml.load(file, Loader=yaml.FullLoader)

        return AIDirectives(
            constraints=settings.get("constraints", []),
            resources=settings.get("resources", []),
            best_practices=settings.get("best_practices", []),
        )

View File

@@ -1,50 +0,0 @@
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the PromptConfig class object that contains the configuration
"""
import logging
import yaml
from autogpt import utils
from autogpt.logs.helpers import request_user_double_check
logger = logging.getLogger(__name__)
class PromptConfig:
    """
    A class object that contains the configuration information for the prompt, which
    will be used by the prompt generator.

    Attributes:
        constraints (list): Constraints list for the prompt generator.
        resources (list): Resources list for the prompt generator.
        best_practices (list): Best practices list for the prompt generator.
    """

    def __init__(self, prompt_settings_file: str) -> None:
        """
        Initialize a class instance with parameters (constraints, resources,
        best_practices) loaded from the given YAML file if it exists; otherwise
        report the validation failure and exit.

        Params:
            prompt_settings_file (str): Path to the YAML file containing the
                prompt settings (constraints, resources, best_practices).

        Returns:
            None
        """
        # Validate file
        (validated, message) = utils.validate_yaml_file(prompt_settings_file)
        if not validated:
            logger.error(message, extra={"title": "FAILED FILE VALIDATION"})
            request_user_double_check()
            # The settings file is unusable; abort rather than continue with
            # an empty/partial prompt configuration.
            exit(1)

        with open(prompt_settings_file, encoding="utf-8") as file:
            config_params = yaml.load(file, Loader=yaml.FullLoader)
        # Missing keys fall back to empty lists rather than raising.
        self.constraints = config_params.get("constraints", [])
        self.resources = config_params.get("resources", [])
        self.best_practices = config_params.get("best_practices", [])

View File

@@ -1,8 +1,9 @@
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Optional
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional
if TYPE_CHECKING:
from autogpt.agents.base import BaseAgent
from autogpt.config import Config
from .command_parameter import CommandParameter
@@ -27,9 +28,10 @@ class Command:
description: str,
method: Callable[..., CommandOutput],
parameters: list[CommandParameter],
enabled: bool | Callable[[Config], bool] = True,
enabled: Literal[True] | Callable[[Config], bool] = True,
disabled_reason: Optional[str] = None,
aliases: list[str] = [],
available: Literal[True] | Callable[[BaseAgent], bool] = True,
):
self.name = name
self.description = description
@@ -38,15 +40,18 @@ class Command:
self.enabled = enabled
self.disabled_reason = disabled_reason
self.aliases = aliases
self.available = available
def __call__(self, *args, **kwargs) -> Any:
if hasattr(kwargs, "config") and callable(self.enabled):
self.enabled = self.enabled(kwargs["config"])
if not self.enabled:
def __call__(self, *args, agent: BaseAgent, **kwargs) -> Any:
if callable(self.enabled) and not self.enabled(agent.config):
if self.disabled_reason:
return f"Command '{self.name}' is disabled: {self.disabled_reason}"
return f"Command '{self.name}' is disabled"
return self.method(*args, **kwargs)
if callable(self.available) and not self.available(agent):
return f"Command '{self.name}' is not available"
return self.method(*args, **kwargs, agent=agent)
def __str__(self) -> str:
params = [

View File

@@ -5,9 +5,10 @@ import inspect
import logging
from dataclasses import dataclass, field
from types import ModuleType
from typing import TYPE_CHECKING, Any
from typing import TYPE_CHECKING, Any, Iterator
if TYPE_CHECKING:
from autogpt.agents.base import BaseAgent
from autogpt.config import Config
@@ -97,14 +98,30 @@ class CommandRegistry:
return command(**kwargs)
raise KeyError(f"Command '{command_name}' not found in registry")
def command_prompt(self) -> str:
def list_available_commands(self, agent: BaseAgent) -> Iterator[Command]:
"""Iterates over all registered commands and yields those that are available.
Params:
agent (BaseAgent): The agent that the commands will be checked against.
Yields:
Command: The next available command.
"""
Returns a string representation of all registered `Command` objects for use in a prompt
"""
commands_list = [
f"{idx + 1}. {str(cmd)}" for idx, cmd in enumerate(self.commands.values())
]
return "\n".join(commands_list)
for cmd in self.commands.values():
available = cmd.available
if callable(cmd.available):
available = cmd.available(agent)
if available:
yield cmd
# def command_specs(self) -> str:
# """Returns a technical declaration of all commands in the registry for use in a prompt"""
#
# Declaring functions or commands should be done in a model-specific way to achieve
# optimal results. For this reason, it should NOT be implemented here, but in an
# LLM provider module.
# MUST take command AVAILABILITY into account.
@staticmethod
def with_command_modules(modules: list[str], config: Config) -> CommandRegistry:
@@ -125,18 +142,13 @@ class CommandRegistry:
new_registry.import_command_module(command_module)
# Unregister commands that are incompatible with the current config
incompatible_commands: list[Command] = []
for command in new_registry.commands.values():
for command in [c for c in new_registry.commands.values()]:
if callable(command.enabled) and not command.enabled(config):
command.enabled = False
incompatible_commands.append(command)
for command in incompatible_commands:
new_registry.unregister(command)
logger.debug(
f"Unregistering incompatible command: {command.name}, "
f"reason - {command.disabled_reason or 'Disabled by current config.'}"
)
new_registry.unregister(command)
logger.debug(
f"Unregistering incompatible command '{command.name}':"
f" \"{command.disabled_reason or 'Disabled by current config.'}\""
)
return new_registry

View File

@@ -1,10 +1,15 @@
""" A module for generating custom prompt strings."""
from __future__ import annotations
import platform
from dataclasses import dataclass
from typing import TYPE_CHECKING, Callable, Optional
import distro
if TYPE_CHECKING:
from autogpt.agents.base import BaseAgent
from autogpt.config import AIConfig, AIDirectives, Config
from autogpt.models.command_registry import CommandRegistry
@@ -14,6 +19,28 @@ class PromptGenerator:
resources, and performance evaluations.
"""
ai_config: AIConfig
best_practices: list[str]
constraints: list[str]
resources: list[str]
commands: list[Command]
command_registry: CommandRegistry
def __init__(
self,
ai_config: AIConfig,
ai_directives: AIDirectives,
command_registry: CommandRegistry,
):
self.ai_config = ai_config
self.best_practices = ai_directives.best_practices
self.constraints = ai_directives.constraints
self.resources = ai_directives.resources
self.commands = []
self.command_registry = command_registry
@dataclass
class Command:
label: str
@@ -28,24 +55,11 @@ class PromptGenerator:
)
return f'{self.label}: "{self.name}", params: ({params_string})'
constraints: list[str]
commands: list[Command]
resources: list[str]
best_practices: list[str]
command_registry: CommandRegistry | None
def __init__(self):
self.constraints = []
self.commands = []
self.resources = []
self.best_practices = []
self.command_registry = None
def add_constraint(self, constraint: str) -> None:
"""
Add a constraint to the constraints list.
Args:
Params:
constraint (str): The constraint to be added.
"""
self.constraints.append(constraint)
@@ -63,7 +77,7 @@ class PromptGenerator:
*Should only be used by plugins.* Native commands should be added
directly to the CommandRegistry.
Args:
Params:
command_label (str): The label of the command.
command_name (str): The name of the command.
params (dict, optional): A dictionary containing argument names and their
@@ -85,7 +99,7 @@ class PromptGenerator:
"""
Add a resource to the resources list.
Args:
Params:
resource (str): The resource to be added.
"""
self.resources.append(resource)
@@ -94,7 +108,7 @@ class PromptGenerator:
"""
Add an item to the list of best practices.
Args:
Params:
best_practice (str): The best practice item to be added.
"""
self.best_practices.append(best_practice)
@@ -103,7 +117,7 @@ class PromptGenerator:
"""
Generate a numbered list containing the given items.
Args:
Params:
items (list): A list of items to be numbered.
start_at (int, optional): The number to start the sequence with; defaults to 1.
@@ -112,19 +126,119 @@ class PromptGenerator:
"""
return "\n".join(f"{i}. {item}" for i, item in enumerate(items, start_at))
def generate_prompt_string(
def construct_system_prompt(self, agent: BaseAgent) -> str:
    """Constructs a system prompt containing the most important information for the AI.

    Params:
        agent: The agent for which the system prompt is being constructed.

    Returns:
        str: The constructed system prompt.
    """
    # Let plugins amend this generator before the prompt is assembled.
    for plugin in agent.config.plugins:
        if plugin.can_handle_post_prompt():
            plugin.post_prompt(self)

    # Assemble the prompt from its individual sections.
    sections: list[str] = []
    sections.extend(self._generate_intro_prompt())
    sections.append(self._generate_os_info(agent.config))
    sections.append(
        self._generate_body(
            agent=agent,
            additional_constraints=[self._generate_budget_info()],
        )
    )
    sections.append(self._generate_goals_info())

    # Join non-empty sections together into paragraph format.
    return "\n\n".join(section for section in sections if section).strip("\n")
def _generate_intro_prompt(self) -> list[str]:
    """Generates the introduction part of the prompt.

    Returns:
        list[str]: A list of strings forming the introduction part of the prompt.
    """
    identity = f"You are {self.ai_config.ai_name}, {self.ai_config.ai_role.rstrip('.')}."
    independence = (
        "Your decisions must always be made independently without seeking "
        "user assistance. Play to your strengths as an LLM and pursue "
        "simple strategies with no legal complications."
    )
    return [identity, independence]
def _generate_os_info(self, config: Config) -> str:
    """Generates the OS information part of the prompt.

    Params:
        config (Config): The configuration object.

    Returns:
        str: The OS information part of the prompt, or "" when local command
            execution is disabled.
    """
    if not config.execute_local_commands:
        return ""

    # On Linux, distro gives a prettier name than platform.platform().
    if platform.system() == "Linux":
        os_info = distro.name(pretty=True)
    else:
        os_info = platform.platform(terse=True)
    return f"The OS you are running on is: {os_info}"
def _generate_budget_info(self) -> str:
    """Generates the budget information part of the prompt.

    Returns:
        str: The budget information part of the prompt, or "" if no budget is set.
    """
    budget = self.ai_config.api_budget
    if budget <= 0.0:
        return ""
    return (
        f"It takes money to let you run. "
        f"Your API budget is ${budget:.3f}"
    )
def _generate_goals_info(self) -> str:
    """Generates the goals information part of the prompt.

    Returns:
        str: The goals information part of the prompt, or "" if no goals are set.
    """
    goals = self.ai_config.ai_goals
    if not goals:
        return ""

    numbered_goals = [f"{i+1}. {goal}" for i, goal in enumerate(goals)]
    return "\n".join(
        [
            "## Goals",
            "For your task, you must fulfill the following goals:",
            *numbered_goals,
        ]
    )
def _generate_body(
self,
agent: BaseAgent,
*,
additional_constraints: list[str] = [],
additional_resources: list[str] = [],
additional_best_practices: list[str] = [],
) -> str:
"""
Generate a prompt string based on the constraints, commands, resources,
and best practices.
Generates a prompt section containing the constraints, commands, resources,
and best practices.
Params:
agent: The agent for which the prompt string is being generated.
additional_constraints: Additional constraints to be included in the prompt string.
additional_resources: Additional resources to be included in the prompt string.
additional_best_practices: Additional best practices to be included in the prompt string.
Returns:
str: The generated prompt string.
str: The generated prompt section.
"""
return (
@@ -136,18 +250,24 @@ class PromptGenerator:
f"{self._generate_numbered_list(self.resources + additional_resources)}\n\n"
"## Commands\n"
"You have access to the following commands:\n"
f"{self._generate_commands()}\n\n"
f"{self.list_commands(agent)}\n\n"
"## Best practices\n"
f"{self._generate_numbered_list(self.best_practices + additional_best_practices)}"
)
def _generate_commands(self) -> str:
def list_commands(self, agent: BaseAgent) -> str:
"""Lists the commands available to the agent.
Params:
agent: The agent for which the commands are being listed.
Returns:
str: A string containing a numbered list of commands.
"""
command_strings = []
if self.command_registry:
command_strings += [
str(cmd)
for cmd in self.command_registry.commands.values()
if cmd.enabled
str(cmd) for cmd in self.command_registry.list_available_commands(agent)
]
# Add commands from plugins etc.

View File

@@ -1,39 +1,5 @@
from autogpt.config.config import Config
from autogpt.config.prompt_config import PromptConfig
from autogpt.prompts.generator import PromptGenerator
DEFAULT_TRIGGERING_PROMPT = (
"Determine exactly one command to use based on the given goals "
"and the progress you have made so far, "
"and respond using the JSON schema specified previously:"
)
def build_default_prompt_generator(config: Config) -> PromptGenerator:
    """
    Build a PromptGenerator pre-populated with the constraints, resources and
    best practices loaded from the prompt settings file set in `config`.

    Returns:
        PromptGenerator: the populated prompt generator.
    """
    generator = PromptGenerator()

    # Load the prompt settings file configured in the main config
    # (default: prompts_settings.yaml).
    settings = PromptConfig(config.prompt_settings_file)

    # Transfer each directive category into the generator.
    for constraint in settings.constraints:
        generator.add_constraint(constraint)
    for resource in settings.resources:
        generator.add_resource(resource)
    for best_practice in settings.best_practices:
        generator.add_best_practice(best_practice)

    return generator

View File

@@ -172,22 +172,6 @@ def test_call_nonexistent_command():
registry.call("nonexistent_command", arg1=1, arg2="test")
def test_get_command_prompt():
    """Test that the command prompt is correctly formatted."""
    registry = CommandRegistry()
    example = Command(
        name="example",
        description="Example command",
        method=example_command_method,
        parameters=PARAMETERS,
    )
    registry.register(example)

    prompt = registry.command_prompt()
    # The parameter signature must be rendered into the prompt.
    assert f"(arg1: int, arg2: Optional[str])" in prompt
def test_import_mock_commands_module():
"""Test that the registry can import a module with mock command plugins."""
registry = CommandRegistry()

View File

@@ -1,4 +1,4 @@
from autogpt.config.prompt_config import PromptConfig
from autogpt.config.ai_directives import AIDirectives
"""
Test cases for the PromptConfig class, which handles loading the prompts configuration
@@ -26,7 +26,7 @@ best_practices:
prompt_settings_file = tmp_path / "test_prompt_settings.yaml"
prompt_settings_file.write_text(yaml_content)
prompt_config = PromptConfig(prompt_settings_file)
prompt_config = AIDirectives.from_file(prompt_settings_file)
assert len(prompt_config.constraints) == 3
assert prompt_config.constraints[0] == "A test constraint"

View File

@@ -1,100 +0,0 @@
from autogpt.prompts.generator import PromptGenerator
def test_add_constraint():
    """
    Test if the add_constraint() method adds a constraint to the generator's constraints list.
    """
    generator = PromptGenerator()
    new_constraint = "Constraint1"
    generator.add_constraint(new_constraint)
    assert new_constraint in generator.constraints
def test_add_command():
    """
    Test if the add_command() method adds a command to the generator's commands list.
    """
    generator = PromptGenerator()
    label, name = "Command Label", "command_name"
    params = {"arg1": "value1", "arg2": "value2"}

    generator.add_command(label, name, params)

    expected = {
        "label": label,
        "name": name,
        "params": params,
        "function": None,
    }
    assert generator.commands[0].__dict__ == expected
def test_add_resource():
    """
    Test if the add_resource() method adds a resource to the generator's resources list.
    """
    generator = PromptGenerator()
    new_resource = "Resource1"
    generator.add_resource(new_resource)
    assert new_resource in generator.resources
def test_add_best_practice():
    """
    Test if the add_best_practice() method adds a best practice to the generator's
    best_practices list.
    """
    generator = PromptGenerator()
    new_practice = "Practice1"
    generator.add_best_practice(new_practice)
    assert new_practice in generator.best_practices
def test_generate_prompt_string():
    """
    Test if the generate_prompt_string() method generates a prompt string with all the added
    constraints, commands, resources, and evaluations.
    """
    # Test data
    constraints = ["Constraint1", "Constraint2"]
    commands = [
        {"label": "Command1", "name": "command_name1", "params": {"arg1": "value1"}},
        {"label": "Command2", "name": "command_name2", "params": {}},
    ]
    resources = ["Resource1", "Resource2"]
    evaluations = ["Evaluation1", "Evaluation2"]

    # Populate the generator with the test data.
    generator = PromptGenerator()
    for constraint in constraints:
        generator.add_constraint(constraint)
    for command in commands:
        generator.add_command(command["label"], command["name"], command["params"])
    for resource in resources:
        generator.add_resource(resource)
    for evaluation in evaluations:
        generator.add_best_practice(evaluation)

    prompt_string = generator.generate_prompt_string()
    assert prompt_string is not None

    # Every constraint, command name, rendered param, resource, and evaluation
    # must appear somewhere in the generated prompt.
    for constraint in constraints:
        assert constraint in prompt_string
    for command in commands:
        assert command["name"] in prompt_string
        for key, value in command["params"].items():
            assert f'"{key}": "{value}"' in prompt_string
    for resource in resources:
        assert resource in prompt_string
    for evaluation in evaluations:
        assert evaluation in prompt_string