Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2026-01-31 11:54:30 +01:00)
Make prompt parameters configurable (#3375)
Co-authored-by: Nicholas Tindle <nick@ntindle.com>
Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com>
@@ -13,6 +13,9 @@
 ## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
 # AI_SETTINGS_FILE=ai_settings.yaml
 
+## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
+# PROMPT_SETTINGS_FILE=prompt_settings.yaml
+
 ## AUTHORISE COMMAND KEY - Key to authorise commands
 # AUTHORISE_COMMAND_KEY=y
 ## EXIT_KEY - Key to exit AUTO-GPT
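As a quick illustration of how the new variable resolves, a minimal sketch using only the standard library (the lookup mirrors the `Config` default added further down in this diff):

```python
import os

# The override wins when set; otherwise the bundled filename is used,
# matching the Config default introduced later in this commit.
prompt_settings_file = os.getenv("PROMPT_SETTINGS_FILE", "prompt_settings.yaml")
print(prompt_settings_file)
```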
@@ -15,6 +15,11 @@ import click
     "-C",
     help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
 )
+@click.option(
+    "--prompt-settings",
+    "-P",
+    help="Specifies which prompt_settings.yaml file to use.",
+)
 @click.option(
     "-l",
     "--continuous-limit",
@@ -66,6 +71,7 @@ def main(
     continuous: bool,
     continuous_limit: int,
     ai_settings: str,
+    prompt_settings: str,
     skip_reprompt: bool,
     speak: bool,
     debug: bool,
@@ -91,6 +97,7 @@ def main(
         continuous,
         continuous_limit,
         ai_settings,
+        prompt_settings,
         skip_reprompt,
         speak,
         debug,
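A hedged smoke test for the new flag, assuming Click's standard testing utilities and that `main` is the command object defined in autogpt/cli.py:

```python
from click.testing import CliRunner

from autogpt.cli import main  # assumed import path for the command above

runner = CliRunner()
# --help short-circuits execution, so this only checks that the new
# --prompt-settings option is registered and appears in the usage text.
result = runner.invoke(main, ["--help"])
assert "--prompt-settings" in result.output
```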
@@ -38,6 +38,9 @@ class Config(metaclass=Singleton):
         self.disabled_command_categories = []
 
         self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
+        self.prompt_settings_file = os.getenv(
+            "PROMPT_SETTINGS_FILE", "prompt_settings.yaml"
+        )
         self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
         self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
         self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
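Because `Config` is built on a Singleton metaclass, the environment variable must be set before the first `Config()` call in the process; a sketch of that ordering, with a hypothetical filename:

```python
import os

os.environ["PROMPT_SETTINGS_FILE"] = "custom_prompts.yaml"  # hypothetical file

from autogpt.config.config import Config

cfg = Config()
assert cfg.prompt_settings_file == "custom_prompts.yaml"
# Later constructions should return the same cached instance, so changing
# the environment afterwards would have no effect.
assert Config() is cfg
```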
53 autogpt/config/prompt_config.py Normal file
@@ -0,0 +1,53 @@
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the PromptConfig class object that contains the configuration
for the prompt generator.
"""
import yaml
from colorama import Fore

from autogpt import utils
from autogpt.config.config import Config
from autogpt.logs import logger

CFG = Config()


class PromptConfig:
    """
    A class object that contains the configuration information for the prompt,
    which will be used by the prompt generator.

    Attributes:
        constraints (list): Constraints list for the prompt generator.
        resources (list): Resources list for the prompt generator.
        performance_evaluations (list): Performance evaluation list for the prompt generator.
    """

    def __init__(
        self,
        config_file: str = CFG.prompt_settings_file,
    ) -> None:
        """
        Initialize a class instance with parameters (constraints, resources,
        performance_evaluations) loaded from the given YAML file; if the file
        fails validation, log the error and exit.

        Parameters:
            config_file (str): Path to the prompt settings YAML file.

        Returns:
            None
        """
        # Validate file
        (validated, message) = utils.validate_yaml_file(config_file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        with open(config_file, encoding="utf-8") as file:
            config_params = yaml.load(file, Loader=yaml.FullLoader)

        self.constraints = config_params.get("constraints", [])
        self.resources = config_params.get("resources", [])
        self.performance_evaluations = config_params.get("performance_evaluations", [])
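Minimal usage sketch: `PromptConfig` can be pointed at any settings file directly, bypassing the singleton default. Since the file holds plain lists of strings, `yaml.safe_load` would parse it just as well; `FullLoader` presumably matches the loaders used elsewhere in the codebase.

```python
from autogpt.config.prompt_config import PromptConfig

# Load the settings file shipped at the repository root.
prompt_config = PromptConfig("prompt_settings.yaml")
for constraint in prompt_config.constraints:
    print("constraint:", constraint)
```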
@@ -14,6 +14,7 @@ def create_config(
     continuous: bool,
     continuous_limit: int,
     ai_settings_file: str,
+    prompt_settings_file: str,
     skip_reprompt: bool,
     speak: bool,
     debug: bool,
@@ -30,6 +31,7 @@ def create_config(
         continuous (bool): Whether to run in continuous mode
         continuous_limit (int): The number of times to run in continuous mode
         ai_settings_file (str): The path to the ai_settings.yaml file
+        prompt_settings_file (str): The path to the prompt_settings.yaml file
         skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
         speak (bool): Whether to enable speak mode
         debug (bool): Whether to enable debug mode
@@ -112,6 +114,19 @@ def create_config(
         CFG.ai_settings_file = file
         CFG.skip_reprompt = True
 
+    if prompt_settings_file:
+        file = prompt_settings_file
+
+        # Validate file
+        (validated, message) = utils.validate_yaml_file(file)
+        if not validated:
+            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
+            logger.double_check()
+            exit(1)
+
+        logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
+        CFG.prompt_settings_file = file
+
     if browser_name:
         CFG.selenium_web_browser = browser_name
 
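The `(validated, message)` unpacking above pins down the contract that `create_config` expects from `utils.validate_yaml_file`; a rough stand-in under that assumption, not the project's actual implementation:

```python
import yaml


def validate_yaml_file(file: str) -> tuple[bool, str]:
    """Hypothetical stand-in: (True, info) on success, (False, error) on failure."""
    try:
        with open(file, encoding="utf-8") as fp:
            yaml.load(fp, Loader=yaml.FullLoader)
    except FileNotFoundError:
        return (False, f"The file {file} wasn't found")
    except yaml.YAMLError as e:
        return (False, f"There was an issue while trying to read {file}: {e}")
    return (True, f"Successfully validated {file}!")
```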
@@ -27,6 +27,7 @@ def run_auto_gpt(
     continuous: bool,
     continuous_limit: int,
     ai_settings: str,
+    prompt_settings: str,
     skip_reprompt: bool,
     speak: bool,
     debug: bool,
@@ -50,6 +51,7 @@ def run_auto_gpt(
         continuous,
         continuous_limit,
         ai_settings,
+        prompt_settings,
         skip_reprompt,
         speak,
         debug,
@@ -2,6 +2,7 @@ from colorama import Fore
 
 from autogpt.config.ai_config import AIConfig
 from autogpt.config.config import Config
+from autogpt.config.prompt_config import PromptConfig
 from autogpt.llm import ApiManager
 from autogpt.logs import logger
 from autogpt.prompts.generator import PromptGenerator
@@ -27,46 +28,21 @@ def build_default_prompt_generator() -> PromptGenerator:
     # Initialize the PromptGenerator object
     prompt_generator = PromptGenerator()
 
+    # Initialize the PromptConfig object and load the file set in the main config (default: prompt_settings.yaml)
+    prompt_config = PromptConfig(CFG.prompt_settings_file)
+
     # Add constraints to the PromptGenerator object
-    prompt_generator.add_constraint(
-        "~4000 word limit for short term memory. Your short term memory is short, so"
-        " immediately save important information to files."
-    )
-    prompt_generator.add_constraint(
-        "If you are unsure how you previously did something or want to recall past"
-        " events, thinking about similar events will help you remember."
-    )
-    prompt_generator.add_constraint("No user assistance")
-    prompt_generator.add_constraint(
-        "Exclusively use the commands listed below e.g. command_name"
-    )
+    for constraint in prompt_config.constraints:
+        prompt_generator.add_constraint(constraint)
 
     # Add resources to the PromptGenerator object
-    prompt_generator.add_resource(
-        "Internet access for searches and information gathering."
-    )
-    prompt_generator.add_resource("Long Term memory management.")
-    prompt_generator.add_resource(
-        "GPT-3.5 powered Agents for delegation of simple tasks."
-    )
-    prompt_generator.add_resource("File output.")
+    for resource in prompt_config.resources:
+        prompt_generator.add_resource(resource)
 
     # Add performance evaluations to the PromptGenerator object
-    prompt_generator.add_performance_evaluation(
-        "Continuously review and analyze your actions to ensure you are performing to"
-        " the best of your abilities."
-    )
-    prompt_generator.add_performance_evaluation(
-        "Constructively self-criticize your big-picture behavior constantly."
-    )
-    prompt_generator.add_performance_evaluation(
-        "Reflect on past decisions and strategies to refine your approach."
-    )
-    prompt_generator.add_performance_evaluation(
-        "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
-        " the least number of steps."
-    )
-    prompt_generator.add_performance_evaluation("Write all code to a file.")
+    for performance_evaluation in prompt_config.performance_evaluations:
+        prompt_generator.add_performance_evaluation(performance_evaluation)
 
     return prompt_generator
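Put together, the defaults that used to be hardcoded now travel through the YAML file; an end-to-end sketch using the same imports and loops as the diff, with a hypothetical custom filename:

```python
from autogpt.config.prompt_config import PromptConfig
from autogpt.prompts.generator import PromptGenerator

prompt_config = PromptConfig("my_prompt_settings.yaml")  # hypothetical file
prompt_generator = PromptGenerator()

# Same population pattern as build_default_prompt_generator above.
for constraint in prompt_config.constraints:
    prompt_generator.add_constraint(constraint)
for resource in prompt_config.resources:
    prompt_generator.add_resource(resource)
for performance_evaluation in prompt_config.performance_evaluations:
    prompt_generator.add_performance_evaluation(performance_evaluation)
```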
@@ -23,10 +23,13 @@ Running with `--help` lists all the possible command line arguments you can pass
 Here are some common arguments you can use when running Auto-GPT:
 
 * Run Auto-GPT with a different AI Settings file
-
-    :::shell
-    ./run.sh --ai-settings <filename>
-
+    ``` shell
+    ./run.sh --ai-settings <filename>
+    ```
+* Run Auto-GPT with a different Prompt Settings file
+    ``` shell
+    ./run.sh --prompt-settings <filename>
+    ```
 * Specify a memory backend
 
     :::shell
19 prompt_settings.yaml Normal file
@@ -0,0 +1,19 @@
constraints: [
  '~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.',
  'If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.',
  'No user assistance',
  'Exclusively use the commands listed below e.g. command_name'
]
resources: [
  'Internet access for searches and information gathering.',
  'Long Term memory management.',
  'GPT-3.5 powered Agents for delegation of simple tasks.',
  'File output.'
]
performance_evaluations: [
  'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.',
  'Constructively self-criticize your big-picture behavior constantly.',
  'Reflect on past decisions and strategies to refine your approach.',
  'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.',
  'Write all code to a file.'
]
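For reference, the flow-style lists above parse into plain Python lists of strings, which is all `PromptConfig` expects; a quick check, run from the repository root:

```python
import yaml

with open("prompt_settings.yaml", encoding="utf-8") as fp:
    params = yaml.load(fp, Loader=yaml.FullLoader)

assert isinstance(params["constraints"], list)
assert len(params["constraints"]) == 4
assert params["resources"][0].startswith("Internet access")
```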
48 tests/test_prompt_config.py Normal file
@@ -0,0 +1,48 @@
from autogpt.config.prompt_config import PromptConfig

"""
Test cases for the PromptConfig class, which handles loading the prompt configuration
settings from a YAML file.
"""


def test_prompt_config_loading(tmp_path):
    """Test if the prompt configuration loads correctly"""

    yaml_content = """
constraints:
- A test constraint
- Another test constraint
- A third test constraint
resources:
- A test resource
- Another test resource
- A third test resource
performance_evaluations:
- A test performance evaluation
- Another test performance evaluation
- A third test performance evaluation
"""
    config_file = tmp_path / "test_prompt_settings.yaml"
    config_file.write_text(yaml_content)

    prompt_config = PromptConfig(config_file)

    assert len(prompt_config.constraints) == 3
    assert prompt_config.constraints[0] == "A test constraint"
    assert prompt_config.constraints[1] == "Another test constraint"
    assert prompt_config.constraints[2] == "A third test constraint"
    assert len(prompt_config.resources) == 3
    assert prompt_config.resources[0] == "A test resource"
    assert prompt_config.resources[1] == "Another test resource"
    assert prompt_config.resources[2] == "A third test resource"
    assert len(prompt_config.performance_evaluations) == 3
    assert prompt_config.performance_evaluations[0] == "A test performance evaluation"
    assert (
        prompt_config.performance_evaluations[1]
        == "Another test performance evaluation"
    )
    assert (
        prompt_config.performance_evaluations[2]
        == "A third test performance evaluation"
    )
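A possible companion test, not part of this commit: the failure path calls `exit(1)`, which surfaces to pytest as `SystemExit`, assuming `validate_yaml_file` reports a missing file as invalid.

```python
import pytest

from autogpt.config.prompt_config import PromptConfig


def test_prompt_config_missing_file(tmp_path):
    """A nonexistent settings file should abort via exit(1)."""
    with pytest.raises(SystemExit):
        PromptConfig(str(tmp_path / "does_not_exist.yaml"))
```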