AutoGPT: extract core.prompting module out of core.planning

This commit is contained in:
Reinier van der Leer
2023-09-18 00:05:06 +02:00
parent b6fd54f599
commit d8f1d34345
13 changed files with 106 additions and 94 deletions

View File

@@ -1,7 +1,5 @@
"""The planning system organizes the Agent's activities."""
from autogpt.core.planning.schema import (
LanguageModelClassification,
LanguageModelPrompt,
Task,
TaskStatus,
TaskType,

View File

@@ -1,11 +1,3 @@
import abc
from autogpt.core.configuration import SystemConfiguration
from autogpt.core.planning.schema import (
LanguageModelClassification,
LanguageModelPrompt,
)
# class Planner(abc.ABC):
# """Manages the agent's planning and goal-setting by constructing language model prompts."""
#
@@ -57,20 +49,3 @@ from autogpt.core.planning.schema import (
#
# """
# ...
class PromptStrategy(abc.ABC):
    """Interface for objects that build prompts for a language model.

    A concrete strategy declares which class of model it targets, builds a
    prompt from caller-supplied keyword arguments, and parses the model's
    raw response content back into a dict.
    """

    # Default settings shipped by each concrete strategy (assigned in subclasses).
    default_configuration: SystemConfiguration

    @property
    @abc.abstractmethod
    def model_classification(self) -> LanguageModelClassification:
        """The model tier (fast vs. smart) this strategy is designed for."""
        ...

    @abc.abstractmethod
    def build_prompt(self, *_, **kwargs) -> LanguageModelPrompt:
        """Build the prompt; positional args are swallowed, kwargs drive content."""
        ...

    @abc.abstractmethod
    def parse_response_content(self, response_content: dict) -> dict:
        """Parse raw response content from the model into a structured dict."""
        ...

View File

@@ -0,0 +1,12 @@
from .initial_plan import (
InitialPlan,
InitialPlanConfiguration,
)
from .name_and_goals import (
NameAndGoals,
NameAndGoalsConfiguration,
)
from .next_ability import (
NextAbility,
NextAbilityConfiguration,
)

View File

@@ -1,14 +1,13 @@
import logging
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
from autogpt.core.planning.schema import Task, TaskType
from autogpt.core.prompting import PromptStrategy
from autogpt.core.prompting.schema import (
LanguageModelClassification,
LanguageModelPrompt,
Task,
TaskType,
)
from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
from autogpt.core.prompting.utils import json_loads, to_numbered_list
from autogpt.core.resource.model_providers import (
LanguageModelFunction,
LanguageModelMessage,

View File

@@ -1,12 +1,12 @@
import logging
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
from autogpt.core.prompting import PromptStrategy
from autogpt.core.prompting.schema import (
LanguageModelClassification,
LanguageModelPrompt,
)
from autogpt.core.planning.strategies.utils import json_loads
from autogpt.core.prompting.utils import json_loads
from autogpt.core.resource.model_providers import (
LanguageModelFunction,
LanguageModelMessage,
@@ -15,6 +15,7 @@ from autogpt.core.resource.model_providers import (
logger = logging.getLogger(__name__)
class NameAndGoalsConfiguration(SystemConfiguration):
model_classification: LanguageModelClassification = UserConfigurable()
system_prompt: str = UserConfigurable()

View File

@@ -1,13 +1,13 @@
import logging
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
from autogpt.core.prompting import PromptStrategy
from autogpt.core.prompting.schema import (
LanguageModelClassification,
LanguageModelPrompt,
Task,
)
from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
from autogpt.core.prompting.utils import json_loads, to_numbered_list
from autogpt.core.planning.schema import Task
from autogpt.core.resource.model_providers import (
LanguageModelFunction,
LanguageModelMessage,

View File

@@ -4,34 +4,6 @@ from typing import Optional
from pydantic import BaseModel, Field
from autogpt.core.ability.schema import AbilityResult
from autogpt.core.resource.model_providers.schema import (
LanguageModelFunction,
LanguageModelMessage,
)
class LanguageModelClassification(str, enum.Enum):
    """Functional tier of a language model.

    Used to decide which model should serve a given prompt: some tasks can
    run on a faster or cheaper model, while others need the most capable one.
    """

    FAST_MODEL = "fast_model"    # cheaper / lower-latency option
    SMART_MODEL = "smart_model"  # most capable option
class LanguageModelPrompt(BaseModel):
    """A prompt ready to be sent to a language model provider.

    Bundles the chat messages with any function specifications that should
    accompany them (functions default to an empty list).
    """

    messages: list[LanguageModelMessage]
    functions: list[LanguageModelFunction] = Field(default_factory=list)

    def __str__(self):
        # Render as "ROLE: content" paragraphs separated by blank lines.
        rendered = [
            f"{m.role.value.upper()}: {m.content}" for m in self.messages
        ]
        return "\n\n".join(rendered)
class TaskType(str, enum.Enum):

View File

@@ -10,12 +10,10 @@ from autogpt.core.configuration import (
SystemSettings,
UserConfigurable,
)
from autogpt.core.planning import strategies
from autogpt.core.planning.base import PromptStrategy
from autogpt.core.planning.schema import (
LanguageModelClassification,
Task,
)
from autogpt.core.planning import prompt_strategies
from autogpt.core.planning.schema import Task
from autogpt.core.prompting import PromptStrategy
from autogpt.core.prompting.schema import LanguageModelClassification
from autogpt.core.resource.model_providers import (
LanguageModelProvider,
LanguageModelResponse,
@@ -35,9 +33,9 @@ class LanguageModelConfiguration(SystemConfiguration):
class PromptStrategiesConfiguration(SystemConfiguration):
name_and_goals: strategies.NameAndGoalsConfiguration
initial_plan: strategies.InitialPlanConfiguration
next_ability: strategies.NextAbilityConfiguration
name_and_goals: prompt_strategies.NameAndGoalsConfiguration
initial_plan: prompt_strategies.InitialPlanConfiguration
next_ability: prompt_strategies.NextAbilityConfiguration
class PlannerConfiguration(SystemConfiguration):
@@ -73,9 +71,9 @@ class SimplePlanner(Configurable):
),
},
prompt_strategies=PromptStrategiesConfiguration(
name_and_goals=strategies.NameAndGoals.default_configuration,
initial_plan=strategies.InitialPlan.default_configuration,
next_ability=strategies.NextAbility.default_configuration,
name_and_goals=prompt_strategies.NameAndGoals.default_configuration,
initial_plan=prompt_strategies.InitialPlan.default_configuration,
next_ability=prompt_strategies.NextAbility.default_configuration,
),
),
)
@@ -96,13 +94,13 @@ class SimplePlanner(Configurable):
self._providers[model] = model_providers[model_config.provider_name]
self._prompt_strategies = {
"name_and_goals": strategies.NameAndGoals(
"name_and_goals": prompt_strategies.NameAndGoals(
**self._configuration.prompt_strategies.name_and_goals.dict()
),
"initial_plan": strategies.InitialPlan(
"initial_plan": prompt_strategies.InitialPlan(
**self._configuration.prompt_strategies.initial_plan.dict()
),
"next_ability": strategies.NextAbility(
"next_ability": prompt_strategies.NextAbility(
**self._configuration.prompt_strategies.next_ability.dict()
),
}

View File

@@ -1,12 +0,0 @@
from autogpt.core.planning.strategies.initial_plan import (
InitialPlan,
InitialPlanConfiguration,
)
from autogpt.core.planning.strategies.name_and_goals import (
NameAndGoals,
NameAndGoalsConfiguration,
)
from autogpt.core.planning.strategies.next_ability import (
NextAbility,
NextAbilityConfiguration,
)

View File

@@ -0,0 +1,8 @@
from .base import PromptStrategy
from .schema import LanguageModelClassification, LanguageModelPrompt
__all__ = [
"LanguageModelClassification",
"LanguageModelPrompt",
"PromptStrategy",
]

View File

@@ -0,0 +1,30 @@
import abc
from typing import Generic, TypeVar
from autogpt.core.configuration import SystemConfiguration
from .schema import (
LanguageModelClassification,
LanguageModelPrompt,
)
# Type of the keyword arguments consumed by build_prompt.
IN = TypeVar("IN", bound=dict)
# Type produced by parse_response_content.
OUT = TypeVar("OUT")


class PromptStrategy(abc.ABC, Generic[IN, OUT]):
    """Interface for prompt-building strategies, generic over the input
    kwargs type ``IN`` and the parsed-output type ``OUT``.

    A concrete strategy declares which class of model it targets, builds a
    prompt from caller-supplied keyword arguments, and parses the model's
    raw response content into ``OUT``.
    """

    # Default settings shipped by each concrete strategy (assigned in subclasses).
    default_configuration: SystemConfiguration

    @property
    @abc.abstractmethod
    def model_classification(self) -> LanguageModelClassification:
        """The model tier (fast vs. smart) this strategy is designed for."""
        ...

    @abc.abstractmethod
    # NOTE(review): `**kwargs: IN` annotates each kwarg *value* as IN; if the
    # intent is "kwargs form a dict of type IN", TypedDict + Unpack (or
    # ParamSpec) would express that — confirm intended semantics.
    def build_prompt(self, *_, **kwargs: IN) -> LanguageModelPrompt:
        """Build the prompt; positional args are swallowed, kwargs drive content."""
        ...

    @abc.abstractmethod
    def parse_response_content(self, response_content: dict) -> OUT:
        """Parse raw response content from the model into ``OUT``."""
        ...

View File

@@ -0,0 +1,31 @@
import enum
from pydantic import BaseModel, Field
from autogpt.core.resource.model_providers.schema import (
LanguageModelFunction,
LanguageModelMessage,
)
class LanguageModelClassification(str, enum.Enum):
    """Functional description of a language model's tier.

    Determines what kind of model serves a given prompt; a faster or
    cheaper model is preferred whenever the task allows it.
    """

    # Cheaper / lower-latency model.
    FAST_MODEL = "fast_model"
    # Most capable model.
    SMART_MODEL = "smart_model"
class LanguageModelPrompt(BaseModel):
    """A fully assembled prompt for a language model provider.

    Carries the ordered chat messages plus any function specifications to
    send along with them (none by default).
    """

    messages: list[LanguageModelMessage]
    functions: list[LanguageModelFunction] = Field(default_factory=list)

    def __str__(self):
        # One "ROLE: content" paragraph per message, blank-line separated.
        paragraphs = (f"{m.role.value.upper()}: {m.content}" for m in self.messages)
        return "\n\n".join(paragraphs)