Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2026-01-17 21:14:21 +01:00)
Rough sketching out of a hello world using our refactored autogpt library. See the tracking issue here: #4770.

# Run instructions

There are two client applications for Auto-GPT included.

## CLI Application

🌟 **This is the reference application I'm working with for now** 🌟

The first app is a straight CLI application. I have not yet done anything to port the friendly display logic from `logger.typewriter_log`.

- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_app/main.py)

To run, you first need a settings file. Run

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings
```

where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine. This will write a file called `default_agent_settings.yml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` (and create the `auto-gpt` directory in your user directory if it doesn't exist).

At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API key to run the model (a minimal example is sketched at the end of this description). You can then run Auto-GPT with

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run
```

to launch the interaction loop.

## CLI Web App

The second app is still a CLI, but it sets up a local webserver that the client application talks to rather than invoking calls to the Agent library code directly. This application is essentially a sketch at this point, as the folks who were driving it have had less time (and likely not enough clarity) to proceed.

- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_web_app/cli.py)
- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_web_app/client/client.py)
- [Server API](https://github.com/Significant-Gravitas/Auto-GPT/blob/re-arch/hello-world/autogpt/core/runner/cli_web_app/server/api.py)

To run, you still need to generate a default configuration. You can do

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py make-settings
```

It invokes the same command as the bare CLI app, so follow the instructions above about setting your API key. To run, do

```
python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py client
```

This will launch a webserver and then start the client CLI application to communicate with it.

⚠️ I am not actively developing this application. It is a very good place to jump in if you have web application design experience and are looking to get involved in the re-arch.

---------

Co-authored-by: David Wurtz <davidjwurtz@gmail.com>
Co-authored-by: Media <12145726+rihp@users.noreply.github.com>
Co-authored-by: Richard Beales <rich@richbeales.net>
Co-authored-by: Daryl Rodrigo <darylrodrigo@gmail.com>
Co-authored-by: Daryl Rodrigo <daryl@orkestro.com>
Co-authored-by: Swifty <craigswift13@gmail.com>
Co-authored-by: Nicholas Tindle <nick@ntindle.com>
Co-authored-by: Merwane Hamadi <merwanehamadi@gmail.com>
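For reference, a minimal sketch of the relevant part of `~/auto-gpt/default_agent_settings.yml` once your key is set. The full file generated by `make-settings` has many more keys; the `openai.credentials.api_key` nesting shown here is an assumption, and the key value is a placeholder:

```
openai:
  credentials:
    api_key: "sk-..."
```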
220 lines · 5.8 KiB · Python
import abc
import enum
from typing import Callable, ClassVar

from pydantic import BaseModel, Field, SecretStr, validator

from autogpt.core.configuration import UserConfigurable
from autogpt.core.resource.schema import (
    Embedding,
    ProviderBudget,
    ProviderCredentials,
    ProviderSettings,
    ProviderUsage,
    ResourceType,
)


class ModelProviderService(str, enum.Enum):
    """A ModelProviderService describes what kind of service the model provides."""

    EMBEDDING: str = "embedding"
    LANGUAGE: str = "language"
    TEXT: str = "text"


class ModelProviderName(str, enum.Enum):
    OPENAI: str = "openai"


class MessageRole(str, enum.Enum):
    USER = "user"
    SYSTEM = "system"
    ASSISTANT = "assistant"


class LanguageModelMessage(BaseModel):
    role: MessageRole
    content: str


class LanguageModelFunction(BaseModel):
    json_schema: dict


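# Illustrative sketch (not part of the original module): the structs above
# mirror the OpenAI chat format, so a prompt is just an ordered list of
# role/content messages.
def _example_chat_prompt() -> list[LanguageModelMessage]:
    return [
        LanguageModelMessage(
            role=MessageRole.SYSTEM,
            content="You are a helpful assistant.",
        ),
        LanguageModelMessage(role=MessageRole.USER, content="Say hello."),
    ]

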
class ModelProviderModelInfo(BaseModel):
    """Struct for model information.

    Would be lovely to eventually get this directly from APIs, but needs to be
    scraped from websites for now.

    """

    name: str
    service: ModelProviderService
    provider_name: ModelProviderName
    prompt_token_cost: float = 0.0
    completion_token_cost: float = 0.0


class ModelProviderModelResponse(BaseModel):
    """Standard response struct for a response from a model."""

    prompt_tokens_used: int
    completion_tokens_used: int
    model_info: ModelProviderModelInfo


class ModelProviderCredentials(ProviderCredentials):
    """Credentials for a model provider."""

    api_key: SecretStr | None = UserConfigurable(default=None)
    api_type: SecretStr | None = UserConfigurable(default=None)
    api_base: SecretStr | None = UserConfigurable(default=None)
    api_version: SecretStr | None = UserConfigurable(default=None)
    deployment_id: SecretStr | None = UserConfigurable(default=None)

    def unmasked(self) -> dict:
        return unmask(self)

    class Config:
        extra = "ignore"


def unmask(model: BaseModel) -> dict:
    """Return the model's fields with any SecretStr values revealed."""
    unmasked_fields = {}
    for field_name in model.__fields__:
        value = getattr(model, field_name)
        if isinstance(value, SecretStr):
            unmasked_fields[field_name] = value.get_secret_value()
        else:
            unmasked_fields[field_name] = value
    return unmasked_fields


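# Illustrative sketch (not part of the original module): pydantic masks
# SecretStr values in reprs and logs; unmask() recovers the raw strings so
# they can be handed to an API client. The key is a placeholder, and this
# assumes the ProviderCredentials base adds no required fields of its own.
def _example_unmask_credentials() -> dict:
    credentials = ModelProviderCredentials(api_key=SecretStr("sk-placeholder"))
    raw = credentials.unmasked()
    assert raw["api_key"] == "sk-placeholder"
    return raw

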
class ModelProviderUsage(ProviderUsage):
    """Usage for a particular model from a model provider."""

    completion_tokens: int = 0
    prompt_tokens: int = 0
    total_tokens: int = 0

    def update_usage(
        self,
        model_response: ModelProviderModelResponse,
    ) -> None:
        self.completion_tokens += model_response.completion_tokens_used
        self.prompt_tokens += model_response.prompt_tokens_used
        self.total_tokens += (
            model_response.completion_tokens_used + model_response.prompt_tokens_used
        )


class ModelProviderBudget(ProviderBudget):
    total_budget: float = UserConfigurable()
    total_cost: float
    remaining_budget: float
    usage: ModelProviderUsage

    def update_usage_and_cost(
        self,
        model_response: ModelProviderModelResponse,
    ) -> None:
        """Update the usage and cost of the provider."""
        model_info = model_response.model_info
        self.usage.update_usage(model_response)
        # Token costs are per 1000 tokens, hence the division.
        incremental_cost = (
            model_response.completion_tokens_used * model_info.completion_token_cost
            + model_response.prompt_tokens_used * model_info.prompt_token_cost
        ) / 1000.0
        self.total_cost += incremental_cost
        self.remaining_budget -= incremental_cost


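# Illustrative sketch (not part of the original module): budget arithmetic.
# With hypothetical prices of $0.0015 per 1K prompt tokens and $0.002 per 1K
# completion tokens, a call using 1000 prompt + 500 completion tokens costs
# (1000 * 0.0015 + 500 * 0.002) / 1000 = $0.0025. Assumes the ProviderBudget
# base adds no required fields of its own.
def _example_budget_update() -> ModelProviderBudget:
    budget = ModelProviderBudget(
        total_budget=10.0,
        total_cost=0.0,
        remaining_budget=10.0,
        usage=ModelProviderUsage(),
    )
    response = ModelProviderModelResponse(
        prompt_tokens_used=1000,
        completion_tokens_used=500,
        model_info=ModelProviderModelInfo(
            name="hypothetical-model",
            service=ModelProviderService.LANGUAGE,
            provider_name=ModelProviderName.OPENAI,
            prompt_token_cost=0.0015,
            completion_token_cost=0.002,
        ),
    )
    budget.update_usage_and_cost(response)
    # budget.total_cost is now ~0.0025 and budget.remaining_budget ~9.9975.
    return budget

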
class ModelProviderSettings(ProviderSettings):
    resource_type = ResourceType.MODEL
    credentials: ModelProviderCredentials
    budget: ModelProviderBudget


class ModelProvider(abc.ABC):
    """A ModelProvider abstracts the details of a particular provider of models."""

    defaults: ClassVar[ModelProviderSettings]

    @abc.abstractmethod
    def get_token_limit(self, model_name: str) -> int:
        ...

    @abc.abstractmethod
    def get_remaining_budget(self) -> float:
        ...


####################
# Embedding Models #
####################


class EmbeddingModelProviderModelInfo(ModelProviderModelInfo):
    """Struct for embedding model information."""

    # Override the inherited `service` field's default; the original assigned
    # to a `model_service` name that matches no field, so it had no effect.
    service = ModelProviderService.EMBEDDING
    embedding_dimensions: int


class EmbeddingModelProviderModelResponse(ModelProviderModelResponse):
    """Standard response struct for a response from an embedding model."""

    embedding: Embedding = Field(default_factory=list)

    # NOTE: pydantic's `validator` already wraps the function as a classmethod;
    # stacking a bare `@classmethod` on top stops the validator from being
    # registered, so it has been dropped here.
    @validator("completion_tokens_used")
    def _verify_no_completion_tokens_used(cls, v):
        if v > 0:
            raise ValueError("Embeddings should not have completion tokens used.")
        return v


class EmbeddingModelProvider(ModelProvider):
    @abc.abstractmethod
    async def create_embedding(
        self,
        text: str,
        model_name: str,
        embedding_parser: Callable[[Embedding], Embedding],
        **kwargs,
    ) -> EmbeddingModelProviderModelResponse:
        ...


###################
# Language Models #
###################


class LanguageModelProviderModelInfo(ModelProviderModelInfo):
    """Struct for language model information."""

    # Override the inherited `service` default (see the embedding note above).
    service = ModelProviderService.LANGUAGE
    max_tokens: int


class LanguageModelProviderModelResponse(ModelProviderModelResponse):
    """Standard response struct for a response from a language model."""

    content: dict | None = None


class LanguageModelProvider(ModelProvider):
    @abc.abstractmethod
    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: str,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        ...


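# Illustrative sketch (not part of the original module): a minimal concrete
# LanguageModelProvider, showing the shape a real implementation (e.g. an
# OpenAI-backed provider) might take. The echo behavior, fixed token limit,
# and model-info values below are all hypothetical placeholders.
class _EchoLanguageModelProvider(LanguageModelProvider):
    def __init__(self, budget: ModelProviderBudget) -> None:
        self._budget = budget

    def get_token_limit(self, model_name: str) -> int:
        # A real provider would look this up per model.
        return 4096

    def get_remaining_budget(self) -> float:
        return self._budget.remaining_budget

    async def create_language_completion(
        self,
        model_prompt: list[LanguageModelMessage],
        functions: list[LanguageModelFunction],
        model_name: str,
        completion_parser: Callable[[dict], dict],
        **kwargs,
    ) -> LanguageModelProviderModelResponse:
        # Echo the last message back instead of calling a real API.
        content = completion_parser({"content": model_prompt[-1].content})
        response = LanguageModelProviderModelResponse(
            prompt_tokens_used=0,
            completion_tokens_used=0,
            model_info=LanguageModelProviderModelInfo(
                name=model_name,
                service=ModelProviderService.LANGUAGE,
                provider_name=ModelProviderName.OPENAI,
                max_tokens=4096,
            ),
            content=content,
        )
        self._budget.update_usage_and_cost(response)
        return response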