feat(agent/serve): Add TaskID and UserID headers to outgoing LLM requests

- Update `AgentProtocolServer` to include `X-AP-TaskID` and `X-AutoGPT-UserID` headers in outgoing requests for Agent Protocol tasks.
- Modify `ModelProvider` and `OpenAIProvider` to allow configuring extra headers to be added to all outgoing requests.
- Fix the type of the `task_id` parameter in `AgentProtocolServer.get_task`
This commit is contained in:
Reinier van der Leer
2023-12-08 13:41:37 +01:00
parent f734bdb314
commit 6b19b78f87
4 changed files with 51 additions and 17 deletions

View File

@@ -1,3 +1,4 @@
import copy
import logging
import os
import pathlib
@@ -115,16 +116,16 @@ class AgentProtocolServer:
"""
Create a task for the agent.
"""
logger.debug(f"Creating agent for task: '{task_request.input}'")
task_agent = await generate_agent_for_task(
task=task_request.input,
app_config=self.app_config,
llm_provider=self.llm_provider,
)
task = await self.db.create_task(
input=task_request.input,
additional_input=task_request.additional_input,
)
logger.debug(f"Creating agent for task: '{task.input}'")
task_agent = await generate_agent_for_task(
task=task.input,
app_config=self.app_config,
llm_provider=self._get_task_llm_provider(task),
)
agent_id = task_agent.state.agent_id = task_agent_id(task.task_id)
logger.debug(f"New agent ID: {agent_id}")
task_agent.attach_fs(self.app_config.app_data_dir / "agents" / agent_id)
@@ -140,7 +141,7 @@ class AgentProtocolServer:
response = TaskListResponse(tasks=tasks, pagination=pagination)
return response
async def get_task(self, task_id: int) -> Task:
async def get_task(self, task_id: str) -> Task:
"""
Get a task by ID.
"""
@@ -164,10 +165,11 @@ class AgentProtocolServer:
logger.debug(f"Creating a step for task with ID: {task_id}...")
# Restore Agent instance
task = await self.get_task(task_id)
agent = configure_agent_with_state(
state=self.agent_manager.retrieve_state(task_agent_id(task_id)),
app_config=self.app_config,
llm_provider=self.llm_provider,
llm_provider=self._get_task_llm_provider(task),
)
# According to the Agent Protocol spec, the first execute_step request contains
@@ -405,6 +407,19 @@ class AgentProtocolServer:
workspace.initialize()
return workspace
def _get_task_llm_provider(self, task: Task) -> ChatModelProvider:
"""
Configures the LLM provider with headers to link outgoing requests to the task.
"""
task_llm_provider = copy.deepcopy(self.llm_provider)
_extra_request_headers = task_llm_provider._configuration.extra_request_headers
_extra_request_headers["X-AP-TaskID"] = task.task_id
if task.additional_input and (user_id := task.additional_input.get("user_id")):
_extra_request_headers["X-AutoGPT-UserID"] = user_id
return task_llm_provider
def task_agent_id(task_id: str | int) -> str:
    """Derive the agent ID under which the agent for a given task is stored."""
    return "AutoGPT-{}".format(task_id)

View File

@@ -13,11 +13,7 @@ import yaml
from openai.error import APIError, RateLimitError
from pydantic import SecretStr
from autogpt.core.configuration import (
Configurable,
SystemConfiguration,
UserConfigurable,
)
from autogpt.core.configuration import Configurable, UserConfigurable
from autogpt.core.resource.model_providers.schema import (
AssistantChatMessageDict,
AssistantToolCallDict,
@@ -31,6 +27,7 @@ from autogpt.core.resource.model_providers.schema import (
EmbeddingModelProvider,
EmbeddingModelResponse,
ModelProviderBudget,
ModelProviderConfiguration,
ModelProviderCredentials,
ModelProviderName,
ModelProviderService,
@@ -167,8 +164,8 @@ OPEN_AI_MODELS = {
}
class OpenAIConfiguration(SystemConfiguration):
retries_per_request: int = UserConfigurable()
class OpenAIConfiguration(ModelProviderConfiguration):
    """OpenAI-specific provider configuration.

    Currently adds nothing of its own: `retries_per_request` and
    `extra_request_headers` are inherited from `ModelProviderConfiguration`.
    """
    pass
class OpenAICredentials(ModelProviderCredentials):
@@ -268,6 +265,8 @@ class OpenAIProvider(
),
)
_configuration: OpenAIConfiguration
def __init__(
self,
settings: OpenAISettings,
@@ -447,6 +446,12 @@ class OpenAIProvider(
# Provide compatibility with older models
_functions_compat_fix_kwargs(functions, completion_kwargs)
if extra_headers := self._configuration.extra_request_headers:
if completion_kwargs.get("headers"):
completion_kwargs["headers"].update(extra_headers)
else:
completion_kwargs["headers"] = extra_headers.copy()
return completion_kwargs
def _get_embedding_kwargs(
@@ -470,6 +475,12 @@ class OpenAIProvider(
**self._credentials.unmasked(),
}
if extra_headers := self._configuration.extra_request_headers:
if embedding_kwargs.get("headers"):
embedding_kwargs["headers"].update(extra_headers)
else:
embedding_kwargs["headers"] = extra_headers.copy()
return embedding_kwargs
def __repr__(self):

View File

@@ -13,7 +13,7 @@ from typing import (
from pydantic import BaseModel, Field, SecretStr, validator
from autogpt.core.configuration import UserConfigurable
from autogpt.core.configuration import SystemConfiguration, UserConfigurable
from autogpt.core.resource.schema import (
Embedding,
ProviderBudget,
@@ -163,6 +163,11 @@ class ModelResponse(BaseModel):
model_info: ModelInfo
class ModelProviderConfiguration(SystemConfiguration):
    """Configuration shared by all model providers."""
    # Number of retries for a request to the model API.
    retries_per_request: int = UserConfigurable()
    # Headers merged into every outgoing request, e.g. to link
    # requests to an Agent Protocol task and user.
    extra_request_headers: dict[str, str] = Field(default_factory=dict)
class ModelProviderCredentials(ProviderCredentials):
"""Credentials for a model provider."""
@@ -217,6 +222,7 @@ class ModelProviderBudget(ProviderBudget):
class ModelProviderSettings(ProviderSettings):
    """Bundle of configuration, credentials, and budget for a model provider."""
    resource_type: ResourceType = ResourceType.MODEL
    configuration: ModelProviderConfiguration
    credentials: ModelProviderCredentials
    budget: ModelProviderBudget
@@ -226,6 +232,8 @@ class ModelProvider(abc.ABC):
default_settings: ClassVar[ModelProviderSettings]
_configuration: ModelProviderConfiguration
@abc.abstractmethod
def count_tokens(self, text: str, model_name: str) -> int:
...

View File

@@ -331,7 +331,7 @@ benchmark = ["agbenchmark @ git+https://github.com/Significant-Gravitas/AutoGPT.
type = "git"
url = "https://github.com/Significant-Gravitas/AutoGPT.git"
reference = "HEAD"
resolved_reference = "20cf4cce9223565ab5637228e37dc2ddf94ec73e"
resolved_reference = "f734bdb3142f42e0acd7bc2305e5583ce832e625"
subdirectory = "autogpts/forge"
[[package]]