mirror of
https://github.com/aljazceru/goose.git
synced 2025-12-18 14:44:21 +01:00
Support custom headers for openai provider (#1801)
This commit is contained in:
@@ -2,6 +2,7 @@ use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use reqwest::Client;
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
use std::time::Duration;
|
||||
|
||||
use super::base::{ConfigKey, Provider, ProviderMetadata, ProviderUsage, Usage};
|
||||
@@ -33,6 +34,7 @@ pub struct OpenAiProvider {
|
||||
organization: Option<String>,
|
||||
project: Option<String>,
|
||||
model: ModelConfig,
|
||||
custom_headers: Option<HashMap<String, String>>,
|
||||
}
|
||||
|
||||
impl Default for OpenAiProvider {
|
||||
@@ -54,6 +56,10 @@ impl OpenAiProvider {
|
||||
.unwrap_or_else(|_| "v1/chat/completions".to_string());
|
||||
let organization: Option<String> = config.get_param("OPENAI_ORGANIZATION").ok();
|
||||
let project: Option<String> = config.get_param("OPENAI_PROJECT").ok();
|
||||
let custom_headers: Option<HashMap<String, String>> = config
|
||||
.get_secret("OPENAI_CUSTOM_HEADERS")
|
||||
.ok()
|
||||
.map(parse_custom_headers);
|
||||
let timeout_secs: u64 = config.get_param("OPENAI_TIMEOUT").unwrap_or(600);
|
||||
let client = Client::builder()
|
||||
.timeout(Duration::from_secs(timeout_secs))
|
||||
@@ -67,6 +73,7 @@ impl OpenAiProvider {
|
||||
organization,
|
||||
project,
|
||||
model,
|
||||
custom_headers,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -92,6 +99,12 @@ impl OpenAiProvider {
|
||||
request = request.header("OpenAI-Project", project);
|
||||
}
|
||||
|
||||
if let Some(custom_headers) = &self.custom_headers {
|
||||
for (key, value) in custom_headers {
|
||||
request = request.header(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
let response = request.json(&payload).send().await?;
|
||||
|
||||
handle_response_openai_compat(response).await
|
||||
@@ -117,6 +130,7 @@ impl Provider for OpenAiProvider {
|
||||
ConfigKey::new("OPENAI_BASE_PATH", true, false, Some("v1/chat/completions")),
|
||||
ConfigKey::new("OPENAI_ORGANIZATION", false, false, None),
|
||||
ConfigKey::new("OPENAI_PROJECT", false, false, None),
|
||||
ConfigKey::new("OPENAI_CUSTOM_HEADERS", false, true, None),
|
||||
ConfigKey::new("OPENAI_TIMEOUT", false, false, Some("600")),
|
||||
],
|
||||
)
|
||||
@@ -156,3 +170,14 @@ impl Provider for OpenAiProvider {
|
||||
Ok((message, ProviderUsage::new(model, usage)))
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses the `OPENAI_CUSTOM_HEADERS` setting into a header map.
///
/// The input is a comma-separated list of `KEY=VALUE` pairs, e.g.
/// `"X-Header-A=abc,X-Header-B=def"`. Keys and values are trimmed of
/// surrounding whitespace. Malformed entries — segments with no `=`,
/// or with an empty key (which reqwest would reject as an invalid
/// header name) — are silently skipped rather than aborting startup.
/// Duplicate keys keep the last occurrence (HashMap insert semantics).
fn parse_custom_headers(s: String) -> HashMap<String, String> {
    s.split(',')
        .filter_map(|entry| {
            // split_once yields None for segments without '=', dropping them.
            let (key, value) = entry.split_once('=')?;
            let key = key.trim();
            // An empty header name is never valid; skip it instead of
            // producing a request that fails at send time.
            if key.is_empty() {
                return None;
            }
            Some((key.to_string(), value.trim().to_string()))
        })
        .collect()
}
|
||||
|
||||
@@ -27,7 +27,7 @@ Goose relies heavily on tool calling capabilities and currently works best with
|
||||
| [GCP Vertex AI](https://cloud.google.com/vertex-ai) | Google Cloud's Vertex AI platform, supporting Gemini and Claude models. **Credentials must be configured in advance. Follow the instructions at https://cloud.google.com/vertex-ai/docs/authentication.** | `GCP_PROJECT_ID`, `GCP_LOCATION` and optional `GCP_MAX_RETRIES` (6), `GCP_INITIAL_RETRY_INTERVAL_MS` (5000), `GCP_BACKOFF_MULTIPLIER` (2.0), `GCP_MAX_RETRY_INTERVAL_MS` (320_000). |
|
||||
| [Groq](https://groq.com/) | High-performance inference hardware and tools for LLMs. | `GROQ_API_KEY` |
|
||||
| [Ollama](https://ollama.com/) | Local model runner supporting Qwen, Llama, DeepSeek, and other open-source models. **Because this provider runs locally, you must first [download and run a model](/docs/getting-started/providers#local-llms-ollama).** | `OLLAMA_HOST` |
|
||||
| [OpenAI](https://platform.openai.com/api-keys) | Provides gpt-4o, o1, and other advanced language models. Also supports OpenAI-compatible endpoints (e.g., self-hosted LLaMA, vLLM, KServe). **o1-mini and o1-preview are not supported because Goose uses tool calling.** | `OPENAI_API_KEY`, `OPENAI_HOST` (optional), `OPENAI_ORGANIZATION` (optional), `OPENAI_PROJECT` (optional) |
|
||||
| [OpenAI](https://platform.openai.com/api-keys) | Provides gpt-4o, o1, and other advanced language models. Also supports OpenAI-compatible endpoints (e.g., self-hosted LLaMA, vLLM, KServe). **o1-mini and o1-preview are not supported because Goose uses tool calling.** | `OPENAI_API_KEY`, `OPENAI_HOST` (optional), `OPENAI_ORGANIZATION` (optional), `OPENAI_PROJECT` (optional), `OPENAI_CUSTOM_HEADERS` (optional) |
|
||||
| [OpenRouter](https://openrouter.ai/) | API gateway for unified access to various models with features like rate-limiting management. | `OPENROUTER_API_KEY` |
|
||||
|
||||
|
||||
@@ -128,6 +128,7 @@ Goose supports using custom OpenAI-compatible endpoints, which is particularly u
|
||||
| `OPENAI_HOST` | No | Custom endpoint URL (defaults to api.openai.com) |
|
||||
| `OPENAI_ORGANIZATION` | No | Organization ID for usage tracking and governance |
|
||||
| `OPENAI_PROJECT` | No | Project identifier for resource management |
|
||||
| `OPENAI_CUSTOM_HEADERS` | No | Additional headers to include in the request, in the format "HEADER_A=VALUE_A,HEADER_B=VALUE_B" |
|
||||
|
||||
### Example Configurations
|
||||
|
||||
@@ -156,6 +157,15 @@ Goose supports using custom OpenAI-compatible endpoints, which is particularly u
|
||||
OPENAI_PROJECT=compliance-approved
|
||||
```
|
||||
</TabItem>
|
||||
<TabItem value="custom-headers" label="Custom Headers">
|
||||
For OpenAI-compatible endpoints that require custom headers:
|
||||
```sh
|
||||
OPENAI_API_KEY=your-api-key
|
||||
OPENAI_ORGANIZATION=org-id123
|
||||
OPENAI_PROJECT=compliance-approved
|
||||
OPENAI_CUSTOM_HEADERS="X-Header-A=abc,X-Header-B=def"
|
||||
```
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Setup Instructions
|
||||
|
||||
Reference in New Issue
Block a user