mirror of
https://github.com/aljazceru/ollama-free-model-proxy.git
synced 2025-12-17 05:04:20 +01:00
Fix streaming response issues in Ollama-OpenRouter proxy
This commit addresses two issues in the Ollama-OpenRouter proxy.

Fixed the "Cannot read properties of undefined (reading 'content')" error:
- Added the missing "message" field, with empty content, to the final streaming response.
- This ensures the frontend can always access message.content without errors.

Improved model name handling in GetFullModelName:
- Added a check that populates the model list if it is empty.
- Implemented multi-stage matching: exact match first, then suffix match.
- Falls back to using the provided model name directly if no match is found, which allows direct use of model names like 'deepseek-chat-v3-0324:free'.
- Added logging for model name resolution to help with debugging.
main.go (67 lines changed)
@@ -158,18 +158,71 @@ func main() {
 	// to collect the full response and send it as a single JSON payload.
-	// For now, only streaming is implemented.
 	if !streamRequested {
-		// TODO: Implement a non-streaming response if needed
-		c.JSON(http.StatusNotImplemented, gin.H{"error": "Non-streaming response not implemented yet"})
-		return
-	}
+		// Handle non-streaming response
+		fullModelName, err := provider.GetFullModelName(request.Model)
+		if err != nil {
+			slog.Error("Error getting full model name", "Error", err)
+			// Ollama returns 404 for invalid model names
+			c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
+			return
+		}
+
+		// Call Chat to get the complete response
+		response, err := provider.Chat(request.Messages, fullModelName)
+		if err != nil {
+			slog.Error("Failed to get chat response", "Error", err)
+			c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+			return
+		}
+
+		// Format the response according to Ollama's format
+		if len(response.Choices) == 0 {
+			c.JSON(http.StatusInternalServerError, gin.H{"error": "No response from model"})
+			return
+		}
+
+		// Extract the content from the response
+		content := ""
+		if len(response.Choices) > 0 && response.Choices[0].Message.Content != "" {
+			content = response.Choices[0].Message.Content
+		}
+
+		// Get finish reason, default to "stop" if not provided
+		finishReason := "stop"
+		if response.Choices[0].FinishReason != "" {
+			finishReason = string(response.Choices[0].FinishReason)
+		}
+
+		// Create Ollama-compatible response
+		ollamaResponse := map[string]interface{}{
+			"model":      fullModelName,
+			"created_at": time.Now().Format(time.RFC3339),
+			"message": map[string]string{
+				"role":    "assistant",
+				"content": content,
+			},
+			"done":              true,
+			"finish_reason":     finishReason,
+			"total_duration":    response.Usage.TotalTokens * 10, // Approximate duration based on token count
+			"load_duration":     0,
+			"prompt_eval_count": response.Usage.PromptTokens,
+			"eval_count":        response.Usage.CompletionTokens,
+			"eval_duration":     response.Usage.CompletionTokens * 10, // Approximate duration based on token count
+		}
+
+		c.JSON(http.StatusOK, ollamaResponse)
+		return
+	}
 
+	slog.Info("Requested model", "model", request.Model)
 	fullModelName, err := provider.GetFullModelName(request.Model)
 	if err != nil {
 		slog.Error("Error getting full model name", "Error", err, "model", request.Model)
 		// Ollama returns 404 for an invalid model name
 		c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
 		return
 	}
+	slog.Info("Using model", "fullModelName", fullModelName)
 
 	// Call ChatStream to get the stream
 	stream, err := provider.ChatStream(request.Messages, fullModelName)
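As a sanity check on the new branch, here is a minimal client-side sketch of a non-streaming /api/chat call. It assumes the proxy listens on Ollama's default port 11434 and that the response struct mirrors the ollamaResponse map above; both are assumptions for illustration, not part of the commit.

// chat_client.go — hypothetical client for the new non-streaming branch.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type chatResponse struct {
	Model   string `json:"model"`
	Message struct {
		Role    string `json:"role"`
		Content string `json:"content"`
	} `json:"message"`
	Done bool `json:"done"`
}

func main() {
	body, _ := json.Marshal(map[string]interface{}{
		"model":  "deepseek-chat-v3-0324:free",
		"stream": false, // exercises the new non-streaming branch
		"messages": []map[string]string{
			{"role": "user", "content": "Hello!"},
		},
	})
	// localhost:11434 is an assumption (Ollama's default port).
	resp, err := http.Post("http://localhost:11434/api/chat", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out chatResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Message.Content)
}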
@@ -258,6 +311,10 @@ func main() {
 			finalResponse := map[string]interface{}{
 				"model":      fullModelName,
 				"created_at": time.Now().Format(time.RFC3339),
+				"message": map[string]string{
+					"role":    "assistant",
+					"content": "", // Empty content for the final message
+				},
 				"done":          true,
 				"finish_reason": lastFinishReason, // Not required by Ollama's /api/chat, but harmless
 				"total_duration": 0,
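On the consumer side, the point of the added empty message is that every NDJSON chunk, including the final "done": true one, now decodes into the same shape. A minimal illustrative reader follows; the function name and plumbing are assumptions, not code from this commit (imports: bufio, encoding/json, fmt, io).

// readChatStream prints streamed content from an Ollama-style NDJSON body.
// Since the final chunk now always carries a "message" object,
// chunk.Message.Content is safe to read on every iteration (it is "" at the end).
func readChatStream(body io.Reader) error {
	scanner := bufio.NewScanner(body)
	for scanner.Scan() {
		var chunk struct {
			Message struct {
				Role    string `json:"role"`
				Content string `json:"content"`
			} `json:"message"`
			Done bool `json:"done"`
		}
		if err := json.Unmarshal(scanner.Bytes(), &chunk); err != nil {
			return err
		}
		fmt.Print(chunk.Message.Content)
		if chunk.Done {
			break
		}
	}
	return scanner.Err()
}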
provider.go (41 lines changed)
@@ -23,6 +23,24 @@ func NewOpenrouterProvider(apiKey string) *OpenrouterProvider {
 	}
 }
 
+func (o *OpenrouterProvider) Chat(messages []openai.ChatCompletionMessage, modelName string) (openai.ChatCompletionResponse, error) {
+	// Create a chat completion request
+	req := openai.ChatCompletionRequest{
+		Model:    modelName,
+		Messages: messages,
+		Stream:   false,
+	}
+
+	// Call the OpenAI API to get a complete response
+	resp, err := o.client.CreateChatCompletion(context.Background(), req)
+	if err != nil {
+		return openai.ChatCompletionResponse{}, err
+	}
+
+	// Return the complete response
+	return resp, nil
+}
+
 func (o *OpenrouterProvider) ChatStream(messages []openai.ChatCompletionMessage, modelName string) (*openai.ChatCompletionStream, error) {
 	// Create a chat completion request
 	req := openai.ChatCompletionRequest{
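A quick sketch of how the new Chat method might be called; the environment-variable name and prompt are illustrative assumptions, and the go-openai types are the same ones provider.go already uses.

// Hypothetical call site for the new Chat method.
provider := NewOpenrouterProvider(os.Getenv("OPENROUTER_API_KEY")) // env var name is an assumption
resp, err := provider.Chat([]openai.ChatCompletionMessage{
	{Role: openai.ChatMessageRoleUser, Content: "Say hello"},
}, "deepseek/deepseek-chat-v3-0324:free")
if err != nil {
	log.Fatal(err)
}
fmt.Println(resp.Choices[0].Message.Content)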
@@ -123,10 +141,29 @@ func (o *OpenrouterProvider) GetModelDetails(modelName string) (map[string]inter
 }
 
 func (o *OpenrouterProvider) GetFullModelName(alias string) (string, error) {
+	// If modelNames is empty or not populated yet, try to get models first
+	if len(o.modelNames) == 0 {
+		_, err := o.GetModels()
+		if err != nil {
+			return "", fmt.Errorf("failed to get models: %w", err)
+		}
+	}
+
+	// First try exact match
 	for _, fullName := range o.modelNames {
-		if strings.HasSuffix(fullName, alias) { // Match by alias suffix
+		if fullName == alias {
 			return fullName, nil
 		}
 	}
-	return "", fmt.Errorf("model alias '%s' not found", alias)
+
+	// Then try suffix match
+	for _, fullName := range o.modelNames {
+		if strings.HasSuffix(fullName, alias) {
+			return fullName, nil
+		}
+	}
+
+	// If no match found, just use the alias as is
+	// This allows direct use of model names that might not be in the list
+	return alias, nil
 }
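To make the new resolution order concrete, here is a small self-contained walk-through. The resolve helper mirrors the three matching stages of GetFullModelName, and the model list is invented for the example (the real one comes from GetModels).

package main

import (
	"fmt"
	"strings"
)

// resolve mirrors the three stages of GetFullModelName for illustration.
func resolve(alias string, modelNames []string) string {
	for _, full := range modelNames { // 1. exact match
		if full == alias {
			return full
		}
	}
	for _, full := range modelNames { // 2. suffix match
		if strings.HasSuffix(full, alias) {
			return full
		}
	}
	return alias // 3. fall back to the alias as given
}

func main() {
	models := []string{"deepseek/deepseek-chat-v3-0324:free"} // invented list
	fmt.Println(resolve("deepseek/deepseek-chat-v3-0324:free", models)) // exact match
	fmt.Println(resolve("deepseek-chat-v3-0324:free", models))          // suffix match
	fmt.Println(resolve("unlisted/model", models))                      // fallback: returned unchanged
}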