zammad fixes

2025-08-21 11:34:01 +02:00
parent 27ee8b4cdb
commit c8e82edc4c
11 changed files with 273 additions and 149 deletions

View File

@@ -213,9 +213,15 @@ async def update_configuration(
    if not zammad_module:
        raise HTTPException(status_code=503, detail="Zammad module not available")

    # For updates, pass the existing api_token if not provided in the request
    config_data = config_request.dict()
    if not config_data.get("api_token"):
        # Use existing encrypted token for the new config
        config_data["existing_encrypted_token"] = existing_config.api_token_encrypted

    request_data = {
        "action": "save_configuration",
        "configuration": config_request.dict()
        "configuration": config_data
    }

    context = {

View File

@@ -15,7 +15,7 @@ from urllib.parse import urljoin
import aiohttp
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, and_, or_
from sqlalchemy import select, and_, or_, func
from sqlalchemy.orm import selectinload
from app.services.base_module import BaseModule, Permission, ModuleHealth
@@ -183,53 +183,102 @@ class ZammadModule(BaseModule):
"result": result
}
async def _handle_get_ticket_summary(self, request: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
"""Handle get ticket summary request"""
ticket_id = request.get("ticket_id")
if not ticket_id:
raise ValueError("ticket_id is required")
async with async_session_factory() as db:
# Get ticket from database
stmt = select(ZammadTicket).where(ZammadTicket.zammad_ticket_id == ticket_id)
result = await db.execute(stmt)
ticket = result.scalar_one_or_none()
if not ticket:
return {"error": "Ticket not found", "ticket_id": ticket_id}
return {"ticket": ticket.to_dict()}
async def _handle_process_single_ticket(self, request: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
"""Handle single ticket processing request"""
async def _handle_save_configuration(self, request: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
"""Handle save configuration request"""
user_id = context.get("user_id")
ticket_id = request.get("ticket_id")
config_id = request.get("config_id")
if not ticket_id:
raise ValueError("ticket_id is required")
# Get user configuration
config = await self._get_user_configuration(user_id, config_id)
if not config:
raise ValueError("Configuration not found")
# Process single ticket
result = await self._process_single_ticket(config, ticket_id, user_id)
return {"ticket_id": ticket_id, "result": result}
async def _handle_get_status(self, request: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
"""Handle get module status request"""
user_id = context.get("user_id")
config_data = request.get("configuration", {})
# Validate required fields for new config
required_fields = ["name", "zammad_url", "chatbot_id"]
for field in required_fields:
if not config_data.get(field):
raise ValueError(f"Required field missing: {field}")
async with async_session_factory() as db:
# Import func for count queries
from sqlalchemy import func
# If updating existing config, fetch it
config_id = config_data.get("id")
config = None
if config_id:
config = await db.get(ZammadConfiguration, config_id)
# Verify chatbot exists and user has access
chatbot_stmt = select(ChatbotInstance).where(
and_(
ChatbotInstance.id == config_data["chatbot_id"],
ChatbotInstance.created_by == str(user_id),
ChatbotInstance.is_active == True
)
)
chatbot = await db.scalar(chatbot_stmt)
if not chatbot:
raise ValueError("Chatbot not found or access denied")
# Handle api_token: only update if provided, else keep existing
if config:
# Update existing config
config.name = config_data["name"]
config.description = config_data.get("description")
config.is_default = config_data.get("is_default", False)
config.zammad_url = config_data["zammad_url"].rstrip("/")
config.chatbot_id = config_data["chatbot_id"]
config.process_state = config_data.get("process_state", "open")
config.max_tickets = config_data.get("max_tickets", 10)
config.skip_existing = config_data.get("skip_existing", True)
config.auto_process = config_data.get("auto_process", False)
config.process_interval = config_data.get("process_interval", 30)
config.summary_template = config_data.get("summary_template")
config.custom_settings = config_data.get("custom_settings", {})
if "api_token" in config_data and config_data["api_token"]:
config.api_token_encrypted = self._encrypt_data(config_data["api_token"])
# If this is set as default, unset other defaults
if config.is_default:
await db.execute(
ZammadConfiguration.__table__.update()
.where(ZammadConfiguration.user_id == user_id)
.values(is_default=False)
)
await db.commit()
await db.refresh(config)
result = {"configuration": config.to_dict()}
else:
# Creating new config, require api_token OR existing_encrypted_token
if not config_data.get("api_token") and not config_data.get("existing_encrypted_token"):
raise ValueError("Required field missing: api_token")
# Get processing statistics - use func.count() to get actual counts
# Use provided token or existing encrypted token
if config_data.get("api_token"):
encrypted_token = self._encrypt_data(config_data["api_token"])
else:
encrypted_token = config_data["existing_encrypted_token"]
config = ZammadConfiguration(
user_id=user_id,
name=config_data["name"],
description=config_data.get("description"),
is_default=config_data.get("is_default", False),
zammad_url=config_data["zammad_url"].rstrip("/"),
api_token_encrypted=encrypted_token,
chatbot_id=config_data["chatbot_id"],
process_state=config_data.get("process_state", "open"),
max_tickets=config_data.get("max_tickets", 10),
skip_existing=config_data.get("skip_existing", True),
auto_process=config_data.get("auto_process", False),
process_interval=config_data.get("process_interval", 30),
summary_template=config_data.get("summary_template"),
custom_settings=config_data.get("custom_settings", {})
)
# If this is set as default, unset other defaults
if config.is_default:
await db.execute(
ZammadConfiguration.__table__.update()
.where(ZammadConfiguration.user_id == user_id)
.values(is_default=False)
)
db.add(config)
await db.commit()
await db.refresh(config)
result = {"configuration": config.to_dict()}
# Calculate total tickets and processed tickets
total_tickets_result = await db.scalar(
select(func.count(ZammadTicket.id)).where(ZammadTicket.processed_by_user_id == user_id)
select(func.count(ZammadTicket.id)).where(
ZammadTicket.processed_by_user_id == user_id
)
)
total_tickets = total_tickets_result or 0
@@ -290,64 +339,6 @@ class ZammadModule(BaseModule):
return {"configurations": configs}
async def _handle_save_configuration(self, request: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
"""Handle save configuration request"""
user_id = context.get("user_id")
config_data = request.get("configuration", {})
# Validate required fields
required_fields = ["name", "zammad_url", "api_token", "chatbot_id"]
for field in required_fields:
if not config_data.get(field):
raise ValueError(f"Required field missing: {field}")
async with async_session_factory() as db:
# Verify chatbot exists and user has access
chatbot_stmt = select(ChatbotInstance).where(
and_(
ChatbotInstance.id == config_data["chatbot_id"],
ChatbotInstance.created_by == str(user_id),
ChatbotInstance.is_active == True
)
)
chatbot = await db.scalar(chatbot_stmt)
if not chatbot:
raise ValueError("Chatbot not found or access denied")
# Encrypt API token
encrypted_token = self._encrypt_data(config_data["api_token"])
# Create new configuration
config = ZammadConfiguration(
user_id=user_id,
name=config_data["name"],
description=config_data.get("description"),
is_default=config_data.get("is_default", False),
zammad_url=config_data["zammad_url"].rstrip("/"),
api_token_encrypted=encrypted_token,
chatbot_id=config_data["chatbot_id"],
process_state=config_data.get("process_state", "open"),
max_tickets=config_data.get("max_tickets", 10),
skip_existing=config_data.get("skip_existing", True),
auto_process=config_data.get("auto_process", False),
process_interval=config_data.get("process_interval", 30),
summary_template=config_data.get("summary_template"),
custom_settings=config_data.get("custom_settings", {})
)
# If this is set as default, unset other defaults
if config.is_default:
await db.execute(
ZammadConfiguration.__table__.update()
.where(ZammadConfiguration.user_id == user_id)
.values(is_default=False)
)
db.add(config)
await db.commit()
await db.refresh(config)
return {"configuration": config.to_dict()}
async def _handle_test_connection(self, request: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
"""Handle test Zammad connection request"""
@@ -360,6 +351,74 @@ class ZammadModule(BaseModule):
        result = await self._test_zammad_connection(zammad_url, api_token)
        return result

    async def _handle_get_status(self, request: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """Handle get module status request"""
        user_id = context.get("user_id")

        try:
            async with async_session_factory() as db:
                # Get user's configurations count
                config_stmt = select(ZammadConfiguration).where(
                    and_(
                        ZammadConfiguration.user_id == user_id,
                        ZammadConfiguration.is_active == True
                    )
                )
                config_result = await db.execute(config_stmt)
                configurations = list(config_result.scalars())

                # Get processing statistics
                processed_stmt = select(ZammadTicket).where(
                    ZammadTicket.processed_by_user_id == user_id
                )
                processed_result = await db.execute(processed_stmt)
                processed_tickets = list(processed_result.scalars())

                # Get recent processing logs
                logs_stmt = select(ZammadProcessingLog).where(
                    ZammadProcessingLog.initiated_by_user_id == user_id
                ).order_by(ZammadProcessingLog.started_at.desc()).limit(5)
                logs_result = await db.execute(logs_stmt)
                recent_logs = list(logs_result.scalars())

                # Calculate statistics for frontend
                total_processed = len(processed_tickets)
                successful = len([t for t in processed_tickets if t.processing_status == ProcessingStatus.COMPLETED])
                failed = len([t for t in processed_tickets if t.processing_status == ProcessingStatus.FAILED])
                success_rate = (successful / total_processed * 100) if total_processed > 0 else 0

                health = self.get_health()
                return {
                    "module_health": {
                        "status": health.status,
                        "message": health.message,
                        "uptime": health.uptime
                    },
                    "statistics": {
                        "total_tickets": total_processed,  # Frontend expects this name
                        "processed_tickets": successful,  # Successfully processed tickets
                        "failed_tickets": failed,  # Failed tickets
                        "success_rate": success_rate  # Calculated percentage
                    }
                }
        except Exception as e:
            logger.error(f"Error getting Zammad module status: {e}")
            return {
                "module_health": {
                    "status": "error",
                    "message": f"Error getting module status: {str(e)}",
                    "uptime": 0.0
                },
                "statistics": {
                    "total_tickets": 0,
                    "processed_tickets": 0,
                    "failed_tickets": 0,
                    "success_rate": 0.0
                }
            }

    async def _process_tickets_batch(self, config: ZammadConfiguration, batch_id: str, user_id: int, filters: Dict[str, Any]) -> Dict[str, Any]:
        """Process a batch of tickets"""
        async with async_session_factory() as db:

View File

@@ -38,7 +38,7 @@ services:
      - enclava-postgres
      - enclava-redis
      - enclava-qdrant
      - litellm-proxy
      - privatemode-proxy
    ports:
      - "58000:8000"
    volumes:
@@ -92,30 +92,6 @@ services:
      - enclava-net
    restart: unless-stopped

  # LiteLLM proxy for unified LLM API
  litellm-proxy:
    image: ghcr.io/berriai/litellm:main-latest
    environment:
      - UI_USERNAME=admin
      - UI_PASSWORD=${LITELLM_UI_PASSWORD:-admin123}
      - DATABASE_URL=postgresql://enclava_user:enclava_pass@enclava-postgres:5432/enclava_db
      - OPENROUTER_API_KEY=${OPENROUTER_API_KEY}
      #- OLLAMA_BASE_URL=http://enclava-ollama-proxy:11434/v1
      #- OLLAMA_API_KEY=ollama
      - PRIVATEMODE_API_KEY=${PRIVATEMODE_API_KEY:-}
      - LITELLM_MASTER_KEY=${LITELLM_MASTER_KEY}
      - LITELLM_UI_PASSWORD=${LITELLM_UI_PASSWORD:-admin123}
    volumes:
      - ./litellm_config.yaml:/app/config.yaml
    command: --config /app/config.yaml --port 4000 --num_workers 1
    depends_on:
      - enclava-postgres
    ports:
      - "54000:4000"
    networks:
      - enclava-net
    restart: unless-stopped

  # Ollama Free Model Proxy
  #enclava-ollama-proxy:
  # build:

View File

@@ -115,7 +115,7 @@ export default function ApiKeysPage() {
    try {
      setLoading(true);
      const token = localStorage.getItem("token");
      const response = await fetch("/api/llm/api-keys", {
      const response = await fetch("/api/v1/api-keys", {
        headers: {
          "Authorization": `Bearer ${token}`,
          "Content-Type": "application/json",
@@ -145,7 +145,7 @@ export default function ApiKeysPage() {
      setActionLoading("create");
      const token = localStorage.getItem("token");
      const response = await fetch("/api/llm/api-keys", {
      const response = await fetch("/api/v1/api-keys", {
        method: "POST",
        headers: {
          "Authorization": `Bearer ${token}`,
@@ -199,7 +199,7 @@ export default function ApiKeysPage() {
      setActionLoading(`toggle-${keyId}`);
      const token = localStorage.getItem("token");
      const response = await fetch(`/api/llm/api-keys/${keyId}`, {
      const response = await fetch(`/api/v1/api-keys/${keyId}`, {
        method: "PUT",
        headers: {
          "Authorization": `Bearer ${token}`,
@@ -236,7 +236,7 @@ export default function ApiKeysPage() {
      setActionLoading(`regenerate-${keyId}`);
      const token = localStorage.getItem("token");
      const response = await fetch(`/api/llm/api-keys/${keyId}/regenerate`, {
      const response = await fetch(`/api/v1/api-keys/${keyId}/regenerate`, {
        method: "POST",
        headers: {
          "Authorization": `Bearer ${token}`,
@@ -280,7 +280,7 @@ export default function ApiKeysPage() {
      setActionLoading(`delete-${keyId}`);
      const token = localStorage.getItem("token");
      const response = await fetch(`/api/llm/api-keys/${keyId}`, {
      const response = await fetch(`/api/v1/api-keys/${keyId}`, {
        method: "DELETE",
        headers: {
          "Authorization": `Bearer ${token}`,
@@ -316,7 +316,7 @@ export default function ApiKeysPage() {
      setActionLoading(`edit-${keyId}`);
      const token = localStorage.getItem("token");
      const response = await fetch(`/api/llm/api-keys/${keyId}`, {
      const response = await fetch(`/api/v1/api-keys/${keyId}`, {
        method: "PUT",
        headers: {
          "Authorization": `Bearer ${token}`,

View File

@@ -0,0 +1,45 @@
import { NextRequest, NextResponse } from "next/server"
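// Resolve the backend base URL: prefer the internal URL, then the public API URL, then the Docker-network default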
const BACKEND_URL = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || "http://enclava-backend:8000"
export async function GET(request: NextRequest) {
  try {
    const token = request.headers.get("authorization")
    if (!token) {
      return NextResponse.json({ error: "Unauthorized" }, { status: 401 })
    }

    const response = await fetch(`${BACKEND_URL}/api/v1/llm/models`, {
      method: "GET",
      headers: {
        "Authorization": token,
        "Content-Type": "application/json",
      },
    })

    if (!response.ok) {
      const errorData = await response.text()
      return NextResponse.json(
        { error: "Failed to fetch models", details: errorData },
        { status: response.status }
      )
    }

    const data = await response.json()
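    // Flatten the backend's model list into the { id, name, provider } shape the frontend expects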
    const transformedModels = data.data?.map((model: any) => ({
      id: model.id,
      name: model.id,
      provider: model.owned_by || "unknown"
    })) || []

    return NextResponse.json({ data: transformedModels })
  } catch (error) {
    console.error("Error fetching models:", error)
    return NextResponse.json(
      { error: "Internal server error" },
      { status: 500 }
    )
  }
}
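For reference, a minimal sketch of how a client component might call this proxy route; the /api/v1/llm/models path and the token kept in localStorage are assumptions taken from the fetch calls updated elsewhere in this commit, not part of the change itself:

  const token = localStorage.getItem("token")
  const res = await fetch("/api/v1/llm/models", {
    headers: { "Authorization": `Bearer ${token}` },
  })
  if (res.ok) {
    // data is the normalized list: [{ id, name, provider }, ...]
    const { data } = await res.json()
  }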

View File

@@ -0,0 +1,38 @@
import { NextRequest, NextResponse } from "next/server"
const BACKEND_URL = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_URL || "http://enclava-backend:8000"
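// Forwards provider status requests to the backend, passing through the caller's Authorization header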
export async function GET(request: NextRequest) {
  try {
    const token = request.headers.get("authorization")
    if (!token) {
      return NextResponse.json({ error: "Unauthorized" }, { status: 401 })
    }

    const response = await fetch(`${BACKEND_URL}/api/v1/llm/providers/status`, {
      method: "GET",
      headers: {
        "Authorization": token,
        "Content-Type": "application/json",
      },
    })

    if (!response.ok) {
      const errorData = await response.text()
      return NextResponse.json(
        { error: "Failed to fetch provider status", details: errorData },
        { status: response.status }
      )
    }

    const data = await response.json()
    return NextResponse.json(data)
  } catch (error) {
    console.error("Error fetching provider status:", error)
    return NextResponse.json(
      { error: "Internal server error" },
      { status: 500 }
    )
  }
}

View File

@@ -118,9 +118,9 @@ function LLMPageContent() {
      // Fetch API keys, budgets, and models
      const [keysRes, budgetsRes, modelsRes] = await Promise.all([
        fetch('/api/llm/api-keys', { headers }),
        fetch('/api/llm/budgets', { headers }),
        fetch('/api/llm/models', { headers })
        fetch('/api/v1/api-keys', { headers }),
        fetch('/api/v1/llm/budget/status', { headers }),
        fetch('/api/v1/llm/models', { headers })
      ])

      console.log('API keys response status:', keysRes.status)
@@ -163,7 +163,7 @@ function LLMPageContent() {
  const createAPIKey = async () => {
    try {
      const token = localStorage.getItem('token')
      const response = await fetch('/api/llm/api-keys', {
      const response = await fetch('/api/v1/api-keys', {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${token}`,
@@ -209,7 +209,7 @@ function LLMPageContent() {
        throw new Error('No authentication token found')
      }

      const response = await fetch(`/api/llm/api-keys/${keyId}`, {
      const response = await fetch(`/api/v1/api-keys/${keyId}`, {
        method: 'DELETE',
        headers: {
          'Authorization': `Bearer ${token}`,

View File

@@ -69,7 +69,7 @@ export default function ChatPlayground({ selectedModel, onRequestComplete }: Cha
        { role: 'user', content: userMessage.content }
      ]

      const response = await fetch('/api/llm/chat/completions', {
      const response = await fetch('/api/v1/llm/chat/completions', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json'

View File

@@ -61,8 +61,8 @@ export default function ModelSelector({ value, onValueChange, filter = 'all', cl
      // Fetch models and provider status in parallel
      const [modelsResponse, statusResponse] = await Promise.allSettled([
        fetch('/api/llm/models', { headers }),
        fetch('/api/llm/providers/status', { headers })
        fetch('/api/v1/llm/models', { headers }),
        fetch('/api/v1/llm/providers/status', { headers })
      ])

      // Handle models response
@@ -234,7 +234,7 @@ export default function ModelSelector({ value, onValueChange, filter = 'all', cl
        </div>
      </div>

      <Select value={value ?? ''} onValueChange={onValueChange}>
      <Select value={value || ''} onValueChange={onValueChange}>
        <SelectTrigger>
          <SelectValue placeholder="Select a model">
            {selectedModel && (

View File

@@ -55,8 +55,8 @@ export default function ProviderHealthDashboard() {
      }

      const [statusResponse, metricsResponse] = await Promise.allSettled([
        fetch('/api/llm/providers/status', { headers }),
        fetch('/api/llm/metrics', { headers })
        fetch('/api/v1/llm/providers/status', { headers }),
        fetch('/api/v1/llm/metrics', { headers })
      ])

      // Handle provider status

View File

@@ -39,7 +39,7 @@ export function useBudgetStatus(autoRefresh = true, refreshInterval = 30000) {
    try {
      setLoading(true)

      const response = await fetch('/api/llm/budget/status')
      const response = await fetch('/api/v1/llm/budget/status')

      if (!response.ok) {
        if (response.status === 401) {