mirror of https://github.com/aljazceru/enclava.git

before rbac and tool use
@@ -21,7 +21,7 @@ from app.core.security import (
     get_current_user,
     get_current_active_user,
 )
-from app.db.database import get_db
+from app.db.database import get_db, create_default_admin
 from app.models.user import User
 from app.utils.exceptions import AuthenticationError, ValidationError
 
@@ -201,23 +201,45 @@ async def login(
     user = result.scalar_one_or_none()
 
     if not user:
-        logger.warning("LOGIN_USER_NOT_FOUND", identifier=identifier)
-        # List available users for debugging
-        try:
-            all_users_stmt = select(User).limit(5)
-            all_users_result = await db.execute(all_users_stmt)
-            all_users = all_users_result.scalars().all()
-            logger.info(
-                "LOGIN_USER_LIST",
-                users=[u.email for u in all_users],
-            )
-        except Exception as e:
-            logger.error("LOGIN_USER_LIST_FAILURE", error=str(e))
-
-        raise HTTPException(
-            status_code=status.HTTP_401_UNAUTHORIZED,
-            detail="Incorrect email or password"
-        )
+        bootstrap_attempted = False
+        identifier_lower = identifier.lower() if identifier else ""
+        admin_email = settings.ADMIN_EMAIL.lower() if settings.ADMIN_EMAIL else None
+
+        if user_data.email and admin_email and identifier_lower == admin_email and settings.ADMIN_PASSWORD:
+            bootstrap_attempted = True
+            logger.info("LOGIN_ADMIN_BOOTSTRAP_START", email=user_data.email)
+            try:
+                await create_default_admin()
+                # Re-run lookup after bootstrap attempt
+                stmt = select(User).where(User.email == user_data.email)
+                result = await db.execute(stmt)
+                user = result.scalar_one_or_none()
+                if user:
+                    logger.info("LOGIN_ADMIN_BOOTSTRAP_SUCCESS", email=user.email)
+            except Exception as bootstrap_exc:
+                logger.error("LOGIN_ADMIN_BOOTSTRAP_FAILED", error=str(bootstrap_exc))
+
+    if not user:
+        logger.warning("LOGIN_USER_NOT_FOUND", identifier=identifier)
+        # List available users for debugging
+        try:
+            all_users_stmt = select(User).limit(5)
+            all_users_result = await db.execute(all_users_stmt)
+            all_users = all_users_result.scalars().all()
+            logger.info(
+                "LOGIN_USER_LIST",
+                users=[u.email for u in all_users],
+            )
+        except Exception as e:
+            logger.error("LOGIN_USER_LIST_FAILURE", error=str(e))
+
+        if bootstrap_attempted:
+            logger.warning("LOGIN_ADMIN_BOOTSTRAP_UNSUCCESSFUL", email=user_data.email)
+
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Incorrect email or password"
+        )
 
     logger.info("LOGIN_USER_FOUND", email=user.email, is_active=user.is_active)
     logger.info("LOGIN_PASSWORD_VERIFY_START")
 
@@ -158,7 +158,7 @@ async def create_chatbot(
         raise HTTPException(status_code=500, detail="Chatbot module not available")
 
     # Import needed types
-    from modules.chatbot.main import ChatbotConfig
+    from app.modules.chatbot.main import ChatbotConfig
 
     # Create chatbot config object
     config = ChatbotConfig(
@@ -7,6 +7,7 @@ from fastapi import APIRouter, Depends, HTTPException
 from pydantic import BaseModel
 from sqlalchemy.ext.asyncio import AsyncSession
 from sqlalchemy import select, update, delete
+from sqlalchemy.dialects.postgresql import insert
 from datetime import datetime
 import uuid
 
@@ -513,21 +514,33 @@ async def seed_default_templates(
                 inactive_template.updated_at = datetime.utcnow()
                 updated_templates.append(type_key)
             else:
-                # Create new template
-                new_template = PromptTemplate(
-                    id=str(uuid.uuid4()),
-                    name=template_data["name"],
-                    type_key=type_key,
-                    description=template_data["description"],
-                    system_prompt=template_data["prompt"],
-                    is_default=True,
-                    is_active=True,
-                    version=1,
-                    created_at=datetime.utcnow(),
-                    updated_at=datetime.utcnow()
-                )
-                db.add(new_template)
-                created_templates.append(type_key)
+                # Create new template, gracefully skipping if another request created it first
+                now = datetime.utcnow()
+                stmt = (
+                    insert(PromptTemplate)
+                    .values(
+                        id=str(uuid.uuid4()),
+                        name=template_data["name"],
+                        type_key=type_key,
+                        description=template_data["description"],
+                        system_prompt=template_data["prompt"],
+                        is_default=True,
+                        is_active=True,
+                        version=1,
+                        created_at=now,
+                        updated_at=now,
+                    )
+                    .on_conflict_do_nothing(index_elements=[PromptTemplate.type_key])
+                )
+
+                result = await db.execute(stmt)
+                if result.rowcount:
+                    created_templates.append(type_key)
+                else:
+                    log_api_request(
+                        "prompt_template_seed_skipped",
+                        {"type_key": type_key, "reason": "already_exists"},
+                    )
 
         await db.commit()
 
@@ -541,4 +554,4 @@ async def seed_default_templates(
     except Exception as e:
         await db.rollback()
         log_api_request("seed_default_templates_error", {"error": str(e), "user_id": user_id})
-        raise HTTPException(status_code=500, detail=f"Failed to seed default templates: {str(e)}")
\ No newline at end of file
+        raise HTTPException(status_code=500, detail=f"Failed to seed default templates: {str(e)}")
@@ -5,6 +5,7 @@ Database connection and session management
 import logging
 from typing import AsyncGenerator
 from sqlalchemy import create_engine, MetaData
+from sqlalchemy.exc import SQLAlchemyError
 from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
 from sqlalchemy.orm import sessionmaker, declarative_base
 from sqlalchemy.pool import StaticPool
@@ -141,21 +142,27 @@ async def create_default_admin():
     from app.core.security import get_password_hash
     from app.core.config import settings
     from sqlalchemy import select
+    from sqlalchemy.exc import SQLAlchemyError
 
     try:
+        admin_email = settings.ADMIN_EMAIL
+        admin_password = settings.ADMIN_PASSWORD
+
+        if not admin_email or not admin_password:
+            logger.info("Admin bootstrap skipped: ADMIN_EMAIL or ADMIN_PASSWORD unset")
+            return
+
         async with async_session_factory() as session:
             # Check if user with ADMIN_EMAIL exists
-            stmt = select(User).where(User.email == settings.ADMIN_EMAIL)
+            stmt = select(User).where(User.email == admin_email)
             result = await session.execute(stmt)
             existing_user = result.scalar_one_or_none()
 
             if existing_user:
-                logger.info(f"User with email {settings.ADMIN_EMAIL} already exists - skipping admin creation")
+                logger.info(f"User with email {admin_email} already exists - skipping admin creation")
                 return
 
-            # Create admin user from environment variables
-            admin_email = settings.ADMIN_EMAIL
-            admin_password = settings.ADMIN_PASSWORD
             # Generate username from email (part before @)
             admin_username = admin_email.split('@')[0]
 
@@ -176,6 +183,10 @@ async def create_default_admin():
         logger.warning("PLEASE CHANGE THE PASSWORD AFTER FIRST LOGIN")
         logger.warning("=" * 60)
 
+    except SQLAlchemyError as e:
+        logger.error(f"Failed to create default admin user due to database error: {e}")
+    except AttributeError as e:
+        logger.error(f"Failed to create default admin user: invalid ADMIN_EMAIL '{settings.ADMIN_EMAIL}'")
     except Exception as e:
         logger.error(f"Failed to create default admin user: {e}")
         # Don't raise here as this shouldn't block the application startup
 
@@ -103,6 +103,7 @@ async def lifespan(app: FastAPI):
     Application lifespan handler
     """
     logger.info("Starting Enclava platform...")
+    background_tasks = []
 
     # Initialize core cache service (before database to provide caching for auth)
     from app.core.cache import core_cache
@@ -125,16 +126,27 @@ async def lifespan(app: FastAPI):
     # Initialize config manager
     await init_config_manager()
 
-    # Initialize LLM service (needed by RAG module)
+    # Ensure platform permissions are registered before module discovery
+    from app.services.permission_manager import permission_registry
+    permission_registry.register_platform_permissions()
+
+    # Initialize LLM service (needed by RAG module) concurrently
     from app.services.llm.service import llm_service
-    try:
-        await llm_service.initialize()
-        logger.info("LLM service initialized successfully")
-    except Exception as e:
-        logger.warning(f"LLM service initialization failed: {e}")
+
+    async def initialize_llm_service():
+        try:
+            await llm_service.initialize()
+            logger.info("LLM service initialized successfully")
+        except Exception as exc:
+            logger.warning(f"LLM service initialization failed: {exc}")
+
+    background_tasks.append(asyncio.create_task(initialize_llm_service()))
 
     # Initialize analytics service
-    init_analytics_service()
+    try:
+        init_analytics_service()
+    except Exception as exc:
+        logger.warning(f"Analytics service initialization failed: {exc}")
 
     # Initialize module manager with FastAPI app for router registration
     logger.info("Initializing module manager...")
@@ -142,62 +154,78 @@ async def lifespan(app: FastAPI):
     app.state.module_manager = module_manager
     logger.info("Module manager initialized successfully")
 
-    # Initialize permission registry
-    logger.info("Initializing permission registry...")
-    from app.services.permission_manager import permission_registry
-    permission_registry.register_platform_permissions()
-    logger.info("Permission registry initialized successfully")
-
     # Initialize document processor
     from app.services.document_processor import document_processor
-    await document_processor.start()
-    app.state.document_processor = document_processor
+    try:
+        await document_processor.start()
+        app.state.document_processor = document_processor
+    except Exception as exc:
+        logger.error(f"Document processor failed to start: {exc}")
+        app.state.document_processor = None
 
     # Setup metrics
-    setup_metrics(app)
+    try:
+        setup_metrics(app)
+    except Exception as exc:
+        logger.warning(f"Metrics setup failed: {exc}")
 
     # Start background audit worker
     from app.services.audit_service import start_audit_worker
-    start_audit_worker()
-
-    # Initialize plugin auto-discovery service
-    from app.services.plugin_autodiscovery import initialize_plugin_autodiscovery
     try:
-        discovery_results = await initialize_plugin_autodiscovery()
-        app.state.plugin_discovery_results = discovery_results
-        logger.info(f"Plugin auto-discovery completed: {discovery_results['summary']}")
-    except Exception as e:
-        logger.warning(f"Plugin auto-discovery failed: {e}")
+        start_audit_worker()
+    except Exception as exc:
+        logger.warning(f"Audit worker failed to start: {exc}")
+
+    # Initialize plugin auto-discovery service concurrently
+    async def initialize_plugins():
+        from app.services.plugin_autodiscovery import initialize_plugin_autodiscovery
+        try:
+            discovery_results = await initialize_plugin_autodiscovery()
+            app.state.plugin_discovery_results = discovery_results
+            logger.info(f"Plugin auto-discovery completed: {discovery_results.get('summary')}")
+        except Exception as exc:
+            logger.warning(f"Plugin auto-discovery failed: {exc}")
+            app.state.plugin_discovery_results = {"error": str(exc)}
+
+    background_tasks.append(asyncio.create_task(initialize_plugins()))
+
+    if background_tasks:
+        results = await asyncio.gather(*background_tasks, return_exceptions=True)
+        for result in results:
+            if isinstance(result, Exception):
+                logger.warning(f"Background startup task failed: {result}")
 
     logger.info("Platform started successfully")
 
-    yield
-
-    # Cleanup
-    logger.info("Shutting down platform...")
-
-    # Cleanup embedding service HTTP sessions
-    from app.services.embedding_service import embedding_service
-    try:
-        await embedding_service.cleanup()
-        logger.info("Embedding service cleaned up successfully")
-    except Exception as e:
-        logger.error(f"Error cleaning up embedding service: {e}")
-
-    # Close core cache service
-    from app.core.cache import core_cache
-    await core_cache.cleanup()
-
-    # Close Redis connection for cached API key service
-    from app.services.cached_api_key import cached_api_key_service
-    await cached_api_key_service.close()
-
-    # Stop document processor
-    if hasattr(app.state, 'document_processor'):
-        await app.state.document_processor.stop()
-
-    await module_manager.cleanup()
-    logger.info("Platform shutdown complete")
+    try:
+        yield
+    finally:
+        # Cleanup
+        logger.info("Shutting down platform...")
+
+        # Cleanup embedding service HTTP sessions
+        from app.services.embedding_service import embedding_service
+        try:
+            await embedding_service.cleanup()
+            logger.info("Embedding service cleaned up successfully")
+        except Exception as e:
+            logger.error(f"Error cleaning up embedding service: {e}")
+
+        # Close core cache service
+        from app.core.cache import core_cache
+        await core_cache.cleanup()
+
+        # Close Redis connection for cached API key service
+        from app.services.cached_api_key import cached_api_key_service
+        await cached_api_key_service.close()
+
+        # Stop document processor
+        processor = getattr(app.state, 'document_processor', None)
+        if processor:
+            await processor.stop()
+
+        await module_manager.cleanup()
+        logger.info("Platform shutdown complete")
 
 
 # Create FastAPI application
@@ -9,6 +9,7 @@ from typing import Dict, Any, Optional, List
 from datetime import datetime
 from enum import Enum
 from dataclasses import dataclass
+from pathlib import Path
 from sqlalchemy.ext.asyncio import AsyncSession
 from sqlalchemy import select, update
 from sqlalchemy.orm import selectinload
@@ -99,12 +100,15 @@ class DocumentProcessor:
         try:
             task = ProcessingTask(document_id=document_id, priority=priority)
 
-            # Check if queue is full
-            if self.processing_queue.full():
-                logger.warning(f"Processing queue is full, dropping task for document {document_id}")
+            try:
+                await asyncio.wait_for(self.processing_queue.put(task), timeout=5.0)
+            except asyncio.TimeoutError:
+                logger.warning(
+                    "Processing queue saturated, could not enqueue document %s within timeout",
+                    document_id,
+                )
                 return False
 
-            await self.processing_queue.put(task)
             self.stats["queue_size"] = self.processing_queue.qsize()
 
             logger.info(f"Added processing task for document {document_id} (priority: {priority})")
@@ -119,6 +123,7 @@ class DocumentProcessor:
         logger.info(f"Started worker: {worker_name}")
 
         while self.running:
+            task: Optional[ProcessingTask] = None
             try:
                 # Get task from queue (wait up to 1 second)
                 task = await asyncio.wait_for(
@@ -142,14 +147,21 @@ class DocumentProcessor:
                 if task.retry_count < task.max_retries:
                     task.retry_count += 1
                     await asyncio.sleep(2 ** task.retry_count)  # Exponential backoff
-                    await self.processing_queue.put(task)
+                    try:
+                        await asyncio.wait_for(self.processing_queue.put(task), timeout=5.0)
+                    except asyncio.TimeoutError:
+                        logger.error(
+                            "%s: Failed to requeue document %s due to saturated queue",
+                            worker_name,
+                            task.document_id,
+                        )
+                        self.stats["error_count"] += 1
+                        continue
                     logger.warning(f"{worker_name}: Retrying document {task.document_id} (attempt {task.retry_count})")
                 else:
                     self.stats["error_count"] += 1
                     logger.error(f"{worker_name}: Failed to process document {task.document_id} after {task.max_retries} retries")
 
-                self.stats["active_workers"] -= 1
-
             except asyncio.TimeoutError:
                 # No tasks in queue, continue
                 continue
@@ -157,9 +169,14 @@
                 # Worker cancelled, exit
                 break
             except Exception as e:
-                self.stats["active_workers"] -= 1
                 logger.error(f"{worker_name}: Unexpected error: {e}")
                 await asyncio.sleep(1)  # Brief pause before continuing
+            finally:
+                if task is not None:
+                    self.processing_queue.task_done()
+                if self.stats["active_workers"] > 0:
+                    self.stats["active_workers"] -= 1
+                self.stats["queue_size"] = self.processing_queue.qsize()
 
         logger.info(f"Worker stopped: {worker_name}")
 
@@ -172,16 +189,24 @@ class DocumentProcessor:
             if not module_manager.initialized:
                 await module_manager.initialize()
 
-            rag_module = module_manager.modules.get('rag')
+            rag_module = module_manager.get_module('rag')
 
-            if not rag_module or not getattr(rag_module, 'enabled', False):
+            if not rag_module:
                 enabled = await module_manager.enable_module('rag')
                 if not enabled:
-                    raise Exception("Failed to enable RAG module")
-                rag_module = module_manager.modules.get('rag')
+                    raise RuntimeError("Failed to enable RAG module")
+                rag_module = module_manager.get_module('rag')
 
-            if not rag_module or not getattr(rag_module, 'enabled', False):
-                raise Exception("RAG module not available or not enabled")
+            if not rag_module:
+                raise RuntimeError("RAG module not available after enable attempt")
+
+            if not getattr(rag_module, 'enabled', True):
+                enabled = await module_manager.enable_module('rag')
+                if not enabled:
+                    raise RuntimeError("RAG module is disabled and could not be re-enabled")
+                rag_module = module_manager.get_module('rag')
+                if not rag_module or not getattr(rag_module, 'enabled', True):
+                    raise RuntimeError("RAG module is disabled and could not be re-enabled")
 
             self._rag_module = rag_module
             logger.info("DocumentProcessor cached RAG module instance for reuse")
@@ -224,8 +249,21 @@ class DocumentProcessor:
 
             # Read file content
             logger.info(f"Reading file content for document {task.document_id}: {document.file_path}")
-            with open(document.file_path, 'rb') as f:
-                file_content = f.read()
+            file_path = Path(document.file_path)
+            try:
+                file_content = await asyncio.to_thread(file_path.read_bytes)
+            except FileNotFoundError:
+                logger.error(f"File not found for document {task.document_id}: {document.file_path}")
+                document.status = ProcessingStatus.ERROR
+                document.processing_error = "Document file not found on disk"
+                await session.commit()
+                return False
+            except Exception as exc:
+                logger.error(f"Failed reading file for document {task.document_id}: {exc}")
+                document.status = ProcessingStatus.ERROR
+                document.processing_error = f"Failed to read file: {exc}"
+                await session.commit()
+                return False
 
             logger.info(f"File content read successfully for document {task.document_id}, size: {len(file_content)} bytes")
 
@@ -78,15 +78,16 @@ class LLMServiceConfig(BaseModel):
 
 
-def create_default_config() -> LLMServiceConfig:
+def create_default_config(env_vars=None) -> LLMServiceConfig:
     """Create default LLM service configuration"""
+    env = env_vars or EnvironmentVariables()
 
     # PrivateMode.ai configuration (via proxy)
     # Models will be fetched dynamically from proxy /models endpoint
     privatemode_config = ProviderConfig(
         name="privatemode",
         provider_type="privatemode",
-        enabled=True,
+        enabled=bool(env.PRIVATEMODE_API_KEY),
         base_url=settings.PRIVATEMODE_PROXY_URL,
         api_key_env_var="PRIVATEMODE_API_KEY",
         default_model="privatemode-latest",
@@ -108,13 +109,105 @@ def create_default_config() -> LLMServiceConfig:
         )
     )
 
+    providers: Dict[str, ProviderConfig] = {
+        "privatemode": privatemode_config
+    }
+
+    if env.OPENAI_API_KEY:
+        providers["openai"] = ProviderConfig(
+            name="openai",
+            provider_type="openai",
+            enabled=True,
+            base_url="https://api.openai.com/v1",
+            api_key_env_var="OPENAI_API_KEY",
+            default_model="gpt-4o-mini",
+            supported_models=[
+                "gpt-4o-mini",
+                "gpt-4o",
+                "gpt-3.5-turbo",
+                "text-embedding-3-large",
+                "text-embedding-3-small"
+            ],
+            capabilities=["chat", "embeddings"],
+            priority=2,
+            supports_streaming=True,
+            supports_function_calling=True,
+            max_context_window=128000,
+            max_output_tokens=8192,
+            resilience=ResilienceConfig(
+                max_retries=2,
+                retry_delay_ms=750,
+                timeout_ms=45000,
+                circuit_breaker_threshold=6,
+                circuit_breaker_reset_timeout_ms=60000
+            )
+        )
+
+    if env.ANTHROPIC_API_KEY:
+        providers["anthropic"] = ProviderConfig(
+            name="anthropic",
+            provider_type="anthropic",
+            enabled=True,
+            base_url="https://api.anthropic.com/v1",
+            api_key_env_var="ANTHROPIC_API_KEY",
+            default_model="claude-3-opus-20240229",
+            supported_models=[
+                "claude-3-opus-20240229",
+                "claude-3-sonnet-20240229",
+                "claude-3-haiku-20240307"
+            ],
+            capabilities=["chat"],
+            priority=3,
+            supports_streaming=True,
+            supports_function_calling=False,
+            max_context_window=200000,
+            max_output_tokens=4096,
+            resilience=ResilienceConfig(
+                max_retries=3,
+                retry_delay_ms=1000,
+                timeout_ms=60000,
+                circuit_breaker_threshold=5,
+                circuit_breaker_reset_timeout_ms=90000
+            )
+        )
+
+    if env.GOOGLE_API_KEY:
+        providers["google"] = ProviderConfig(
+            name="google",
+            provider_type="google",
+            enabled=True,
+            base_url="https://generativelanguage.googleapis.com/v1beta",
+            api_key_env_var="GOOGLE_API_KEY",
+            default_model="models/gemini-1.5-pro-latest",
+            supported_models=[
+                "models/gemini-1.5-pro-latest",
+                "models/gemini-1.5-flash-latest"
+            ],
+            capabilities=["chat", "multimodal"],
+            priority=4,
+            supports_streaming=True,
+            supports_function_calling=True,
+            max_context_window=200000,
+            max_output_tokens=8192,
+            resilience=ResilienceConfig(
+                max_retries=2,
+                retry_delay_ms=1000,
+                timeout_ms=45000,
+                circuit_breaker_threshold=4,
+                circuit_breaker_reset_timeout_ms=60000
+            )
+        )
+
+    default_provider = next(
+        (name for name, provider in providers.items() if provider.enabled),
+        "privatemode"
+    )
+
     # Create main configuration
     config = LLMServiceConfig(
-        default_provider="privatemode",
+        default_provider=default_provider,
         enable_detailed_logging=settings.LOG_LLM_PROMPTS,
-        providers={
-            "privatemode": privatemode_config
-        },
+        providers=providers,
         model_routing={}  # Will be populated dynamically from provider models
     )
 
@@ -174,7 +267,7 @@ class ConfigurationManager:
     def get_config(self) -> LLMServiceConfig:
         """Get current configuration"""
         if self._config is None:
-            self._config = create_default_config()
+            self._config = create_default_config(self._env_vars)
             self._validate_configuration()
 
         return self._config
@@ -271,4 +364,4 @@ class ConfigurationManager:
 
 
 # Global configuration manager
-config_manager = ConfigurationManager()
\ No newline at end of file
+config_manager = ConfigurationManager()
@@ -186,7 +186,13 @@ class LLMService:
             total_latency = (time.time() - start_time) * 1000
             error_code = getattr(e, 'error_code', e.__class__.__name__)
 
-
+            logger.exception(
+                "Chat completion failed for provider %s (model=%s, latency=%.2fms, error=%s)",
+                provider_name,
+                request.model,
+                total_latency,
+                error_code,
+            )
             raise
 
     async def create_chat_completion_stream(self, request: ChatRequest) -> AsyncGenerator[Dict[str, Any], None]:
@@ -220,6 +226,12 @@ class LLMService:
         except Exception as e:
             # Record streaming failure - metrics disabled
             error_code = getattr(e, 'error_code', e.__class__.__name__)
+            logger.exception(
+                "Streaming chat completion failed for provider %s (model=%s, error=%s)",
+                provider_name,
+                request.model,
+                error_code,
+            )
             raise
 
     async def create_embedding(self, request: EmbeddingRequest) -> EmbeddingResponse:
@@ -261,6 +273,13 @@ class LLMService:
             # Record failed request - metrics disabled
             total_latency = (time.time() - start_time) * 1000
             error_code = getattr(e, 'error_code', e.__class__.__name__)
+            logger.exception(
+                "Embedding request failed for provider %s (model=%s, latency=%.2fms, error=%s)",
+                provider_name,
+                request.model,
+                total_latency,
+                error_code,
+            )
             raise
 
     async def get_models(self, provider_name: Optional[str] = None) -> List[ModelInfo]:
@@ -378,4 +397,4 @@ class LLMService:
 
 
 # Global LLM service instance
-llm_service = LLMService()
\ No newline at end of file
+llm_service = LLMService()
@@ -38,26 +38,49 @@ class ModuleConfig:
 class ModuleFileWatcher(FileSystemEventHandler):
     """Watch for changes in module files"""
 
-    def __init__(self, module_manager):
+    def __init__(self, module_manager, modules_root: Path):
         self.module_manager = module_manager
+        self.modules_root = modules_root.resolve()
+
+    def _resolve_module_name(self, src_path: str) -> Optional[str]:
+        try:
+            relative_path = Path(src_path).resolve().relative_to(self.modules_root)
+        except ValueError:
+            return None
+
+        parts = relative_path.parts
+        return parts[0] if parts else None
 
     def on_modified(self, event):
         if event.is_directory or not event.src_path.endswith('.py'):
            return
 
-        # Extract module name from path
-        path_parts = Path(event.src_path).parts
-        if 'modules' in path_parts:
-            modules_index = path_parts.index('modules')
-            if modules_index + 1 < len(path_parts):
-                module_name = path_parts[modules_index + 1]
-                if module_name in self.module_manager.modules:
-                    log_module_event("hot_reload", "file_changed", {
-                        "module": module_name,
-                        "file": event.src_path
-                    })
-                    # Schedule reload
-                    asyncio.create_task(self.module_manager.reload_module(module_name))
+        module_name = self._resolve_module_name(event.src_path)
+        if not module_name or module_name not in self.module_manager.modules:
+            return
+
+        log_module_event("hot_reload", "file_changed", {
+            "module": module_name,
+            "file": event.src_path
+        })
+
+        loop = self.module_manager.loop
+        if not loop or loop.is_closed():
+            logger.debug("Hot reload skipped for %s; event loop unavailable", module_name)
+            return
+
+        try:
+            future = asyncio.run_coroutine_threadsafe(
+                self.module_manager.reload_module(module_name),
+                loop,
+            )
+            future.add_done_callback(
+                lambda f: f.exception() and logger.warning(
+                    "Module reload error for %s: %s", module_name, f.exception()
+                )
+            )
+        except RuntimeError as exc:
+            logger.debug("Hot reload scheduling failed for %s: %s", module_name, exc)
 
 
 class ModuleManager:
@@ -71,12 +94,16 @@ class ModuleManager:
         self.hot_reload_enabled = True
         self.file_observer = None
         self.fastapi_app = None
+        self.loop: Optional[asyncio.AbstractEventLoop] = None
+        self.modules_root = (Path(__file__).resolve().parent.parent / "modules").resolve()
 
     async def initialize(self, fastapi_app=None):
         """Initialize the module manager and load all modules"""
         if self.initialized:
             return
 
+        self.loop = asyncio.get_running_loop()
+
         # Store FastAPI app reference for router registration
         self.fastapi_app = fastapi_app
 
@@ -103,10 +130,15 @@ class ModuleManager:
         """Load module configurations from dynamic discovery"""
-        # Initialize permission system
-        permission_registry.register_platform_permissions()
+        self.module_configs = {}
 
         # Discover modules dynamically from filesystem
         try:
-            discovered_manifests = await module_config_manager.discover_modules("modules")
+            if not self.modules_root.exists():
+                logger.warning("Modules directory not found at %s", self.modules_root)
+                return
+
+            discovered_manifests = await module_config_manager.discover_modules(str(self.modules_root))
 
             # Load saved configurations
             await module_config_manager.load_saved_configs()
@@ -206,45 +238,26 @@ class ModuleManager:
         try:
             log_module_event(module_name, "loading", {"config": config.config})
 
-            # Check if module exists in the modules directory
-            # Try multiple possible locations in order of preference
-            possible_paths = [
-                Path(f"modules/{module_name}"),  # Docker container path
-                Path(f"modules/{module_name}"),  # Container path
-                Path(f"app/modules/{module_name}")  # Legacy path
-            ]
+            # Check if module exists in the canonical modules directory
+            module_dir = self.modules_root / module_name
+            modules_base_path = self.modules_root.parent
 
-            module_dir = None
-            modules_base_path = None
+            if not module_dir.exists():
+                raise ModuleLoadError(f"Module {module_name} not found at {module_dir}")
 
-            for path in possible_paths:
-                if path.exists():
-                    module_dir = path
-                    modules_base_path = path.parent
-                    break
+            # Ensure the parent app directory is on sys.path for imports
+            modules_path_str = str(modules_base_path.absolute())
+            if modules_path_str not in sys.path:
+                sys.path.insert(0, modules_path_str)
 
-            if module_dir and module_dir.exists():
-                # Use direct import from modules directory
-                module_path = f"modules.{module_name}.main"
-
-                # Add modules directory to Python path if not already there
-                modules_path_str = str(modules_base_path.absolute())
-                if modules_path_str not in sys.path:
-                    sys.path.insert(0, modules_path_str)
-
-                # Force reload if already imported
-                if module_path in sys.modules:
-                    importlib.reload(sys.modules[module_path])
-                    module = sys.modules[module_path]
-                else:
-                    module = importlib.import_module(module_path)
+            module_path = f"app.modules.{module_name}.main"
+
+            # Force reload if already imported
+            if module_path in sys.modules:
+                importlib.reload(sys.modules[module_path])
+                module = sys.modules[module_path]
             else:
-                # Final fallback - try app.modules path (legacy)
-                try:
-                    module_path = f"app.modules.{module_name}.main"
-                    module = importlib.import_module(module_path)
-                except ImportError:
-                    raise ModuleLoadError(f"Module {module_name} not found in any expected location: {[str(p) for p in possible_paths]}")
+                module = importlib.import_module(module_path)
 
             # Get the module instance - try multiple patterns
             module_instance = None
@@ -484,7 +497,15 @@ class ModuleManager:
             except Exception as e:
                 log_module_event(module_name, "shutdown_error", {"error": str(e)})
 
+        if self.file_observer:
+            try:
+                self.file_observer.stop()
+                await asyncio.to_thread(self.file_observer.join)
+            finally:
+                self.file_observer = None
+
         self.initialized = False
+        self.loop = None
         log_module_event("module_manager", "shutdown_complete", {"success": True})
 
     async def cleanup(self):
@@ -494,27 +515,18 @@ class ModuleManager:
     async def _start_file_watcher(self):
         """Start watching module files for changes"""
         try:
-            # Try multiple possible locations for modules directory
-            possible_modules_paths = [
-                Path("modules"),  # Docker container path
-                Path("modules"),  # Container path
-                Path("app/modules")  # Legacy path
-            ]
+            if self.file_observer:
+                return
 
-            modules_path = None
-            for path in possible_modules_paths:
-                if path.exists():
-                    modules_path = path
-                    break
+            if not self.modules_root.exists():
+                log_module_event("hot_reload", "watcher_skipped", {"reason": f"No modules directory at {self.modules_root}"})
+                return
 
-            if modules_path and modules_path.exists():
-                self.file_observer = Observer()
-                event_handler = ModuleFileWatcher(self)
-                self.file_observer.schedule(event_handler, str(modules_path), recursive=True)
-                self.file_observer.start()
-                log_module_event("hot_reload", "watcher_started", {"path": str(modules_path)})
-            else:
-                log_module_event("hot_reload", "watcher_skipped", {"reason": "No modules directory found"})
+            self.file_observer = Observer()
+            event_handler = ModuleFileWatcher(self, self.modules_root)
+            self.file_observer.schedule(event_handler, str(self.modules_root), recursive=True)
+            self.file_observer.start()
+            log_module_event("hot_reload", "watcher_started", {"path": str(self.modules_root)})
         except Exception as e:
            log_module_event("hot_reload", "watcher_failed", {"error": str(e)})
 
@@ -672,4 +684,4 @@ class ModuleManager:
 
 
 # Global module manager instance
-module_manager = ModuleManager()
\ No newline at end of file
+module_manager = ModuleManager()
@@ -132,6 +132,7 @@ class ModulePermissionRegistry:
         self.module_permissions: Dict[str, List[Permission]] = {}
         self.role_permissions: Dict[str, List[str]] = {}
         self.default_roles = self._initialize_default_roles()
+        self._platform_permissions_registered = False
 
     def _initialize_default_roles(self) -> Dict[str, List[str]]:
         """Initialize default permission roles"""
@@ -177,6 +178,9 @@ class ModulePermissionRegistry:
 
     def register_platform_permissions(self):
         """Register core platform permissions"""
+        if self._platform_permissions_registered:
+            return
+
         platform_permissions = [
             Permission("users", "create", "Create users"),
             Permission("users", "read", "View users"),
@@ -232,6 +236,7 @@ class ModulePermissionRegistry:
             self.tree.add_permission(perm_string, perm)
 
         logger.info("Registered platform and LLM permissions")
+        self._platform_permissions_registered = True
 
     def check_permission(self, user_permissions: List[str], required: str,
                          context: Dict[str, Any] = None) -> bool:
@@ -407,4 +412,4 @@ def require_permission(user_permissions: List[str], required_permission: str, co
 
 
 # Global permission registry instance
-permission_registry = ModulePermissionRegistry()
\ No newline at end of file
+permission_registry = ModulePermissionRegistry()
@@ -1,21 +0,0 @@
-"""
-Chatbot Module - AI Chatbot with RAG Integration
-
-This module provides AI chatbot capabilities with:
-- Multiple personality types (Assistant, Customer Support, Teacher, etc.)
-- RAG integration for knowledge-based responses
-- Conversation memory and context management
-- Workflow integration as building blocks
-- UI-configurable settings
-"""
-
-from .main import ChatbotModule, create_module
-
-__version__ = "1.0.0"
-__author__ = "Enclava Team"
-
-# Export main classes for easy importing
-__all__ = [
-    "ChatbotModule",
-    "create_module"
-]
@@ -1,126 +0,0 @@
-{
-  "title": "Chatbot Configuration",
-  "type": "object",
-  "properties": {
-    "name": {
-      "type": "string",
-      "title": "Chatbot Name",
-      "description": "Display name for this chatbot instance",
-      "minLength": 1,
-      "maxLength": 100
-    },
-    "chatbot_type": {
-      "type": "string",
-      "title": "Chatbot Type",
-      "description": "Select the type of chatbot personality",
-      "enum": ["assistant", "customer_support", "teacher", "researcher", "creative_writer", "custom"],
-      "enumNames": ["General Assistant", "Customer Support", "Teacher", "Researcher", "Creative Writer", "Custom"],
-      "default": "assistant"
-    },
-    "model": {
-      "type": "string",
-      "title": "AI Model",
-      "description": "Choose the LLM model for responses",
-      "enum": ["gpt-4", "gpt-3.5-turbo", "claude-3-sonnet", "claude-3-opus", "llama-70b"],
-      "default": "gpt-3.5-turbo"
-    },
-    "system_prompt": {
-      "type": "string",
-      "title": "System Prompt",
-      "description": "Define the chatbot's personality and behavior instructions",
-      "ui:widget": "textarea",
-      "ui:options": {
-        "rows": 6,
-        "placeholder": "You are a helpful AI assistant..."
-      }
-    },
-    "use_rag": {
-      "type": "boolean",
-      "title": "Enable Knowledge Base",
-      "description": "Use RAG to search knowledge base for context",
-      "default": false
-    },
-    "rag_collection": {
-      "type": "string",
-      "title": "Knowledge Base Collection",
-      "description": "Select which document collection to search",
-      "ui:widget": "rag-collection-selector",
-      "ui:condition": "use_rag === true"
-    },
-    "rag_top_k": {
-      "type": "integer",
-      "title": "Knowledge Base Results",
-      "description": "Number of relevant documents to include",
-      "minimum": 1,
-      "maximum": 10,
-      "default": 5,
-      "ui:condition": "use_rag === true"
-    },
-    "temperature": {
-      "type": "number",
-      "title": "Response Creativity",
-      "description": "Controls randomness (0.0 = focused, 1.0 = creative)",
-      "minimum": 0,
-      "maximum": 1,
-      "default": 0.7,
-      "ui:widget": "range",
-      "ui:options": {
-        "step": 0.1
-      }
-    },
-    "max_tokens": {
-      "type": "integer",
-      "title": "Maximum Response Length",
-      "description": "Maximum number of tokens in response",
-      "minimum": 50,
-      "maximum": 4000,
-      "default": 1000,
-      "ui:widget": "range",
-      "ui:options": {
-        "step": 50
-      }
-    },
-    "memory_length": {
-      "type": "integer",
-      "title": "Conversation Memory",
-      "description": "Number of previous message pairs to remember",
-      "minimum": 1,
-      "maximum": 50,
-      "default": 10,
-      "ui:widget": "range"
-    },
-    "fallback_responses": {
-      "type": "array",
-      "title": "Fallback Responses",
-      "description": "Responses to use when the AI cannot answer",
-      "items": {
-        "type": "string",
-        "title": "Fallback Response"
-      },
-      "default": [
-        "I'm not sure how to help with that. Could you please rephrase your question?",
-        "I don't have enough information to answer that question accurately.",
-        "That's outside my knowledge area. Is there something else I can help you with?"
-      ],
-      "ui:options": {
-        "orderable": true,
-        "addable": true,
-        "removable": true
-      }
-    }
-  },
-  "required": ["name", "chatbot_type", "model"],
-  "ui:order": [
-    "name",
-    "chatbot_type",
-    "model",
-    "system_prompt",
-    "use_rag",
-    "rag_collection",
-    "rag_top_k",
-    "temperature",
-    "max_tokens",
-    "memory_length",
-    "fallback_responses"
-  ]
-}
@@ -1,182 +0,0 @@
-{
-  "name": "Customer Support Workflow",
-  "description": "Intelligent customer support workflow with intent classification, knowledge base search, and chatbot response generation",
-  "version": "1.0",
-  "variables": {
-    "support_chatbot_id": "cs-bot-001",
-    "escalation_threshold": 0.3,
-    "max_attempts": 3
-  },
-  "steps": [
-    {
-      "id": "classify_intent",
-      "name": "Classify Customer Intent",
-      "type": "llm_call",
-      "model": "gpt-3.5-turbo",
-      "messages": [
-        {
-          "role": "system",
-          "content": "You are an intent classifier for customer support. Classify the customer message into one of these categories: technical_issue, billing_question, feature_request, complaint, general_inquiry. Also provide a confidence score between 0 and 1. Respond with JSON: {\"intent\": \"category\", \"confidence\": 0.95, \"reasoning\": \"explanation\"}"
-        },
-        {
-          "role": "user",
-          "content": "{{ inputs.customer_message }}"
-        }
-      ],
-      "output_variable": "intent_classification"
-    },
-
-    {
-      "id": "search_knowledge_base",
-      "name": "Search Knowledge Base",
-      "type": "workflow_step",
-      "module": "rag",
-      "action": "search",
-      "config": {
-        "query": "{{ inputs.customer_message }}",
-        "collection": "support_documentation",
-        "top_k": 5,
-        "include_metadata": true
-      },
-      "output_variable": "knowledge_results"
-    },
-
-    {
-      "id": "check_confidence",
-      "name": "Check Intent Confidence",
-      "type": "condition",
-      "condition": "JSON.parse(steps.classify_intent.result).confidence > variables.escalation_threshold",
-      "true_steps": [
-        {
-          "id": "generate_chatbot_response",
-          "name": "Generate Chatbot Response",
-          "type": "workflow_step",
-          "module": "chatbot",
-          "action": "workflow_chat_step",
-          "config": {
-            "message": "{{ inputs.customer_message }}",
-            "chatbot_id": "{{ variables.support_chatbot_id }}",
-            "use_rag": true,
-            "context": {
-              "intent": "{{ steps.classify_intent.result }}",
-              "knowledge_base_results": "{{ steps.search_knowledge_base.result }}",
-              "customer_history": "{{ inputs.customer_history }}",
-              "additional_instructions": "Be empathetic and professional. If you cannot fully resolve the issue, offer to escalate to a human agent."
-            }
-          },
-          "output_variable": "chatbot_response"
-        },
-
-        {
-          "id": "analyze_response_quality",
-          "name": "Analyze Response Quality",
-          "type": "llm_call",
-          "model": "gpt-3.5-turbo",
-          "messages": [
-            {
-              "role": "system",
-              "content": "Analyze if this customer support response adequately addresses the customer's question. Consider completeness, accuracy, and helpfulness. Respond with JSON: {\"quality_score\": 0.85, \"is_adequate\": true, \"requires_escalation\": false, \"reasoning\": \"explanation\"}"
-            },
-            {
-              "role": "user",
-              "content": "Customer Question: {{ inputs.customer_message }}\\n\\nChatbot Response: {{ steps.generate_chatbot_response.result.response }}\\n\\nKnowledge Base Context: {{ steps.search_knowledge_base.result }}"
-            }
-          ],
-          "output_variable": "response_quality"
-        },
-
-        {
-          "id": "final_response_decision",
-          "name": "Final Response Decision",
-          "type": "condition",
-          "condition": "JSON.parse(steps.analyze_response_quality.result).is_adequate === true",
-          "true_steps": [
-            {
-              "id": "send_chatbot_response",
-              "name": "Send Chatbot Response",
-              "type": "output",
-              "config": {
-                "response_type": "chatbot_response",
-                "message": "{{ steps.generate_chatbot_response.result.response }}",
-                "sources": "{{ steps.generate_chatbot_response.result.sources }}",
-                "confidence": "{{ JSON.parse(steps.classify_intent.result).confidence }}",
-                "quality_score": "{{ JSON.parse(steps.analyze_response_quality.result).quality_score }}"
-              }
-            }
-          ],
-          "false_steps": [
-            {
-              "id": "escalate_to_human",
-              "name": "Escalate to Human Agent",
-              "type": "output",
-              "config": {
-                "response_type": "human_escalation",
-                "message": "I'd like to connect you with one of our human support agents who can better assist with your specific situation. Please hold on while I transfer you.",
-                "escalation_reason": "Response quality below threshold",
-                "intent": "{{ steps.classify_intent.result }}",
-                "attempted_response": "{{ steps.generate_chatbot_response.result.response }}",
-                "priority": "normal"
-              }
-            }
-          ]
-        }
-      ],
-      "false_steps": [
-        {
-          "id": "low_confidence_escalation",
-          "name": "Low Confidence Escalation",
-          "type": "output",
-          "config": {
-            "response_type": "human_escalation",
-            "message": "I want to make sure you get the best possible help. Let me connect you with one of our human support agents.",
-            "escalation_reason": "Low intent classification confidence",
-            "intent": "{{ steps.classify_intent.result }}",
-            "priority": "high"
-          }
-        }
-      ]
-    },
-
-    {
-      "id": "log_interaction",
-      "name": "Log Customer Interaction",
-      "type": "workflow_step",
-      "module": "analytics",
-      "action": "log_event",
-      "config": {
-        "event_type": "customer_support_interaction",
-        "data": {
-          "customer_message": "{{ inputs.customer_message }}",
-          "intent_classification": "{{ steps.classify_intent.result }}",
-          "response_generated": "{{ steps.generate_chatbot_response.result.response }}",
-          "knowledge_base_used": "{{ steps.search_knowledge_base.result }}",
-          "escalated": "{{ outputs.response_type === 'human_escalation' }}",
-          "workflow_execution_time": "{{ execution_time }}",
-          "timestamp": "{{ current_timestamp }}"
-        }
-      }
-    }
-  ],
-
-  "outputs": {
-    "response_type": "string",
-    "message": "string",
-    "sources": "array",
-    "escalation_reason": "string",
-    "confidence": "number",
-    "quality_score": "number"
-  },
-
-  "error_handling": {
-    "retry_failed_steps": true,
-    "max_retries": 2,
-    "fallback_response": "I apologize, but I'm experiencing technical difficulties. Please contact our support team directly for assistance."
-  },
-
-  "metadata": {
-    "created_by": "support_team",
-    "use_case": "customer_support_automation",
-    "tags": ["customer_support", "chatbot", "rag", "escalation"],
-    "estimated_execution_time": "5-15 seconds"
-  }
-}
@@ -1,949 +0,0 @@
|
||||
"""
|
||||
Chatbot Module Implementation
|
||||
|
||||
Provides AI chatbot capabilities with:
|
||||
- RAG integration for knowledge-based responses
|
||||
- Custom prompts and personalities
|
||||
- Conversation memory and context
|
||||
- Workflow integration as building blocks
|
||||
- UI-configurable settings
|
||||
"""
|
||||
|
||||
import json
|
||||
from pprint import pprint
|
||||
import uuid
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, List, Any, Optional, Union
|
||||
from dataclasses import dataclass
|
||||
from pydantic import BaseModel, Field
|
||||
from enum import Enum
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Depends
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.core.logging import get_logger
|
||||
from app.services.llm.service import llm_service
|
||||
from app.services.llm.models import ChatRequest as LLMChatRequest, ChatMessage as LLMChatMessage
|
||||
from app.services.llm.exceptions import LLMError, ProviderError, SecurityError
|
||||
from app.services.base_module import BaseModule, Permission
|
||||
from app.models.user import User
|
||||
from app.models.chatbot import ChatbotInstance as DBChatbotInstance, ChatbotConversation as DBConversation, ChatbotMessage as DBMessage, ChatbotAnalytics
|
||||
from app.core.security import get_current_user
|
||||
from app.db.database import get_db
|
||||
from app.core.config import settings
|
||||
|
||||
# Import protocols for type hints and dependency injection
|
||||
from ..protocols import RAGServiceProtocol
|
||||
# Note: LiteLLMClientProtocol replaced with direct LLM service usage
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
class ChatbotType(str, Enum):
|
||||
"""Types of chatbot personalities"""
|
||||
ASSISTANT = "assistant"
|
||||
CUSTOMER_SUPPORT = "customer_support"
|
||||
TEACHER = "teacher"
|
||||
RESEARCHER = "researcher"
|
||||
CREATIVE_WRITER = "creative_writer"
|
||||
CUSTOM = "custom"
|
||||
|
||||
|
||||
class MessageRole(str, Enum):
|
||||
"""Message roles in conversation"""
|
||||
USER = "user"
|
||||
ASSISTANT = "assistant"
|
||||
SYSTEM = "system"
|
||||
|
||||
|
||||
@dataclass
|
||||
class ChatbotConfig:
|
||||
"""Chatbot configuration"""
|
||||
name: str
|
||||
chatbot_type: str # Changed from ChatbotType enum to str to allow custom types
|
||||
model: str
|
||||
rag_collection: Optional[str] = None
|
||||
system_prompt: str = ""
|
||||
temperature: float = 0.7
|
||||
max_tokens: int = 1000
|
||||
memory_length: int = 10 # Number of previous messages to remember
|
||||
use_rag: bool = False
|
||||
rag_top_k: int = 5
|
||||
rag_score_threshold: float = 0.02 # Lowered from default 0.3 to allow more results
|
||||
fallback_responses: List[str] = None
|
||||
|
||||
def __post_init__(self):
|
||||
if self.fallback_responses is None:
|
||||
self.fallback_responses = [
|
||||
"I'm not sure how to help with that. Could you please rephrase your question?",
|
||||
"I don't have enough information to answer that question accurately.",
|
||||
"That's outside my knowledge area. Is there something else I can help you with?"
|
||||
]
|
||||
|
||||
|
||||
class ChatMessage(BaseModel):
|
||||
"""Individual chat message"""
|
||||
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
|
||||
role: MessageRole
|
||||
content: str
|
||||
timestamp: datetime = Field(default_factory=datetime.utcnow)
|
||||
metadata: Dict[str, Any] = Field(default_factory=dict)
|
||||
sources: Optional[List[Dict[str, Any]]] = None
|
||||
|
||||
|
||||
class Conversation(BaseModel):
|
||||
"""Conversation state"""
|
||||
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
|
||||
chatbot_id: str
|
||||
user_id: str
|
||||
messages: List[ChatMessage] = Field(default_factory=list)
|
||||
created_at: datetime = Field(default_factory=datetime.utcnow)
|
||||
updated_at: datetime = Field(default_factory=datetime.utcnow)
|
||||
metadata: Dict[str, Any] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class ChatRequest(BaseModel):
|
||||
"""Chat completion request"""
|
||||
message: str
|
||||
conversation_id: Optional[str] = None
|
||||
chatbot_id: str
|
||||
use_rag: Optional[bool] = None
|
||||
context: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class ChatResponse(BaseModel):
|
||||
"""Chat completion response"""
|
||||
response: str
|
||||
conversation_id: str
|
||||
message_id: str
|
||||
sources: Optional[List[Dict[str, Any]]] = None
|
||||
metadata: Dict[str, Any] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class ChatbotInstance(BaseModel):
|
||||
"""Configured chatbot instance"""
|
||||
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
|
||||
name: str
|
||||
config: ChatbotConfig
|
||||
created_by: str
|
||||
created_at: datetime = Field(default_factory=datetime.utcnow)
|
||||
updated_at: datetime = Field(default_factory=datetime.utcnow)
|
||||
is_active: bool = True
|
||||
|
||||
|
||||
class ChatbotModule(BaseModule):
|
||||
"""Main chatbot module implementation"""
|
||||
|
||||
def __init__(self, rag_service: Optional[RAGServiceProtocol] = None):
|
||||
super().__init__("chatbot")
|
||||
self.rag_module = rag_service # Keep same name for compatibility
|
||||
self.db_session = None
|
||||
|
||||
# System prompts will be loaded from database
|
||||
self.system_prompts = {}
|
||||
|
||||
async def initialize(self, **kwargs):
|
||||
"""Initialize the chatbot module"""
|
||||
        await super().initialize(**kwargs)

        # Initialize the LLM service
        await llm_service.initialize()

        # Get RAG module dependency if not already injected
        if not self.rag_module:
            try:
                # Try to get RAG module from module manager
                from app.services.module_manager import module_manager
                if hasattr(module_manager, 'modules') and 'rag' in module_manager.modules:
                    self.rag_module = module_manager.modules['rag']
                    logger.info("RAG module injected from module manager")
            except Exception as e:
                logger.warning(f"Could not inject RAG module: {e}")

        # Load prompt templates from database
        await self._load_prompt_templates()

        logger.info("Chatbot module initialized")
        logger.info(f"LLM service available: {llm_service._initialized}")
        logger.info(f"RAG module available after init: {self.rag_module is not None}")
        logger.info(f"Loaded {len(self.system_prompts)} prompt templates")

    async def _ensure_dependencies(self):
        """Lazy load dependencies if not available"""
        # Ensure LLM service is initialized
        if not llm_service._initialized:
            await llm_service.initialize()
            logger.info("LLM service lazy loaded")

        if not self.rag_module:
            try:
                # Try to get RAG module from module manager
                from app.services.module_manager import module_manager
                if hasattr(module_manager, 'modules') and 'rag' in module_manager.modules:
                    self.rag_module = module_manager.modules['rag']
                    logger.info("RAG module lazy loaded from module manager")
            except Exception as e:
                logger.warning(f"Could not lazy load RAG module: {e}")

    async def _load_prompt_templates(self):
        """Load prompt templates from database"""
        try:
            from app.db.database import SessionLocal
            from app.models.prompt_template import PromptTemplate
            from sqlalchemy import select

            db = SessionLocal()
            try:
                result = db.execute(
                    select(PromptTemplate)
                    .where(PromptTemplate.is_active == True)
                )
                templates = result.scalars().all()

                for template in templates:
                    self.system_prompts[template.type_key] = template.system_prompt

                logger.info(f"Loaded {len(self.system_prompts)} prompt templates from database")

            finally:
                db.close()

        except Exception as e:
            logger.warning(f"Could not load prompt templates from database: {e}")
            # Fallback to hardcoded prompts
            self.system_prompts = {
                "assistant": "You are a helpful AI assistant. Provide accurate, concise, and friendly responses. Always aim to be helpful while being honest about your limitations.",
                "customer_support": "You are a professional customer support representative. Be empathetic, professional, and solution-focused in all interactions.",
                "teacher": "You are an experienced educational tutor. Break down complex concepts into understandable parts. Be patient, supportive, and encouraging.",
                "researcher": "You are a thorough research assistant with a focus on accuracy and evidence-based information.",
                "creative_writer": "You are an experienced creative writing mentor and storytelling expert.",
                "custom": "You are a helpful AI assistant. Your personality and behavior will be defined by custom instructions."
            }

    async def get_system_prompt_for_type(self, chatbot_type: str) -> str:
        """Get system prompt for a specific chatbot type"""
        if chatbot_type in self.system_prompts:
            return self.system_prompts[chatbot_type]

        # If not found, try to reload templates
        await self._load_prompt_templates()

        return self.system_prompts.get(chatbot_type, self.system_prompts.get("assistant",
            "You are a helpful AI assistant. Provide accurate, concise, and friendly responses."))

    async def create_chatbot(self, config: ChatbotConfig, user_id: str, db: Session) -> ChatbotInstance:
        """Create a new chatbot instance"""

        # Set system prompt based on type if not provided or empty
        if not config.system_prompt or config.system_prompt.strip() == "":
            config.system_prompt = await self.get_system_prompt_for_type(config.chatbot_type)

        # Create database record
        db_chatbot = DBChatbotInstance(
            name=config.name,
            description=f"{config.chatbot_type.replace('_', ' ').title()} chatbot",
            config=config.__dict__,
            created_by=user_id
        )

        db.add(db_chatbot)
        db.commit()
        db.refresh(db_chatbot)

        # Convert to response model
        chatbot = ChatbotInstance(
            id=db_chatbot.id,
            name=db_chatbot.name,
            config=ChatbotConfig(**db_chatbot.config),
            created_by=db_chatbot.created_by,
            created_at=db_chatbot.created_at,
            updated_at=db_chatbot.updated_at,
            is_active=db_chatbot.is_active
        )

        logger.info(f"Created new chatbot: {chatbot.name} ({chatbot.id})")
        return chatbot
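
    # Usage sketch (hypothetical values; the ChatbotConfig fields are inferred
    # from the compatibility `chat` method further below):
    #
    #   config = ChatbotConfig(
    #       name="Support Bot",
    #       chatbot_type="customer_support",
    #       model="gpt-3.5-turbo",
    #       system_prompt="",        # empty -> filled from the type's template
    #       use_rag=True,
    #       rag_collection="support-docs",
    #   )
    #   chatbot = await chatbot_module.create_chatbot(config, user_id="1", db=session)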

    async def chat_completion(self, request: ChatRequest, user_id: str, db: Session) -> ChatResponse:
        """Generate chat completion response"""

        # Get chatbot configuration from database
        db_chatbot = db.query(DBChatbotInstance).filter(DBChatbotInstance.id == request.chatbot_id).first()
        if not db_chatbot:
            raise HTTPException(status_code=404, detail="Chatbot not found")

        chatbot_config = ChatbotConfig(**db_chatbot.config)

        # Get or create conversation
        conversation = await self._get_or_create_conversation(
            request.conversation_id, request.chatbot_id, user_id, db
        )

        # Create user message
        user_message = DBMessage(
            conversation_id=conversation.id,
            role=MessageRole.USER.value,
            content=request.message
        )
        db.add(user_message)
        db.commit()
        db.refresh(user_message)

        logger.info(f"Created user message with ID {user_message.id} for conversation {conversation.id}")

        try:
            # Force the session to see the committed changes
            db.expire_all()

            # Get conversation history for context - includes the current message we just created.
            # Fetch up to memory_length pairs of messages (user + assistant);
            # the +1 ensures we include the current message if we're at the limit.
            messages = db.query(DBMessage).filter(
                DBMessage.conversation_id == conversation.id
            ).order_by(DBMessage.timestamp.desc()).limit(chatbot_config.memory_length * 2 + 1).all()
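            # Worked example: with memory_length=10 this fetches at most
            # 10 * 2 + 1 = 21 rows — ten user/assistant pairs plus the user
            # message committed just above.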
            logger.info(f"Query for conversation_id={conversation.id}, memory_length={chatbot_config.memory_length}")
            logger.info(f"Found {len(messages)} messages in conversation history")

            # If we don't have any messages, manually add the user message we just created
            if len(messages) == 0:
                logger.warning(f"No messages found in query, but we just created message {user_message.id}")
                logger.warning("Using the user message we just created")
                messages = [user_message]

            for idx, msg in enumerate(messages):
                logger.info(f"Message {idx}: id={msg.id}, role={msg.role}, content_preview={msg.content[:50] if msg.content else 'None'}...")

            # Generate response
            response_content, sources = await self._generate_response(
                request.message, messages, chatbot_config, request.context, db
            )

            # Create assistant message
            assistant_message = DBMessage(
                conversation_id=conversation.id,
                role=MessageRole.ASSISTANT.value,
                content=response_content,
                sources=sources,
                metadata={"model": chatbot_config.model, "temperature": chatbot_config.temperature}
            )
            db.add(assistant_message)
            db.commit()
            db.refresh(assistant_message)

            # Update conversation timestamp
            conversation.updated_at = datetime.utcnow()
            db.commit()

            return ChatResponse(
                response=response_content,
                conversation_id=conversation.id,
                message_id=assistant_message.id,
                sources=sources
            )

        except Exception as e:
            logger.error(f"Chat completion failed: {e}")
            # Return fallback response
            fallback = chatbot_config.fallback_responses[0] if chatbot_config.fallback_responses else "I'm having trouble responding right now."

            assistant_message = DBMessage(
                conversation_id=conversation.id,
                role=MessageRole.ASSISTANT.value,
                content=fallback,
                metadata={"error": str(e), "fallback": True}
            )
            db.add(assistant_message)
            db.commit()
            db.refresh(assistant_message)

            return ChatResponse(
                response=fallback,
                conversation_id=conversation.id,
                message_id=assistant_message.id,
                metadata={"error": str(e), "fallback": True}
            )

    async def _generate_response(self, message: str, db_messages: List[DBMessage],
                                 config: ChatbotConfig, context: Optional[Dict] = None, db: Session = None) -> tuple[str, Optional[List]]:
        """Generate response using LLM with optional RAG"""

        # Lazy load dependencies if not available
        await self._ensure_dependencies()

        sources = None
        rag_context = ""

        # RAG search if enabled
        if config.use_rag and config.rag_collection and self.rag_module:
            logger.info(f"RAG search enabled for collection: {config.rag_collection}")
            try:
                # Get the Qdrant collection name from RAG collection
                qdrant_collection_name = await self._get_qdrant_collection_name(config.rag_collection, db)
                logger.info(f"Qdrant collection name: {qdrant_collection_name}")

                if qdrant_collection_name:
                    logger.info(f"Searching RAG documents: query='{message[:50]}...', max_results={config.rag_top_k}")
                    rag_results = await self.rag_module.search_documents(
                        query=message,
                        max_results=config.rag_top_k,
                        collection_name=qdrant_collection_name,
                        score_threshold=config.rag_score_threshold
                    )

                    if rag_results:
                        logger.info(f"RAG search found {len(rag_results)} results")
                        sources = [{"title": f"Document {i+1}", "content": result.document.content[:200]}
                                   for i, result in enumerate(rag_results)]

                        # Build full RAG context from all results
                        rag_context = "\n\nRelevant information from knowledge base:\n" + "\n\n".join([
                            f"[Document {i+1}]:\n{result.document.content}" for i, result in enumerate(rag_results)
                        ])

                        # Detailed RAG logging - ALWAYS log for debugging
                        logger.info("=== COMPREHENSIVE RAG SEARCH RESULTS ===")
                        logger.info(f"Query: '{message}'")
                        logger.info(f"Collection: {qdrant_collection_name}")
                        logger.info(f"Number of results: {len(rag_results)}")
                        for i, result in enumerate(rag_results):
                            logger.info(f"\n--- RAG Result {i+1} ---")
                            logger.info(f"Score: {getattr(result, 'score', 'N/A')}")
                            logger.info(f"Document ID: {getattr(result.document, 'id', 'N/A')}")
                            logger.info(f"Full Content ({len(result.document.content)} chars):")
                            logger.info(f"{result.document.content}")
                            if hasattr(result.document, 'metadata'):
                                logger.info(f"Metadata: {result.document.metadata}")
                        logger.info(f"\n=== RAG CONTEXT BEING ADDED TO PROMPT ({len(rag_context)} chars) ===")
                        logger.info(rag_context)
                        logger.info("=== END RAG SEARCH RESULTS ===")
                    else:
                        logger.warning("RAG search returned no results")
                else:
                    logger.warning(f"RAG collection '{config.rag_collection}' not found in database")

            except Exception as e:
                logger.warning(f"RAG search failed: {e}")
                import traceback
                logger.warning(f"RAG search traceback: {traceback.format_exc()}")

        # Build conversation context (includes the current message from db_messages)
        messages = self._build_conversation_messages(db_messages, config, rag_context, context)

        # Note: Current user message is already included in db_messages from the query
        logger.info(f"Built conversation context with {len(messages)} messages")

        # LLM completion
        logger.info(f"Attempting LLM completion with model: {config.model}")
        logger.info(f"Messages to send: {len(messages)} messages")

        # Always log detailed prompts for debugging
        logger.info("=== COMPREHENSIVE LLM REQUEST ===")
        logger.info(f"Model: {config.model}")
        logger.info(f"Temperature: {config.temperature}")
        logger.info(f"Max tokens: {config.max_tokens}")
        logger.info(f"RAG enabled: {config.use_rag}")
        logger.info(f"RAG collection: {config.rag_collection}")
        if config.use_rag and rag_context:
            logger.info(f"RAG context added: {len(rag_context)} characters")
            logger.info(f"RAG sources: {len(sources) if sources else 0} documents")
        logger.info("\n=== COMPLETE MESSAGES SENT TO LLM ===")
        for i, msg in enumerate(messages):
            logger.info(f"\n--- Message {i+1} ---")
            logger.info(f"Role: {msg['role']}")
            logger.info(f"Content ({len(msg['content'])} chars):")
            # Truncate long content for logging (full RAG context can be very long)
            if len(msg['content']) > 500:
                logger.info(f"{msg['content'][:500]}... [truncated, total {len(msg['content'])} chars]")
            else:
                logger.info(msg['content'])
        logger.info("=== END COMPREHENSIVE LLM REQUEST ===")

        try:
            logger.info("Calling LLM service create_chat_completion...")

            # Convert messages to LLM service format
            llm_messages = [LLMChatMessage(role=msg["role"], content=msg["content"]) for msg in messages]

            # Create LLM service request
            llm_request = LLMChatRequest(
                model=config.model,
                messages=llm_messages,
                temperature=config.temperature,
                max_tokens=config.max_tokens,
                user_id="chatbot_user",
                api_key_id=0  # Chatbot module uses internal service
            )

            # Make request to LLM service
            llm_response = await llm_service.create_chat_completion(llm_request)

            # Extract response content
            if llm_response.choices:
                content = llm_response.choices[0].message.content
                logger.info(f"Response content length: {len(content)}")

                # Always log response for debugging
                logger.info("=== COMPREHENSIVE LLM RESPONSE ===")
                logger.info(f"Response content ({len(content)} chars):")
                logger.info(content)
                if llm_response.usage:
                    usage = llm_response.usage
                    logger.info(f"Token usage - Prompt: {usage.prompt_tokens}, Completion: {usage.completion_tokens}, Total: {usage.total_tokens}")
                if sources:
                    logger.info(f"RAG sources included: {len(sources)} documents")
                logger.info("=== END COMPREHENSIVE LLM RESPONSE ===")

                return content, sources
            else:
                logger.warning("No choices in LLM response")
                return "I received an empty response from the AI model.", sources

        except SecurityError as e:
            logger.error(f"Security error in LLM completion: {e}")
            raise HTTPException(status_code=400, detail=f"Security validation failed: {e.message}")
        except ProviderError as e:
            logger.error(f"Provider error in LLM completion: {e}")
            raise HTTPException(status_code=503, detail="LLM service temporarily unavailable")
        except LLMError as e:
            logger.error(f"LLM service error: {e}")
            raise HTTPException(status_code=500, detail="LLM service error")
        except Exception as e:
            logger.error(f"LLM completion failed: {e}")
            # Return fallback if available
            return "I'm currently unable to process your request. Please try again later.", None

    def _build_conversation_messages(self, db_messages: List[DBMessage], config: ChatbotConfig,
                                     rag_context: str = "", context: Optional[Dict] = None) -> List[Dict]:
        """Build messages array for LLM completion"""

        messages = []

        # System prompt
        system_prompt = config.system_prompt
        if rag_context:
            # Add explicit instruction to use RAG context
            system_prompt += "\n\nIMPORTANT: Use the following information from the knowledge base to answer the user's question. " \
                             "This information is directly relevant to their query and should be your primary source:\n" + rag_context
        if context and context.get('additional_instructions'):
            system_prompt += f"\n\nAdditional instructions: {context['additional_instructions']}"

        messages.append({"role": "system", "content": system_prompt})

        logger.info(f"Building messages from {len(db_messages)} database messages")

        # Conversation history (messages are already limited by memory_length in the query).
        # Reverse to get chronological order.
        # Include ALL messages - the current user message is needed for the LLM to respond!
        for idx, msg in enumerate(reversed(db_messages)):
            logger.info(f"Processing message {idx}: role={msg.role}, content_preview={msg.content[:50] if msg.content else 'None'}...")
            if msg.role in ["user", "assistant"]:
                messages.append({
                    "role": msg.role,
                    "content": msg.content
                })
                logger.info(f"Added message with role {msg.role} to LLM messages")
            else:
                logger.info(f"Skipped message with role {msg.role}")

        logger.info(f"Final messages array has {len(messages)} messages")  # For debugging, can be removed in production
        return messages
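
    # Illustrative result (hypothetical contents): with one prior exchange in
    # memory, the returned array looks like
    #   [{"role": "system", "content": "<system prompt + optional RAG context>"},
    #    {"role": "user", "content": "What is Enclava?"},
    #    {"role": "assistant", "content": "..."},
    #    {"role": "user", "content": "<current message>"}]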

    async def _get_or_create_conversation(self, conversation_id: Optional[str],
                                          chatbot_id: str, user_id: str, db: Session) -> DBConversation:
        """Get existing conversation or create new one"""

        if conversation_id:
            conversation = db.query(DBConversation).filter(DBConversation.id == conversation_id).first()
            if conversation:
                return conversation

        # Create new conversation
        conversation = DBConversation(
            chatbot_id=chatbot_id,
            user_id=user_id,
            title="New Conversation"
        )

        db.add(conversation)
        db.commit()
        db.refresh(conversation)
        return conversation

    def get_router(self) -> APIRouter:
        """Get FastAPI router for chatbot endpoints"""
        router = APIRouter(prefix="/chatbot", tags=["chatbot"])

        @router.post("/chat", response_model=ChatResponse)
        async def chat_endpoint(
            request: ChatRequest,
            current_user: User = Depends(get_current_user),
            db: Session = Depends(get_db)
        ):
            """Chat completion endpoint"""
            return await self.chat_completion(request, str(current_user['id']), db)

        @router.post("/create", response_model=ChatbotInstance)
        async def create_chatbot_endpoint(
            config: ChatbotConfig,
            current_user: User = Depends(get_current_user),
            db: Session = Depends(get_db)
        ):
            """Create new chatbot instance"""
            return await self.create_chatbot(config, str(current_user['id']), db)

        @router.get("/list", response_model=List[ChatbotInstance])
        async def list_chatbots_endpoint(
            current_user: User = Depends(get_current_user),
            db: Session = Depends(get_db)
        ):
            """List user's chatbots"""
            db_chatbots = db.query(DBChatbotInstance).filter(
                (DBChatbotInstance.created_by == str(current_user['id'])) |
                (DBChatbotInstance.created_by == "system")
            ).all()

            chatbots = []
            for db_chatbot in db_chatbots:
                chatbot = ChatbotInstance(
                    id=db_chatbot.id,
                    name=db_chatbot.name,
                    config=ChatbotConfig(**db_chatbot.config),
                    created_by=db_chatbot.created_by,
                    created_at=db_chatbot.created_at,
                    updated_at=db_chatbot.updated_at,
                    is_active=db_chatbot.is_active
                )
                chatbots.append(chatbot)

            return chatbots

        @router.get("/conversations/{conversation_id}", response_model=Conversation)
        async def get_conversation_endpoint(
            conversation_id: str,
            current_user: User = Depends(get_current_user),
            db: Session = Depends(get_db)
        ):
            """Get conversation history"""
            conversation = db.query(DBConversation).filter(
                DBConversation.id == conversation_id
            ).first()

            if not conversation:
                raise HTTPException(status_code=404, detail="Conversation not found")

            # Check if user owns this conversation
            if conversation.user_id != str(current_user['id']):
                raise HTTPException(status_code=403, detail="Not authorized")

            # Get messages
            messages = db.query(DBMessage).filter(
                DBMessage.conversation_id == conversation_id
            ).order_by(DBMessage.timestamp).all()

            # Convert to response model
            chat_messages = []
            for msg in messages:
                chat_message = ChatMessage(
                    id=msg.id,
                    role=MessageRole(msg.role),
                    content=msg.content,
                    timestamp=msg.timestamp,
                    metadata=msg.metadata or {},
                    sources=msg.sources
                )
                chat_messages.append(chat_message)

            response_conversation = Conversation(
                id=conversation.id,
                chatbot_id=conversation.chatbot_id,
                user_id=conversation.user_id,
                messages=chat_messages,
                created_at=conversation.created_at,
                updated_at=conversation.updated_at,
                metadata=conversation.context_data or {}
            )

            return response_conversation

        @router.get("/types", response_model=List[Dict[str, str]])
        async def get_chatbot_types_endpoint():
            """Get available chatbot types and their descriptions"""
            return [
                {"type": "assistant", "name": "General Assistant", "description": "Helpful AI assistant for general questions"},
                {"type": "customer_support", "name": "Customer Support", "description": "Professional customer service chatbot"},
                {"type": "teacher", "name": "Teacher", "description": "Educational tutor and learning assistant"},
                {"type": "researcher", "name": "Researcher", "description": "Research assistant with fact-checking focus"},
                {"type": "creative_writer", "name": "Creative Writer", "description": "Creative writing and storytelling assistant"},
                {"type": "custom", "name": "Custom", "description": "Custom chatbot with user-defined personality"}
            ]

        return router

    # API Compatibility Methods
    async def chat(self, chatbot_config: Dict[str, Any], message: str,
                   conversation_history: List = None, user_id: str = "anonymous") -> Dict[str, Any]:
        """Chat method for API compatibility"""
        logger.info(f"Chat method called with message: {message[:50]}... by user: {user_id}")

        # Lazy load dependencies
        await self._ensure_dependencies()

        logger.info(f"LLM service available: {llm_service._initialized}")
        logger.info(f"RAG module available: {self.rag_module is not None}")

        try:
            # Create a minimal database session for the chat
            from app.db.database import SessionLocal
            db = SessionLocal()

            try:
                # Convert config dict to ChatbotConfig
                config = ChatbotConfig(
                    name=chatbot_config.get("name", "Unknown"),
                    chatbot_type=chatbot_config.get("chatbot_type", "assistant"),
                    model=chatbot_config.get("model", "gpt-3.5-turbo"),
                    system_prompt=chatbot_config.get("system_prompt", ""),
                    temperature=chatbot_config.get("temperature", 0.7),
                    max_tokens=chatbot_config.get("max_tokens", 1000),
                    memory_length=chatbot_config.get("memory_length", 10),
                    use_rag=chatbot_config.get("use_rag", False),
                    rag_collection=chatbot_config.get("rag_collection"),
                    rag_top_k=chatbot_config.get("rag_top_k", 5),
                    fallback_responses=chatbot_config.get("fallback_responses", [])
                )

                # Generate response using internal method.
                # Create a temporary message object for the current user message.
                temp_messages = [
                    DBMessage(
                        id=0,
                        conversation_id=0,
                        role="user",
                        content=message,
                        timestamp=datetime.utcnow(),
                        metadata={}
                    )
                ]

                response_content, sources = await self._generate_response(
                    message, temp_messages, config, None, db
                )

                return {
                    "response": response_content,
                    "sources": sources,
                    "conversation_id": None,
                    "message_id": f"msg_{uuid.uuid4()}"
                }

            finally:
                db.close()

        except Exception as e:
            logger.error(f"Chat method failed: {e}")
            fallback_responses = chatbot_config.get("fallback_responses", [
                "I'm sorry, I'm having trouble processing your request right now."
            ])
            return {
                "response": fallback_responses[0] if fallback_responses else "I'm sorry, I couldn't process your request.",
                "sources": None,
                "conversation_id": None,
                "message_id": f"msg_{uuid.uuid4()}"
            }
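
    # Usage sketch (hypothetical config values): the compatibility API takes a
    # plain dict rather than a ChatbotConfig instance, and does not persist a
    # conversation.
    #
    #   result = await chatbot_module.chat(
    #       chatbot_config={"name": "Helper", "model": "gpt-3.5-turbo"},
    #       message="Summarize our refund policy.",
    #       user_id="42",
    #   )
    #   print(result["response"], result["sources"])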

    # Workflow Integration Methods
    async def workflow_chat_step(self, context: Dict[str, Any], step_config: Dict[str, Any], db: Session) -> Dict[str, Any]:
        """Execute chatbot as a workflow step"""

        message = step_config.get('message', '')
        chatbot_id = step_config.get('chatbot_id')
        use_rag = step_config.get('use_rag', False)

        # Template substitution from context
        message = self._substitute_template_variables(message, context)

        request = ChatRequest(
            message=message,
            chatbot_id=chatbot_id,
            use_rag=use_rag,
            context=step_config.get('context', {})
        )

        # Use system user for workflow executions
        response = await self.chat_completion(request, "workflow_system", db)

        return {
            "response": response.response,
            "conversation_id": response.conversation_id,
            "sources": response.sources,
            "metadata": response.metadata
        }
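
    # Example step_config (hypothetical IDs): template variables in `message`
    # are resolved against the workflow context before the chat call.
    #
    #   step_config = {
    #       "chatbot_id": "bot-123",
    #       "message": "Draft a reply to {{ ticket.subject }}",
    #       "use_rag": True,
    #   }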

    def _substitute_template_variables(self, template: str, context: Dict[str, Any]) -> str:
        """Simple template variable substitution"""
        import re

        def replace_var(match):
            var_path = match.group(1)
            try:
                # Simple dot notation support: context.user.name
                value = context
                for part in var_path.split('.'):
                    value = value[part]
                return str(value)
            except (KeyError, TypeError):
                return match.group(0)  # Return original if not found

        # Non-greedy capture so whitespace inside {{ ... }} is trimmed by the
        # surrounding \s* instead of being swallowed into the variable path
        return re.sub(r'\{\{\s*([^}]+?)\s*\}\}', replace_var, template)
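
    # Example (hypothetical context):
    #   _substitute_template_variables(
    #       "Hello {{ user.name }}", {"user": {"name": "Ada"}}
    #   )  # -> "Hello Ada"
    # Unresolvable variables such as "{{ missing.key }}" are left untouched.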

    async def _get_qdrant_collection_name(self, collection_identifier: str, db: Session) -> Optional[str]:
        """Get Qdrant collection name from RAG collection ID, name, or direct Qdrant collection"""
        try:
            from app.models.rag_collection import RagCollection
            from sqlalchemy import select

            logger.info(f"Looking up RAG collection with identifier: '{collection_identifier}'")

            # Qdrant is the source of truth for collections, so always check
            # whether the identifier names a Qdrant collection directly first
            actual_collection_name = collection_identifier
            # Remove "ext_" prefix if present
            if collection_identifier.startswith("ext_"):
                actual_collection_name = collection_identifier[4:]

            logger.info(f"Checking if '{actual_collection_name}' exists in Qdrant directly")
            if self.rag_module:
                try:
                    # Try to verify the collection exists in Qdrant
                    from qdrant_client import QdrantClient
                    qdrant_client = QdrantClient(host="enclava-qdrant", port=6333)
                    collections = qdrant_client.get_collections()
                    collection_names = [c.name for c in collections.collections]

                    if actual_collection_name in collection_names:
                        logger.info(f"Found Qdrant collection directly: {actual_collection_name}")

                        # Auto-register the collection in the database if not found
                        await self._auto_register_collection(actual_collection_name, db)

                        return actual_collection_name
                except Exception as e:
                    logger.warning(f"Error checking Qdrant collections: {e}")

            rag_collection = None

            # Then try PostgreSQL lookup by ID if numeric
            if collection_identifier.isdigit():
                logger.info(f"Treating '{collection_identifier}' as collection ID")
                stmt = select(RagCollection).where(
                    RagCollection.id == int(collection_identifier),
                    RagCollection.is_active == True
                )
                result = db.execute(stmt)
                rag_collection = result.scalar_one_or_none()

            # If not found by ID, try to look up by name in PostgreSQL
            if not rag_collection:
                logger.info(f"Collection not found by ID, trying by name: '{collection_identifier}'")
                stmt = select(RagCollection).where(
                    RagCollection.name == collection_identifier,
                    RagCollection.is_active == True
                )
                result = db.execute(stmt)
                rag_collection = result.scalar_one_or_none()

            if rag_collection:
                logger.info(f"Found RAG collection: ID={rag_collection.id}, name='{rag_collection.name}', qdrant_collection='{rag_collection.qdrant_collection_name}'")
                return rag_collection.qdrant_collection_name
            else:
                logger.warning(f"RAG collection '{collection_identifier}' not found in database (tried both ID and name)")
                return None

        except Exception as e:
            logger.error(f"Error looking up RAG collection '{collection_identifier}': {e}")
            import traceback
            logger.error(f"Traceback: {traceback.format_exc()}")
            return None
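
    # Resolution order, illustrated with hypothetical identifiers:
    #   "ext_support-docs" -> strips "ext_", found in Qdrant -> "support-docs"
    #   "17"               -> looked up as RagCollection.id == 17
    #   "Support Docs"     -> looked up as RagCollection.name == "Support Docs"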

    async def _auto_register_collection(self, collection_name: str, db: Session) -> None:
        """Automatically register a Qdrant collection in the database"""
        try:
            from app.models.rag_collection import RagCollection
            from sqlalchemy import select

            # Check if already registered
            stmt = select(RagCollection).where(
                RagCollection.qdrant_collection_name == collection_name
            )
            result = db.execute(stmt)
            existing = result.scalar_one_or_none()

            if existing:
                logger.info(f"Collection '{collection_name}' already registered in database")
                return

            # Create a readable name from collection name
            display_name = collection_name.replace("-", " ").replace("_", " ").title()

            # Auto-register the collection
            new_collection = RagCollection(
                name=display_name,
                qdrant_collection_name=collection_name,
                description=f"Auto-discovered collection from Qdrant: {collection_name}",
                is_active=True
            )

            db.add(new_collection)
            db.commit()

            logger.info(f"Auto-registered Qdrant collection '{collection_name}' in database")

        except Exception as e:
            logger.error(f"Failed to auto-register collection '{collection_name}': {e}")
            # Don't re-raise - this should not block collection usage

    # Required abstract methods from BaseModule

    async def cleanup(self):
        """Cleanup chatbot module resources"""
        logger.info("Chatbot module cleanup completed")

    def get_required_permissions(self) -> List[Permission]:
        """Get required permissions for chatbot module"""
        return [
            Permission("chatbots", "create", "Create chatbot instances"),
            Permission("chatbots", "configure", "Configure chatbot settings"),
            Permission("chatbots", "chat", "Use chatbot for conversations"),
            Permission("chatbots", "manage", "Manage all chatbots")
        ]

    async def process_request(self, request_type: str, data: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """Process chatbot requests"""
        if request_type == "chat":
            # Handle chat requests
            chat_request = ChatRequest(**data)
            user_id = context.get("user_id", "anonymous")
            db = context.get("db")

            if db:
                response = await self.chat_completion(chat_request, user_id, db)
                return {
                    "success": True,
                    "response": response.response,
                    "conversation_id": response.conversation_id,
                    "sources": response.sources
                }
            # Without a session the chat cannot be completed; report that rather
            # than falling through to the "unknown request type" error
            return {"success": False, "error": "Database session required for chat requests"}

        return {"success": False, "error": f"Unknown request type: {request_type}"}


# Module factory function
def create_module(rag_service: Optional[RAGServiceProtocol] = None) -> ChatbotModule:
    """Factory function to create chatbot module instance"""
    return ChatbotModule(rag_service=rag_service)

# Create module instance (dependencies will be injected via factory)
chatbot_module = ChatbotModule()
@@ -1,110 +0,0 @@
name: chatbot
version: 1.0.0
description: "AI Chatbot with RAG integration and customizable prompts"
author: "Enclava Team"
category: "conversation"

# Module lifecycle
enabled: true
auto_start: true
dependencies:
  - rag
optional_dependencies:
  - analytics

# Configuration
config_schema: "./config_schema.json"
ui_components: "./ui_components/"

# Module capabilities
provides:
  - "chat_completion"
  - "conversation_management"
  - "chatbot_configuration"

consumes:
  - "rag_search"
  - "llm_completion"

# API endpoints
endpoints:
  - path: "/chatbot/chat"
    method: "POST"
    description: "Generate chat completion"

  - path: "/chatbot/create"
    method: "POST"
    description: "Create new chatbot instance"

  - path: "/chatbot/list"
    method: "GET"
    description: "List user chatbots"

# UI Configuration
ui_config:
  icon: "message-circle"
  color: "#10B981"
  category: "AI & ML"

# Configuration forms
forms:
  - name: "basic_config"
    title: "Basic Settings"
    fields: ["name", "chatbot_type", "model"]

  - name: "personality"
    title: "Personality & Prompts"
    fields: ["system_prompt", "temperature", "fallback_responses"]

  - name: "knowledge_base"
    title: "Knowledge Base"
    fields: ["use_rag", "rag_collection", "rag_top_k"]

  - name: "advanced"
    title: "Advanced Settings"
    fields: ["max_tokens", "memory_length"]

# Permissions
permissions:
  - name: "chatbot.create"
    description: "Create new chatbot instances"

  - name: "chatbot.configure"
    description: "Configure chatbot settings"

  - name: "chatbot.chat"
    description: "Use chatbot for conversations"

  - name: "chatbot.manage"
    description: "Manage all chatbots (admin)"

# Analytics events
analytics_events:
  - name: "chatbot_created"
    description: "New chatbot instance created"

  - name: "chat_message_sent"
    description: "User sent message to chatbot"

  - name: "chat_response_generated"
    description: "Chatbot generated response"

  - name: "rag_context_used"
    description: "RAG context was used in response"

# Health checks
health_checks:
  - name: "llm_connectivity"
    description: "Check LLM client connection"

  - name: "rag_availability"
    description: "Check RAG module availability"

  - name: "conversation_memory"
    description: "Check conversation storage health"

# Documentation
documentation:
  readme: "./README.md"
  examples: "./examples/"
  api_docs: "./docs/api.md"
@@ -1,225 +0,0 @@
"""
Module Factory for Confidential Empire

This factory creates and wires up all modules with their dependencies.
It ensures proper dependency injection while maintaining optimal performance
through direct method calls and minimal indirection.
"""

from typing import Dict, Optional, Any
import logging

# Import all modules
from .rag.main import RAGModule
from .chatbot.main import ChatbotModule, create_module as create_chatbot_module
from .workflow.main import WorkflowModule

# Import services that modules depend on
from app.services.litellm_client import LiteLLMClient

# Import protocols for type safety
from .protocols import (
    RAGServiceProtocol,
    ChatbotServiceProtocol,
    LiteLLMClientProtocol,
    WorkflowServiceProtocol,
    ServiceRegistry
)

logger = logging.getLogger(__name__)


class ModuleFactory:
    """Factory for creating and wiring module dependencies"""

    def __init__(self):
        self.modules: Dict[str, Any] = {}
        self.initialized = False

    async def create_all_modules(self, config: Optional[Dict[str, Any]] = None) -> ServiceRegistry:
        """
        Create all modules with proper dependency injection

        Args:
            config: Optional configuration for modules

        Returns:
            Dictionary of created modules with their dependencies wired
        """
        config = config or {}

        logger.info("Creating modules with dependency injection...")

        # Step 1: Create LiteLLM client (shared dependency)
        litellm_client = LiteLLMClient()

        # Step 2: Create RAG module (no dependencies on other modules)
        rag_module = RAGModule(config=config.get("rag", {}))

        # Step 3: Create chatbot module with RAG dependency
        chatbot_module = create_chatbot_module(
            litellm_client=litellm_client,
            rag_service=rag_module  # RAG module implements RAGServiceProtocol
        )

        # Step 4: Create workflow module with chatbot dependency
        workflow_module = WorkflowModule(
            chatbot_service=chatbot_module  # Chatbot module implements ChatbotServiceProtocol
        )

        # Store all modules
        modules = {
            "rag": rag_module,
            "chatbot": chatbot_module,
            "workflow": workflow_module
        }

        logger.info(f"Created {len(modules)} modules with dependencies wired")

        # Initialize all modules
        await self._initialize_modules(modules, config)

        self.modules = modules
        self.initialized = True

        return modules

    async def _initialize_modules(self, modules: Dict[str, Any], config: Dict[str, Any]):
        """Initialize all modules in dependency order"""

        # Initialize in dependency order (modules with no deps first)
        initialization_order = [
            ("rag", modules["rag"]),
            ("chatbot", modules["chatbot"]),   # Depends on RAG
            ("workflow", modules["workflow"])  # Depends on Chatbot
        ]

        for module_name, module in initialization_order:
            try:
                logger.info(f"Initializing {module_name} module...")
                module_config = config.get(module_name, {})

                # Different modules have different initialization patterns
                if hasattr(module, 'initialize'):
                    if module_name == "rag":
                        await module.initialize()
                    else:
                        await module.initialize(**module_config)

                logger.info(f"✅ {module_name} module initialized successfully")

            except Exception as e:
                logger.error(f"❌ Failed to initialize {module_name} module: {e}")
                raise RuntimeError(f"Module initialization failed: {module_name}") from e

    async def cleanup_all_modules(self):
        """Cleanup all modules in reverse dependency order"""
        if not self.initialized:
            return

        # Cleanup in reverse order
        cleanup_order = ["workflow", "chatbot", "rag"]

        for module_name in cleanup_order:
            if module_name in self.modules:
                try:
                    logger.info(f"Cleaning up {module_name} module...")
                    module = self.modules[module_name]
                    if hasattr(module, 'cleanup'):
                        await module.cleanup()
                    logger.info(f"✅ {module_name} module cleaned up")
                except Exception as e:
                    logger.error(f"❌ Error cleaning up {module_name}: {e}")

        self.modules.clear()
        self.initialized = False

    def get_module(self, name: str) -> Optional[Any]:
        """Get a module by name"""
        return self.modules.get(name)

    def is_initialized(self) -> bool:
        """Check if factory is initialized"""
        return self.initialized


# Global factory instance
module_factory = ModuleFactory()


# Convenience functions for external use
async def create_modules(config: Optional[Dict[str, Any]] = None) -> ServiceRegistry:
    """Create all modules with dependencies wired"""
    return await module_factory.create_all_modules(config)


async def cleanup_modules():
    """Cleanup all modules"""
    await module_factory.cleanup_all_modules()


def get_module(name: str) -> Optional[Any]:
    """Get a module by name"""
    return module_factory.get_module(name)


def get_all_modules() -> Dict[str, Any]:
    """Get all modules"""
    return module_factory.modules.copy()


# Factory functions for individual modules (for testing/special cases)
def create_rag_module(config: Optional[Dict[str, Any]] = None) -> RAGModule:
    """Create RAG module"""
    return RAGModule(config=config or {})


def create_chatbot_with_rag(rag_service: RAGServiceProtocol,
                            litellm_client: LiteLLMClientProtocol) -> ChatbotModule:
    """Create chatbot module with RAG dependency"""
    return create_chatbot_module(litellm_client=litellm_client, rag_service=rag_service)


def create_workflow_with_chatbot(chatbot_service: ChatbotServiceProtocol) -> WorkflowModule:
    """Create workflow module with chatbot dependency"""
    return WorkflowModule(chatbot_service=chatbot_service)


# Module registry for backward compatibility
class ModuleRegistry:
    """Registry that provides access to modules (for backward compatibility)"""

    def __init__(self, factory: ModuleFactory):
        self._factory = factory

    @property
    def modules(self) -> Dict[str, Any]:
        """Get all modules (compatible with existing module_manager interface)"""
        return self._factory.modules

    def get(self, name: str) -> Optional[Any]:
        """Get module by name"""
        return self._factory.get_module(name)

    def __getitem__(self, name: str) -> Any:
        """Support dictionary-style access"""
        module = self.get(name)
        if module is None:
            raise KeyError(f"Module '{name}' not found")
        return module

    def keys(self):
        """Get module names"""
        return self._factory.modules.keys()

    def values(self):
        """Get module instances"""
        return self._factory.modules.values()

    def items(self):
        """Get module name-instance pairs"""
        return self._factory.modules.items()


# Create registry instance for backward compatibility
module_registry = ModuleRegistry(module_factory)
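
# Usage sketch (hypothetical config keys — the per-module config shape is not
# defined in this file):
#
#   modules = await create_modules({"rag": {"collection_prefix": "demo"}})
#   chatbot = get_module("chatbot")
#   ...
#   await cleanup_modules()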
@@ -1,258 +0,0 @@
"""
Module Protocols for Confidential Empire

This file defines the interface contracts that modules must implement for inter-module communication.
Using Python protocols provides compile-time type checking with zero runtime overhead.
"""

from typing import Protocol, Dict, List, Any, Optional, Union
from datetime import datetime
from abc import abstractmethod


class RAGServiceProtocol(Protocol):
    """Protocol for RAG (Retrieval-Augmented Generation) service interface"""

    @abstractmethod
    async def search(self, query: str, collection_name: str, top_k: int) -> Dict[str, Any]:
        """
        Search for relevant documents

        Args:
            query: Search query string
            collection_name: Name of the collection to search in
            top_k: Number of top results to return

        Returns:
            Dictionary containing search results with 'results' key
        """
        ...

    @abstractmethod
    async def index_document(self, content: str, metadata: Dict[str, Any] = None) -> str:
        """
        Index a document in the vector database

        Args:
            content: Document content to index
            metadata: Optional metadata for the document

        Returns:
            Document ID
        """
        ...

    @abstractmethod
    async def delete_document(self, document_id: str) -> bool:
        """
        Delete a document from the vector database

        Args:
            document_id: ID of document to delete

        Returns:
            True if successfully deleted
        """
        ...


class ChatbotServiceProtocol(Protocol):
    """Protocol for Chatbot service interface"""

    @abstractmethod
    async def chat_completion(self, request: Any, user_id: str, db: Any) -> Any:
        """
        Generate chat completion response

        Args:
            request: Chat request object
            user_id: ID of the user making the request
            db: Database session

        Returns:
            Chat response object
        """
        ...

    @abstractmethod
    async def create_chatbot(self, config: Any, user_id: str, db: Any) -> Any:
        """
        Create a new chatbot instance

        Args:
            config: Chatbot configuration
            user_id: ID of the user creating the chatbot
            db: Database session

        Returns:
            Created chatbot instance
        """
        ...


class LiteLLMClientProtocol(Protocol):
    """Protocol for LiteLLM client interface"""

    @abstractmethod
    async def completion(self, model: str, messages: List[Dict[str, str]], **kwargs) -> Any:
        """
        Create a completion using the specified model

        Args:
            model: Model name to use
            messages: List of messages for the conversation
            **kwargs: Additional parameters for the completion

        Returns:
            Completion response object
        """
        ...

    @abstractmethod
    async def create_chat_completion(self, model: str, messages: List[Dict[str, str]],
                                     user_id: str, api_key_id: str, **kwargs) -> Any:
        """
        Create a chat completion with user tracking

        Args:
            model: Model name to use
            messages: List of messages for the conversation
            user_id: ID of the user making the request
            api_key_id: API key identifier
            **kwargs: Additional parameters

        Returns:
            Chat completion response
        """
        ...


class CacheServiceProtocol(Protocol):
    """Protocol for Cache service interface"""

    @abstractmethod
    async def get(self, key: str, default: Any = None) -> Any:
        """
        Get value from cache

        Args:
            key: Cache key
            default: Default value if key not found

        Returns:
            Cached value or default
        """
        ...

    @abstractmethod
    async def set(self, key: str, value: Any, ttl: Optional[int] = None) -> bool:
        """
        Set value in cache

        Args:
            key: Cache key
            value: Value to cache
            ttl: Time to live in seconds

        Returns:
            True if successfully cached
        """
        ...

    @abstractmethod
    async def delete(self, key: str) -> bool:
        """
        Delete key from cache

        Args:
            key: Cache key to delete

        Returns:
            True if successfully deleted
        """
        ...


class SecurityServiceProtocol(Protocol):
    """Protocol for Security service interface"""

    @abstractmethod
    async def analyze_request(self, request: Any) -> Any:
        """
        Perform security analysis on a request

        Args:
            request: Request object to analyze

        Returns:
            Security analysis result
        """
        ...

    @abstractmethod
    async def validate_request(self, request: Any) -> bool:
        """
        Validate request for security compliance

        Args:
            request: Request object to validate

        Returns:
            True if request is valid/safe
        """
        ...


class WorkflowServiceProtocol(Protocol):
    """Protocol for Workflow service interface"""

    @abstractmethod
    async def execute_workflow(self, workflow: Any, input_data: Dict[str, Any] = None) -> Any:
        """
        Execute a workflow definition

        Args:
            workflow: Workflow definition to execute
            input_data: Optional input data for the workflow

        Returns:
            Workflow execution result
        """
        ...

    @abstractmethod
    async def get_execution(self, execution_id: str) -> Any:
        """
        Get workflow execution status

        Args:
            execution_id: ID of the execution to retrieve

        Returns:
            Execution status object
        """
        ...


class ModuleServiceProtocol(Protocol):
    """Base protocol for all module services"""

    @abstractmethod
    async def initialize(self, **kwargs) -> None:
        """Initialize the module"""
        ...

    @abstractmethod
    async def cleanup(self) -> None:
        """Cleanup module resources"""
        ...

    @abstractmethod
    def get_required_permissions(self) -> List[Any]:
        """Get required permissions for this module"""
        ...


# Type aliases for common service combinations
ServiceRegistry = Dict[str, ModuleServiceProtocol]
ServiceDependencies = Dict[str, Optional[ModuleServiceProtocol]]
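
# Structural typing sketch (hypothetical class, not part of the codebase): any
# object whose method signatures match satisfies a Protocol — no inheritance or
# registration needed, which is the "zero runtime overhead" noted above.
#
#   class InMemoryRAG:
#       async def search(self, query: str, collection_name: str, top_k: int) -> Dict[str, Any]:
#           return {"results": []}
#       async def index_document(self, content: str, metadata: Dict[str, Any] = None) -> str:
#           return "doc-1"
#       async def delete_document(self, document_id: str) -> bool:
#           return True
#
#   service: RAGServiceProtocol = InMemoryRAG()  # type-checks under mypy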
@@ -1,6 +0,0 @@
"""
RAG (Retrieval-Augmented Generation) module for Confidential Empire platform
"""
from .main import RAGModule

__all__ = ["RAGModule"]
File diff suppressed because it is too large
@@ -1,82 +0,0 @@
name: rag
version: 1.0.0
description: "Document search, retrieval, and vector storage"
author: "Enclava Team"
category: "ai"

# Module lifecycle
enabled: true
auto_start: true
dependencies: []
optional_dependencies:
  - cache

# Module capabilities
provides:
  - "document_storage"
  - "semantic_search"
  - "vector_embeddings"
  - "document_processing"

consumes:
  - "qdrant_connection"
  - "llm_embeddings"
  - "document_parsing"

# API endpoints
endpoints:
  - path: "/rag/collections"
    method: "GET"
    description: "List document collections"

  - path: "/rag/upload"
    method: "POST"
    description: "Upload and process documents"

  - path: "/rag/search"
    method: "POST"
    description: "Semantic search in documents"

  - path: "/rag/collections/{collection_id}/documents"
    method: "GET"
    description: "List documents in collection"

# UI Configuration
ui_config:
  icon: "search"
  color: "#8B5CF6"
  category: "AI & ML"

forms:
  - name: "collection_config"
    title: "Collection Settings"
    fields: ["name", "description", "embedding_model"]

  - name: "search_config"
    title: "Search Configuration"
    fields: ["top_k", "similarity_threshold", "rerank_enabled"]

# Permissions
permissions:
  - name: "rag.create"
    description: "Create document collections"

  - name: "rag.upload"
    description: "Upload documents to collections"

  - name: "rag.search"
    description: "Search document collections"

  - name: "rag.manage"
    description: "Manage all collections (admin)"

# Health checks
health_checks:
  - name: "qdrant_connectivity"
    description: "Check Qdrant vector database connection"

  - name: "embeddings_service"
    description: "Check LLM embeddings service"

  - name: "document_processing"
    description: "Check document parsing capabilities"
@@ -17,19 +17,17 @@ import os
import sys
from pathlib import Path

# Add both backend and modules directories to path
# Add backend directory to Python path for app package imports
backend_path = Path(__file__).parent.parent
sys.path.insert(0, str(backend_path))
sys.path.insert(0, str(backend_path / "modules"))

try:
    from modules.rag.main import RAGModule
    from modules.chatbot.main import ChatbotModule
    from app.modules.rag.main import RAGModule
    from app.modules.chatbot.main import ChatbotModule

    from app.services.module_manager import ModuleManager, ModuleConfig
except ImportError as e:
    print(f"Import error: {e}")
    print("Available modules path:", backend_path / "modules")
    # Create mock modules for testing if imports fail
    class MockModule:
        def __init__(self):
@@ -346,4 +344,4 @@ if __name__ == "__main__":
    print("\nAll basic tests completed successfully! 🎉")

    # Run the tests
    asyncio.run(run_basic_tests())
    asyncio.run(run_basic_tests())
@@ -6,6 +6,7 @@
import React, { createContext, useContext, useState, useEffect, useCallback, ReactNode } from 'react';
import { useAuth } from '@/components/providers/auth-provider';
import { apiClient } from '@/lib/api-client';
import { useToast } from "@/hooks/use-toast";

export interface PluginInfo {
  id: string;
@@ -122,6 +123,7 @@ interface PluginProviderProps {

export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
  const { user, isAuthenticated } = useAuth();
  const { toast } = useToast();
  const [installedPlugins, setInstalledPlugins] = useState<PluginInfo[]>([]);
  const [availablePlugins, setAvailablePlugins] = useState<AvailablePlugin[]>([]);
  const [pluginConfigurations, setPluginConfigurations] = useState<Record<string, PluginConfiguration>>({});
@@ -130,6 +132,24 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {

  // Plugin component registry
  const [pluginComponents, setPluginComponents] = useState<Record<string, Record<string, React.ComponentType>>>({});

  const userPermissions = user?.permissions ?? [];

  const hasPermission = useCallback((required: string) => {
    if (!required) {
      return true;
    }
    return userPermissions.some(granted => {
      if (granted === "*" || granted === required) {
        return true;
      }
      if (granted.endsWith(":*")) {
        const prefix = granted.slice(0, -1);
        return required.startsWith(prefix);
      }
      return false;
    });
  }, [userPermissions]);
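
  // Matching examples (hypothetical permission strings):
  //   granted "*"            -> matches any required permission
  //   granted "plugins:*"    -> prefix "plugins:" matches "plugins:install"
  //   granted "plugins:read" -> matches only that exact string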
|
||||
|
||||
const apiRequest = async (endpoint: string, options: RequestInit = {}) => {
|
||||
if (!isAuthenticated) {
|
||||
@@ -175,16 +195,23 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
|
||||
[plugin.id]: config
|
||||
}));
|
||||
}
|
||||
} catch (e) {
|
||||
} catch (configError) {
|
||||
console.warn(`Failed to load configuration for plugin ${plugin.id}`, configError);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : 'Failed to load installed plugins');
|
||||
const message = err instanceof Error ? err.message : 'Failed to load installed plugins';
|
||||
setError(message);
|
||||
toast({
|
||||
title: "Plugin load failed",
|
||||
description: message,
|
||||
variant: "destructive",
|
||||
});
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
}, [user, isAuthenticated]);
|
||||
}, [user, isAuthenticated, toast]);
|
||||
|
||||
const searchAvailablePlugins = useCallback(async (query = '', tags: string[] = [], category = '') => {
|
||||
if (!user || !isAuthenticated) {
|
||||
@@ -205,11 +232,17 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
|
||||
setAvailablePlugins(data.plugins);
|
||||
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : 'Failed to search plugins');
|
||||
const message = err instanceof Error ? err.message : 'Failed to search plugins';
|
||||
setError(message);
|
||||
toast({
|
||||
title: "Plugin search failed",
|
||||
description: message,
|
||||
variant: "destructive",
|
||||
});
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
}, [user, isAuthenticated]);
|
||||
}, [user, isAuthenticated, toast]);
|
||||
|
||||
const installPlugin = useCallback(async (pluginId: string, version: string): Promise<boolean> => {
|
||||
try {
|
||||
@@ -231,12 +264,18 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
|
||||
|
||||
return true;
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err.message : 'Installation failed');
|
||||
const message = err instanceof Error ? err.message : 'Installation failed';
|
||||
setError(message);
|
||||
toast({
|
||||
title: "Plugin installation failed",
|
||||
description: message,
|
||||
variant: "destructive",
|
||||
});
|
||||
return false;
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
}, [refreshInstalledPlugins, searchAvailablePlugins]);
|
||||
}, [refreshInstalledPlugins, searchAvailablePlugins, toast]);
|
||||
|
||||
const uninstallPlugin = async (pluginId: string, keepData = true): Promise<boolean> => {
|
||||
try {
|
||||
@@ -263,7 +302,13 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
 
       return true;
     } catch (err) {
-      setError(err instanceof Error ? err.message : 'Uninstallation failed');
+      const message = err instanceof Error ? err.message : 'Uninstallation failed';
+      setError(message);
+      toast({
+        title: "Plugin uninstall failed",
+        description: message,
+        variant: "destructive",
+      });
       return false;
     } finally {
       setLoading(false);
@@ -281,7 +326,13 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
 
       return true;
     } catch (err) {
-      setError(err instanceof Error ? err.message : 'Enable failed');
+      const message = err instanceof Error ? err.message : 'Enable failed';
+      setError(message);
+      toast({
+        title: "Plugin enable failed",
+        description: message,
+        variant: "destructive",
+      });
       return false;
     }
   };
@@ -297,7 +348,13 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
 
       return true;
     } catch (err) {
-      setError(err instanceof Error ? err.message : 'Disable failed');
+      const message = err instanceof Error ? err.message : 'Disable failed';
+      setError(message);
+      toast({
+        title: "Plugin disable failed",
+        description: message,
+        variant: "destructive",
+      });
       return false;
     }
   };
@@ -316,7 +373,13 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
 
       return true;
     } catch (err) {
-      setError(err instanceof Error ? err.message : 'Load failed');
+      const message = err instanceof Error ? err.message : 'Load failed';
+      setError(message);
+      toast({
+        title: "Plugin load failed",
+        description: message,
+        variant: "destructive",
+      });
       return false;
     }
   };
@@ -338,7 +401,13 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
 
       return true;
     } catch (err) {
-      setError(err instanceof Error ? err.message : 'Unload failed');
+      const message = err instanceof Error ? err.message : 'Unload failed';
+      setError(message);
+      toast({
+        title: "Plugin unload failed",
+        description: message,
+        variant: "destructive",
+      });
       return false;
     }
   };
@@ -348,6 +417,7 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
       const data = await apiRequest(`/${pluginId}/config`);
       return data;
     } catch (err) {
+      console.warn(`Failed to fetch configuration for plugin ${pluginId}`, err);
       return null;
     }
   };
@@ -371,7 +441,13 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
 
       return true;
     } catch (err) {
-      setError(err instanceof Error ? err.message : 'Failed to save configuration');
+      const message = err instanceof Error ? err.message : 'Failed to save configuration';
+      setError(message);
+      toast({
+        title: "Save failed",
+        description: message,
+        variant: "destructive",
+      });
       return false;
     }
   };
@@ -402,6 +478,7 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
           }));
         }
       } catch (chatbotError) {
+        console.warn(`Failed to populate chatbot options for plugin ${pluginId}`, chatbotError);
       }
 
       // Populate model options for AI settings
@@ -426,6 +503,7 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
           schema.properties.draft_settings.properties.model.options = modelOptions;
         }
       } catch (modelError) {
+        console.warn(`Failed to populate model options for plugin ${pluginId}`, modelError);
       }
     }
 
@@ -442,11 +520,19 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
           }));
         }
       } catch (modelError) {
+        console.warn(`Failed to populate Signal model options for plugin ${pluginId}`, modelError);
       }
     }
 
       return schema;
     } catch (err) {
+      const message = err instanceof Error ? err.message : 'Failed to load plugin schema';
+      console.error(`Failed to load schema for plugin ${pluginId}`, err);
+      toast({
+        title: "Schema load failed",
+        description: message,
+        variant: "destructive",
+      });
       return null;
     }
   };
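The schema hunks above follow a deliberate best-effort pattern: each option source (chatbots, models, Signal models) sits in its own try/catch that only logs a warning, so one failed lookup leaves its dropdown empty instead of aborting the whole schema load; only the outer catch gives up and toasts. A sketch of that pattern in isolation (all names hypothetical):

// Apply each enrichment independently so one failure degrades to an
// empty dropdown rather than a failed schema load.
async function populateOptions(
  schema: unknown,
  sources: Array<{ label: string; apply: (schema: unknown) => Promise<void> }>,
): Promise<unknown> {
  for (const source of sources) {
    try {
      await source.apply(schema);
    } catch (err) {
      console.warn(`Failed to populate ${source.label} options`, err);
    }
  }
  return schema;
}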
@@ -478,6 +564,12 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
       }));
 
     } catch (err) {
+      console.error(`Failed to load plugin components for ${pluginId}`, err);
+      toast({
+        title: "Component load failed",
+        description: err instanceof Error ? err.message : 'Unable to load plugin components',
+        variant: "destructive",
+      });
     }
   };
 
@@ -500,9 +592,51 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
   };
 
   const isPluginPageAuthorized = (pluginId: string, pagePath: string): boolean => {
-    // TODO: Implement authorization logic based on user permissions
     const plugin = installedPlugins.find(p => p.id === pluginId);
-    return plugin?.status === 'enabled' && plugin?.loaded;
+    if (!plugin || plugin.status !== 'enabled' || !plugin.loaded) {
+      return false;
+    }
+
+    const manifestPages = plugin.manifest?.spec?.ui_config?.pages ?? plugin.pages ?? [];
+    const page = manifestPages.find((p: any) => p.path === pagePath);
+
+    const requiresAuth = page?.requiresAuth !== false;
+    if (requiresAuth && !isAuthenticated) {
+      return false;
+    }
+
+    const requiredPermissions: string[] =
+      page?.required_permissions ??
+      page?.requiredPermissions ??
+      [];
+
+    if (requiredPermissions.length > 0) {
+      return requiredPermissions.every(perm => hasPermission(perm));
+    }
+
+    if (!requiresAuth) {
+      return true;
+    }
+
+    if (user?.role && ['super_admin', 'admin'].includes(user.role)) {
+      return true;
+    }
+
+    const modulePrefix = `modules:${pluginId}`;
+    const hasModuleAccess = userPermissions.some(granted => {
+      if (granted === 'modules:*') {
+        return true;
+      }
+      if (granted.startsWith(`${modulePrefix}:`)) {
+        return true;
+      }
+      if (granted.endsWith(':*') && granted.startsWith(modulePrefix)) {
+        return true;
+      }
+      return false;
+    });
+
+    return hasModuleAccess;
   };
 
   const getPluginComponent = (pluginId: string, componentName: string): React.ComponentType | null => {
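The rewritten isPluginPageAuthorized checks, in order: the plugin must be enabled and loaded; a page that requires auth needs an authenticated user; any explicit required_permissions (snake_case or camelCase) are each checked via hasPermission; public pages then pass; admin roles pass; otherwise the user needs some granted scope under the plugin's module. The final wildcard check, extracted as a standalone sketch for illustration (granted values come from userPermissions in the provider):

function matchesModuleScope(granted: string, pluginId: string): boolean {
  const modulePrefix = `modules:${pluginId}`;
  if (granted === 'modules:*') return true;                         // global module wildcard
  if (granted.startsWith(`${modulePrefix}:`)) return true;          // e.g. modules:signal:read
  return granted.endsWith(':*') && granted.startsWith(modulePrefix); // e.g. modules:signal:*
}

// matchesModuleScope('modules:*', 'signal')            -> true
// matchesModuleScope('modules:signal:read', 'signal')  -> true
// matchesModuleScope('modules:other:*', 'signal')      -> false
// Caveat: the last branch tests startsWith(modulePrefix) without a trailing
// colon, so 'modules:signalfoo:*' also matches plugin 'signal', which is
// possibly unintended and worth tightening to startsWith(`${modulePrefix}:`).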
@@ -556,4 +690,4 @@ export const PluginProvider: React.FC<PluginProviderProps> = ({ children }) => {
       {children}
     </PluginContext.Provider>
   );
-};
+};