mirror of
https://github.com/aljazceru/enclava.git
synced 2025-12-17 07:24:34 +01:00
remove workflow and fix dependencies
This commit is contained in:
@@ -11,7 +11,6 @@ from .rag_collection import RagCollection
|
||||
from .rag_document import RagDocument
|
||||
from .chatbot import ChatbotInstance, ChatbotConversation, ChatbotMessage, ChatbotAnalytics
|
||||
from .prompt_template import PromptTemplate, ChatbotPromptVariable
|
||||
from .workflow import WorkflowDefinition, WorkflowExecution, WorkflowStepLog
|
||||
from .plugin import Plugin, PluginConfiguration, PluginInstance, PluginAuditLog, PluginCronJob, PluginAPIGateway
|
||||
|
||||
__all__ = [
|
||||
@@ -28,9 +27,6 @@ __all__ = [
|
||||
"ChatbotAnalytics",
|
||||
"PromptTemplate",
|
||||
"ChatbotPromptVariable",
|
||||
"WorkflowDefinition",
|
||||
"WorkflowExecution",
|
||||
"WorkflowStepLog",
|
||||
"Plugin",
|
||||
"PluginConfiguration",
|
||||
"PluginInstance",
|
||||
|
||||
@@ -1,118 +0,0 @@
|
||||
"""
|
||||
Database models for workflow module
|
||||
"""
|
||||
from sqlalchemy import Column, Integer, String, Text, Boolean, DateTime, JSON, ForeignKey, Enum as SQLEnum
|
||||
from sqlalchemy.orm import relationship
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from datetime import datetime
|
||||
import uuid
|
||||
import enum
|
||||
|
||||
from app.db.database import Base
|
||||
|
||||
|
||||
class WorkflowStatus(enum.Enum):
    """Lifecycle states a workflow execution can be in."""

    PENDING = "pending"        # created but not yet started
    RUNNING = "running"        # currently executing steps
    COMPLETED = "completed"    # finished successfully
    FAILED = "failed"          # aborted due to an error
    CANCELLED = "cancelled"    # stopped on request before completion
|
||||
|
||||
|
||||
class WorkflowDefinition(Base):
    """Workflow definition/template.

    Stores a reusable workflow as a JSON list of step definitions, plus
    versioning, activation and ownership metadata.  One definition can
    have many :class:`WorkflowExecution` rows.
    """
    __tablename__ = "workflow_definitions"

    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    name = Column(String(255), nullable=False)
    description = Column(Text)
    version = Column(String(50), default="1.0.0")

    # Workflow definition stored as JSON.
    steps = Column(JSON, nullable=False)
    # Use the `dict` factory (not a `{}` literal) so every row gets its own
    # fresh dict; a literal would be one shared mutable default object.
    variables = Column(JSON, default=dict)
    # Mapped to DB column "metadata"; the attribute is renamed because
    # `metadata` is reserved on SQLAlchemy declarative classes.
    workflow_metadata = Column("metadata", JSON, default=dict)

    # Configuration
    timeout = Column(Integer)  # Timeout in seconds
    is_active = Column(Boolean, default=True)

    # Metadata
    created_by = Column(String, nullable=False)  # User ID
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    executions = relationship("WorkflowExecution", back_populates="workflow", cascade="all, delete-orphan")

    def __repr__(self):
        return f"<WorkflowDefinition(id='{self.id}', name='{self.name}')>"
|
||||
|
||||
|
||||
class WorkflowExecution(Base):
    """Workflow execution instance.

    One row per run of a :class:`WorkflowDefinition`, tracking status,
    input/context/result payloads, timing and the executing principal.
    """
    __tablename__ = "workflow_executions"

    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    workflow_id = Column(String, ForeignKey("workflow_definitions.id"), nullable=False)

    # Execution state
    status = Column(SQLEnum(WorkflowStatus), default=WorkflowStatus.PENDING)
    current_step = Column(String)  # Current step ID

    # Execution data.  `dict` factory (not `{}`) so each row gets a fresh
    # dict instead of sharing one mutable default object across rows.
    input_data = Column(JSON, default=dict)
    context = Column(JSON, default=dict)
    results = Column(JSON, default=dict)
    error = Column(Text)

    # Timing
    started_at = Column(DateTime)
    completed_at = Column(DateTime)

    # Metadata
    executed_by = Column(String, nullable=False)  # User ID or system
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    workflow = relationship("WorkflowDefinition", back_populates="executions")
    step_logs = relationship("WorkflowStepLog", back_populates="execution", cascade="all, delete-orphan")

    def __repr__(self):
        return f"<WorkflowExecution(id='{self.id}', workflow_id='{self.workflow_id}', status='{self.status}')>"
|
||||
|
||||
|
||||
class WorkflowStepLog(Base):
    """Individual step execution log.

    One row per step run within a :class:`WorkflowExecution`, recording
    the step's identity, status, payloads, timing and retry count.
    """
    __tablename__ = "workflow_step_logs"

    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    execution_id = Column(String, ForeignKey("workflow_executions.id"), nullable=False)

    # Step information
    step_id = Column(String, nullable=False)
    step_name = Column(String(255), nullable=False)
    step_type = Column(String(50), nullable=False)

    # Execution details.  `dict` factory (not `{}`) so each row gets its own
    # fresh dict rather than sharing one mutable default object.
    status = Column(String(50), nullable=False)  # started, completed, failed
    input_data = Column(JSON, default=dict)
    output_data = Column(JSON, default=dict)
    error = Column(Text)

    # Timing
    started_at = Column(DateTime, default=datetime.utcnow)
    completed_at = Column(DateTime)
    duration_ms = Column(Integer)  # Duration in milliseconds

    # Metadata
    retry_count = Column(Integer, default=0)
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationships
    execution = relationship("WorkflowExecution", back_populates="step_logs")

    def __repr__(self):
        return f"<WorkflowStepLog(id='{self.id}', step_name='{self.step_name}', status='{self.status}')>"
|
||||
@@ -32,7 +32,6 @@ class ModuleManifest:
|
||||
provides: List[str] = None
|
||||
consumes: List[str] = None
|
||||
endpoints: List[Dict] = None
|
||||
workflow_steps: List[Dict] = None
|
||||
permissions: List[Dict] = None
|
||||
analytics_events: List[Dict] = None
|
||||
health_checks: List[Dict] = None
|
||||
@@ -50,8 +49,6 @@ class ModuleManifest:
|
||||
self.consumes = []
|
||||
if self.endpoints is None:
|
||||
self.endpoints = []
|
||||
if self.workflow_steps is None:
|
||||
self.workflow_steps = []
|
||||
if self.permissions is None:
|
||||
self.permissions = []
|
||||
if self.analytics_events is None:
|
||||
@@ -266,15 +263,6 @@ class ModuleConfigManager:
|
||||
|
||||
return modules
|
||||
|
||||
def get_workflow_steps(self) -> Dict[str, List[Dict]]:
|
||||
"""Get all available workflow steps from modules"""
|
||||
workflow_steps = {}
|
||||
|
||||
for name, manifest in self.manifests.items():
|
||||
if manifest.workflow_steps:
|
||||
workflow_steps[name] = manifest.workflow_steps
|
||||
|
||||
return workflow_steps
|
||||
|
||||
async def update_module_status(self, module_name: str, enabled: bool) -> bool:
|
||||
"""Update module enabled status"""
|
||||
|
||||
@@ -155,8 +155,7 @@ class ModuleManager:
|
||||
logger.warning("Falling back to legacy module configuration")
|
||||
|
||||
default_modules = [
|
||||
ModuleConfig(name="rag", enabled=True, config={}),
|
||||
ModuleConfig(name="workflow", enabled=True, config={})
|
||||
ModuleConfig(name="rag", enabled=True, config={})
|
||||
]
|
||||
|
||||
for config in default_modules:
|
||||
@@ -591,7 +590,6 @@ class ModuleManager:
|
||||
"provides": manifest.provides,
|
||||
"consumes": manifest.consumes,
|
||||
"endpoints": manifest.endpoints,
|
||||
"workflow_steps": manifest.workflow_steps,
|
||||
"permissions": manifest.permissions,
|
||||
"ui_config": manifest.ui_config,
|
||||
"has_schema": module_config_manager.get_module_schema(module_name) is not None,
|
||||
@@ -630,9 +628,6 @@ class ModuleManager:
|
||||
log_module_event(module_name, "config_update_failed", {"error": str(e)})
|
||||
return False
|
||||
|
||||
def get_workflow_steps(self) -> Dict[str, List[Dict]]:
|
||||
"""Get all available workflow steps from modules"""
|
||||
return module_config_manager.get_workflow_steps()
|
||||
|
||||
async def get_module_health(self, module_name: str) -> Dict:
|
||||
"""Get module health status"""
|
||||
|
||||
@@ -1,434 +0,0 @@
|
||||
"""
|
||||
Workflow Execution Service
|
||||
Handles workflow execution tracking with proper user context and audit trails
|
||||
"""
|
||||
import asyncio
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Any, Optional
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy.orm import Session
|
||||
from sqlalchemy import select, update
|
||||
import json
|
||||
|
||||
from app.core.logging import get_logger
|
||||
from app.models.workflow import WorkflowDefinition, WorkflowExecution, WorkflowStepLog, WorkflowStatus
|
||||
from app.models.user import User
|
||||
from app.utils.exceptions import APIException
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
class WorkflowExecutionService:
    """Service for managing workflow executions with user context tracking.

    All database methods operate on an async SQLAlchemy session.  Query
    methods return ORM objects (or empty results on error); mutation
    methods return a boolean success flag and roll back on failure.
    Only ``create_execution_record`` raises (``APIException``).
    """

    def __init__(self, db: AsyncSession):
        # Async session used by every query/commit in this service.
        self.db = db

    async def create_execution_record(
        self,
        workflow_id: str,
        user_context: Dict[str, Any],
        execution_params: Optional[Dict] = None
    ) -> WorkflowExecution:
        """Create a new workflow execution record with user context.

        The caller's identity (user id, username, session) is persisted in
        the execution's ``context`` column for the audit trail.

        Raises:
            APIException: if the record cannot be committed.
        """
        # Extract user information from context; fall back to "system".
        user_id = user_context.get("user_id") or user_context.get("id", "system")
        username = user_context.get("username", "system")
        session_id = user_context.get("session_id")

        execution_record = WorkflowExecution(
            id=str(uuid.uuid4()),
            workflow_id=workflow_id,
            status=WorkflowStatus.PENDING,
            input_data=execution_params or {},
            context={
                "user_id": user_id,
                "username": username,
                "session_id": session_id,
                "started_by": "workflow_execution_service",
                "created_at": datetime.utcnow().isoformat()
            },
            executed_by=str(user_id),
            started_at=datetime.utcnow()
        )

        try:
            self.db.add(execution_record)
            await self.db.commit()
            await self.db.refresh(execution_record)

            logger.info(f"Created workflow execution record {execution_record.id} for workflow {workflow_id} by user {username} ({user_id})")
            return execution_record

        except Exception as e:
            await self.db.rollback()
            logger.error(f"Failed to create execution record: {e}")
            raise APIException(f"Failed to create execution record: {e}")

    async def start_execution(
        self,
        execution_id: str,
        workflow_context: Optional[Dict[str, Any]] = None
    ) -> bool:
        """Mark execution as started and optionally update its context.

        Returns True on success, False on failure (after rollback).
        """
        try:
            # FIX: only overwrite the stored context when the caller actually
            # supplies one.  The previous `context=workflow_context or {}`
            # wiped the user/audit context written by create_execution_record
            # whenever no workflow context was passed.
            values: Dict[str, Any] = {
                "status": WorkflowStatus.RUNNING,
                "started_at": datetime.utcnow(),
            }
            if workflow_context is not None:
                values["context"] = workflow_context

            stmt = update(WorkflowExecution).where(
                WorkflowExecution.id == execution_id
            ).values(**values)

            await self.db.execute(stmt)
            await self.db.commit()

            logger.info(f"Started workflow execution {execution_id}")
            return True

        except Exception as e:
            await self.db.rollback()
            logger.error(f"Failed to start execution {execution_id}: {e}")
            return False

    async def complete_execution(
        self,
        execution_id: str,
        results: Dict[str, Any],
        step_history: Optional[List[Dict[str, Any]]] = None
    ) -> bool:
        """Mark execution as completed with results.

        Also persists per-step logs when ``step_history`` is provided.
        Returns True on success, False on failure (after rollback).
        """
        try:
            stmt = update(WorkflowExecution).where(
                WorkflowExecution.id == execution_id
            ).values(
                status=WorkflowStatus.COMPLETED,
                completed_at=datetime.utcnow(),
                results=results
            )

            await self.db.execute(stmt)

            # Log individual steps if provided (same transaction/commit).
            if step_history:
                await self._log_execution_steps(execution_id, step_history)

            await self.db.commit()

            logger.info(f"Completed workflow execution {execution_id} with {len(results)} results")
            return True

        except Exception as e:
            await self.db.rollback()
            logger.error(f"Failed to complete execution {execution_id}: {e}")
            return False

    async def fail_execution(
        self,
        execution_id: str,
        error_message: str,
        step_history: Optional[List[Dict[str, Any]]] = None
    ) -> bool:
        """Mark execution as failed with error details.

        Returns True when the failure was recorded, False if recording
        itself failed (after rollback).
        """
        try:
            stmt = update(WorkflowExecution).where(
                WorkflowExecution.id == execution_id
            ).values(
                status=WorkflowStatus.FAILED,
                completed_at=datetime.utcnow(),
                error=error_message
            )

            await self.db.execute(stmt)

            # Log individual steps if provided (same transaction/commit).
            if step_history:
                await self._log_execution_steps(execution_id, step_history)

            await self.db.commit()

            logger.error(f"Failed workflow execution {execution_id}: {error_message}")
            return True

        except Exception as e:
            await self.db.rollback()
            logger.error(f"Failed to record execution failure {execution_id}: {e}")
            return False

    async def cancel_execution(self, execution_id: str, reason: str = "User cancelled") -> bool:
        """Cancel a workflow execution, recording the reason in ``error``."""
        try:
            stmt = update(WorkflowExecution).where(
                WorkflowExecution.id == execution_id
            ).values(
                status=WorkflowStatus.CANCELLED,
                completed_at=datetime.utcnow(),
                error=f"Cancelled: {reason}"
            )

            await self.db.execute(stmt)
            await self.db.commit()

            logger.info(f"Cancelled workflow execution {execution_id}: {reason}")
            return True

        except Exception as e:
            await self.db.rollback()
            logger.error(f"Failed to cancel execution {execution_id}: {e}")
            return False

    async def get_execution_status(self, execution_id: str) -> Optional[WorkflowExecution]:
        """Get current execution status and details, or None if not found/on error."""
        try:
            stmt = select(WorkflowExecution).where(WorkflowExecution.id == execution_id)
            result = await self.db.execute(stmt)
            execution = result.scalar_one_or_none()

            if execution:
                logger.debug(f"Retrieved execution status for {execution_id}: {execution.status}")
                return execution
            else:
                logger.warning(f"Execution {execution_id} not found")
                return None

        except Exception as e:
            logger.error(f"Failed to get execution status for {execution_id}: {e}")
            return None

    async def get_user_executions(
        self,
        user_id: str,
        limit: int = 50,
        status_filter: Optional[WorkflowStatus] = None
    ) -> List[WorkflowExecution]:
        """Get workflow executions for a specific user, newest first.

        Returns an empty list on error.
        """
        try:
            stmt = select(WorkflowExecution).where(WorkflowExecution.executed_by == str(user_id))

            if status_filter:
                stmt = stmt.where(WorkflowExecution.status == status_filter)

            stmt = stmt.order_by(WorkflowExecution.created_at.desc()).limit(limit)

            result = await self.db.execute(stmt)
            executions = result.scalars().all()

            logger.debug(f"Retrieved {len(executions)} executions for user {user_id}")
            return list(executions)

        except Exception as e:
            logger.error(f"Failed to get executions for user {user_id}: {e}")
            return []

    async def get_workflow_executions(
        self,
        workflow_id: str,
        limit: int = 50
    ) -> List[WorkflowExecution]:
        """Get all executions for a specific workflow, newest first.

        Returns an empty list on error.
        """
        try:
            stmt = select(WorkflowExecution).where(
                WorkflowExecution.workflow_id == workflow_id
            ).order_by(WorkflowExecution.created_at.desc()).limit(limit)

            result = await self.db.execute(stmt)
            executions = result.scalars().all()

            logger.debug(f"Retrieved {len(executions)} executions for workflow {workflow_id}")
            return list(executions)

        except Exception as e:
            logger.error(f"Failed to get executions for workflow {workflow_id}: {e}")
            return []

    async def get_execution_history(self, execution_id: str) -> List[WorkflowStepLog]:
        """Get detailed step history for an execution, oldest first.

        Returns an empty list on error.
        """
        try:
            stmt = select(WorkflowStepLog).where(
                WorkflowStepLog.execution_id == execution_id
            ).order_by(WorkflowStepLog.started_at.asc())

            result = await self.db.execute(stmt)
            step_logs = result.scalars().all()

            logger.debug(f"Retrieved {len(step_logs)} step logs for execution {execution_id}")
            return list(step_logs)

        except Exception as e:
            logger.error(f"Failed to get execution history for {execution_id}: {e}")
            return []

    async def _log_execution_steps(
        self,
        execution_id: str,
        step_history: List[Dict[str, Any]]
    ):
        """Stage WorkflowStepLog rows for each entry in ``step_history``.

        Does not commit; the caller's transaction commits the rows.
        Errors are logged and swallowed (best-effort logging).
        """
        try:
            step_logs = []
            for step_data in step_history:
                step_log = WorkflowStepLog(
                    id=str(uuid.uuid4()),
                    execution_id=execution_id,
                    step_id=step_data.get("step_id", "unknown"),
                    step_name=step_data.get("step_name", "Unknown Step"),
                    step_type=step_data.get("step_type", "unknown"),
                    status=step_data.get("status", "completed"),
                    input_data=step_data.get("input_data", {}),
                    output_data=step_data.get("output_data", {}),
                    error=step_data.get("error"),
                    started_at=datetime.fromisoformat(step_data.get("started_at", datetime.utcnow().isoformat())),
                    completed_at=datetime.fromisoformat(step_data.get("completed_at", datetime.utcnow().isoformat())) if step_data.get("completed_at") else None,
                    duration_ms=step_data.get("duration_ms"),
                    retry_count=step_data.get("retry_count", 0)
                )
                step_logs.append(step_log)

            if step_logs:
                self.db.add_all(step_logs)
                logger.debug(f"Added {len(step_logs)} step logs for execution {execution_id}")

        except Exception as e:
            logger.error(f"Failed to log execution steps for {execution_id}: {e}")

    async def get_execution_statistics(
        self,
        user_id: Optional[str] = None,
        workflow_id: Optional[str] = None,
        days: int = 30
    ) -> Dict[str, Any]:
        """Get execution statistics for analytics over the last ``days`` days.

        Optionally filtered by user and/or workflow.  Returns a dict with
        counts, success/failure rates and average duration; on error,
        returns a dict containing only ``error`` and ``generated_at``.
        """
        try:
            # Local import: only needed here.  (Removed the previously
            # imported but unused `sqlalchemy.func`.)
            from datetime import timedelta

            # Base query
            stmt = select(WorkflowExecution)

            # Apply filters
            if user_id:
                stmt = stmt.where(WorkflowExecution.executed_by == str(user_id))
            if workflow_id:
                stmt = stmt.where(WorkflowExecution.workflow_id == workflow_id)

            # Date filter
            cutoff_date = datetime.utcnow() - timedelta(days=days)
            stmt = stmt.where(WorkflowExecution.created_at >= cutoff_date)

            # Get all matching executions
            result = await self.db.execute(stmt)
            executions = result.scalars().all()

            # Calculate statistics
            total_executions = len(executions)
            completed = len([e for e in executions if e.status == WorkflowStatus.COMPLETED])
            failed = len([e for e in executions if e.status == WorkflowStatus.FAILED])
            cancelled = len([e for e in executions if e.status == WorkflowStatus.CANCELLED])
            running = len([e for e in executions if e.status == WorkflowStatus.RUNNING])

            # Average execution time for completed workflows that have both
            # timestamps recorded.
            completed_executions = [e for e in executions if e.status == WorkflowStatus.COMPLETED and e.started_at and e.completed_at]
            avg_duration = None
            if completed_executions:
                total_duration = sum([(e.completed_at - e.started_at).total_seconds() for e in completed_executions])
                avg_duration = total_duration / len(completed_executions)

            statistics = {
                "total_executions": total_executions,
                "completed": completed,
                "failed": failed,
                "cancelled": cancelled,
                "running": running,
                "success_rate": (completed / total_executions * 100) if total_executions > 0 else 0,
                "failure_rate": (failed / total_executions * 100) if total_executions > 0 else 0,
                "average_duration_seconds": avg_duration,
                "period_days": days,
                "generated_at": datetime.utcnow().isoformat()
            }

            logger.debug(f"Generated execution statistics: {statistics}")
            return statistics

        except Exception as e:
            logger.error(f"Failed to generate execution statistics: {e}")
            return {
                "error": str(e),
                "generated_at": datetime.utcnow().isoformat()
            }

    def create_user_context(
        self,
        user_id: str,
        username: Optional[str] = None,
        session_id: Optional[str] = None,
        additional_context: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Create standardized user context for workflow execution.

        Fills in a derived username (``user_<id>``) and a fresh session id
        when not supplied; ``additional_context`` entries are merged on top.
        """
        context = {
            "user_id": user_id,
            "username": username or f"user_{user_id}",
            "session_id": session_id or str(uuid.uuid4()),
            "timestamp": datetime.utcnow().isoformat(),
            "source": "workflow_execution_service"
        }

        if additional_context:
            context.update(additional_context)

        return context

    def extract_user_context_from_request(self, request_context: Dict[str, Any]) -> Dict[str, Any]:
        """Extract user context from API request context.

        Resolution order: an attached user (dict or model instance), then an
        API key identity, then a generic system context.
        """
        # Try to get user from different possible sources
        user = request_context.get("user") or request_context.get("current_user")

        if user:
            if isinstance(user, dict):
                return self.create_user_context(
                    user_id=str(user.get("id", "unknown")),
                    username=user.get("username") or user.get("email"),
                    session_id=request_context.get("session_id")
                )
            else:
                # Assume user is a model instance
                return self.create_user_context(
                    user_id=str(getattr(user, 'id', 'unknown')),
                    username=getattr(user, 'username', None) or getattr(user, 'email', None),
                    session_id=request_context.get("session_id")
                )

        # Fallback to API key or system context
        api_key_id = request_context.get("api_key_id")
        if api_key_id:
            return self.create_user_context(
                user_id=f"api_key_{api_key_id}",
                username=f"API Key {api_key_id}",
                session_id=request_context.get("session_id"),
                additional_context={"auth_type": "api_key"}
            )

        # Last resort: system context
        return self.create_user_context(
            user_id="system",
            username="System",
            session_id=request_context.get("session_id"),
            additional_context={"auth_type": "system", "note": "No user context available"}
        )
|
||||
@@ -9,7 +9,6 @@ enabled: true
|
||||
auto_start: true
|
||||
dependencies:
|
||||
- rag
|
||||
- workflow
|
||||
optional_dependencies:
|
||||
- analytics
|
||||
|
||||
@@ -22,12 +21,10 @@ provides:
|
||||
- "chat_completion"
|
||||
- "conversation_management"
|
||||
- "chatbot_configuration"
|
||||
- "workflow_chat_step"
|
||||
|
||||
consumes:
|
||||
- "rag_search"
|
||||
- "llm_completion"
|
||||
- "workflow_execution"
|
||||
|
||||
# API endpoints
|
||||
endpoints:
|
||||
@@ -43,39 +40,6 @@ endpoints:
|
||||
method: "GET"
|
||||
description: "List user chatbots"
|
||||
|
||||
# Workflow integration
|
||||
workflow_steps:
|
||||
- name: "chatbot_response"
|
||||
description: "Generate chatbot response with optional RAG context"
|
||||
inputs:
|
||||
- name: "message"
|
||||
type: "string"
|
||||
required: true
|
||||
description: "User message to respond to"
|
||||
- name: "chatbot_id"
|
||||
type: "string"
|
||||
required: true
|
||||
description: "ID of configured chatbot instance"
|
||||
- name: "use_rag"
|
||||
type: "boolean"
|
||||
required: false
|
||||
default: false
|
||||
description: "Whether to use RAG for context"
|
||||
- name: "context"
|
||||
type: "object"
|
||||
required: false
|
||||
description: "Additional context data"
|
||||
outputs:
|
||||
- name: "response"
|
||||
type: "string"
|
||||
description: "Generated chatbot response"
|
||||
- name: "conversation_id"
|
||||
type: "string"
|
||||
description: "Conversation ID for follow-up"
|
||||
- name: "sources"
|
||||
type: "array"
|
||||
description: "RAG sources used (if any)"
|
||||
|
||||
# UI Configuration
|
||||
ui_config:
|
||||
icon: "message-circle"
|
||||
|
||||
@@ -17,7 +17,6 @@ provides:
|
||||
- "semantic_search"
|
||||
- "vector_embeddings"
|
||||
- "document_processing"
|
||||
- "workflow_rag_step"
|
||||
|
||||
consumes:
|
||||
- "qdrant_connection"
|
||||
@@ -42,32 +41,6 @@ endpoints:
|
||||
method: "GET"
|
||||
description: "List documents in collection"
|
||||
|
||||
# Workflow integration
|
||||
workflow_steps:
|
||||
- name: "rag_search"
|
||||
description: "Search documents for relevant context"
|
||||
inputs:
|
||||
- name: "query"
|
||||
type: "string"
|
||||
required: true
|
||||
description: "Search query"
|
||||
- name: "collection_id"
|
||||
type: "string"
|
||||
required: true
|
||||
description: "Document collection to search"
|
||||
- name: "top_k"
|
||||
type: "integer"
|
||||
required: false
|
||||
default: 5
|
||||
description: "Number of top results to return"
|
||||
outputs:
|
||||
- name: "results"
|
||||
type: "array"
|
||||
description: "Search results with content and metadata"
|
||||
- name: "context"
|
||||
type: "string"
|
||||
description: "Combined context from top results"
|
||||
|
||||
# UI Configuration
|
||||
ui_config:
|
||||
icon: "search"
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
"""
|
||||
Workflow Module for Confidential Empire
|
||||
|
||||
Provides workflow orchestration capabilities for chaining multiple LLM calls,
|
||||
conditional logic, and data transformations.
|
||||
"""
|
||||
|
||||
from .main import WorkflowModule
|
||||
|
||||
__all__ = ["WorkflowModule"]
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,145 +0,0 @@
|
||||
name: workflow
|
||||
version: 1.0.0
|
||||
description: "Multi-step automation processes"
|
||||
author: "Enclava Team"
|
||||
category: "automation"
|
||||
|
||||
# Module lifecycle
|
||||
enabled: true
|
||||
auto_start: true
|
||||
dependencies: []
|
||||
optional_dependencies:
|
||||
- rag
|
||||
- chatbot
|
||||
- cache
|
||||
|
||||
# Module capabilities
|
||||
provides:
|
||||
- "workflow_execution"
|
||||
- "step_orchestration"
|
||||
- "automation_triggers"
|
||||
- "workflow_templates"
|
||||
|
||||
consumes:
|
||||
- "llm_completion"
|
||||
- "rag_search"
|
||||
- "chatbot_response"
|
||||
- "external_apis"
|
||||
|
||||
# API endpoints
|
||||
endpoints:
|
||||
- path: "/workflow/templates"
|
||||
method: "GET"
|
||||
description: "List available workflow templates"
|
||||
|
||||
- path: "/workflow/create"
|
||||
method: "POST"
|
||||
description: "Create new workflow"
|
||||
|
||||
- path: "/workflow/execute"
|
||||
method: "POST"
|
||||
description: "Execute workflow"
|
||||
|
||||
- path: "/workflow/status/{workflow_id}"
|
||||
method: "GET"
|
||||
description: "Get workflow execution status"
|
||||
|
||||
- path: "/workflow/history"
|
||||
method: "GET"
|
||||
description: "Get workflow execution history"
|
||||
|
||||
# Workflow integration
|
||||
workflow_steps:
|
||||
- name: "conditional_branch"
|
||||
description: "Conditional logic branching"
|
||||
inputs:
|
||||
- name: "condition"
|
||||
type: "string"
|
||||
required: true
|
||||
description: "Condition to evaluate"
|
||||
- name: "true_path"
|
||||
type: "object"
|
||||
required: true
|
||||
description: "Steps to execute if condition is true"
|
||||
- name: "false_path"
|
||||
type: "object"
|
||||
required: false
|
||||
description: "Steps to execute if condition is false"
|
||||
outputs:
|
||||
- name: "result"
|
||||
type: "object"
|
||||
description: "Result from executed branch"
|
||||
|
||||
- name: "loop_iteration"
|
||||
description: "Iterative processing loop"
|
||||
inputs:
|
||||
- name: "items"
|
||||
type: "array"
|
||||
required: true
|
||||
description: "Items to iterate over"
|
||||
- name: "steps"
|
||||
type: "object"
|
||||
required: true
|
||||
description: "Steps to execute for each item"
|
||||
outputs:
|
||||
- name: "results"
|
||||
type: "array"
|
||||
description: "Results from each iteration"
|
||||
|
||||
# UI Configuration
|
||||
ui_config:
|
||||
icon: "workflow"
|
||||
color: "#06B6D4"
|
||||
category: "Automation"
|
||||
|
||||
forms:
|
||||
- name: "workflow_config"
|
||||
title: "Workflow Settings"
|
||||
fields: ["name", "description", "trigger_type"]
|
||||
|
||||
- name: "step_config"
|
||||
title: "Step Configuration"
|
||||
fields: ["step_type", "parameters", "retry_attempts"]
|
||||
|
||||
- name: "scheduling"
|
||||
title: "Scheduling & Triggers"
|
||||
fields: ["schedule", "webhook_triggers", "event_triggers"]
|
||||
|
||||
# Permissions
|
||||
permissions:
|
||||
- name: "workflow.create"
|
||||
description: "Create new workflows"
|
||||
|
||||
- name: "workflow.execute"
|
||||
description: "Execute workflows"
|
||||
|
||||
- name: "workflow.configure"
|
||||
description: "Configure workflow settings"
|
||||
|
||||
- name: "workflow.manage"
|
||||
description: "Manage all workflows (admin)"
|
||||
|
||||
# Analytics events
|
||||
analytics_events:
|
||||
- name: "workflow_created"
|
||||
description: "New workflow template created"
|
||||
|
||||
- name: "workflow_executed"
|
||||
description: "Workflow execution started"
|
||||
|
||||
- name: "workflow_completed"
|
||||
description: "Workflow execution completed"
|
||||
|
||||
- name: "workflow_failed"
|
||||
description: "Workflow execution failed"
|
||||
|
||||
# Health checks
|
||||
health_checks:
|
||||
- name: "execution_engine"
|
||||
description: "Check workflow execution engine"
|
||||
|
||||
- name: "step_dependencies"
|
||||
description: "Check availability of workflow step dependencies"
|
||||
|
||||
- name: "template_validation"
|
||||
description: "Validate workflow templates"
|
||||
@@ -1,389 +0,0 @@
|
||||
{
|
||||
"templates": [
|
||||
{
|
||||
"id": "simple_chatbot_interaction",
|
||||
"name": "Simple Chatbot Interaction",
|
||||
"description": "Basic workflow that processes user input through a configured chatbot",
|
||||
"version": "1.0",
|
||||
"variables": {
|
||||
"user_message": "Hello, I need help with my account",
|
||||
"chatbot_id": "customer_support_bot"
|
||||
},
|
||||
"steps": [
|
||||
{
|
||||
"id": "chatbot_response",
|
||||
"name": "Get Chatbot Response",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{chatbot_id}",
|
||||
"message_template": "{user_message}",
|
||||
"output_variable": "bot_response",
|
||||
"create_new_conversation": true,
|
||||
"save_conversation_id": "conversation_id"
|
||||
}
|
||||
],
|
||||
"outputs": {
|
||||
"response": "{bot_response}",
|
||||
"conversation_id": "{conversation_id}"
|
||||
},
|
||||
"metadata": {
|
||||
"created_by": "system",
|
||||
"use_case": "customer_support",
|
||||
"tags": ["chatbot", "simple", "customer_support"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "multi_turn_customer_support",
|
||||
"name": "Multi-Turn Customer Support Flow",
|
||||
"description": "Advanced customer support workflow with intent classification, knowledge base lookup, and escalation",
|
||||
"version": "1.0",
|
||||
"variables": {
|
||||
"user_message": "My order hasn't arrived yet",
|
||||
"customer_support_chatbot": "support_assistant",
|
||||
"escalation_chatbot": "human_handoff_bot",
|
||||
"rag_collection": "support_knowledge_base"
|
||||
},
|
||||
"steps": [
|
||||
{
|
||||
"id": "classify_intent",
|
||||
"name": "Classify User Intent",
|
||||
"type": "llm_call",
|
||||
"model": "gpt-3.5-turbo",
|
||||
"messages": [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are an intent classifier. Classify the user's message into one of: order_inquiry, technical_support, billing, general_question, escalation_needed. Respond with only the classification."
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "{user_message}"
|
||||
}
|
||||
],
|
||||
"output_variable": "intent"
|
||||
},
|
||||
{
|
||||
"id": "handle_order_inquiry",
|
||||
"name": "Handle Order Inquiry",
|
||||
"type": "chatbot",
|
||||
"conditions": ["{intent} == 'order_inquiry'"],
|
||||
"chatbot_id": "{customer_support_chatbot}",
|
||||
"message_template": "Customer inquiry about order: {user_message}",
|
||||
"output_variable": "support_response",
|
||||
"context_variables": {
|
||||
"intent": "intent",
|
||||
"inquiry_type": "order_inquiry"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "handle_technical_support",
|
||||
"name": "Handle Technical Support",
|
||||
"type": "chatbot",
|
||||
"conditions": ["{intent} == 'technical_support'"],
|
||||
"chatbot_id": "{customer_support_chatbot}",
|
||||
"message_template": "Technical support request: {user_message}",
|
||||
"output_variable": "support_response",
|
||||
"context_variables": {
|
||||
"intent": "intent",
|
||||
"inquiry_type": "technical_support"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "escalate_to_human",
|
||||
"name": "Escalate to Human Agent",
|
||||
"type": "chatbot",
|
||||
"conditions": ["{intent} == 'escalation_needed'"],
|
||||
"chatbot_id": "{escalation_chatbot}",
|
||||
"message_template": "Customer needs human assistance: {user_message}",
|
||||
"output_variable": "escalation_response"
|
||||
},
|
||||
{
|
||||
"id": "general_response",
|
||||
"name": "General Support Response",
|
||||
"type": "chatbot",
|
||||
"conditions": ["{intent} == 'general_question' or {intent} == 'billing'"],
|
||||
"chatbot_id": "{customer_support_chatbot}",
|
||||
"message_template": "{user_message}",
|
||||
"output_variable": "support_response"
|
||||
},
|
||||
{
|
||||
"id": "format_final_response",
|
||||
"name": "Format Final Response",
|
||||
"type": "transform",
|
||||
"input_variable": "support_response",
|
||||
"output_variable": "final_response",
|
||||
"transformation": "extract:response"
|
||||
}
|
||||
],
|
||||
"outputs": {
|
||||
"intent": "{intent}",
|
||||
"response": "{final_response}",
|
||||
"escalation_response": "{escalation_response}"
|
||||
},
|
||||
"error_handling": {
|
||||
"retry_failed_steps": true,
|
||||
"max_retries": 2,
|
||||
"fallback_response": "I apologize, but I'm experiencing technical difficulties. Please try again later or contact support directly."
|
||||
},
|
||||
"metadata": {
|
||||
"created_by": "system",
|
||||
"use_case": "customer_support",
|
||||
"tags": ["chatbot", "multi_turn", "intent_classification", "escalation"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "research_assistant_workflow",
|
||||
"name": "AI Research Assistant",
|
||||
"description": "Research workflow that uses specialized chatbots for different research tasks",
|
||||
"version": "1.0",
|
||||
"variables": {
|
||||
"research_topic": "artificial intelligence trends 2024",
|
||||
"researcher_chatbot": "ai_researcher",
|
||||
"analyst_chatbot": "data_analyst",
|
||||
"writer_chatbot": "content_writer"
|
||||
},
|
||||
"steps": [
|
||||
{
|
||||
"id": "initial_research",
|
||||
"name": "Conduct Initial Research",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{researcher_chatbot}",
|
||||
"message_template": "Please research the following topic and provide key findings: {research_topic}",
|
||||
"output_variable": "research_findings",
|
||||
"create_new_conversation": true,
|
||||
"save_conversation_id": "research_conversation"
|
||||
},
|
||||
{
|
||||
"id": "analyze_findings",
|
||||
"name": "Analyze Research Findings",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{analyst_chatbot}",
|
||||
"message_template": "Please analyze these research findings and identify key trends and insights: {research_findings}",
|
||||
"output_variable": "analysis_results",
|
||||
"create_new_conversation": true
|
||||
},
|
||||
{
|
||||
"id": "create_summary",
|
||||
"name": "Create Executive Summary",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{writer_chatbot}",
|
||||
"message_template": "Create an executive summary based on this research and analysis:\n\nTopic: {research_topic}\nResearch: {research_findings}\nAnalysis: {analysis_results}",
|
||||
"output_variable": "executive_summary"
|
||||
},
|
||||
{
|
||||
"id": "follow_up_questions",
|
||||
"name": "Generate Follow-up Questions",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{researcher_chatbot}",
|
||||
"message_template": "Based on this research on {research_topic}, what are 5 important follow-up questions that should be investigated further?",
|
||||
"output_variable": "follow_up_questions",
|
||||
"conversation_id": "research_conversation"
|
||||
}
|
||||
],
|
||||
"outputs": {
|
||||
"research_findings": "{research_findings}",
|
||||
"analysis": "{analysis_results}",
|
||||
"summary": "{executive_summary}",
|
||||
"next_steps": "{follow_up_questions}",
|
||||
"conversation_id": "{research_conversation}"
|
||||
},
|
||||
"metadata": {
|
||||
"created_by": "system",
|
||||
"use_case": "research_automation",
|
||||
"tags": ["chatbot", "research", "analysis", "multi_agent"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "content_creation_pipeline",
|
||||
"name": "AI Content Creation Pipeline",
|
||||
"description": "Multi-stage content creation using different specialized chatbots",
|
||||
"version": "1.0",
|
||||
"variables": {
|
||||
"content_brief": "Write a blog post about sustainable technology innovations",
|
||||
"target_audience": "tech-savvy professionals",
|
||||
"content_length": "1500 words",
|
||||
"research_bot": "researcher_assistant",
|
||||
"writer_bot": "creative_writer",
|
||||
"editor_bot": "content_editor"
|
||||
},
|
||||
"steps": [
|
||||
{
|
||||
"id": "research_phase",
|
||||
"name": "Research Content Topic",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{research_bot}",
|
||||
"message_template": "Research this content brief: {content_brief}. Target audience: {target_audience}. Provide key points, statistics, and current trends.",
|
||||
"output_variable": "research_data",
|
||||
"create_new_conversation": true,
|
||||
"save_conversation_id": "content_conversation"
|
||||
},
|
||||
{
|
||||
"id": "create_outline",
|
||||
"name": "Create Content Outline",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{writer_bot}",
|
||||
"message_template": "Create a detailed outline for: {content_brief}\nTarget audience: {target_audience}\nLength: {content_length}\nResearch data: {research_data}",
|
||||
"output_variable": "content_outline"
|
||||
},
|
||||
{
|
||||
"id": "write_content",
|
||||
"name": "Write First Draft",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{writer_bot}",
|
||||
"message_template": "Write the full content based on this outline: {content_outline}\nBrief: {content_brief}\nResearch: {research_data}\nTarget length: {content_length}",
|
||||
"output_variable": "first_draft"
|
||||
},
|
||||
{
|
||||
"id": "edit_content",
|
||||
"name": "Edit and Polish Content",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{editor_bot}",
|
||||
"message_template": "Please edit and improve this content for clarity, engagement, and professional tone:\n\n{first_draft}",
|
||||
"output_variable": "final_content"
|
||||
},
|
||||
{
|
||||
"id": "generate_metadata",
|
||||
"name": "Generate SEO Metadata",
|
||||
"type": "parallel",
|
||||
"steps": [
|
||||
{
|
||||
"id": "create_title_options",
|
||||
"name": "Generate Title Options",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{writer_bot}",
|
||||
"message_template": "Generate 5 compelling SEO-optimized titles for this content: {final_content}",
|
||||
"output_variable": "title_options"
|
||||
},
|
||||
{
|
||||
"id": "create_meta_description",
|
||||
"name": "Create Meta Description",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "{editor_bot}",
|
||||
"message_template": "Create an SEO-optimized meta description (150-160 characters) for this content: {final_content}",
|
||||
"output_variable": "meta_description"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": {
|
||||
"research": "{research_data}",
|
||||
"outline": "{content_outline}",
|
||||
"draft": "{first_draft}",
|
||||
"final_content": "{final_content}",
|
||||
"titles": "{title_options}",
|
||||
"meta_description": "{meta_description}",
|
||||
"conversation_id": "{content_conversation}"
|
||||
},
|
||||
"metadata": {
|
||||
"created_by": "system",
|
||||
"use_case": "content_creation",
|
||||
"tags": ["chatbot", "content", "writing", "parallel", "seo"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "demo_chatbot_workflow",
|
||||
"name": "Demo Interactive Chatbot",
|
||||
"description": "A demonstration workflow showcasing interactive chatbot capabilities with multi-turn conversation, context awareness, and intelligent response handling",
|
||||
"version": "1.0.0",
|
||||
"steps": [
|
||||
{
|
||||
"id": "welcome_interaction",
|
||||
"name": "Welcome User",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "demo_assistant",
|
||||
"message_template": "Hello! I'm a demo AI assistant. What can I help you with today? Feel free to ask me about anything - technology, general questions, or just have a conversation!",
|
||||
"output_variable": "welcome_response",
|
||||
"create_new_conversation": true,
|
||||
"save_conversation_id": "demo_conversation_id",
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"id": "analyze_user_intent",
|
||||
"name": "Analyze User Intent",
|
||||
"type": "llm_call",
|
||||
"model": "gpt-3.5-turbo",
|
||||
"messages": [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "Analyze the user's response and classify their intent. Categories: question, casual_chat, technical_help, information_request, creative_task, other. Respond with only the category name."
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "{user_input}"
|
||||
}
|
||||
],
|
||||
"output_variable": "user_intent",
|
||||
"parameters": {
|
||||
"temperature": 0.3,
|
||||
"max_tokens": 50
|
||||
},
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"id": "personalized_response",
|
||||
"name": "Generate Personalized Response",
|
||||
"type": "chatbot",
|
||||
"chatbot_id": "demo_assistant",
|
||||
"message_template": "User intent: {user_intent}. User message: {user_input}. Please provide a helpful, engaging response tailored to their specific need.",
|
||||
"output_variable": "personalized_response",
|
||||
"conversation_id": "demo_conversation_id",
|
||||
"context_variables": {
|
||||
"intent": "user_intent",
|
||||
"previous_welcome": "welcome_response"
|
||||
},
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"id": "follow_up_suggestions",
|
||||
"name": "Generate Follow-up Suggestions",
|
||||
"type": "llm_call",
|
||||
"model": "gpt-3.5-turbo",
|
||||
"messages": [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "Based on the conversation, suggest 2-3 relevant follow-up questions or topics the user might be interested in. Format as a simple list."
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "User intent: {user_intent}, Response given: {personalized_response}"
|
||||
}
|
||||
],
|
||||
"output_variable": "follow_up_suggestions",
|
||||
"parameters": {
|
||||
"temperature": 0.7,
|
||||
"max_tokens": 150
|
||||
},
|
||||
"enabled": true
|
||||
},
|
||||
{
|
||||
"id": "conversation_summary",
|
||||
"name": "Create Conversation Summary",
|
||||
"type": "transform",
|
||||
"input_variable": "personalized_response",
|
||||
"output_variable": "conversation_summary",
|
||||
"transformation": "extract:content",
|
||||
"enabled": true
|
||||
}
|
||||
],
|
||||
"variables": {
|
||||
"user_input": "I'm interested in learning about artificial intelligence and how it's changing the world",
|
||||
"demo_assistant": "assistant"
|
||||
},
|
||||
"metadata": {
|
||||
"created_by": "demo_system",
|
||||
"use_case": "demonstration",
|
||||
"tags": ["demo", "chatbot", "interactive", "multi_turn"],
|
||||
"demo_instructions": {
|
||||
"description": "This workflow demonstrates key chatbot capabilities including conversation continuity, intent analysis, personalized responses, and follow-up suggestions.",
|
||||
"usage": "Execute this workflow with different user inputs to see how the chatbot adapts its responses based on intent analysis and conversation context.",
|
||||
"features": [
|
||||
"Multi-turn conversation with persistent conversation ID",
|
||||
"Intent classification for tailored responses",
|
||||
"Context-aware personalized interactions",
|
||||
"Automatic follow-up suggestions",
|
||||
"Conversation summarization"
|
||||
]
|
||||
}
|
||||
},
|
||||
"timeout": 300
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,471 +0,0 @@
|
||||
"""
|
||||
Complete chatbot workflow tests with RAG integration.
|
||||
Test the entire pipeline from document upload to chat responses with knowledge retrieval.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import asyncio
|
||||
from typing import Dict, Any, List
|
||||
|
||||
from tests.clients.chatbot_api_client import ChatbotAPITestClient
|
||||
from tests.fixtures.test_data_manager import TestDataManager
|
||||
|
||||
|
||||
class TestChatbotRAGWorkflow:
|
||||
"""Test complete chatbot workflow with RAG integration"""
|
||||
|
||||
BASE_URL = "http://localhost:3001" # Through nginx
|
||||
|
||||
@pytest.fixture
|
||||
async def api_client(self):
|
||||
"""Chatbot API test client"""
|
||||
return ChatbotAPITestClient(self.BASE_URL)
|
||||
|
||||
@pytest.fixture
|
||||
async def authenticated_client(self, api_client):
|
||||
"""Pre-authenticated API client"""
|
||||
# Register and authenticate test user
|
||||
email = "ragtest@example.com"
|
||||
password = "testpass123"
|
||||
username = "ragtestuser"
|
||||
|
||||
# Register user
|
||||
register_result = await api_client.register_user(email, password, username)
|
||||
if register_result["status_code"] not in [201, 409]: # 409 = already exists
|
||||
pytest.fail(f"Failed to register user: {register_result}")
|
||||
|
||||
# Authenticate
|
||||
auth_result = await api_client.authenticate(email, password)
|
||||
if not auth_result["success"]:
|
||||
pytest.fail(f"Failed to authenticate: {auth_result}")
|
||||
|
||||
return api_client
|
||||
|
||||
@pytest.fixture
|
||||
def sample_documents(self):
|
||||
"""Sample documents for RAG testing"""
|
||||
return {
|
||||
"installation_guide": {
|
||||
"filename": "installation_guide.md",
|
||||
"content": """
|
||||
# Enclava Platform Installation Guide
|
||||
|
||||
## System Requirements
|
||||
- Python 3.8 or higher
|
||||
- Docker and Docker Compose
|
||||
- PostgreSQL 13+
|
||||
- Redis 6+
|
||||
- At least 4GB RAM
|
||||
|
||||
## Installation Steps
|
||||
1. Clone the repository
|
||||
2. Copy .env.example to .env
|
||||
3. Run docker-compose up --build
|
||||
4. Access the application at http://localhost:3000
|
||||
|
||||
## Troubleshooting
|
||||
- If port 3000 is in use, modify docker-compose.yml
|
||||
- Check Docker daemon is running
|
||||
- Ensure all required ports are available
|
||||
""",
|
||||
"test_questions": [
|
||||
{
|
||||
"question": "What are the system requirements for Enclava?",
|
||||
"expected_keywords": ["Python 3.8", "Docker", "PostgreSQL", "Redis", "4GB RAM"],
|
||||
"min_keywords": 3
|
||||
},
|
||||
{
|
||||
"question": "How do I install Enclava?",
|
||||
"expected_keywords": ["clone", "repository", ".env", "docker-compose up", "localhost:3000"],
|
||||
"min_keywords": 3
|
||||
},
|
||||
{
|
||||
"question": "What should I do if port 3000 is in use?",
|
||||
"expected_keywords": ["modify", "docker-compose.yml", "port"],
|
||||
"min_keywords": 2
|
||||
}
|
||||
]
|
||||
},
|
||||
"api_reference": {
|
||||
"filename": "api_reference.md",
|
||||
"content": """
|
||||
# Enclava API Reference
|
||||
|
||||
## Authentication
|
||||
All API requests require authentication using Bearer tokens or API keys.
|
||||
|
||||
## Endpoints
|
||||
|
||||
### GET /api/v1/models
|
||||
List available AI models
|
||||
Response: {"data": [{"id": "model-name", "object": "model", ...}]}
|
||||
|
||||
### POST /api/v1/chat/completions
|
||||
Create chat completion
|
||||
Body: {"model": "model-name", "messages": [...], "temperature": 0.7}
|
||||
Response: {"choices": [{"message": {"content": "response"}}]}
|
||||
|
||||
### POST /api/v1/embeddings
|
||||
Generate text embeddings
|
||||
Body: {"model": "embedding-model", "input": "text to embed"}
|
||||
Response: {"data": [{"embedding": [...]}]}
|
||||
|
||||
## Rate Limits
|
||||
- Free tier: 60 requests per minute
|
||||
- Pro tier: 600 requests per minute
|
||||
""",
|
||||
"test_questions": [
|
||||
{
|
||||
"question": "How do I authenticate with the Enclava API?",
|
||||
"expected_keywords": ["Bearer token", "API key", "authentication"],
|
||||
"min_keywords": 2
|
||||
},
|
||||
{
|
||||
"question": "What is the endpoint for chat completions?",
|
||||
"expected_keywords": ["/api/v1/chat/completions", "POST"],
|
||||
"min_keywords": 1
|
||||
},
|
||||
{
|
||||
"question": "What are the rate limits?",
|
||||
"expected_keywords": ["60 requests", "600 requests", "per minute", "free tier", "pro tier"],
|
||||
"min_keywords": 3
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_complete_rag_workflow(self, authenticated_client, sample_documents):
|
||||
"""Test complete RAG workflow from document upload to chat response"""
|
||||
|
||||
# Test with installation guide document
|
||||
doc_info = sample_documents["installation_guide"]
|
||||
|
||||
result = await authenticated_client.test_rag_workflow(
|
||||
collection_name="Installation Guide Collection",
|
||||
document_content=doc_info["content"],
|
||||
chatbot_name="Installation Assistant",
|
||||
test_question=doc_info["test_questions"][0]["question"]
|
||||
)
|
||||
|
||||
assert result["success"], f"RAG workflow failed: {result.get('error')}"
|
||||
assert result["workflow_complete"], "Workflow did not complete successfully"
|
||||
assert result["rag_working"], "RAG functionality is not working"
|
||||
|
||||
# Verify all workflow steps succeeded
|
||||
workflow_results = result["results"]
|
||||
assert workflow_results["collection_creation"]["success"]
|
||||
assert workflow_results["document_upload"]["success"]
|
||||
assert workflow_results["document_processing"]["success"]
|
||||
assert workflow_results["chatbot_creation"]["success"]
|
||||
assert workflow_results["api_key_creation"]["success"]
|
||||
assert workflow_results["chat_test"]["success"]
|
||||
|
||||
# Verify RAG sources were provided
|
||||
rag_verification = workflow_results["rag_verification"]
|
||||
assert rag_verification["has_sources"]
|
||||
assert rag_verification["source_count"] > 0
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_rag_knowledge_accuracy(self, authenticated_client, sample_documents):
|
||||
"""Test RAG system accuracy with known documents and questions"""
|
||||
|
||||
for doc_key, doc_info in sample_documents.items():
|
||||
# Create RAG workflow for this document
|
||||
workflow_result = await authenticated_client.test_rag_workflow(
|
||||
collection_name=f"Test Collection - {doc_key}",
|
||||
document_content=doc_info["content"],
|
||||
chatbot_name=f"Test Assistant - {doc_key}",
|
||||
test_question=doc_info["test_questions"][0]["question"] # Use first question for setup
|
||||
)
|
||||
|
||||
if not workflow_result["success"]:
|
||||
pytest.fail(f"Failed to set up RAG workflow for {doc_key}: {workflow_result.get('error')}")
|
||||
|
||||
# Extract chatbot info for testing
|
||||
chatbot_id = workflow_result["results"]["chatbot_creation"]["data"]["id"]
|
||||
api_key = workflow_result["results"]["api_key_creation"]["data"]["key"]
|
||||
|
||||
# Test each question for this document
|
||||
for question_data in doc_info["test_questions"]:
|
||||
chat_result = await authenticated_client.chat_with_bot(
|
||||
chatbot_id=chatbot_id,
|
||||
message=question_data["question"],
|
||||
api_key=api_key
|
||||
)
|
||||
|
||||
assert chat_result["success"], f"Chat failed for question: {question_data['question']}"
|
||||
|
||||
# Analyze response accuracy
|
||||
response_text = chat_result["data"]["response"].lower()
|
||||
keywords_found = sum(
|
||||
1 for keyword in question_data["expected_keywords"]
|
||||
if keyword.lower() in response_text
|
||||
)
|
||||
|
||||
accuracy = keywords_found / len(question_data["expected_keywords"])
|
||||
min_accuracy = question_data["min_keywords"] / len(question_data["expected_keywords"])
|
||||
|
||||
assert accuracy >= min_accuracy, \
|
||||
f"Accuracy {accuracy:.2f} below minimum {min_accuracy:.2f} for question: {question_data['question']} in {doc_key}"
|
||||
|
||||
# Verify sources were provided
|
||||
assert "sources" in chat_result["data"], f"No sources provided for question in {doc_key}"
|
||||
assert len(chat_result["data"]["sources"]) > 0, f"Empty sources for question in {doc_key}"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_conversation_memory_with_rag(self, authenticated_client, sample_documents):
|
||||
"""Test conversation memory functionality with RAG"""
|
||||
|
||||
# Set up RAG chatbot
|
||||
doc_info = sample_documents["api_reference"]
|
||||
workflow_result = await authenticated_client.test_rag_workflow(
|
||||
collection_name="Memory Test Collection",
|
||||
document_content=doc_info["content"],
|
||||
chatbot_name="Memory Test Assistant",
|
||||
test_question="What is the API reference?"
|
||||
)
|
||||
|
||||
assert workflow_result["success"], f"Failed to set up RAG workflow: {workflow_result.get('error')}"
|
||||
|
||||
chatbot_id = workflow_result["results"]["chatbot_creation"]["data"]["id"]
|
||||
api_key = workflow_result["results"]["api_key_creation"]["data"]["key"]
|
||||
|
||||
# Test conversation memory
|
||||
memory_result = await authenticated_client.test_conversation_memory(chatbot_id, api_key)
|
||||
|
||||
# Verify conversation was maintained
|
||||
assert memory_result["conversation_maintained"], "Conversation ID was not maintained across messages"
|
||||
|
||||
# Verify memory is working (may be challenging with RAG, so we're lenient)
|
||||
conversation_results = memory_result["conversation_results"]
|
||||
assert len(conversation_results) >= 3, "Not all conversation messages were processed"
|
||||
|
||||
# All messages should have gotten responses
|
||||
for result in conversation_results:
|
||||
assert "response" in result or "error" in result, "Message did not get a response"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_multi_document_rag(self, authenticated_client, sample_documents):
|
||||
"""Test RAG with multiple documents in one collection"""
|
||||
|
||||
# Create collection
|
||||
collection_result = await authenticated_client.create_rag_collection(
|
||||
name="Multi-Document Collection",
|
||||
description="Collection with multiple documents for testing"
|
||||
)
|
||||
assert collection_result["success"], f"Failed to create collection: {collection_result}"
|
||||
|
||||
collection_id = collection_result["data"]["id"]
|
||||
|
||||
# Upload multiple documents
|
||||
uploaded_docs = []
|
||||
for doc_key, doc_info in sample_documents.items():
|
||||
upload_result = await authenticated_client.upload_document(
|
||||
collection_id=collection_id,
|
||||
file_content=doc_info["content"],
|
||||
filename=doc_info["filename"]
|
||||
)
|
||||
|
||||
assert upload_result["success"], f"Failed to upload {doc_key}: {upload_result}"
|
||||
|
||||
# Wait for processing
|
||||
doc_id = upload_result["data"]["id"]
|
||||
processing_result = await authenticated_client.wait_for_document_processing(doc_id)
|
||||
assert processing_result["success"], f"Processing failed for {doc_key}: {processing_result}"
|
||||
|
||||
uploaded_docs.append(doc_key)
|
||||
|
||||
# Create chatbot with access to all documents
|
||||
chatbot_result = await authenticated_client.create_chatbot(
|
||||
name="Multi-Doc Assistant",
|
||||
use_rag=True,
|
||||
rag_collection="Multi-Document Collection"
|
||||
)
|
||||
assert chatbot_result["success"], f"Failed to create chatbot: {chatbot_result}"
|
||||
|
||||
chatbot_id = chatbot_result["data"]["id"]
|
||||
|
||||
# Create API key
|
||||
api_key_result = await authenticated_client.create_api_key_for_chatbot(chatbot_id)
|
||||
assert api_key_result["success"], f"Failed to create API key: {api_key_result}"
|
||||
|
||||
api_key = api_key_result["data"]["key"]
|
||||
|
||||
# Test questions that should draw from different documents
|
||||
test_questions = [
|
||||
"How do I install Enclava?", # Should use installation guide
|
||||
"What are the API endpoints?", # Should use API reference
|
||||
"Tell me about both installation and API usage" # Should use both documents
|
||||
]
|
||||
|
||||
for question in test_questions:
|
||||
chat_result = await authenticated_client.chat_with_bot(
|
||||
chatbot_id=chatbot_id,
|
||||
message=question,
|
||||
api_key=api_key
|
||||
)
|
||||
|
||||
assert chat_result["success"], f"Chat failed for multi-doc question: {question}"
|
||||
assert "sources" in chat_result["data"], f"No sources for multi-doc question: {question}"
|
||||
assert len(chat_result["data"]["sources"]) > 0, f"Empty sources for multi-doc question: {question}"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_rag_collection_isolation(self, authenticated_client, sample_documents):
|
||||
"""Test that RAG collections are properly isolated"""
|
||||
|
||||
# Create two separate collections with different documents
|
||||
doc1 = sample_documents["installation_guide"]
|
||||
doc2 = sample_documents["api_reference"]
|
||||
|
||||
# Collection 1 with installation guide
|
||||
workflow1 = await authenticated_client.test_rag_workflow(
|
||||
collection_name="Installation Only Collection",
|
||||
document_content=doc1["content"],
|
||||
chatbot_name="Installation Only Bot",
|
||||
test_question="What is installation?"
|
||||
)
|
||||
assert workflow1["success"], "Failed to create first RAG workflow"
|
||||
|
||||
# Collection 2 with API reference
|
||||
workflow2 = await authenticated_client.test_rag_workflow(
|
||||
collection_name="API Only Collection",
|
||||
document_content=doc2["content"],
|
||||
chatbot_name="API Only Bot",
|
||||
test_question="What is API?"
|
||||
)
|
||||
assert workflow2["success"], "Failed to create second RAG workflow"
|
||||
|
||||
# Extract chatbot info
|
||||
bot1_id = workflow1["results"]["chatbot_creation"]["data"]["id"]
|
||||
bot1_key = workflow1["results"]["api_key_creation"]["data"]["key"]
|
||||
|
||||
bot2_id = workflow2["results"]["chatbot_creation"]["data"]["id"]
|
||||
bot2_key = workflow2["results"]["api_key_creation"]["data"]["key"]
|
||||
|
||||
# Test cross-contamination
|
||||
# Bot 1 (installation only) should not know about API details
|
||||
api_question = "What are the rate limits?"
|
||||
result1 = await authenticated_client.chat_with_bot(bot1_id, api_question, api_key=bot1_key)
|
||||
|
||||
if result1["success"]:
|
||||
response1 = result1["data"]["response"].lower()
|
||||
# Should not have detailed API rate limit info since it only has installation docs
|
||||
has_rate_info = "60 requests" in response1 or "600 requests" in response1
|
||||
# This is a soft assertion since the bot might still give a generic response
|
||||
|
||||
# Bot 2 (API only) should not know about installation details
|
||||
install_question = "What are the system requirements?"
|
||||
result2 = await authenticated_client.chat_with_bot(bot2_id, install_question, api_key=bot2_key)
|
||||
|
||||
if result2["success"]:
|
||||
response2 = result2["data"]["response"].lower()
|
||||
# Should not have detailed system requirements since it only has API docs
|
||||
has_install_info = "python 3.8" in response2 or "docker" in response2
|
||||
# This is a soft assertion since the bot might still give a generic response
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_rag_error_handling(self, authenticated_client):
|
||||
"""Test RAG error handling scenarios"""
|
||||
|
||||
# Test chatbot with non-existent collection
|
||||
chatbot_result = await authenticated_client.create_chatbot(
|
||||
name="Error Test Bot",
|
||||
use_rag=True,
|
||||
rag_collection="NonExistentCollection"
|
||||
)
|
||||
|
||||
# Should either fail to create or handle gracefully
|
||||
if chatbot_result["success"]:
|
||||
# If creation succeeded, test that chat handles missing collection gracefully
|
||||
chatbot_id = chatbot_result["data"]["id"]
|
||||
|
||||
api_key_result = await authenticated_client.create_api_key_for_chatbot(chatbot_id)
|
||||
if api_key_result["success"]:
|
||||
api_key = api_key_result["data"]["key"]
|
||||
|
||||
chat_result = await authenticated_client.chat_with_bot(
|
||||
chatbot_id=chatbot_id,
|
||||
message="Tell me about something",
|
||||
api_key=api_key
|
||||
)
|
||||
|
||||
# Should handle gracefully - either succeed with fallback or fail gracefully
|
||||
# Don't assert success/failure, just ensure it doesn't crash
|
||||
assert "data" in chat_result or "error" in chat_result
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_rag_document_types(self, authenticated_client):
|
||||
"""Test RAG with different document types and formats"""
|
||||
|
||||
document_types = {
|
||||
"markdown": {
|
||||
"filename": "test.md",
|
||||
"content": "# Markdown Test\n\nThis is **bold** text and *italic* text.\n\n- List item 1\n- List item 2"
|
||||
},
|
||||
"plain_text": {
|
||||
"filename": "test.txt",
|
||||
"content": "This is plain text content for testing document processing and retrieval."
|
||||
},
|
||||
"json_like": {
|
||||
"filename": "config.txt",
|
||||
"content": '{"setting": "value", "number": 42, "enabled": true}'
|
||||
}
|
||||
}
|
||||
|
||||
# Create collection
|
||||
collection_result = await authenticated_client.create_rag_collection(
|
||||
name="Document Types Collection",
|
||||
description="Testing different document formats"
|
||||
)
|
||||
assert collection_result["success"], f"Failed to create collection: {collection_result}"
|
||||
|
||||
collection_id = collection_result["data"]["id"]
|
||||
|
||||
# Upload each document type
|
||||
for doc_type, doc_info in document_types.items():
|
||||
upload_result = await authenticated_client.upload_document(
|
||||
collection_id=collection_id,
|
||||
file_content=doc_info["content"],
|
||||
filename=doc_info["filename"]
|
||||
)
|
||||
|
||||
assert upload_result["success"], f"Failed to upload {doc_type}: {upload_result}"
|
||||
|
||||
# Wait for processing
|
||||
doc_id = upload_result["data"]["id"]
|
||||
processing_result = await authenticated_client.wait_for_document_processing(doc_id, timeout=30)
|
||||
assert processing_result["success"], f"Processing failed for {doc_type}: {processing_result}"
|
||||
|
||||
# Create chatbot to test all document types
|
||||
chatbot_result = await authenticated_client.create_chatbot(
|
||||
name="Document Types Bot",
|
||||
use_rag=True,
|
||||
rag_collection="Document Types Collection"
|
||||
)
|
||||
assert chatbot_result["success"], f"Failed to create chatbot: {chatbot_result}"
|
||||
|
||||
chatbot_id = chatbot_result["data"]["id"]
|
||||
|
||||
api_key_result = await authenticated_client.create_api_key_for_chatbot(chatbot_id)
|
||||
assert api_key_result["success"], f"Failed to create API key: {api_key_result}"
|
||||
|
||||
api_key = api_key_result["data"]["key"]
|
||||
|
||||
# Test questions for different document types
|
||||
test_questions = [
|
||||
"What is bold text?", # Should find markdown
|
||||
"What is the plain text content?", # Should find plain text
|
||||
"What is the setting value?", # Should find JSON-like content
|
||||
]
|
||||
|
||||
for question in test_questions:
|
||||
chat_result = await authenticated_client.chat_with_bot(
|
||||
chatbot_id=chatbot_id,
|
||||
message=question,
|
||||
api_key=api_key
|
||||
)
|
||||
|
||||
assert chat_result["success"], f"Chat failed for document type question: {question}"
|
||||
# Should have sources even if the answer quality varies
|
||||
assert "sources" in chat_result["data"], f"No sources for question: {question}"
|
||||
@@ -1,186 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test the enhanced workflow system with brand-ai patterns
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import aiohttp
|
||||
import json
|
||||
import time
|
||||
|
||||
async def test_enhanced_workflow_system():
|
||||
async with aiohttp.ClientSession() as session:
|
||||
try:
|
||||
print("🔧 Testing Enhanced Workflow System")
|
||||
print("=" * 50)
|
||||
|
||||
# Register and login test user
|
||||
timestamp = int(time.time())
|
||||
user_data = {
|
||||
"email": f"workflowtest{timestamp}@example.com",
|
||||
"password": "TestPassword123!",
|
||||
"username": f"workflowtest{timestamp}"
|
||||
}
|
||||
|
||||
async with session.post("http://localhost:58000/api/v1/auth/register", json=user_data) as response:
|
||||
if response.status != 201:
|
||||
error_data = await response.json()
|
||||
print(f"❌ Registration failed: {error_data}")
|
||||
return
|
||||
print("✅ User registered")
|
||||
|
||||
# Login
|
||||
login_data = {"email": user_data["email"], "password": user_data["password"]}
|
||||
async with session.post("http://localhost:58000/api/v1/auth/login", json=login_data) as response:
|
||||
if response.status != 200:
|
||||
error_data = await response.json()
|
||||
print(f"❌ Login failed: {error_data}")
|
||||
return
|
||||
|
||||
login_result = await response.json()
|
||||
token = login_result['access_token']
|
||||
headers = {'Authorization': f'Bearer {token}'}
|
||||
print("✅ Login successful")
|
||||
|
||||
# Test 1: Check workflow module status
|
||||
print("\n📊 Test 1: Module Status")
|
||||
async with session.get("http://localhost:58000/api/v1/modules/", headers=headers) as response:
|
||||
if response.status == 200:
|
||||
modules_data = await response.json()
|
||||
workflow_module = None
|
||||
for module in modules_data.get('modules', []):
|
||||
if module.get('name') == 'workflow':
|
||||
workflow_module = module
|
||||
break
|
||||
|
||||
if workflow_module:
|
||||
print(f"✅ Workflow module found: {workflow_module.get('status', 'unknown')}")
|
||||
print(f" Initialized: {workflow_module.get('initialized', False)}")
|
||||
print(f" Version: {workflow_module.get('version', 'unknown')}")
|
||||
else:
|
||||
print("❌ Workflow module not found")
|
||||
return
|
||||
else:
|
||||
print(f"❌ Failed to get module status: {response.status}")
|
||||
return
|
||||
|
||||
# Test 2: Create a simple brand naming workflow
|
||||
print("\n🏭 Test 2: Brand Naming Workflow")
|
||||
brand_workflow = {
|
||||
"name": "Brand Name Generation",
|
||||
"description": "Generate brand names using AI generation step",
|
||||
"version": "1.0.0",
|
||||
"steps": [
|
||||
{
|
||||
"id": "ai_gen_step",
|
||||
"name": "Generate Brand Names",
|
||||
"type": "ai_generation",
|
||||
"config": {
|
||||
"operation": "brand_names",
|
||||
"category": "semantic",
|
||||
"model": "openrouter/anthropic/claude-3.5-sonnet",
|
||||
"temperature": 0.8,
|
||||
"max_tokens": 500,
|
||||
"output_key": "brand_names",
|
||||
"prompt_template": "Generate 3 creative brand names for a {industry} company targeting {target_audience}. Company description: {description}. Return as a JSON list with name and description fields."
|
||||
},
|
||||
"enabled": True
|
||||
},
|
||||
{
|
||||
"id": "filter_step",
|
||||
"name": "Filter Quality Names",
|
||||
"type": "filter",
|
||||
"config": {
|
||||
"input_key": "brand_names",
|
||||
"output_key": "filtered_names",
|
||||
"filter_expression": "len(item.get('name', '')) > 3",
|
||||
"keep_original": False
|
||||
},
|
||||
"enabled": True
|
||||
}
|
||||
],
|
||||
"variables": {
|
||||
"industry": "technology",
|
||||
"target_audience": "developers",
|
||||
"description": "A platform for AI development tools"
|
||||
}
|
||||
}
|
||||
|
||||
# Test workflow execution - FastAPI expects both parameters
|
||||
payload = {
|
||||
"workflow_def": brand_workflow,
|
||||
"input_data": {
|
||||
"industry": "technology",
|
||||
"target_audience": "developers",
|
||||
"description": "A platform for AI development tools"
|
||||
}
|
||||
}
|
||||
|
||||
async with session.post(
|
||||
"http://localhost:58000/api/modules/v1/workflow/execute",
|
||||
json=payload,
|
||||
headers=headers,
|
||||
timeout=aiohttp.ClientTimeout(total=30)
|
||||
) as response:
|
||||
print(f"Workflow execution status: {response.status}")
|
||||
if response.status == 200:
|
||||
result = await response.json()
|
||||
execution_id = result.get("execution_id")
|
||||
print(f"✅ Workflow started: {execution_id}")
|
||||
print(f" Status: {result.get('status', 'unknown')}")
|
||||
|
||||
# Check for results
|
||||
if result.get("results"):
|
||||
print(f" Results available: {len(result.get('results', {}))}")
|
||||
|
||||
# Display brand names if available
|
||||
brand_names = result.get("results", {}).get("brand_names", [])
|
||||
if brand_names:
|
||||
print(f" Generated brand names: {len(brand_names)}")
|
||||
for i, name_info in enumerate(brand_names[:3]): # Show first 3
|
||||
name = name_info.get('name', 'Unknown')
|
||||
desc = name_info.get('description', 'No description')
|
||||
print(f" {i+1}. {name}: {desc[:50]}...")
|
||||
|
||||
# Display filtered results
|
||||
filtered_names = result.get("results", {}).get("filtered_names", [])
|
||||
if filtered_names:
|
||||
print(f" Filtered to {len(filtered_names)} quality names")
|
||||
else:
|
||||
print(" No results yet (workflow may still be running)")
|
||||
|
||||
elif response.status == 503:
|
||||
print("⚠️ Workflow module not initialized (expected on first run)")
|
||||
else:
|
||||
error_data = await response.json()
|
||||
print(f"❌ Workflow execution failed: {error_data}")
|
||||
|
||||
# Test 3: Check workflow templates
|
||||
print("\n📈 Test 3: Workflow Templates")
|
||||
try:
|
||||
async with session.get("http://localhost:58000/api/modules/v1/workflow/templates", headers=headers) as response:
|
||||
if response.status == 200:
|
||||
templates = await response.json()
|
||||
print(f"✅ Workflow templates available: {len(templates.get('templates', []))}")
|
||||
for template in templates.get('templates', [])[:2]: # Show first 2
|
||||
print(f" - {template.get('name')}: {template.get('description')}")
|
||||
else:
|
||||
print(f"ℹ️ Templates not available: {response.status}")
|
||||
except Exception as e:
|
||||
print(f"ℹ️ Templates endpoint error: {e}")
|
||||
|
||||
print(f"\n🎯 Enhanced Workflow System Test Complete!")
|
||||
print("The workflow system now supports:")
|
||||
print(" ✅ Brand-AI inspired step types (AI Generation, Filter, Map, Reduce, etc.)")
|
||||
print(" ✅ AI-powered content generation")
|
||||
print(" ✅ Data transformation and filtering")
|
||||
print(" ✅ Complex workflow orchestration")
|
||||
print(" ✅ Variable templating and context management")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Test error: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(test_enhanced_workflow_system())
|
||||
@@ -27,12 +27,12 @@ class LiveModuleIntegrationTest:
|
||||
print(f"✓ API Response: {response.status_code}")
|
||||
print(f"✓ Total modules: {data['total']}")
|
||||
|
||||
# Verify we have all 5 modules (updated after 2025-08-10 cleanup)
|
||||
assert data["total"] >= 5, f"Expected at least 5 modules, got {data['total']}"
|
||||
assert data["module_count"] >= 5
|
||||
# Verify we have all 2 modules (rag and chatbot only)
|
||||
assert data["total"] >= 2, f"Expected at least 2 modules, got {data['total']}"
|
||||
assert data["module_count"] >= 2
|
||||
assert data["initialized"] is True
|
||||
|
||||
expected_modules = ['cache', 'chatbot', 'rag', 'signal', 'workflow']
|
||||
expected_modules = ['chatbot', 'rag']
|
||||
loaded_modules = [mod["name"] for mod in data["modules"]]
|
||||
|
||||
for expected in expected_modules:
|
||||
@@ -64,14 +64,6 @@ class LiveModuleIntegrationTest:
|
||||
|
||||
modules_by_name = {mod["name"]: mod for mod in modules_data["modules"]}
|
||||
|
||||
# Test Cache Module
|
||||
if "cache" in modules_by_name:
|
||||
cache_stats = modules_by_name["cache"].get("stats", {})
|
||||
expected_cache_fields = ["hits", "misses", "errors", "total_requests"]
|
||||
for field in expected_cache_fields:
|
||||
assert field in cache_stats, f"Cache module missing {field} stat"
|
||||
print("✓ Cache module stats structure verified")
|
||||
|
||||
# Test Monitoring Module
|
||||
if "monitoring" in modules_by_name:
|
||||
monitor_stats = modules_by_name["monitoring"].get("stats", {})
|
||||
|
||||
@@ -23,10 +23,8 @@ sys.path.insert(0, str(backend_path))
|
||||
sys.path.insert(0, str(backend_path / "modules"))
|
||||
|
||||
try:
|
||||
from modules.cache.main import CacheModule
|
||||
from modules.rag.main import RAGModule
|
||||
from modules.chatbot.main import ChatbotModule
|
||||
from modules.workflow.main import WorkflowModule
|
||||
|
||||
from app.services.module_manager import ModuleManager, ModuleConfig
|
||||
except ImportError as e:
|
||||
@@ -50,17 +48,15 @@ except ImportError as e:
|
||||
def get_stats(self):
|
||||
return {"mock": True}
|
||||
|
||||
CacheModule = MockModule
|
||||
RAGModule = MockModule
|
||||
ChatbotModule = MockModule
|
||||
WorkflowModule = MockModule
|
||||
|
||||
# Mock ModuleManager for testing
|
||||
class MockModuleManager:
|
||||
def __init__(self):
|
||||
self.initialized = False
|
||||
self.modules = {}
|
||||
self.module_order = ['cache', 'rag', 'chatbot', 'workflow']
|
||||
self.module_order = ['rag', 'chatbot']
|
||||
|
||||
async def initialize(self):
|
||||
self.initialized = True
|
||||
@@ -94,26 +90,6 @@ except ImportError as e:
|
||||
class TestModuleIndividual:
|
||||
"""Test individual module functionality"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cache_module_initialization(self):
|
||||
"""Test cache module initialization and basic operations"""
|
||||
cache_module = CacheModule()
|
||||
|
||||
# Test initialization
|
||||
result = await cache_module.initialize()
|
||||
assert result is True
|
||||
assert cache_module.initialized is True
|
||||
|
||||
# Test stats retrieval
|
||||
stats = cache_module.get_stats()
|
||||
assert isinstance(stats, dict)
|
||||
assert 'hits' in stats
|
||||
assert 'misses' in stats
|
||||
assert 'errors' in stats
|
||||
|
||||
# Test cleanup
|
||||
await cache_module.cleanup()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_chatbot_module_initialization(self):
|
||||
"""Test chatbot module initialization and basic operations"""
|
||||
@@ -148,60 +124,6 @@ class TestModuleIndividual:
|
||||
|
||||
await rag_module.cleanup()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_workflow_module_initialization(self):
|
||||
"""Test workflow module initialization and basic operations"""
|
||||
workflow_module = WorkflowModule()
|
||||
|
||||
# Test initialization
|
||||
result = await workflow_module.initialize()
|
||||
assert result is True
|
||||
assert workflow_module.initialized is True
|
||||
|
||||
# Test stats retrieval
|
||||
stats = workflow_module.get_stats()
|
||||
assert isinstance(stats, dict)
|
||||
|
||||
await workflow_module.cleanup()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_signal_module_initialization(self):
|
||||
"""Test signal bot module initialization and basic operations"""
|
||||
try:
|
||||
from modules.signal.main import SignalBotModule
|
||||
except ImportError as e:
|
||||
# Skip test if SignalBot dependencies not available
|
||||
pytest.skip(f"Signal bot module dependencies not available: {e}")
|
||||
|
||||
signal_module = SignalBotModule()
|
||||
|
||||
# Test initialization (may fail if SignalBot not available, which is OK)
|
||||
try:
|
||||
result = await signal_module.initialize()
|
||||
assert signal_module.module_id == "signal"
|
||||
|
||||
# Test configuration
|
||||
assert hasattr(signal_module, 'config')
|
||||
assert hasattr(signal_module, 'stats')
|
||||
|
||||
# Test permissions
|
||||
permissions = signal_module.get_required_permissions()
|
||||
assert len(permissions) == 3
|
||||
assert any(p.resource == "signal" and p.action == "manage" for p in permissions)
|
||||
|
||||
except ImportError:
|
||||
# SignalBot library not available, skip functionality tests
|
||||
pytest.skip("SignalBot library not available")
|
||||
except Exception as e:
|
||||
# Other initialization errors are acceptable for testing
|
||||
logger.info(f"Signal module initialization failed (expected): {e}")
|
||||
|
||||
# Test cleanup
|
||||
try:
|
||||
await signal_module.cleanup()
|
||||
except Exception:
|
||||
pass # Cleanup errors are acceptable
|
||||
|
||||
|
||||
class TestModuleIntegration:
|
||||
"""Test module integration and interactions"""
|
||||
@@ -216,19 +138,19 @@ class TestModuleIntegration:
|
||||
assert module_manager.initialized is True
|
||||
|
||||
# Check all expected modules are loaded
|
||||
expected_modules = ['cache', 'chatbot', 'rag', 'workflow', 'signal']
|
||||
expected_modules = ['chatbot', 'rag']
|
||||
loaded_modules = module_manager.list_modules()
|
||||
|
||||
for module_name in expected_modules:
|
||||
assert module_name in loaded_modules, f"Module {module_name} not loaded"
|
||||
|
||||
# Test module retrieval
|
||||
cache_module = module_manager.get_module('cache')
|
||||
assert cache_module is not None
|
||||
|
||||
rag_module = module_manager.get_module('rag')
|
||||
assert rag_module is not None
|
||||
|
||||
chatbot_module = module_manager.get_module('chatbot')
|
||||
assert chatbot_module is not None
|
||||
|
||||
await module_manager.cleanup()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -269,18 +191,18 @@ class TestModuleHotReload:
|
||||
await module_manager.initialize()
|
||||
|
||||
# Get initial module reference
|
||||
initial_module = module_manager.get_module('cache')
|
||||
initial_module = module_manager.get_module('rag')
|
||||
assert initial_module is not None
|
||||
|
||||
# Test reload
|
||||
await module_manager.reload_module('cache')
|
||||
await module_manager.reload_module('rag')
|
||||
|
||||
# Get module after reload
|
||||
reloaded_module = module_manager.get_module('cache')
|
||||
reloaded_module = module_manager.get_module('rag')
|
||||
assert reloaded_module is not None
|
||||
|
||||
# Module should be reloaded (may be same object or different)
|
||||
assert module_manager.is_module_loaded('cache')
|
||||
assert module_manager.is_module_loaded('rag')
|
||||
|
||||
await module_manager.cleanup()
|
||||
|
||||
@@ -304,26 +226,6 @@ class TestModulePerformance:
|
||||
|
||||
await module_manager.cleanup()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cache_module_performance(self):
|
||||
"""Test cache module performance"""
|
||||
cache_module = CacheModule()
|
||||
await cache_module.initialize()
|
||||
|
||||
# Test multiple rapid operations
|
||||
start_time = time.time()
|
||||
|
||||
for i in range(10):
|
||||
stats = cache_module.get_stats()
|
||||
assert isinstance(stats, dict)
|
||||
|
||||
operations_time = time.time() - start_time
|
||||
|
||||
# 10 operations should complete quickly
|
||||
assert operations_time < 1.0, f"Cache operations took {operations_time:.2f}s"
|
||||
|
||||
await cache_module.cleanup()
|
||||
|
||||
|
||||
|
||||
class TestModuleErrorHandling:
|
||||
@@ -338,7 +240,7 @@ class TestModuleErrorHandling:
|
||||
await module_manager.initialize()
|
||||
|
||||
# At least some modules should load successfully
|
||||
assert len(module_manager.list_modules()) >= 5
|
||||
assert len(module_manager.list_modules()) >= 2
|
||||
|
||||
await module_manager.cleanup()
|
||||
|
||||
@@ -426,10 +328,6 @@ if __name__ == "__main__":
|
||||
# Test individual modules
|
||||
test_individual = TestModuleIndividual()
|
||||
|
||||
print("Testing cache module...")
|
||||
await test_individual.test_cache_module_initialization()
|
||||
print("✓ Cache module test passed")
|
||||
|
||||
print("Testing chatbot module...")
|
||||
await test_individual.test_chatbot_module_initialization()
|
||||
print("✓ Chatbot module test passed")
|
||||
@@ -438,14 +336,6 @@ if __name__ == "__main__":
|
||||
await test_individual.test_rag_module_initialization()
|
||||
print("✓ RAG module with content processing test passed")
|
||||
|
||||
print("Testing workflow module...")
|
||||
await test_individual.test_workflow_module_initialization()
|
||||
print("✓ Workflow module test passed")
|
||||
|
||||
print("Testing signal bot module...")
|
||||
await test_individual.test_signal_module_initialization()
|
||||
print("✓ Signal bot module test passed")
|
||||
|
||||
# Test integration
|
||||
test_integration = TestModuleIntegration()
|
||||
|
||||
|
||||
@@ -1,167 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.INTERNAL_API_URL || 'http://enclava-backend:8000'
|
||||
|
||||
export async function GET(
|
||||
request: NextRequest,
|
||||
{ params }: { params: { id: string } }
|
||||
) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
const workflowId = params.id
|
||||
|
||||
// Fetch workflow from the backend workflow module
|
||||
const response = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'get_workflow',
|
||||
workflow_id: workflowId
|
||||
}),
|
||||
cache: 'no-store'
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to fetch workflow', details: errorData },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json({
|
||||
workflow: data.response?.workflow
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('Error fetching workflow:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
export async function PUT(
|
||||
request: NextRequest,
|
||||
{ params }: { params: { id: string } }
|
||||
) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
const workflowId = params.id
|
||||
const workflowData = await request.json()
|
||||
|
||||
// Validate workflow first
|
||||
const validateResponse = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'validate_workflow',
|
||||
workflow_def: workflowData
|
||||
})
|
||||
})
|
||||
|
||||
if (!validateResponse.ok) {
|
||||
const errorData = await validateResponse.json()
|
||||
return NextResponse.json(
|
||||
{ error: 'Workflow validation failed', details: errorData },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Update workflow via backend workflow module
|
||||
const updateResponse = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'update_workflow',
|
||||
workflow_id: workflowId,
|
||||
workflow_def: workflowData
|
||||
})
|
||||
})
|
||||
|
||||
if (!updateResponse.ok) {
|
||||
const errorData = await updateResponse.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to update workflow', details: errorData },
|
||||
{ status: updateResponse.status }
|
||||
)
|
||||
}
|
||||
|
||||
const updateData = await updateResponse.json()
|
||||
return NextResponse.json({
|
||||
workflow: updateData.response?.workflow
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('Error updating workflow:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
export async function DELETE(
|
||||
request: NextRequest,
|
||||
{ params }: { params: { id: string } }
|
||||
) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
const workflowId = params.id
|
||||
|
||||
// Delete workflow via backend workflow module
|
||||
const response = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'delete_workflow',
|
||||
workflow_id: workflowId
|
||||
})
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to delete workflow', details: errorData },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
|
||||
// Check if the backend returned an error
|
||||
if (data.response?.error) {
|
||||
return NextResponse.json(
|
||||
{ error: data.response.error },
|
||||
{ status: 404 }
|
||||
)
|
||||
}
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
message: `Workflow ${workflowId} deleted successfully`,
|
||||
data: data.response
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('Error deleting workflow:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.INTERNAL_API_URL || 'http://enclava-backend:8000'
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
const { workflow_def, input_data } = await request.json()
|
||||
|
||||
const response = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'execute_workflow',
|
||||
workflow_def,
|
||||
input_data: input_data || {}
|
||||
})
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to execute workflow', details: errorData },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json(data)
|
||||
} catch (error) {
|
||||
console.error('Error executing workflow:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,49 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.INTERNAL_API_URL || 'http://enclava-backend:8000'
|
||||
|
||||
export async function POST(
|
||||
request: NextRequest,
|
||||
{ params }: { params: { id: string } }
|
||||
) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
const executionId = params.id
|
||||
|
||||
// Cancel execution via workflow module
|
||||
const response = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'cancel_execution',
|
||||
execution_id: executionId
|
||||
}),
|
||||
cache: 'no-store'
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to cancel execution', details: errorData },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
message: `Execution ${executionId} cancelled successfully`,
|
||||
data: data.response
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('Error cancelling execution:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.INTERNAL_API_URL || 'http://enclava-backend:8000'
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
// Fetch executions from the backend workflow module
|
||||
const response = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'get_executions'
|
||||
}),
|
||||
cache: 'no-store'
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to fetch workflow executions', details: errorData },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json({
|
||||
executions: data.response?.executions || []
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('Error fetching workflow executions:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,145 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.INTERNAL_API_URL || 'http://enclava-backend:8000'
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
const formData = await request.formData()
|
||||
const file = formData.get('workflow_file') as File
|
||||
|
||||
if (!file) {
|
||||
return NextResponse.json(
|
||||
{ error: 'No file provided' },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Check file type
|
||||
if (!file.name.endsWith('.json')) {
|
||||
return NextResponse.json(
|
||||
{ error: 'Only JSON files are supported' },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Check file size (max 10MB)
|
||||
if (file.size > 10 * 1024 * 1024) {
|
||||
return NextResponse.json(
|
||||
{ error: 'File too large. Maximum size is 10MB' },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Read file content
|
||||
const fileContent = await file.text()
|
||||
|
||||
let workflowData
|
||||
try {
|
||||
workflowData = JSON.parse(fileContent)
|
||||
} catch (error) {
|
||||
return NextResponse.json(
|
||||
{ error: 'Invalid JSON format' },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Validate required fields
|
||||
const requiredFields = ['name', 'description', 'steps']
|
||||
for (const field of requiredFields) {
|
||||
if (!workflowData[field]) {
|
||||
return NextResponse.json(
|
||||
{ error: `Missing required field: ${field}` },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Generate new ID if not provided
|
||||
if (!workflowData.id) {
|
||||
workflowData.id = `imported-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`
|
||||
}
|
||||
|
||||
// Add import metadata
|
||||
workflowData.metadata = {
|
||||
...workflowData.metadata,
|
||||
imported_at: new Date().toISOString(),
|
||||
imported_from: file.name,
|
||||
imported_by: 'user'
|
||||
}
|
||||
|
||||
// Validate workflow through backend
|
||||
const validateResponse = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'validate_workflow',
|
||||
workflow_def: workflowData
|
||||
})
|
||||
})
|
||||
|
||||
if (!validateResponse.ok) {
|
||||
const errorData = await validateResponse.json()
|
||||
return NextResponse.json(
|
||||
{
|
||||
error: 'Workflow validation failed',
|
||||
details: errorData.response?.errors || ['Unknown validation error'],
|
||||
workflow_data: workflowData
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
const validationResult = await validateResponse.json()
|
||||
if (!validationResult.response?.valid) {
|
||||
return NextResponse.json(
|
||||
{
|
||||
error: 'Workflow validation failed',
|
||||
details: validationResult.response?.errors || ['Workflow is not valid'],
|
||||
workflow_data: workflowData
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Create workflow via backend
|
||||
const createResponse = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'create_workflow',
|
||||
workflow_def: workflowData
|
||||
})
|
||||
})
|
||||
|
||||
if (!createResponse.ok) {
|
||||
const errorData = await createResponse.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to create workflow', details: errorData },
|
||||
{ status: createResponse.status }
|
||||
)
|
||||
}
|
||||
|
||||
const createData = await createResponse.json()
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
message: 'Workflow imported successfully',
|
||||
workflow: createData.response?.workflow || workflowData,
|
||||
validation_passed: true
|
||||
})
|
||||
|
||||
} catch (error) {
|
||||
console.error('Error importing workflow:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error', details: error instanceof Error ? error.message : 'Unknown error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,102 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.INTERNAL_API_URL || 'http://enclava-backend:8000'
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
// Fetch workflows from the backend workflow module
|
||||
const response = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'get_workflows'
|
||||
}),
|
||||
cache: 'no-store'
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to fetch workflows', details: errorData },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json({
|
||||
workflows: data.response?.workflows || []
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('Error fetching workflows:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
const workflowData = await request.json()
|
||||
|
||||
// Validate workflow first
|
||||
const validateResponse = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'validate_workflow',
|
||||
workflow_def: workflowData
|
||||
})
|
||||
})
|
||||
|
||||
if (!validateResponse.ok) {
|
||||
const errorData = await validateResponse.json()
|
||||
return NextResponse.json(
|
||||
{ error: 'Workflow validation failed', details: errorData },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Create workflow via backend workflow module
|
||||
const createResponse = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'create_workflow',
|
||||
workflow_def: workflowData
|
||||
})
|
||||
})
|
||||
|
||||
if (!createResponse.ok) {
|
||||
const errorData = await createResponse.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to create workflow', details: errorData },
|
||||
{ status: createResponse.status }
|
||||
)
|
||||
}
|
||||
|
||||
const createData = await createResponse.json()
|
||||
return NextResponse.json({
|
||||
workflow: createData.response?.workflow
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('Error creating workflow:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.INTERNAL_API_URL || 'http://enclava-backend:8000'
|
||||
|
||||
export async function GET(
|
||||
request: NextRequest,
|
||||
{ params }: { params: { id: string } }
|
||||
) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
const templateId = params.id
|
||||
|
||||
// First get all templates
|
||||
const response = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'get_workflow_templates'
|
||||
})
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to fetch workflow templates', details: errorData },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
const templates = data.templates || []
|
||||
|
||||
// Find the specific template
|
||||
const template = templates.find((t: any) => t.id === templateId)
|
||||
|
||||
if (!template) {
|
||||
return NextResponse.json(
|
||||
{ error: `Template with id '${templateId}' not found` },
|
||||
{ status: 404 }
|
||||
)
|
||||
}
|
||||
|
||||
return NextResponse.json({ template })
|
||||
} catch (error) {
|
||||
console.error('Error fetching workflow template:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.INTERNAL_API_URL || 'http://enclava-backend:8000'
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
const response = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'get_workflow_templates'
|
||||
}),
|
||||
cache: 'no-store'
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.text()
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to fetch workflow templates', details: errorData },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return NextResponse.json(data)
|
||||
} catch (error) {
|
||||
console.error('Error fetching workflow templates:', error)
|
||||
return NextResponse.json(
|
||||
{ error: 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,77 +0,0 @@
|
||||
import { NextRequest, NextResponse } from 'next/server'
|
||||
|
||||
const BACKEND_URL = process.env.INTERNAL_API_URL || 'http://enclava-backend:8000'
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
const adminToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImlzX3N1cGVydXNlciI6dHJ1ZSwicm9sZSI6InN1cGVyX2FkbWluIiwiZXhwIjoxNzg0Nzk2NDI2LjA0NDYxOX0.YOTlUY8nowkaLAXy5EKfnZEpbDgGCabru5R0jdq_DOQ'
|
||||
|
||||
const { workflow, test_data } = await request.json()
|
||||
|
||||
// First validate the workflow
|
||||
const validateResponse = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'validate_workflow',
|
||||
workflow_def: workflow
|
||||
})
|
||||
})
|
||||
|
||||
if (!validateResponse.ok) {
|
||||
const errorData = await validateResponse.json()
|
||||
return NextResponse.json(
|
||||
{
|
||||
status: 'failed',
|
||||
error: 'Workflow validation failed',
|
||||
details: errorData
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// If validation passes, try a test execution
|
||||
const executeResponse = await fetch(`${BACKEND_URL}/api/modules/workflow/execute`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${adminToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
action: 'execute_workflow',
|
||||
workflow_def: workflow,
|
||||
input_data: test_data || {}
|
||||
})
|
||||
})
|
||||
|
||||
if (!executeResponse.ok) {
|
||||
const errorData = await executeResponse.text()
|
||||
return NextResponse.json(
|
||||
{
|
||||
status: 'failed',
|
||||
error: 'Workflow test execution failed',
|
||||
details: errorData
|
||||
},
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
|
||||
const executionData = await executeResponse.json()
|
||||
return NextResponse.json({
|
||||
status: 'success',
|
||||
execution: executionData
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('Error testing workflow:', error)
|
||||
return NextResponse.json(
|
||||
{
|
||||
status: 'failed',
|
||||
error: 'Internal server error'
|
||||
},
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,805 +0,0 @@
|
||||
"use client"
|
||||
|
||||
import { useState, useEffect, useCallback } from "react"
|
||||
import { useRouter, useSearchParams } from "next/navigation"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { Label } from "@/components/ui/label"
|
||||
import { Textarea } from "@/components/ui/textarea"
|
||||
import { Badge } from "@/components/ui/badge"
|
||||
import { useToast } from "@/hooks/use-toast"
|
||||
import { apiClient } from "@/lib/api-client"
|
||||
import { config } from "@/lib/config"
|
||||
import {
|
||||
Save,
|
||||
Play,
|
||||
ArrowLeft,
|
||||
Plus,
|
||||
Settings,
|
||||
Trash2,
|
||||
Move,
|
||||
Zap,
|
||||
GitBranch,
|
||||
MessageSquare,
|
||||
Database,
|
||||
Workflow as WorkflowIcon,
|
||||
GripVertical
|
||||
} from "lucide-react"
|
||||
import { Dialog, DialogContent, DialogDescription, DialogHeader, DialogTitle } from "@/components/ui/dialog"
|
||||
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
|
||||
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
||||
import { Separator } from "@/components/ui/separator"
|
||||
import ProtectedRoute from "@/components/ProtectedRoute"
|
||||
import StepConfigurationPanel from "@/components/workflows/StepConfigurationPanel"
|
||||
|
||||
interface WorkflowStep {
|
||||
id: string
|
||||
name: string
|
||||
type: string
|
||||
config: Record<string, any>
|
||||
position: { x: number; y: number }
|
||||
connections: string[]
|
||||
}
|
||||
|
||||
interface WorkflowDefinition {
|
||||
id?: string
|
||||
name: string
|
||||
description: string
|
||||
version: string
|
||||
steps: WorkflowStep[]
|
||||
variables: Record<string, any>
|
||||
outputs: Record<string, string>
|
||||
metadata: Record<string, any>
|
||||
}
|
||||
|
||||
const stepTypes = [
|
||||
{
|
||||
type: "llm_call",
|
||||
name: "LLM Call",
|
||||
description: "Make a call to a language model",
|
||||
icon: <MessageSquare className="h-4 w-4" />,
|
||||
color: "bg-blue-100 text-blue-800 dark:bg-blue-900 dark:text-blue-300"
|
||||
},
|
||||
{
|
||||
type: "chatbot",
|
||||
name: "Chatbot",
|
||||
description: "Use a configured chatbot",
|
||||
icon: <MessageSquare className="h-4 w-4" />,
|
||||
color: "bg-green-100 text-green-800 dark:bg-green-900 dark:text-green-300"
|
||||
},
|
||||
{
|
||||
type: "transform",
|
||||
name: "Transform",
|
||||
description: "Transform or process data",
|
||||
icon: <Zap className="h-4 w-4" />,
|
||||
color: "bg-purple-100 text-purple-800 dark:bg-purple-900 dark:text-purple-300"
|
||||
},
|
||||
{
|
||||
type: "conditional",
|
||||
name: "Conditional",
|
||||
description: "Conditional logic and branching",
|
||||
icon: <GitBranch className="h-4 w-4" />,
|
||||
color: "bg-orange-100 text-orange-800 dark:bg-orange-900 dark:text-orange-300"
|
||||
},
|
||||
{
|
||||
type: "parallel",
|
||||
name: "Parallel",
|
||||
description: "Execute multiple steps in parallel",
|
||||
icon: <Move className="h-4 w-4" />,
|
||||
color: "bg-indigo-100 text-indigo-800 dark:bg-indigo-900 dark:text-indigo-300"
|
||||
},
|
||||
{
|
||||
type: "rag_search",
|
||||
name: "RAG Search",
|
||||
description: "Search documents in RAG collection",
|
||||
icon: <Database className="h-4 w-4" />,
|
||||
color: "bg-teal-100 text-teal-800 dark:bg-teal-900 dark:text-teal-300"
|
||||
}
|
||||
]
|
||||
|
||||
export default function WorkflowBuilderPage() {
|
||||
const { toast } = useToast()
|
||||
const router = useRouter()
|
||||
const searchParams = useSearchParams()
|
||||
const workflowId = searchParams.get('id')
|
||||
const templateId = searchParams.get('template')
|
||||
|
||||
const [workflow, setWorkflow] = useState<WorkflowDefinition>({
|
||||
name: "",
|
||||
description: "",
|
||||
version: "1.0.0",
|
||||
steps: [],
|
||||
variables: {},
|
||||
outputs: {},
|
||||
metadata: {}
|
||||
})
|
||||
|
||||
const [selectedStep, setSelectedStep] = useState<WorkflowStep | null>(null)
|
||||
const [showStepPalette, setShowStepPalette] = useState(false)
|
||||
const [showVariablePanel, setShowVariablePanel] = useState(false)
|
||||
const [loading, setLoading] = useState(true)
|
||||
const [saving, setSaving] = useState(false)
|
||||
const [draggedStepIndex, setDraggedStepIndex] = useState<number | null>(null)
|
||||
const [dragOverIndex, setDragOverIndex] = useState<number | null>(null)
|
||||
|
||||
// Configuration data
|
||||
const [availableChatbots, setAvailableChatbots] = useState<Array<{id: string, name: string}>>([])
|
||||
const [availableCollections, setAvailableCollections] = useState<string[]>([])
|
||||
|
||||
// Helper function to ensure all steps have position properties
|
||||
const ensureStepPositions = (steps: any[]): WorkflowStep[] => {
|
||||
return steps.map((step, index) => ({
|
||||
...step,
|
||||
position: step.position || { x: 100, y: 100 + index * 120 },
|
||||
connections: step.connections || []
|
||||
}))
|
||||
}
|
||||
|
||||
const loadWorkflow = useCallback(async () => {
|
||||
try {
|
||||
setLoading(true)
|
||||
|
||||
if (workflowId) {
|
||||
// Load existing workflow
|
||||
const data = await apiClient.get(`/api-internal/v1/workflows/${workflowId}`)
|
||||
setWorkflow({
|
||||
...data.workflow,
|
||||
steps: ensureStepPositions(data.workflow.steps || [])
|
||||
})
|
||||
} else if (templateId) {
|
||||
// Load from template
|
||||
const data = await apiClient.get(`/api-internal/v1/workflows/templates/${templateId}`)
|
||||
setWorkflow({
|
||||
...data.template.definition,
|
||||
id: undefined, // Remove ID for new workflow
|
||||
name: `${data.template.definition.name} (Copy)`,
|
||||
steps: ensureStepPositions(data.template.definition.steps || [])
|
||||
})
|
||||
}
|
||||
} catch (error) {
|
||||
toast({
|
||||
title: "Error",
|
||||
description: "Failed to load workflow",
|
||||
variant: "destructive"
|
||||
})
|
||||
} finally {
|
||||
setLoading(false)
|
||||
}
|
||||
}, [workflowId, templateId, toast])
|
||||
|
||||
const loadConfigurationData = useCallback(async () => {
|
||||
try {
|
||||
// Get auth token (same pattern as playground)
|
||||
const token = localStorage.getItem('token')
|
||||
const authHeaders = token ? { 'Authorization': `Bearer ${token}` } : {}
|
||||
|
||||
// Load available chatbots (existing platform component)
|
||||
try {
|
||||
const chatbotsData = await apiClient.get('/api-internal/v1/chatbot/list')
|
||||
|
||||
// Backend returns a direct array of chatbot objects
|
||||
let chatbotOptions: Array<{id: string, name: string}> = []
|
||||
|
||||
if (Array.isArray(chatbotsData)) {
|
||||
// Map to both ID and name for display purposes
|
||||
chatbotOptions = chatbotsData.map((chatbot: any) => ({
|
||||
id: chatbot.id || '',
|
||||
name: chatbot.name || chatbot.id || 'Unnamed Chatbot'
|
||||
}))
|
||||
}
|
||||
|
||||
// Store full chatbot objects for better UX (names + IDs)
|
||||
setAvailableChatbots(chatbotOptions)
|
||||
} catch (error) {
|
||||
// Silently handle error - chatbots will be empty array
|
||||
}
|
||||
|
||||
// Load available RAG collections (existing platform component)
|
||||
try {
|
||||
const collectionsData = await apiClient.get('/api-internal/v1/rag/collections')
|
||||
setAvailableCollections(collectionsData.collections?.map((c: any) => c.name) || [])
|
||||
} catch (error) {
|
||||
// Silently handle error - collections will be empty array
|
||||
}
|
||||
} catch (error) {
|
||||
// Silently handle error - configuration will use defaults
|
||||
}
|
||||
}, [])
|
||||
|
||||
useEffect(() => {
|
||||
loadWorkflow()
|
||||
loadConfigurationData()
|
||||
}, [loadWorkflow, loadConfigurationData])
|
||||
|
||||
const saveWorkflow = async () => {
|
||||
try {
|
||||
setSaving(true)
|
||||
|
||||
const url = workflowId ? `/api-internal/v1/workflows/${workflowId}` : '/api-internal/v1/workflows'
|
||||
|
||||
if (workflowId) {
|
||||
await apiClient.put(url, workflow)
|
||||
} else {
|
||||
await apiClient.post(url, workflow)
|
||||
}
|
||||
|
||||
toast({
|
||||
title: "Success",
|
||||
description: workflowId ? "Workflow updated" : "Workflow created"
|
||||
})
|
||||
|
||||
// Redirect to workflow list if this was a new workflow
|
||||
if (!workflowId) {
|
||||
router.push('/workflows')
|
||||
}
|
||||
} catch (error) {
|
||||
toast({
|
||||
title: "Error",
|
||||
description: "Failed to save workflow",
|
||||
variant: "destructive"
|
||||
})
|
||||
} finally {
|
||||
setSaving(false)
|
||||
}
|
||||
}
|
||||
|
||||
const testWorkflow = async () => {
|
||||
try {
|
||||
const result = await apiClient.post('/api-internal/v1/workflows/test', {
|
||||
workflow,
|
||||
test_data: {}
|
||||
})
|
||||
|
||||
toast({
|
||||
title: "Test Result",
|
||||
description: `Workflow test completed: ${result.status}`
|
||||
})
|
||||
} catch (error) {
|
||||
toast({
|
||||
title: "Test Failed",
|
||||
description: "Failed to test workflow",
|
||||
variant: "destructive"
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const addStep = (stepType: string) => {
|
||||
const newStep: WorkflowStep = {
|
||||
id: `step_${Date.now()}`,
|
||||
name: `New ${stepTypes.find(t => t.type === stepType)?.name || stepType}`,
|
||||
type: stepType,
|
||||
config: {},
|
||||
position: { x: 100, y: 100 + workflow.steps.length * 120 },
|
||||
connections: []
|
||||
}
|
||||
|
||||
setWorkflow(prev => ({
|
||||
...prev,
|
||||
steps: [...prev.steps, newStep]
|
||||
}))
|
||||
|
||||
setSelectedStep(newStep)
|
||||
setShowStepPalette(false)
|
||||
}
|
||||
|
||||
const updateStep = (stepId: string, updates: Partial<WorkflowStep>) => {
|
||||
setWorkflow(prev => ({
|
||||
...prev,
|
||||
steps: prev.steps.map(step =>
|
||||
step.id === stepId ? { ...step, ...updates } : step
|
||||
)
|
||||
}))
|
||||
|
||||
if (selectedStep?.id === stepId) {
|
||||
setSelectedStep(prev => prev ? { ...prev, ...updates } : null)
|
||||
}
|
||||
}
|
||||
|
||||
const deleteStep = (stepId: string) => {
|
||||
setWorkflow(prev => ({
|
||||
...prev,
|
||||
steps: prev.steps.filter(step => step.id !== stepId)
|
||||
}))
|
||||
|
||||
if (selectedStep?.id === stepId) {
|
||||
setSelectedStep(null)
|
||||
}
|
||||
}
|
||||
|
||||
const addVariable = (name: string, value: any) => {
|
||||
setWorkflow(prev => ({
|
||||
...prev,
|
||||
variables: { ...prev.variables, [name]: value }
|
||||
}))
|
||||
}
|
||||
|
||||
const removeVariable = (name: string) => {
|
||||
const { [name]: removed, ...rest } = workflow.variables
|
||||
setWorkflow(prev => ({ ...prev, variables: rest }))
|
||||
}
|
||||
|
||||
const reorderSteps = (fromIndex: number, toIndex: number) => {
|
||||
if (fromIndex === toIndex) return
|
||||
|
||||
const newSteps = [...workflow.steps]
|
||||
const [draggedStep] = newSteps.splice(fromIndex, 1)
|
||||
newSteps.splice(toIndex, 0, draggedStep)
|
||||
|
||||
// Recalculate canvas positions based on new order (vertical layout)
|
||||
const updatedSteps = newSteps.map((step, index) => ({
|
||||
...step,
|
||||
position: {
|
||||
x: 100,
|
||||
y: 100 + index * 120
|
||||
}
|
||||
}))
|
||||
|
||||
setWorkflow(prev => ({
|
||||
...prev,
|
||||
steps: updatedSteps
|
||||
}))
|
||||
|
||||
// Update selected step if it was the one being dragged
|
||||
if (selectedStep?.id === draggedStep.id) {
|
||||
setSelectedStep(draggedStep)
|
||||
}
|
||||
}
|
||||
|
||||
const handleDragStart = (e: React.DragEvent, index: number) => {
|
||||
setDraggedStepIndex(index)
|
||||
e.dataTransfer.effectAllowed = 'move'
|
||||
e.dataTransfer.setData('text/html', e.currentTarget.outerHTML)
|
||||
|
||||
// Add visual feedback
|
||||
if (e.currentTarget instanceof HTMLElement) {
|
||||
e.currentTarget.style.opacity = '0.5'
|
||||
}
|
||||
}
|
||||
|
||||
const handleDragEnd = (e: React.DragEvent) => {
|
||||
setDraggedStepIndex(null)
|
||||
setDragOverIndex(null)
|
||||
|
||||
// Reset visual feedback
|
||||
if (e.currentTarget instanceof HTMLElement) {
|
||||
e.currentTarget.style.opacity = '1'
|
||||
}
|
||||
}
|
||||
|
||||
const handleDragOver = (e: React.DragEvent, overIndex: number) => {
|
||||
e.preventDefault()
|
||||
e.dataTransfer.dropEffect = 'move'
|
||||
|
||||
if (draggedStepIndex !== null && draggedStepIndex !== overIndex) {
|
||||
setDragOverIndex(overIndex)
|
||||
}
|
||||
}
|
||||
|
||||
const handleDragLeave = (e: React.DragEvent) => {
|
||||
// Only clear if we're leaving the entire step element, not just a child
|
||||
if (!e.currentTarget.contains(e.relatedTarget as Node)) {
|
||||
setDragOverIndex(null)
|
||||
}
|
||||
}
|
||||
|
||||
const handleDrop = (e: React.DragEvent, dropIndex: number) => {
|
||||
e.preventDefault()
|
||||
|
||||
if (draggedStepIndex !== null && draggedStepIndex !== dropIndex) {
|
||||
reorderSteps(draggedStepIndex, dropIndex)
|
||||
}
|
||||
|
||||
setDraggedStepIndex(null)
|
||||
setDragOverIndex(null)
|
||||
}
|
||||
|
||||
if (loading) {
|
||||
return (
|
||||
<ProtectedRoute>
|
||||
<div className="container mx-auto py-8">
|
||||
<div className="flex items-center justify-center py-8">
|
||||
<div className="animate-spin rounded-full h-8 w-8 border-b-2 border-primary"></div>
|
||||
</div>
|
||||
</div>
|
||||
</ProtectedRoute>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<ProtectedRoute>
|
||||
<div className="h-screen flex flex-col overflow-hidden">
|
||||
{/* Header */}
|
||||
<div className="border-b bg-background/95 backdrop-blur">
|
||||
<div className="container mx-auto px-4 py-3">
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="flex items-center gap-4">
|
||||
<Button variant="ghost" size="sm" onClick={() => router.back()}>
|
||||
<ArrowLeft className="h-4 w-4 mr-2" />
|
||||
Back
|
||||
</Button>
|
||||
<div>
|
||||
<h1 className="text-xl font-semibold flex items-center gap-2">
|
||||
<WorkflowIcon className="h-5 w-5" />
|
||||
{workflowId ? 'Edit Workflow' : 'New Workflow'}
|
||||
</h1>
|
||||
<p className="text-sm text-muted-foreground">
|
||||
{workflow.name || 'Untitled Workflow'}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center gap-2">
|
||||
<Button
|
||||
variant="outline"
|
||||
size="sm"
|
||||
onClick={() => setShowVariablePanel(true)}
|
||||
>
|
||||
Variables ({Object.keys(workflow.variables).length})
|
||||
</Button>
|
||||
<Button variant="outline" size="sm" onClick={testWorkflow}>
|
||||
<Play className="h-4 w-4 mr-2" />
|
||||
Test
|
||||
</Button>
|
||||
<Button size="sm" onClick={saveWorkflow} disabled={saving}>
|
||||
<Save className="h-4 w-4 mr-2" />
|
||||
{saving ? 'Saving...' : 'Save'}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Main Content */}
|
||||
<div className="flex-1 flex overflow-hidden">
|
||||
{/* Left Panel - Step Palette */}
|
||||
<div className="w-64 border-r bg-muted/50 overflow-y-auto">
|
||||
<div className="p-4">
|
||||
<h3 className="font-medium mb-3">Add Steps</h3>
|
||||
<div className="space-y-2">
|
||||
{stepTypes.map((stepType) => (
|
||||
<Button
|
||||
key={stepType.type}
|
||||
variant="outline"
|
||||
size="sm"
|
||||
className="w-full justify-start"
|
||||
onClick={() => addStep(stepType.type)}
|
||||
>
|
||||
{stepType.icon}
|
||||
<span className="ml-2">{stepType.name}</span>
|
||||
</Button>
|
||||
))}
|
||||
</div>
|
||||
|
||||
<Separator className="my-4" />
|
||||
|
||||
<div className="mb-3">
|
||||
<h3 className="font-medium">Workflow Steps</h3>
|
||||
<p className="text-xs text-muted-foreground mt-1">
|
||||
Drag steps to reorder them
|
||||
</p>
|
||||
</div>
|
||||
<div className="space-y-1">
|
||||
{workflow.steps.map((step, index) => (
|
||||
<div
|
||||
key={step.id}
|
||||
draggable={true}
|
||||
onDragStart={(e) => handleDragStart(e, index)}
|
||||
onDragEnd={handleDragEnd}
|
||||
onDragOver={(e) => handleDragOver(e, index)}
|
||||
onDragLeave={handleDragLeave}
|
||||
onDrop={(e) => handleDrop(e, index)}
|
||||
className={`p-2 rounded text-sm cursor-pointer border transition-all ${
|
||||
selectedStep?.id === step.id
|
||||
? 'bg-primary/10 border-primary'
|
||||
: 'bg-background border-border hover:bg-accent'
|
||||
} ${
|
||||
draggedStepIndex === index ? 'opacity-50' : ''
|
||||
} ${
|
||||
dragOverIndex === index && draggedStepIndex !== index
|
||||
? 'border-primary/50 bg-primary/5'
|
||||
: ''
|
||||
}`}
|
||||
onClick={() => setSelectedStep(step)}
|
||||
>
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="flex items-center gap-2">
|
||||
<GripVertical className="h-3 w-3 text-muted-foreground cursor-grab" />
|
||||
<span className="text-xs text-muted-foreground">
|
||||
{index + 1}
|
||||
</span>
|
||||
{stepTypes.find(t => t.type === step.type)?.icon}
|
||||
<span className="truncate">{step.name}</span>
|
||||
</div>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
className="h-6 w-6"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation()
|
||||
deleteStep(step.id)
|
||||
}}
|
||||
>
|
||||
<Trash2 className="h-3 w-3" />
|
||||
</Button>
|
||||
</div>
|
||||
<Badge variant="secondary" className="text-xs mt-1">
|
||||
{step.type}
|
||||
</Badge>
|
||||
</div>
|
||||
))}
|
||||
|
||||
{workflow.steps.length === 0 && (
|
||||
<p className="text-sm text-muted-foreground text-center py-4">
|
||||
No steps added yet
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Center Panel - Canvas */}
|
||||
<div className="flex-1 overflow-hidden">
|
||||
<div className="h-full bg-muted/20 relative" style={{
|
||||
backgroundImage: 'radial-gradient(circle, #0000001a 1px, transparent 1px)',
|
||||
backgroundSize: '20px 20px'
|
||||
}}>
|
||||
{/* Canvas Content */}
|
||||
<div className="absolute inset-0 overflow-auto p-8">
|
||||
{workflow.steps.length === 0 ? (
|
||||
<div className="h-full flex items-center justify-center">
|
||||
<div className="text-center">
|
||||
<WorkflowIcon className="h-12 w-12 mx-auto text-muted-foreground mb-4" />
|
||||
<h3 className="text-lg font-semibold mb-2">Start Building Your Workflow</h3>
|
||||
<p className="text-muted-foreground mb-4">
|
||||
Add steps from the left panel to create your workflow
|
||||
</p>
|
||||
<Button onClick={() => addStep('llm_call')}>
|
||||
<Plus className="h-4 w-4 mr-2" />
|
||||
Add First Step
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
) : (
|
||||
<div className="relative">
|
||||
{/* Workflow Steps */}
|
||||
{workflow.steps.map((step, index) => (
|
||||
<div
|
||||
key={step.id}
|
||||
className={`absolute p-4 bg-background border rounded-lg shadow-sm cursor-pointer transition-all ${
|
||||
selectedStep?.id === step.id
|
||||
? 'border-primary shadow-md'
|
||||
: 'border-border hover:shadow-md'
|
||||
}`}
|
||||
style={{
|
||||
left: step.position?.x || 100,
|
||||
top: step.position?.y || (100 + index * 120),
|
||||
width: '280px'
|
||||
}}
|
||||
onClick={() => setSelectedStep(step)}
|
||||
>
|
||||
<div className="flex items-center gap-2 mb-2">
|
||||
<div className="flex items-center gap-1">
|
||||
{stepTypes.find(t => t.type === step.type)?.icon}
|
||||
<span className="text-xs font-medium">{index + 1}</span>
|
||||
</div>
|
||||
<Badge variant="secondary" className="text-xs">
|
||||
{step.type}
|
||||
</Badge>
|
||||
</div>
|
||||
<h4 className="font-medium text-sm mb-1 truncate">{step.name}</h4>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
{stepTypes.find(t => t.type === step.type)?.description}
|
||||
</p>
|
||||
</div>
|
||||
))}
|
||||
|
||||
{/* Connection Lines - Vertical layout */}
|
||||
<svg className="absolute inset-0 pointer-events-none">
|
||||
{workflow.steps.map((step, index) => {
|
||||
if (index < workflow.steps.length - 1) {
|
||||
const nextStep = workflow.steps[index + 1]
|
||||
return (
|
||||
<line
|
||||
key={`${step.id}-${nextStep.id}`}
|
||||
x1={step.position.x + 140}
|
||||
y1={step.position.y + 80}
|
||||
x2={nextStep.position.x + 140}
|
||||
y2={nextStep.position.y}
|
||||
stroke="hsl(var(--border))"
|
||||
strokeWidth="2"
|
||||
markerEnd="url(#arrowhead)"
|
||||
/>
|
||||
)
|
||||
}
|
||||
return null
|
||||
})}
|
||||
<defs>
|
||||
<marker
|
||||
id="arrowhead"
|
||||
markerWidth="10"
|
||||
markerHeight="7"
|
||||
refX="9"
|
||||
refY="3.5"
|
||||
orient="auto"
|
||||
>
|
||||
<polygon
|
||||
points="0 0, 10 3.5, 0 7"
|
||||
fill="hsl(var(--border))"
|
||||
/>
|
||||
</marker>
|
||||
</defs>
|
||||
</svg>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Right Panel - Properties */}
|
||||
<div className="w-80 border-l bg-muted/50 overflow-y-auto">
|
||||
<div className="p-4">
|
||||
<Tabs value={selectedStep ? "step" : "workflow"} className="w-full">
|
||||
<TabsList className="grid w-full grid-cols-2">
|
||||
<TabsTrigger value="workflow">Workflow</TabsTrigger>
|
||||
<TabsTrigger value="step" disabled={!selectedStep}>
|
||||
Step
|
||||
</TabsTrigger>
|
||||
</TabsList>
|
||||
|
||||
{/* Workflow Properties */}
|
||||
<TabsContent value="workflow" className="space-y-4">
|
||||
<div>
|
||||
<Label htmlFor="name">Name</Label>
|
||||
<Input
|
||||
id="name"
|
||||
value={workflow.name}
|
||||
onChange={(e) => setWorkflow(prev => ({ ...prev, name: e.target.value }))}
|
||||
placeholder="Enter workflow name"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="description">Description</Label>
|
||||
<Textarea
|
||||
id="description"
|
||||
value={workflow.description}
|
||||
onChange={(e) => setWorkflow(prev => ({ ...prev, description: e.target.value }))}
|
||||
placeholder="Enter workflow description"
|
||||
rows={3}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="version">Version</Label>
|
||||
<Input
|
||||
id="version"
|
||||
value={workflow.version}
|
||||
onChange={(e) => setWorkflow(prev => ({ ...prev, version: e.target.value }))}
|
||||
placeholder="1.0.0"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<Separator />
|
||||
|
||||
<div>
|
||||
<div className="flex items-center justify-between mb-2">
|
||||
<Label>Variables</Label>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="sm"
|
||||
onClick={() => setShowVariablePanel(true)}
|
||||
>
|
||||
<Plus className="h-3 w-3 mr-1" />
|
||||
Add
|
||||
</Button>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
{Object.entries(workflow.variables).map(([key, value]) => (
|
||||
<div key={key} className="flex items-center justify-between p-2 bg-background rounded border">
|
||||
<div>
|
||||
<span className="text-sm font-medium">{key}</span>
|
||||
<p className="text-xs text-muted-foreground truncate">
|
||||
{String(value)}
|
||||
</p>
|
||||
</div>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
className="h-6 w-6"
|
||||
onClick={() => removeVariable(key)}
|
||||
>
|
||||
<Trash2 className="h-3 w-3" />
|
||||
</Button>
|
||||
</div>
|
||||
))}
|
||||
|
||||
{Object.keys(workflow.variables).length === 0 && (
|
||||
<p className="text-sm text-muted-foreground text-center py-4">
|
||||
No variables defined
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</TabsContent>
|
||||
|
||||
{/* Step Properties */}
|
||||
<TabsContent value="step" className="space-y-4">
|
||||
{selectedStep ? (
|
||||
<>
|
||||
<div>
|
||||
<Label htmlFor="step-name">Step Name</Label>
|
||||
<Input
|
||||
id="step-name"
|
||||
value={selectedStep.name}
|
||||
onChange={(e) => updateStep(selectedStep.id, { name: e.target.value })}
|
||||
placeholder="Enter step name"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label>Step Type</Label>
|
||||
<div className="flex items-center gap-2 mt-1">
|
||||
{stepTypes.find(t => t.type === selectedStep.type)?.icon}
|
||||
<Badge variant="secondary">{selectedStep.type}</Badge>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<Separator />
|
||||
|
||||
<StepConfigurationPanel
|
||||
step={selectedStep}
|
||||
onUpdateStep={updateStep}
|
||||
availableVariables={Object.keys(workflow.variables)}
|
||||
availableChatbots={availableChatbots}
|
||||
availableCollections={availableCollections}
|
||||
/>
|
||||
</>
|
||||
) : (
|
||||
<div className="text-center py-8">
|
||||
<Settings className="h-8 w-8 mx-auto text-muted-foreground mb-2" />
|
||||
<p className="text-sm text-muted-foreground">
|
||||
Select a step to configure its properties
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</TabsContent>
|
||||
</Tabs>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Variables Dialog */}
|
||||
<Dialog open={showVariablePanel} onOpenChange={setShowVariablePanel}>
|
||||
<DialogContent>
|
||||
<DialogHeader>
|
||||
<DialogTitle>Manage Variables</DialogTitle>
|
||||
<DialogDescription>
|
||||
Add and manage workflow variables that can be used in step configurations
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
<div className="space-y-4">
|
||||
<div className="grid grid-cols-2 gap-2">
|
||||
<Input placeholder="Variable name" id="var-name" />
|
||||
<Input placeholder="Variable value" id="var-value" />
|
||||
</div>
|
||||
<Button
|
||||
onClick={() => {
|
||||
const nameEl = document.getElementById('var-name') as HTMLInputElement
|
||||
const valueEl = document.getElementById('var-value') as HTMLInputElement
|
||||
if (nameEl?.value && valueEl?.value) {
|
||||
addVariable(nameEl.value, valueEl.value)
|
||||
nameEl.value = ''
|
||||
valueEl.value = ''
|
||||
}
|
||||
}}
|
||||
className="w-full"
|
||||
>
|
||||
Add Variable
|
||||
</Button>
|
||||
</div>
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
</ProtectedRoute>
|
||||
)
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -22,9 +22,6 @@ import { ChevronDown } from "lucide-react"
|
||||
const MODULE_NAV_MAP = {
|
||||
chatbot: { href: "/chatbot", label: "Chatbot" },
|
||||
rag: { href: "/rag", label: "RAG" },
|
||||
signal: { href: "/signal", label: "Signal Bot" },
|
||||
workflow: { href: "/workflows", label: "Workflows" },
|
||||
analytics: { href: "/analytics", label: "Analytics" },
|
||||
// Add more mappings as needed
|
||||
}
|
||||
|
||||
|
||||
@@ -1,597 +0,0 @@
|
||||
"use client"
|
||||
|
||||
import { useState } from "react"
|
||||
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { Label } from "@/components/ui/label"
|
||||
import { Textarea } from "@/components/ui/textarea"
|
||||
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
|
||||
import { Badge } from "@/components/ui/badge"
|
||||
import { Switch } from "@/components/ui/switch"
|
||||
import { Separator } from "@/components/ui/separator"
|
||||
import {
|
||||
Plus,
|
||||
Trash2,
|
||||
MessageSquare,
|
||||
Zap,
|
||||
GitBranch,
|
||||
Move,
|
||||
Database,
|
||||
Settings
|
||||
} from "lucide-react"
|
||||
import ModelSelector from "@/components/playground/ModelSelector"
|
||||
|
||||
interface WorkflowStep {
|
||||
id: string
|
||||
name: string
|
||||
type: string
|
||||
config: Record<string, any>
|
||||
position: { x: number; y: number }
|
||||
connections: string[]
|
||||
}
|
||||
|
||||
interface StepConfigurationPanelProps {
|
||||
step: WorkflowStep
|
||||
onUpdateStep: (stepId: string, updates: Partial<WorkflowStep>) => void
|
||||
availableVariables: string[]
|
||||
availableChatbots: Array<{id: string, name: string}>
|
||||
availableCollections: string[]
|
||||
}
|
||||
|
||||
export default function StepConfigurationPanel({
|
||||
step,
|
||||
onUpdateStep,
|
||||
availableVariables = [],
|
||||
availableChatbots = [],
|
||||
availableCollections = []
|
||||
}: StepConfigurationPanelProps) {
|
||||
|
||||
const updateConfig = (key: string, value: any) => {
|
||||
onUpdateStep(step.id, {
|
||||
config: { ...(step.config || {}), [key]: value }
|
||||
})
|
||||
}
|
||||
|
||||
const addMessage = () => {
|
||||
const messages = step.config?.messages || []
|
||||
updateConfig('messages', [
|
||||
...messages,
|
||||
{ role: 'user', content: '' }
|
||||
])
|
||||
}
|
||||
|
||||
const updateMessage = (index: number, field: string, value: string) => {
|
||||
const messages = [...(step.config?.messages || [])]
|
||||
messages[index] = { ...messages[index], [field]: value }
|
||||
updateConfig('messages', messages)
|
||||
}
|
||||
|
||||
const removeMessage = (index: number) => {
|
||||
const messages = step.config?.messages || []
|
||||
updateConfig('messages', messages.filter((_, i) => i !== index))
|
||||
}
|
||||
|
||||
const addCondition = () => {
|
||||
const conditions = step.config?.conditions || []
|
||||
updateConfig('conditions', [...conditions, ''])
|
||||
}
|
||||
|
||||
const updateCondition = (index: number, value: string) => {
|
||||
const conditions = [...(step.config?.conditions || [])]
|
||||
conditions[index] = value
|
||||
updateConfig('conditions', conditions)
|
||||
}
|
||||
|
||||
const removeCondition = (index: number) => {
|
||||
const conditions = step.config?.conditions || []
|
||||
updateConfig('conditions', conditions.filter((_, i) => i !== index))
|
||||
}
|
||||
|
||||
const renderLLMCallConfig = () => (
|
||||
<div className="space-y-4">
|
||||
<ModelSelector
|
||||
value={step.config?.model || ''}
|
||||
onValueChange={(value) => updateConfig('model', value)}
|
||||
filter="chat"
|
||||
/>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="temperature">Temperature</Label>
|
||||
<Input
|
||||
id="temperature"
|
||||
type="number"
|
||||
step="0.1"
|
||||
min="0"
|
||||
max="2"
|
||||
value={step.config?.temperature || 0.7}
|
||||
onChange={(e) => updateConfig('temperature', parseFloat(e.target.value))}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="max_tokens">Max Tokens</Label>
|
||||
<Input
|
||||
id="max_tokens"
|
||||
type="number"
|
||||
value={step.config?.max_tokens || 1000}
|
||||
onChange={(e) => updateConfig('max_tokens', parseInt(e.target.value))}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<div className="flex items-center justify-between mb-2">
|
||||
<Label>Messages</Label>
|
||||
<Button variant="outline" size="sm" onClick={addMessage}>
|
||||
<Plus className="h-3 w-3 mr-1" />
|
||||
Add Message
|
||||
</Button>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
{(step.config?.messages || []).map((message: any, index: number) => (
|
||||
<Card key={index}>
|
||||
<CardContent className="p-3">
|
||||
<div className="flex items-center justify-between mb-2">
|
||||
<Select
|
||||
value={message.role || 'user'}
|
||||
onValueChange={(value) => updateMessage(index, 'role', value)}
|
||||
>
|
||||
<SelectTrigger className="w-32">
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
<SelectItem value="system">System</SelectItem>
|
||||
<SelectItem value="user">User</SelectItem>
|
||||
<SelectItem value="assistant">Assistant</SelectItem>
|
||||
</SelectContent>
|
||||
</Select>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
className="h-6 w-6"
|
||||
onClick={() => removeMessage(index)}
|
||||
>
|
||||
<Trash2 className="h-3 w-3" />
|
||||
</Button>
|
||||
</div>
|
||||
<Textarea
|
||||
value={message.content || ''}
|
||||
onChange={(e) => updateMessage(index, 'content', e.target.value)}
|
||||
placeholder="Enter message content..."
|
||||
rows={2}
|
||||
/>
|
||||
</CardContent>
|
||||
</Card>
|
||||
))}
|
||||
|
||||
{(step.config?.messages || []).length === 0 && (
|
||||
<p className="text-sm text-muted-foreground text-center py-4">
|
||||
No messages added yet
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="output_variable">Output Variable</Label>
|
||||
<Input
|
||||
id="output_variable"
|
||||
value={step.config?.output_variable || ''}
|
||||
onChange={(e) => updateConfig('output_variable', e.target.value)}
|
||||
placeholder="Variable name to store the response"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
const renderChatbotConfig = () => (
|
||||
<div className="space-y-4">
|
||||
<div>
|
||||
<Label htmlFor="chatbot_id">Chatbot</Label>
|
||||
<Select value={step.config?.chatbot_id || ''} onValueChange={(value) => updateConfig('chatbot_id', value)}>
|
||||
<SelectTrigger>
|
||||
<SelectValue placeholder={
|
||||
availableChatbots.length > 0
|
||||
? "Select a chatbot"
|
||||
: "No chatbots available"
|
||||
} />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{availableChatbots.length > 0 ? (
|
||||
availableChatbots.map(chatbot => (
|
||||
<SelectItem key={chatbot.id} value={chatbot.id}>
|
||||
{/* Show human-readable names instead of IDs */}
|
||||
{chatbot.name}
|
||||
</SelectItem>
|
||||
))
|
||||
) : (
|
||||
<SelectItem value="" disabled>
|
||||
No chatbots created yet
|
||||
</SelectItem>
|
||||
)}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
{availableChatbots.length === 0 && (
|
||||
<p className="text-sm text-muted-foreground mt-1">
|
||||
Create chatbots in the Chatbot section to use them in workflows
|
||||
</p>
|
||||
)}
|
||||
{step.config?.chatbot_id && availableChatbots.length > 0 && (
|
||||
<p className="text-sm text-muted-foreground mt-1">
|
||||
Selected: {availableChatbots.find(c => c.id === step.config?.chatbot_id)?.name || 'Unknown'}
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="message_template">Message Template</Label>
|
||||
<Textarea
|
||||
id="message_template"
|
||||
value={step.config?.message_template || ''}
|
||||
onChange={(e) => updateConfig('message_template', e.target.value)}
|
||||
placeholder="Enter message template with variables like {variable_name}"
|
||||
rows={3}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center space-x-2">
|
||||
<Switch
|
||||
id="create_new_conversation"
|
||||
checked={step.config?.create_new_conversation || false}
|
||||
onCheckedChange={(checked) => updateConfig('create_new_conversation', checked)}
|
||||
/>
|
||||
<Label htmlFor="create_new_conversation">Create New Conversation</Label>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="conversation_id">Conversation ID Variable</Label>
|
||||
<Input
|
||||
id="conversation_id"
|
||||
value={step.config?.conversation_id || ''}
|
||||
onChange={(e) => updateConfig('conversation_id', e.target.value)}
|
||||
placeholder="Variable containing conversation ID"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="output_variable">Output Variable</Label>
|
||||
<Input
|
||||
id="output_variable"
|
||||
value={step.config?.output_variable || ''}
|
||||
onChange={(e) => updateConfig('output_variable', e.target.value)}
|
||||
placeholder="Variable name to store the response"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
const renderTransformConfig = () => (
|
||||
<div className="space-y-4">
|
||||
<div>
|
||||
<Label htmlFor="input_variable">Input Variable</Label>
|
||||
<Select value={step.config?.input_variable || ''} onValueChange={(value) => updateConfig('input_variable', value)}>
|
||||
<SelectTrigger>
|
||||
<SelectValue placeholder="Select input variable" />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{availableVariables.map(variable => (
|
||||
<SelectItem key={variable} value={variable}>{variable}</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="transformation">Transformation</Label>
|
||||
<Select value={step.config?.transformation || ''} onValueChange={(value) => updateConfig('transformation', value)}>
|
||||
<SelectTrigger>
|
||||
<SelectValue placeholder="Select transformation type" />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
<SelectItem value="extract:response">Extract Response</SelectItem>
|
||||
<SelectItem value="json_parse">Parse JSON</SelectItem>
|
||||
<SelectItem value="text_clean">Clean Text</SelectItem>
|
||||
<SelectItem value="uppercase">To Uppercase</SelectItem>
|
||||
<SelectItem value="lowercase">To Lowercase</SelectItem>
|
||||
<SelectItem value="trim">Trim Whitespace</SelectItem>
|
||||
<SelectItem value="custom">Custom Transformation</SelectItem>
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
|
||||
{step.config?.transformation === 'custom' && (
|
||||
<div>
|
||||
<Label htmlFor="custom_code">Custom Transformation Code</Label>
|
||||
<Textarea
|
||||
id="custom_code"
|
||||
value={step.config?.custom_code || ''}
|
||||
onChange={(e) => updateConfig('custom_code', e.target.value)}
|
||||
placeholder="Enter transformation logic (Python code)"
|
||||
rows={5}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div>
|
||||
<Label htmlFor="output_variable">Output Variable</Label>
|
||||
<Input
|
||||
id="output_variable"
|
||||
value={step.config?.output_variable || ''}
|
||||
onChange={(e) => updateConfig('output_variable', e.target.value)}
|
||||
placeholder="Variable name to store the result"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
const renderConditionalConfig = () => (
|
||||
<div className="space-y-4">
|
||||
<div>
|
||||
<div className="flex items-center justify-between mb-2">
|
||||
<Label>Conditions</Label>
|
||||
<Button variant="outline" size="sm" onClick={addCondition}>
|
||||
<Plus className="h-3 w-3 mr-1" />
|
||||
Add Condition
|
||||
</Button>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
{(step.config?.conditions || []).map((condition: string, index: number) => (
|
||||
<div key={index} className="flex items-center gap-2">
|
||||
<Input
|
||||
value={condition}
|
||||
onChange={(e) => updateCondition(index, e.target.value)}
|
||||
placeholder="e.g., {variable} == 'value'"
|
||||
className="flex-1"
|
||||
/>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
className="h-8 w-8"
|
||||
onClick={() => removeCondition(index)}
|
||||
>
|
||||
<Trash2 className="h-3 w-3" />
|
||||
</Button>
|
||||
</div>
|
||||
))}
|
||||
|
||||
{(step.config?.conditions || []).length === 0 && (
|
||||
<p className="text-sm text-muted-foreground text-center py-4">
|
||||
No conditions added yet
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="logic_operator">Logic Operator</Label>
|
||||
<Select value={step.config?.logic_operator || 'and'} onValueChange={(value) => updateConfig('logic_operator', value)}>
|
||||
<SelectTrigger>
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
<SelectItem value="and">AND (all conditions must be true)</SelectItem>
|
||||
<SelectItem value="or">OR (any condition must be true)</SelectItem>
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="true_output">True Output Variable</Label>
|
||||
<Input
|
||||
id="true_output"
|
||||
value={step.config?.true_output || ''}
|
||||
onChange={(e) => updateConfig('true_output', e.target.value)}
|
||||
placeholder="Variable to set when conditions are true"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="false_output">False Output Variable</Label>
|
||||
<Input
|
||||
id="false_output"
|
||||
value={step.config?.false_output || ''}
|
||||
onChange={(e) => updateConfig('false_output', e.target.value)}
|
||||
placeholder="Variable to set when conditions are false"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
const renderParallelConfig = () => (
|
||||
<div className="space-y-4">
|
||||
<div>
|
||||
<Label htmlFor="max_concurrent">Max Concurrent Steps</Label>
|
||||
<Input
|
||||
id="max_concurrent"
|
||||
type="number"
|
||||
min="1"
|
||||
max="10"
|
||||
value={step.config?.max_concurrent || 3}
|
||||
onChange={(e) => updateConfig('max_concurrent', parseInt(e.target.value))}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center space-x-2">
|
||||
<Switch
|
||||
id="wait_for_all"
|
||||
checked={step.config?.wait_for_all !== false}
|
||||
onCheckedChange={(checked) => updateConfig('wait_for_all', checked)}
|
||||
/>
|
||||
<Label htmlFor="wait_for_all">Wait for All Steps to Complete</Label>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="timeout">Timeout (seconds)</Label>
|
||||
<Input
|
||||
id="timeout"
|
||||
type="number"
|
||||
min="1"
|
||||
value={step.config?.timeout || 300}
|
||||
onChange={(e) => updateConfig('timeout', parseInt(e.target.value))}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="p-3 bg-muted rounded">
|
||||
<p className="text-sm text-muted-foreground">
|
||||
<strong>Note:</strong> Parallel steps are defined as sub-steps within this step.
|
||||
Use the visual editor to configure the parallel execution flow.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
const renderRAGSearchConfig = () => (
|
||||
<div className="space-y-4">
|
||||
<div>
|
||||
<Label htmlFor="collection">RAG Collection</Label>
|
||||
<Select value={step.config?.collection || ''} onValueChange={(value) => updateConfig('collection', value)}>
|
||||
<SelectTrigger>
|
||||
<SelectValue placeholder="Select a collection" />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{availableCollections.length > 0 ? (
|
||||
availableCollections.map(collection => (
|
||||
<SelectItem key={collection} value={collection}>{collection}</SelectItem>
|
||||
))
|
||||
) : (
|
||||
<SelectItem value="default_collection">Default Collection</SelectItem>
|
||||
)}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="query_template">Query Template</Label>
|
||||
<Textarea
|
||||
id="query_template"
|
||||
value={step.config?.query_template || ''}
|
||||
onChange={(e) => updateConfig('query_template', e.target.value)}
|
||||
placeholder="Enter search query with variables like {variable_name}"
|
||||
rows={2}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="top_k">Top K Results</Label>
|
||||
<Input
|
||||
id="top_k"
|
||||
type="number"
|
||||
min="1"
|
||||
max="50"
|
||||
value={step.config?.top_k || 5}
|
||||
onChange={(e) => updateConfig('top_k', parseInt(e.target.value))}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="score_threshold">Score Threshold</Label>
|
||||
<Input
|
||||
id="score_threshold"
|
||||
type="number"
|
||||
step="0.01"
|
||||
min="0"
|
||||
max="1"
|
||||
value={step.config?.score_threshold || 0.5}
|
||||
onChange={(e) => updateConfig('score_threshold', parseFloat(e.target.value))}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Label htmlFor="output_variable">Output Variable</Label>
|
||||
<Input
|
||||
id="output_variable"
|
||||
value={step.config?.output_variable || ''}
|
||||
onChange={(e) => updateConfig('output_variable', e.target.value)}
|
||||
placeholder="Variable name to store the search results"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
const getStepIcon = (type: string) => {
|
||||
switch (type) {
|
||||
case 'llm_call': return <MessageSquare className="h-4 w-4" />
|
||||
case 'chatbot': return <MessageSquare className="h-4 w-4" />
|
||||
case 'transform': return <Zap className="h-4 w-4" />
|
||||
case 'conditional': return <GitBranch className="h-4 w-4" />
|
||||
case 'parallel': return <Move className="h-4 w-4" />
|
||||
case 'rag_search': return <Database className="h-4 w-4" />
|
||||
default: return <Settings className="h-4 w-4" />
|
||||
}
|
||||
}
|
||||
|
||||
const renderStepConfig = () => {
|
||||
switch (step.type) {
|
||||
case 'llm_call': return renderLLMCallConfig()
|
||||
case 'chatbot': return renderChatbotConfig()
|
||||
case 'transform': return renderTransformConfig()
|
||||
case 'conditional': return renderConditionalConfig()
|
||||
case 'parallel': return renderParallelConfig()
|
||||
case 'rag_search': return renderRAGSearchConfig()
|
||||
default:
|
||||
return (
|
||||
<div className="text-center py-8">
|
||||
<Settings className="h-8 w-8 mx-auto text-muted-foreground mb-2" />
|
||||
<p className="text-sm text-muted-foreground">
|
||||
Configuration for step type "{step.type}" is not yet implemented.
|
||||
</p>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
{/* Step Header */}
|
||||
<div className="flex items-center gap-2">
|
||||
{getStepIcon(step.type)}
|
||||
<div>
|
||||
<h3 className="font-medium">{step.name}</h3>
|
||||
<Badge variant="secondary" className="text-xs">
|
||||
{step.type}
|
||||
</Badge>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<Separator />
|
||||
|
||||
{/* Basic Configuration */}
|
||||
<div>
|
||||
<Label htmlFor="step-name">Step Name</Label>
|
||||
<Input
|
||||
id="step-name"
|
||||
value={step.name}
|
||||
onChange={(e) => onUpdateStep(step.id, { name: e.target.value })}
|
||||
placeholder="Enter step name"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<Separator />
|
||||
|
||||
{/* Step-specific Configuration */}
|
||||
<div>
|
||||
<Label className="text-base font-medium">Configuration</Label>
|
||||
<div className="mt-3">
|
||||
{renderStepConfig()}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Variable Hints */}
|
||||
{availableVariables.length > 0 && (
|
||||
<>
|
||||
<Separator />
|
||||
<div>
|
||||
<Label className="text-sm font-medium">Available Variables</Label>
|
||||
<div className="flex flex-wrap gap-1 mt-2">
|
||||
{availableVariables.map(variable => (
|
||||
<Badge key={variable} variant="outline" className="text-xs">
|
||||
{`{${variable}}`}
|
||||
</Badge>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
Reference in New Issue
Block a user