From c2409017e58135f7211507666da8f30f6e6c3c5d Mon Sep 17 00:00:00 2001
From: d-kimsuon
Date: Sat, 25 Oct 2025 00:26:59 +0900
Subject: [PATCH 1/4] remove speckit
---
.claude/commands/speckit.analyze.md | 184 -----
.claude/commands/speckit.checklist.md | 287 -------
.claude/commands/speckit.clarify.md | 176 -----
.claude/commands/speckit.constitution.md | 77 --
.claude/commands/speckit.implement.md | 128 ---
.claude/commands/speckit.plan.md | 80 --
.claude/commands/speckit.specify.md | 229 ------
.claude/commands/speckit.tasks.md | 128 ---
.specify/memory/constitution.md | 50 --
.specify/scripts/bash/check-prerequisites.sh | 166 ----
.specify/scripts/bash/common.sh | 156 ----
.specify/scripts/bash/create-new-feature.sh | 200 -----
.specify/scripts/bash/setup-plan.sh | 61 --
.specify/scripts/bash/update-agent-context.sh | 739 ------------------
.specify/templates/agent-file-template.md | 23 -
.specify/templates/checklist-template.md | 41 -
.specify/templates/plan-template.md | 105 ---
.specify/templates/spec-template.md | 116 ---
.specify/templates/tasks-template.md | 251 ------
19 files changed, 3197 deletions(-)
delete mode 100644 .claude/commands/speckit.analyze.md
delete mode 100644 .claude/commands/speckit.checklist.md
delete mode 100644 .claude/commands/speckit.clarify.md
delete mode 100644 .claude/commands/speckit.constitution.md
delete mode 100644 .claude/commands/speckit.implement.md
delete mode 100644 .claude/commands/speckit.plan.md
delete mode 100644 .claude/commands/speckit.specify.md
delete mode 100644 .claude/commands/speckit.tasks.md
delete mode 100644 .specify/memory/constitution.md
delete mode 100755 .specify/scripts/bash/check-prerequisites.sh
delete mode 100755 .specify/scripts/bash/common.sh
delete mode 100755 .specify/scripts/bash/create-new-feature.sh
delete mode 100755 .specify/scripts/bash/setup-plan.sh
delete mode 100755 .specify/scripts/bash/update-agent-context.sh
delete mode 100644 .specify/templates/agent-file-template.md
delete mode 100644 .specify/templates/checklist-template.md
delete mode 100644 .specify/templates/plan-template.md
delete mode 100644 .specify/templates/spec-template.md
delete mode 100644 .specify/templates/tasks-template.md
diff --git a/.claude/commands/speckit.analyze.md b/.claude/commands/speckit.analyze.md
deleted file mode 100644
index 8e510de..0000000
--- a/.claude/commands/speckit.analyze.md
+++ /dev/null
@@ -1,184 +0,0 @@
----
-description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation.
----
-
-## User Input
-
-```text
-$ARGUMENTS
-```
-
-You **MUST** consider the user input before proceeding (if not empty).
-
-## Goal
-
-Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/tasks` has successfully produced a complete `tasks.md`.
-
-## Operating Constraints
-
-**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (the user must explicitly approve it before any follow-up editing commands are invoked manually).
-
-**Constitution Authority**: The project constitution (`.specify/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/analyze`.
-
-## Execution Steps
-
-### 1. Initialize Analysis Context
-
-Run `.specify/scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` once from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS. Derive absolute paths:
-
-- SPEC = FEATURE_DIR/spec.md
-- PLAN = FEATURE_DIR/plan.md
-- TASKS = FEATURE_DIR/tasks.md
-
-Abort with an error message if any required file is missing (instruct the user to run missing prerequisite command).
-For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
-
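-As a rough sketch of this step (assuming the script prints a single JSON object with `FEATURE_DIR` and `AVAILABLE_DOCS` keys, and that `jq` is available):
-
-```sh
-# Run once from the repo root and capture the JSON payload
-json="$(.specify/scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks)"
-
-# Extract the feature directory and derive absolute artifact paths
-FEATURE_DIR="$(echo "$json" | jq -r '.FEATURE_DIR')"
-SPEC="$FEATURE_DIR/spec.md"
-PLAN="$FEATURE_DIR/plan.md"
-TASKS="$FEATURE_DIR/tasks.md"
-
-# Abort early if any required artifact is missing
-for f in "$SPEC" "$PLAN" "$TASKS"; do
-  [ -f "$f" ] || { echo "Missing $f - run its prerequisite command first" >&2; exit 1; }
-done
-```
-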
-### 2. Load Artifacts (Progressive Disclosure)
-
-Load only the minimal necessary context from each artifact:
-
-**From spec.md:**
-
-- Overview/Context
-- Functional Requirements
-- Non-Functional Requirements
-- User Stories
-- Edge Cases (if present)
-
-**From plan.md:**
-
-- Architecture/stack choices
-- Data Model references
-- Phases
-- Technical constraints
-
-**From tasks.md:**
-
-- Task IDs
-- Descriptions
-- Phase grouping
-- Parallel markers [P]
-- Referenced file paths
-
-**From constitution:**
-
-- Load `.specify/memory/constitution.md` for principle validation
-
-### 3. Build Semantic Models
-
-Create internal representations (do not include raw artifacts in output):
-
-- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`)
-- **User story/action inventory**: Discrete user actions with acceptance criteria
-- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases)
-- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements
-
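-As an illustrative sketch of the slug derivation mentioned above (one possible normalization; the exact rule is an implementation choice):
-
-```sh
-# "User can upload file" -> "user-can-upload-file"
-slug() {
-  echo "$1" | tr '[:upper:]' '[:lower:]' | tr -cs 'a-z0-9' '-' | sed 's/^-//; s/-$//'
-}
-slug "User can upload file"
-```
-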
-### 4. Detection Passes (Token-Efficient Analysis)
-
-Focus on high-signal findings. Limit to 50 findings total; aggregate remainder in overflow summary.
-
-#### A. Duplication Detection
-
-- Identify near-duplicate requirements
-- Mark lower-quality phrasing for consolidation
-
-#### B. Ambiguity Detection
-
-- Flag vague adjectives (fast, scalable, secure, intuitive, robust) lacking measurable criteria
-- Flag unresolved placeholders (TODO, TKTK, ???, ``, etc.)
-
-#### C. Underspecification
-
-- Requirements with verbs but missing object or measurable outcome
-- User stories missing acceptance criteria alignment
-- Tasks referencing files or components not defined in spec/plan
-
-#### D. Constitution Alignment
-
-- Any requirement or plan element conflicting with a MUST principle
-- Missing mandated sections or quality gates from constitution
-
-#### E. Coverage Gaps
-
-- Requirements with zero associated tasks
-- Tasks with no mapped requirement/story
-- Non-functional requirements not reflected in tasks (e.g., performance, security)
-
-#### F. Inconsistency
-
-- Terminology drift (same concept named differently across files)
-- Data entities referenced in plan but absent in spec (or vice versa)
-- Task ordering contradictions (e.g., integration tasks before foundational setup tasks without dependency note)
-- Conflicting requirements (e.g., one requires Next.js while other specifies Vue)
-
-### 5. Severity Assignment
-
-Use this heuristic to prioritize findings:
-
-- **CRITICAL**: Violates constitution MUST, missing core spec artifact, or requirement with zero coverage that blocks baseline functionality
-- **HIGH**: Duplicate or conflicting requirement, ambiguous security/performance attribute, untestable acceptance criterion
-- **MEDIUM**: Terminology drift, missing non-functional task coverage, underspecified edge case
-- **LOW**: Style/wording improvements, minor redundancy not affecting execution order
-
-### 6. Produce Compact Analysis Report
-
-Output a Markdown report (no file writes) with the following structure:
-
-## Specification Analysis Report
-
-| ID | Category | Severity | Location(s) | Summary | Recommendation |
-|----|----------|----------|-------------|---------|----------------|
-| A1 | Duplication | HIGH | spec.md:L120-134 | Two similar requirements ... | Merge phrasing; keep clearer version |
-
-(Add one row per finding; generate stable IDs prefixed by category initial.)
-
-**Coverage Summary Table:**
-
-| Requirement Key | Has Task? | Task IDs | Notes |
-|-----------------|-----------|----------|-------|
-
-**Constitution Alignment Issues:** (if any)
-
-**Unmapped Tasks:** (if any)
-
-**Metrics:**
-
-- Total Requirements
-- Total Tasks
-- Coverage % (requirements with >=1 task)
-- Ambiguity Count
-- Duplication Count
-- Critical Issues Count
-
-### 7. Provide Next Actions
-
-At end of report, output a concise Next Actions block:
-
-- If CRITICAL issues exist: Recommend resolving before `/implement`
-- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions
-- Provide explicit command suggestions: e.g., "Run /specify with refinement", "Run /plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'"
-
-### 8. Offer Remediation
-
-Ask the user: "Would you like me to suggest concrete remediation edits for the top N issues?" (Do NOT apply them automatically.)
-
-## Operating Principles
-
-### Context Efficiency
-
-- **Minimal high-signal tokens**: Focus on actionable findings, not exhaustive documentation
-- **Progressive disclosure**: Load artifacts incrementally; don't dump all content into analysis
-- **Token-efficient output**: Limit findings table to 50 rows; summarize overflow
-- **Deterministic results**: Rerunning without changes should produce consistent IDs and counts
-
-### Analysis Guidelines
-
-- **NEVER modify files** (this is read-only analysis)
-- **NEVER hallucinate missing sections** (if absent, report them accurately)
-- **Prioritize constitution violations** (these are always CRITICAL)
-- **Use examples over exhaustive rules** (cite specific instances, not generic patterns)
-- **Report zero issues gracefully** (emit success report with coverage statistics)
-
-## Context
-
-$ARGUMENTS
diff --git a/.claude/commands/speckit.checklist.md b/.claude/commands/speckit.checklist.md
deleted file mode 100644
index 5417f6a..0000000
--- a/.claude/commands/speckit.checklist.md
+++ /dev/null
@@ -1,287 +0,0 @@
----
-description: Generate a custom checklist for the current feature based on user requirements.
----
-
-## Checklist Purpose: "Unit Tests for English"
-
-**CRITICAL CONCEPT**: Checklists are **UNIT TESTS FOR REQUIREMENTS WRITING** - they validate the quality, clarity, and completeness of requirements in a given domain.
-
-**NOT for verification/testing**:
-- ❌ NOT "Verify the button clicks correctly"
-- ❌ NOT "Test error handling works"
-- ❌ NOT "Confirm the API returns 200"
-- ❌ NOT checking if code/implementation matches the spec
-
-**FOR requirements quality validation**:
-- ✅ "Are visual hierarchy requirements defined for all card types?" (completeness)
-- ✅ "Is 'prominent display' quantified with specific sizing/positioning?" (clarity)
-- ✅ "Are hover state requirements consistent across all interactive elements?" (consistency)
-- ✅ "Are accessibility requirements defined for keyboard navigation?" (coverage)
-- ✅ "Does the spec define what happens when logo image fails to load?" (edge cases)
-
-**Metaphor**: If your spec is code written in English, the checklist is its unit test suite. You're testing whether the requirements are well-written, complete, unambiguous, and ready for implementation - NOT whether the implementation works.
-
-## User Input
-
-```text
-$ARGUMENTS
-```
-
-You **MUST** consider the user input before proceeding (if not empty).
-
-## Execution Steps
-
-1. **Setup**: Run `.specify/scripts/bash/check-prerequisites.sh --json` from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS list.
- - All file paths must be absolute.
-   - For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
-
-2. **Clarify intent (dynamic)**: Derive up to THREE initial contextual clarifying questions (no pre-baked catalog). They MUST:
- - Be generated from the user's phrasing + extracted signals from spec/plan/tasks
- - Only ask about information that materially changes checklist content
- - Be skipped individually if already unambiguous in `$ARGUMENTS`
- - Prefer precision over breadth
-
- Generation algorithm:
- 1. Extract signals: feature domain keywords (e.g., auth, latency, UX, API), risk indicators ("critical", "must", "compliance"), stakeholder hints ("QA", "review", "security team"), and explicit deliverables ("a11y", "rollback", "contracts").
- 2. Cluster signals into candidate focus areas (max 4) ranked by relevance.
- 3. Identify probable audience & timing (author, reviewer, QA, release) if not explicit.
- 4. Detect missing dimensions: scope breadth, depth/rigor, risk emphasis, exclusion boundaries, measurable acceptance criteria.
- 5. Formulate questions chosen from these archetypes:
- - Scope refinement (e.g., "Should this include integration touchpoints with X and Y or stay limited to local module correctness?")
- - Risk prioritization (e.g., "Which of these potential risk areas should receive mandatory gating checks?")
- - Depth calibration (e.g., "Is this a lightweight pre-commit sanity list or a formal release gate?")
- - Audience framing (e.g., "Will this be used by the author only or peers during PR review?")
- - Boundary exclusion (e.g., "Should we explicitly exclude performance tuning items this round?")
- - Scenario class gap (e.g., "No recovery flows detected—are rollback / partial failure paths in scope?")
-
- Question formatting rules:
- - If presenting options, generate a compact table with columns: Option | Candidate | Why It Matters
- - Limit to A–E options maximum; omit table if a free-form answer is clearer
- - Never ask the user to restate what they already said
- - Avoid speculative categories (no hallucination). If uncertain, ask explicitly: "Confirm whether X belongs in scope."
-
- Defaults when interaction impossible:
- - Depth: Standard
- - Audience: Reviewer (PR) if code-related; Author otherwise
- - Focus: Top 2 relevance clusters
-
- Output the questions (label Q1/Q2/Q3). After answers: if ≥2 scenario classes (Alternate / Exception / Recovery / Non-Functional domain) remain unclear, you MAY ask up to TWO more targeted follow‑ups (Q4/Q5) with a one-line justification each (e.g., "Unresolved recovery path risk"). Do not exceed five total questions. Skip escalation if user explicitly declines more.
-
-3. **Understand user request**: Combine `$ARGUMENTS` + clarifying answers:
- - Derive checklist theme (e.g., security, review, deploy, ux)
- - Consolidate explicit must-have items mentioned by user
- - Map focus selections to category scaffolding
- - Infer any missing context from spec/plan/tasks (do NOT hallucinate)
-
-4. **Load feature context**: Read from FEATURE_DIR:
- - spec.md: Feature requirements and scope
- - plan.md (if exists): Technical details, dependencies
- - tasks.md (if exists): Implementation tasks
-
- **Context Loading Strategy**:
- - Load only necessary portions relevant to active focus areas (avoid full-file dumping)
- - Prefer summarizing long sections into concise scenario/requirement bullets
- - Use progressive disclosure: add follow-on retrieval only if gaps detected
- - If source docs are large, generate interim summary items instead of embedding raw text
-
-5. **Generate checklist** - Create "Unit Tests for Requirements":
- - Create `FEATURE_DIR/checklists/` directory if it doesn't exist
- - Generate unique checklist filename:
- - Use short, descriptive name based on domain (e.g., `ux.md`, `api.md`, `security.md`)
- - Format: `[domain].md`
- - If file exists, append to existing file
- - Number items sequentially starting from CHK001
-   - Each `/speckit.checklist` run adds to the checklists folder without overwriting existing content (a new file, or an append when the domain file already exists)
-
- **CORE PRINCIPLE - Test the Requirements, Not the Implementation**:
- Every checklist item MUST evaluate the REQUIREMENTS THEMSELVES for:
- - **Completeness**: Are all necessary requirements present?
- - **Clarity**: Are requirements unambiguous and specific?
- - **Consistency**: Do requirements align with each other?
- - **Measurability**: Can requirements be objectively verified?
- - **Coverage**: Are all scenarios/edge cases addressed?
-
- **Category Structure** - Group items by requirement quality dimensions:
- - **Requirement Completeness** (Are all necessary requirements documented?)
- - **Requirement Clarity** (Are requirements specific and unambiguous?)
- - **Requirement Consistency** (Do requirements align without conflicts?)
- - **Acceptance Criteria Quality** (Are success criteria measurable?)
- - **Scenario Coverage** (Are all flows/cases addressed?)
- - **Edge Case Coverage** (Are boundary conditions defined?)
- - **Non-Functional Requirements** (Performance, Security, Accessibility, etc. - are they specified?)
- - **Dependencies & Assumptions** (Are they documented and validated?)
- - **Ambiguities & Conflicts** (What needs clarification?)
-
- **HOW TO WRITE CHECKLIST ITEMS - "Unit Tests for English"**:
-
- ❌ **WRONG** (Testing implementation):
- - "Verify landing page displays 3 episode cards"
- - "Test hover states work on desktop"
- - "Confirm logo click navigates home"
-
- ✅ **CORRECT** (Testing requirements quality):
- - "Are the exact number and layout of featured episodes specified?" [Completeness]
- - "Is 'prominent display' quantified with specific sizing/positioning?" [Clarity]
- - "Are hover state requirements consistent across all interactive elements?" [Consistency]
- - "Are keyboard navigation requirements defined for all interactive UI?" [Coverage]
- - "Is the fallback behavior specified when logo image fails to load?" [Edge Cases]
- - "Are loading states defined for asynchronous episode data?" [Completeness]
- - "Does the spec define visual hierarchy for competing UI elements?" [Clarity]
-
- **ITEM STRUCTURE**:
- Each item should follow this pattern:
- - Question format asking about requirement quality
- - Focus on what's WRITTEN (or not written) in the spec/plan
- - Include quality dimension in brackets [Completeness/Clarity/Consistency/etc.]
- - Reference spec section `[Spec §X.Y]` when checking existing requirements
- - Use `[Gap]` marker when checking for missing requirements
-
- **EXAMPLES BY QUALITY DIMENSION**:
-
- Completeness:
- - "Are error handling requirements defined for all API failure modes? [Gap]"
- - "Are accessibility requirements specified for all interactive elements? [Completeness]"
- - "Are mobile breakpoint requirements defined for responsive layouts? [Gap]"
-
- Clarity:
- - "Is 'fast loading' quantified with specific timing thresholds? [Clarity, Spec §NFR-2]"
- - "Are 'related episodes' selection criteria explicitly defined? [Clarity, Spec §FR-5]"
- - "Is 'prominent' defined with measurable visual properties? [Ambiguity, Spec §FR-4]"
-
- Consistency:
- - "Do navigation requirements align across all pages? [Consistency, Spec §FR-10]"
- - "Are card component requirements consistent between landing and detail pages? [Consistency]"
-
- Coverage:
- - "Are requirements defined for zero-state scenarios (no episodes)? [Coverage, Edge Case]"
- - "Are concurrent user interaction scenarios addressed? [Coverage, Gap]"
- - "Are requirements specified for partial data loading failures? [Coverage, Exception Flow]"
-
- Measurability:
- - "Are visual hierarchy requirements measurable/testable? [Acceptance Criteria, Spec §FR-1]"
- - "Can 'balanced visual weight' be objectively verified? [Measurability, Spec §FR-2]"
-
- **Scenario Classification & Coverage** (Requirements Quality Focus):
- - Check if requirements exist for: Primary, Alternate, Exception/Error, Recovery, Non-Functional scenarios
- - For each scenario class, ask: "Are [scenario type] requirements complete, clear, and consistent?"
- - If scenario class missing: "Are [scenario type] requirements intentionally excluded or missing? [Gap]"
- - Include resilience/rollback when state mutation occurs: "Are rollback requirements defined for migration failures? [Gap]"
-
- **Traceability Requirements**:
- - MINIMUM: ≥80% of items MUST include at least one traceability reference
- - Each item should reference: spec section `[Spec §X.Y]`, or use markers: `[Gap]`, `[Ambiguity]`, `[Conflict]`, `[Assumption]`
- - If no ID system exists: "Is a requirement & acceptance criteria ID scheme established? [Traceability]"
-
- **Surface & Resolve Issues** (Requirements Quality Problems):
- Ask questions about the requirements themselves:
- - Ambiguities: "Is the term 'fast' quantified with specific metrics? [Ambiguity, Spec §NFR-1]"
- - Conflicts: "Do navigation requirements conflict between §FR-10 and §FR-10a? [Conflict]"
- - Assumptions: "Is the assumption of 'always available podcast API' validated? [Assumption]"
- - Dependencies: "Are external podcast API requirements documented? [Dependency, Gap]"
- - Missing definitions: "Is 'visual hierarchy' defined with measurable criteria? [Gap]"
-
- **Content Consolidation**:
- - Soft cap: If raw candidate items > 40, prioritize by risk/impact
- - Merge near-duplicates checking the same requirement aspect
- - If >5 low-impact edge cases, create one item: "Are edge cases X, Y, Z addressed in requirements? [Coverage]"
-
- **🚫 ABSOLUTELY PROHIBITED** - These make it an implementation test, not a requirements test:
- - ❌ Any item starting with "Verify", "Test", "Confirm", "Check" + implementation behavior
- - ❌ References to code execution, user actions, system behavior
- - ❌ "Displays correctly", "works properly", "functions as expected"
- - ❌ "Click", "navigate", "render", "load", "execute"
- - ❌ Test cases, test plans, QA procedures
- - ❌ Implementation details (frameworks, APIs, algorithms)
-
- **✅ REQUIRED PATTERNS** - These test requirements quality:
- - ✅ "Are [requirement type] defined/specified/documented for [scenario]?"
- - ✅ "Is [vague term] quantified/clarified with specific criteria?"
- - ✅ "Are requirements consistent between [section A] and [section B]?"
- - ✅ "Can [requirement] be objectively measured/verified?"
- - ✅ "Are [edge cases/scenarios] addressed in requirements?"
- - ✅ "Does the spec define [missing aspect]?"
-
-6. **Structure Reference**: Generate the checklist following the canonical template in `.specify/templates/checklist-template.md` for title, meta section, category headings, and ID formatting. If template is unavailable, use: H1 title, purpose/created meta lines, `##` category sections containing `- [ ] CHK### ` lines with globally incrementing IDs starting at CHK001.
-
-7. **Report**: Output full path to created checklist, item count, and remind user that each run creates a new file. Summarize:
- - Focus areas selected
- - Depth level
- - Actor/timing
- - Any explicit user-specified must-have items incorporated
-
-**Important**: Each `/speckit.checklist` command invocation creates a checklist file with a short, descriptive name (or appends to it if that file already exists). This allows:
-
-- Multiple checklists of different types (e.g., `ux.md`, `test.md`, `security.md`)
-- Simple, memorable filenames that indicate checklist purpose
-- Easy identification and navigation in the `checklists/` folder
-
-To avoid clutter, use descriptive types and clean up obsolete checklists when done.
-
-## Example Checklist Types & Sample Items
-
-**UX Requirements Quality:** `ux.md`
-
-Sample items (testing the requirements, NOT the implementation):
-- "Are visual hierarchy requirements defined with measurable criteria? [Clarity, Spec §FR-1]"
-- "Is the number and positioning of UI elements explicitly specified? [Completeness, Spec §FR-1]"
-- "Are interaction state requirements (hover, focus, active) consistently defined? [Consistency]"
-- "Are accessibility requirements specified for all interactive elements? [Coverage, Gap]"
-- "Is fallback behavior defined when images fail to load? [Edge Case, Gap]"
-- "Can 'prominent display' be objectively measured? [Measurability, Spec §FR-4]"
-
-**API Requirements Quality:** `api.md`
-
-Sample items:
-- "Are error response formats specified for all failure scenarios? [Completeness]"
-- "Are rate limiting requirements quantified with specific thresholds? [Clarity]"
-- "Are authentication requirements consistent across all endpoints? [Consistency]"
-- "Are retry/timeout requirements defined for external dependencies? [Coverage, Gap]"
-- "Is versioning strategy documented in requirements? [Gap]"
-
-**Performance Requirements Quality:** `performance.md`
-
-Sample items:
-- "Are performance requirements quantified with specific metrics? [Clarity]"
-- "Are performance targets defined for all critical user journeys? [Coverage]"
-- "Are performance requirements under different load conditions specified? [Completeness]"
-- "Can performance requirements be objectively measured? [Measurability]"
-- "Are degradation requirements defined for high-load scenarios? [Edge Case, Gap]"
-
-**Security Requirements Quality:** `security.md`
-
-Sample items:
-- "Are authentication requirements specified for all protected resources? [Coverage]"
-- "Are data protection requirements defined for sensitive information? [Completeness]"
-- "Is the threat model documented and requirements aligned to it? [Traceability]"
-- "Are security requirements consistent with compliance obligations? [Consistency]"
-- "Are security failure/breach response requirements defined? [Gap, Exception Flow]"
-
-## Anti-Examples: What NOT To Do
-
-**❌ WRONG - These test implementation, not requirements:**
-
-```markdown
-- [ ] CHK001 - Verify landing page displays 3 episode cards [Spec §FR-001]
-- [ ] CHK002 - Test hover states work correctly on desktop [Spec §FR-003]
-- [ ] CHK003 - Confirm logo click navigates to home page [Spec §FR-010]
-- [ ] CHK004 - Check that related episodes section shows 3-5 items [Spec §FR-005]
-```
-
-**✅ CORRECT - These test requirements quality:**
-
-```markdown
-- [ ] CHK001 - Are the number and layout of featured episodes explicitly specified? [Completeness, Spec §FR-001]
-- [ ] CHK002 - Are hover state requirements consistently defined for all interactive elements? [Consistency, Spec §FR-003]
-- [ ] CHK003 - Are navigation requirements clear for all clickable brand elements? [Clarity, Spec §FR-010]
-- [ ] CHK004 - Is the selection criteria for related episodes documented? [Gap, Spec §FR-005]
-- [ ] CHK005 - Are loading state requirements defined for asynchronous episode data? [Gap]
-- [ ] CHK006 - Can "visual hierarchy" requirements be objectively measured? [Measurability, Spec §FR-001]
-```
-
-**Key Differences:**
-- Wrong: Tests if the system works correctly
-- Correct: Tests if the requirements are written correctly
-- Wrong: Verification of behavior
-- Correct: Validation of requirement quality
-- Wrong: "Does it do X?"
-- Correct: "Is X clearly specified?"
diff --git a/.claude/commands/speckit.clarify.md b/.claude/commands/speckit.clarify.md
deleted file mode 100644
index 0f11d41..0000000
--- a/.claude/commands/speckit.clarify.md
+++ /dev/null
@@ -1,176 +0,0 @@
----
-description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec.
----
-
-## User Input
-
-```text
-$ARGUMENTS
-```
-
-You **MUST** consider the user input before proceeding (if not empty).
-
-## Outline
-
-Goal: Detect and reduce ambiguity or missing decision points in the active feature specification and record the clarifications directly in the spec file.
-
-Note: This clarification workflow is expected to run (and be completed) BEFORE invoking `/speckit.plan`. If the user explicitly states they are skipping clarification (e.g., exploratory spike), you may proceed, but must warn that downstream rework risk increases.
-
-Execution steps:
-
-1. Run `.specify/scripts/bash/check-prerequisites.sh --json --paths-only` from repo root **once** (combined `--json --paths-only` mode / `-Json -PathsOnly`). Parse minimal JSON payload fields:
- - `FEATURE_DIR`
- - `FEATURE_SPEC`
- - (Optionally capture `IMPL_PLAN`, `TASKS` for future chained flows.)
- - If JSON parsing fails, abort and instruct user to re-run `/speckit.specify` or verify feature branch environment.
-   - For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
-
-2. Load the current spec file. Perform a structured ambiguity & coverage scan using this taxonomy. For each category, mark status: Clear / Partial / Missing. Produce an internal coverage map used for prioritization (do not output raw map unless no questions will be asked).
-
- Functional Scope & Behavior:
- - Core user goals & success criteria
- - Explicit out-of-scope declarations
- - User roles / personas differentiation
-
- Domain & Data Model:
- - Entities, attributes, relationships
- - Identity & uniqueness rules
- - Lifecycle/state transitions
- - Data volume / scale assumptions
-
- Interaction & UX Flow:
- - Critical user journeys / sequences
- - Error/empty/loading states
- - Accessibility or localization notes
-
- Non-Functional Quality Attributes:
- - Performance (latency, throughput targets)
- - Scalability (horizontal/vertical, limits)
- - Reliability & availability (uptime, recovery expectations)
- - Observability (logging, metrics, tracing signals)
- - Security & privacy (authN/Z, data protection, threat assumptions)
- - Compliance / regulatory constraints (if any)
-
- Integration & External Dependencies:
- - External services/APIs and failure modes
- - Data import/export formats
- - Protocol/versioning assumptions
-
- Edge Cases & Failure Handling:
- - Negative scenarios
- - Rate limiting / throttling
- - Conflict resolution (e.g., concurrent edits)
-
- Constraints & Tradeoffs:
- - Technical constraints (language, storage, hosting)
- - Explicit tradeoffs or rejected alternatives
-
- Terminology & Consistency:
- - Canonical glossary terms
- - Avoided synonyms / deprecated terms
-
- Completion Signals:
- - Acceptance criteria testability
- - Measurable Definition of Done style indicators
-
- Misc / Placeholders:
- - TODO markers / unresolved decisions
- - Ambiguous adjectives ("robust", "intuitive") lacking quantification
-
- For each category with Partial or Missing status, add a candidate question opportunity unless:
- - Clarification would not materially change implementation or validation strategy
- - Information is better deferred to planning phase (note internally)
-
-3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. Apply these constraints:
-    - Maximum of 5 total questions across the whole session.
- - Each question must be answerable with EITHER:
- * A short multiple‑choice selection (2–5 distinct, mutually exclusive options), OR
- * A one-word / short‑phrase answer (explicitly constrain: "Answer in <=5 words").
- - Only include questions whose answers materially impact architecture, data modeling, task decomposition, test design, UX behavior, operational readiness, or compliance validation.
- - Ensure category coverage balance: attempt to cover the highest impact unresolved categories first; avoid asking two low-impact questions when a single high-impact area (e.g., security posture) is unresolved.
- - Exclude questions already answered, trivial stylistic preferences, or plan-level execution details (unless blocking correctness).
- - Favor clarifications that reduce downstream rework risk or prevent misaligned acceptance tests.
- - If more than 5 categories remain unresolved, select the top 5 by (Impact * Uncertainty) heuristic.
-
-4. Sequential questioning loop (interactive):
- - Present EXACTLY ONE question at a time.
- - For multiple‑choice questions:
- * **Analyze all options** and determine the **most suitable option** based on:
- - Best practices for the project type
- - Common patterns in similar implementations
- - Risk reduction (security, performance, maintainability)
- - Alignment with any explicit project goals or constraints visible in the spec
- * Present your **recommended option prominently** at the top with clear reasoning (1-2 sentences explaining why this is the best choice).
- * Format as: `**Recommended:** Option [X] - `
- * Then render all options as a Markdown table:
-
- | Option | Description |
- |--------|-------------|
- | A | |
- | B | |
- | C | | (add D/E as needed up to 5)
- | Short | Provide a different short answer (<=5 words) | (Include only if free-form alternative is appropriate)
-
- * After the table, add: `You can reply with the option letter (e.g., "A"), accept the recommendation by saying "yes" or "recommended", or provide your own short answer.`
- - For short‑answer style (no meaningful discrete options):
- * Provide your **suggested answer** based on best practices and context.
- * Format as: `**Suggested:** - `
- * Then output: `Format: Short answer (<=5 words). You can accept the suggestion by saying "yes" or "suggested", or provide your own answer.`
- - After the user answers:
- * If the user replies with "yes", "recommended", or "suggested", use your previously stated recommendation/suggestion as the answer.
- * Otherwise, validate the answer maps to one option or fits the <=5 word constraint.
- * If ambiguous, ask for a quick disambiguation (count still belongs to same question; do not advance).
- * Once satisfactory, record it in working memory (do not yet write to disk) and move to the next queued question.
- - Stop asking further questions when:
- * All critical ambiguities resolved early (remaining queued items become unnecessary), OR
- * User signals completion ("done", "good", "no more"), OR
- * You reach 5 asked questions.
- - Never reveal future queued questions in advance.
- - If no valid questions exist at start, immediately report no critical ambiguities.
-
-5. Integration after EACH accepted answer (incremental update approach):
- - Maintain in-memory representation of the spec (loaded once at start) plus the raw file contents.
- - For the first integrated answer in this session:
- * Ensure a `## Clarifications` section exists (create it just after the highest-level contextual/overview section per the spec template if missing).
- * Under it, create (if not present) a `### Session YYYY-MM-DD` subheading for today.
- - Append a bullet line immediately after acceptance: `- Q: → A: `.
- - Then immediately apply the clarification to the most appropriate section(s):
- * Functional ambiguity → Update or add a bullet in Functional Requirements.
- * User interaction / actor distinction → Update User Stories or Actors subsection (if present) with clarified role, constraint, or scenario.
- * Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly.
- * Non-functional constraint → Add/modify measurable criteria in Non-Functional / Quality Attributes section (convert vague adjective to metric or explicit target).
- * Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such subsection if template provides placeholder for it).
- * Terminology conflict → Normalize term across spec; retain original only if necessary by adding `(formerly referred to as "X")` once.
- - If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text.
- - Save the spec file AFTER each integration to minimize risk of context loss (atomic overwrite).
- - Preserve formatting: do not reorder unrelated sections; keep heading hierarchy intact.
- - Keep each inserted clarification minimal and testable (avoid narrative drift).
-
-6. Validation (performed after EACH write plus final pass):
- - Clarifications session contains exactly one bullet per accepted answer (no duplicates).
- - Total asked (accepted) questions ≤ 5.
- - Updated sections contain no lingering vague placeholders the new answer was meant to resolve.
- - No contradictory earlier statement remains (scan for now-invalid alternative choices removed).
- - Markdown structure valid; only allowed new headings: `## Clarifications`, `### Session YYYY-MM-DD`.
- - Terminology consistency: same canonical term used across all updated sections.
-
-7. Write the updated spec back to `FEATURE_SPEC`.
-
-8. Report completion (after questioning loop ends or early termination):
- - Number of questions asked & answered.
- - Path to updated spec.
- - Sections touched (list names).
- - Coverage summary table listing each taxonomy category with Status: Resolved (was Partial/Missing and addressed), Deferred (exceeds question quota or better suited for planning), Clear (already sufficient), Outstanding (still Partial/Missing but low impact).
- - If any Outstanding or Deferred remain, recommend whether to proceed to `/speckit.plan` or run `/speckit.clarify` again later post-plan.
- - Suggested next command.
-
-Behavior rules:
-- If no meaningful ambiguities found (or all potential questions would be low-impact), respond: "No critical ambiguities detected worth formal clarification." and suggest proceeding.
-- If spec file missing, instruct user to run `/speckit.specify` first (do not create a new spec here).
-- Never exceed 5 total asked questions (clarification retries for a single question do not count as new questions).
-- Avoid speculative tech stack questions unless the absence blocks functional clarity.
-- Respect user early termination signals ("stop", "done", "proceed").
-- If no questions asked due to full coverage, output a compact coverage summary (all categories Clear), then suggest advancing.
-- If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale.
-
-Context for prioritization: $ARGUMENTS
diff --git a/.claude/commands/speckit.constitution.md b/.claude/commands/speckit.constitution.md
deleted file mode 100644
index 7f5684c..0000000
--- a/.claude/commands/speckit.constitution.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-description: Create or update the project constitution from interactive or provided principle inputs, ensuring all dependent templates stay in sync.
----
-
-## User Input
-
-```text
-$ARGUMENTS
-```
-
-You **MUST** consider the user input before proceeding (if not empty).
-
-## Outline
-
-You are updating the project constitution at `.specify/memory/constitution.md`. This file is a TEMPLATE containing placeholder tokens in square brackets (e.g. `[PROJECT_NAME]`, `[PRINCIPLE_1_NAME]`). Your job is to (a) collect/derive concrete values, (b) fill the template precisely, and (c) propagate any amendments across dependent artifacts.
-
-Follow this execution flow:
-
-1. Load the existing constitution template at `.specify/memory/constitution.md`.
- - Identify every placeholder token of the form `[ALL_CAPS_IDENTIFIER]`.
-   **IMPORTANT**: The user might require fewer or more principles than the ones used in the template. If a number is specified, respect it and follow the general template structure. You will update the doc accordingly.
-
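-   A minimal sketch of the placeholder scan (assuming grep with `-E` support; the token format is as described above):
-
-   ```sh
-   # List every distinct [ALL_CAPS_IDENTIFIER] placeholder still present in the template
-   grep -oE '\[[A-Z0-9_]+\]' .specify/memory/constitution.md | sort -u
-   ```
-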
-2. Collect/derive values for placeholders:
- - If user input (conversation) supplies a value, use it.
- - Otherwise infer from existing repo context (README, docs, prior constitution versions if embedded).
- - For governance dates: `RATIFICATION_DATE` is the original adoption date (if unknown ask or mark TODO), `LAST_AMENDED_DATE` is today if changes are made, otherwise keep previous.
- - `CONSTITUTION_VERSION` must increment according to semantic versioning rules:
- * MAJOR: Backward incompatible governance/principle removals or redefinitions.
- * MINOR: New principle/section added or materially expanded guidance.
- * PATCH: Clarifications, wording, typo fixes, non-semantic refinements.
- - If version bump type ambiguous, propose reasoning before finalizing.
-
-3. Draft the updated constitution content:
- - Replace every placeholder with concrete text (no bracketed tokens left except intentionally retained template slots that the project has chosen not to define yet—explicitly justify any left).
-   - Preserve the heading hierarchy; comments can be removed once replaced unless they still add clarifying guidance.
- - Ensure each Principle section: succinct name line, paragraph (or bullet list) capturing non‑negotiable rules, explicit rationale if not obvious.
- - Ensure Governance section lists amendment procedure, versioning policy, and compliance review expectations.
-
-4. Consistency propagation checklist (convert prior checklist into active validations):
- - Read `.specify/templates/plan-template.md` and ensure any "Constitution Check" or rules align with updated principles.
- - Read `.specify/templates/spec-template.md` for scope/requirements alignment—update if constitution adds/removes mandatory sections or constraints.
- - Read `.specify/templates/tasks-template.md` and ensure task categorization reflects new or removed principle-driven task types (e.g., observability, versioning, testing discipline).
-   - Read each command file in `.specify/templates/commands/*.md` (including this one) to verify no outdated references remain (e.g., agent-specific names like CLAUDE where generic guidance is required).
- - Read any runtime guidance docs (e.g., `README.md`, `docs/quickstart.md`, or agent-specific guidance files if present). Update references to principles changed.
-
-5. Produce a Sync Impact Report (prepend as an HTML comment at top of the constitution file after update):
- - Version change: old → new
- - List of modified principles (old title → new title if renamed)
- - Added sections
- - Removed sections
- - Templates requiring updates (✅ updated / ⚠ pending) with file paths
- - Follow-up TODOs if any placeholders intentionally deferred.
-
-6. Validation before final output:
- - No remaining unexplained bracket tokens.
- - Version line matches report.
- - Dates ISO format YYYY-MM-DD.
- - Principles are declarative, testable, and free of vague language ("should" → replace with MUST/SHOULD rationale where appropriate).
-
-7. Write the completed constitution back to `.specify/memory/constitution.md` (overwrite).
-
-8. Output a final summary to the user with:
- - New version and bump rationale.
- - Any files flagged for manual follow-up.
- - Suggested commit message (e.g., `docs: amend constitution to vX.Y.Z (principle additions + governance update)`).
-
-Formatting & Style Requirements:
-- Use Markdown headings exactly as in the template (do not demote/promote levels).
-- Wrap long rationale lines to keep readability (<100 chars ideally) but do not hard enforce with awkward breaks.
-- Keep a single blank line between sections.
-- Avoid trailing whitespace.
-
-If the user supplies partial updates (e.g., only one principle revision), still perform validation and version decision steps.
-
-If critical info missing (e.g., ratification date truly unknown), insert `TODO(): explanation` and include in the Sync Impact Report under deferred items.
-
-Do not create a new template; always operate on the existing `.specify/memory/constitution.md` file.
diff --git a/.claude/commands/speckit.implement.md b/.claude/commands/speckit.implement.md
deleted file mode 100644
index 1111ff3..0000000
--- a/.claude/commands/speckit.implement.md
+++ /dev/null
@@ -1,128 +0,0 @@
----
-description: Execute the implementation plan by processing and executing all tasks defined in tasks.md
----
-
-## User Input
-
-```text
-$ARGUMENTS
-```
-
-You **MUST** consider the user input before proceeding (if not empty).
-
-## Outline
-
-1. Run `.specify/scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
-
-2. **Check checklists status** (if FEATURE_DIR/checklists/ exists):
- - Scan all checklist files in the checklists/ directory
- - For each checklist, count:
- * Total items: All lines matching `- [ ]` or `- [X]` or `- [x]`
- * Completed items: Lines matching `- [X]` or `- [x]`
- * Incomplete items: Lines matching `- [ ]`
- - Create a status table:
- ```
- | Checklist | Total | Completed | Incomplete | Status |
- |-----------|-------|-----------|------------|--------|
- | ux.md | 12 | 12 | 0 | ✓ PASS |
- | test.md | 8 | 5 | 3 | ✗ FAIL |
- | security.md | 6 | 6 | 0 | ✓ PASS |
- ```
- - Calculate overall status:
- * **PASS**: All checklists have 0 incomplete items
- * **FAIL**: One or more checklists have incomplete items
-
- - **If any checklist is incomplete**:
- * Display the table with incomplete item counts
- * **STOP** and ask: "Some checklists are incomplete. Do you want to proceed with implementation anyway? (yes/no)"
- * Wait for user response before continuing
- * If user says "no" or "wait" or "stop", halt execution
- * If user says "yes" or "proceed" or "continue", proceed to step 3
-
- - **If all checklists are complete**:
- * Display the table showing all checklists passed
- * Automatically proceed to step 3
-
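-   As a rough sketch of the counting logic above (assuming `FEATURE_DIR` from step 1, grep with `-E` support, and a POSIX shell; report formatting is left to the agent):
-
-   ```sh
-   for f in "$FEATURE_DIR"/checklists/*.md; do
-     total=$(grep -cE '^- \[( |x|X)\]' "$f")
-     completed=$(grep -cE '^- \[(x|X)\]' "$f")
-     incomplete=$((total - completed))
-     echo "$(basename "$f"): total=$total completed=$completed incomplete=$incomplete"
-   done
-   ```
-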
-3. Load and analyze the implementation context:
- - **REQUIRED**: Read tasks.md for the complete task list and execution plan
- - **REQUIRED**: Read plan.md for tech stack, architecture, and file structure
- - **IF EXISTS**: Read data-model.md for entities and relationships
- - **IF EXISTS**: Read contracts/ for API specifications and test requirements
- - **IF EXISTS**: Read research.md for technical decisions and constraints
- - **IF EXISTS**: Read quickstart.md for integration scenarios
-
-4. **Project Setup Verification**:
- - **REQUIRED**: Create/verify ignore files based on actual project setup:
-
- **Detection & Creation Logic**:
- - Check if the following command succeeds to determine if the repository is a git repo (create/verify .gitignore if so):
-
- ```sh
- git rev-parse --git-dir 2>/dev/null
- ```
- - Check if Dockerfile* exists or Docker in plan.md → create/verify .dockerignore
- - Check if .eslintrc* or eslint.config.* exists → create/verify .eslintignore
- - Check if .prettierrc* exists → create/verify .prettierignore
- - Check if .npmrc or package.json exists → create/verify .npmignore (if publishing)
- - Check if terraform files (*.tf) exist → create/verify .terraformignore
- - Check if .helmignore needed (helm charts present) → create/verify .helmignore
-
- **If ignore file already exists**: Verify it contains essential patterns, append missing critical patterns only
- **If ignore file missing**: Create with full pattern set for detected technology
-
- **Common Patterns by Technology** (from plan.md tech stack):
- - **Node.js/JavaScript**: `node_modules/`, `dist/`, `build/`, `*.log`, `.env*`
- - **Python**: `__pycache__/`, `*.pyc`, `.venv/`, `venv/`, `dist/`, `*.egg-info/`
- - **Java**: `target/`, `*.class`, `*.jar`, `.gradle/`, `build/`
- - **C#/.NET**: `bin/`, `obj/`, `*.user`, `*.suo`, `packages/`
- - **Go**: `*.exe`, `*.test`, `vendor/`, `*.out`
- - **Ruby**: `.bundle/`, `log/`, `tmp/`, `*.gem`, `vendor/bundle/`
- - **PHP**: `vendor/`, `*.log`, `*.cache`, `*.env`
- - **Rust**: `target/`, `debug/`, `release/`, `*.rs.bk`, `*.rlib`, `*.prof*`, `.idea/`, `*.log`, `.env*`
- - **Kotlin**: `build/`, `out/`, `.gradle/`, `.idea/`, `*.class`, `*.jar`, `*.iml`, `*.log`, `.env*`
- - **C++**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.so`, `*.a`, `*.exe`, `*.dll`, `.idea/`, `*.log`, `.env*`
- - **C**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.a`, `*.so`, `*.exe`, `Makefile`, `config.log`, `.idea/`, `*.log`, `.env*`
- - **Universal**: `.DS_Store`, `Thumbs.db`, `*.tmp`, `*.swp`, `.vscode/`, `.idea/`
-
- **Tool-Specific Patterns**:
- - **Docker**: `node_modules/`, `.git/`, `Dockerfile*`, `.dockerignore`, `*.log*`, `.env*`, `coverage/`
- - **ESLint**: `node_modules/`, `dist/`, `build/`, `coverage/`, `*.min.js`
- - **Prettier**: `node_modules/`, `dist/`, `build/`, `coverage/`, `package-lock.json`, `yarn.lock`, `pnpm-lock.yaml`
- - **Terraform**: `.terraform/`, `*.tfstate*`, `*.tfvars`, `.terraform.lock.hcl`
-
-5. Parse tasks.md structure and extract:
- - **Task phases**: Setup, Tests, Core, Integration, Polish
- - **Task dependencies**: Sequential vs parallel execution rules
- - **Task details**: ID, description, file paths, parallel markers [P]
- - **Execution flow**: Order and dependency requirements
-
-6. Execute implementation following the task plan:
- - **Phase-by-phase execution**: Complete each phase before moving to the next
- - **Respect dependencies**: Run sequential tasks in order, parallel tasks [P] can run together
- - **Follow TDD approach**: Execute test tasks before their corresponding implementation tasks
- - **File-based coordination**: Tasks affecting the same files must run sequentially
- - **Validation checkpoints**: Verify each phase completion before proceeding
-
-7. Implementation execution rules:
- - **Setup first**: Initialize project structure, dependencies, configuration
-   - **Tests before code**: If tests are required, write tests for contracts, entities, and integration scenarios before the corresponding implementation
- - **Core development**: Implement models, services, CLI commands, endpoints
- - **Integration work**: Database connections, middleware, logging, external services
- - **Polish and validation**: Unit tests, performance optimization, documentation
-
-8. Progress tracking and error handling:
- - Report progress after each completed task
- - Halt execution if any non-parallel task fails
- - For parallel tasks [P], continue with successful tasks, report failed ones
- - Provide clear error messages with context for debugging
- - Suggest next steps if implementation cannot proceed
- - **IMPORTANT** For completed tasks, make sure to mark the task off as [X] in the tasks file.
-
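-   For example, marking a single task complete might look like this (a sketch assuming GNU `sed` and an illustrative task ID `T012`):
-
-   ```sh
-   # Flip the checkbox for task T012 from "- [ ]" to "- [X]" in tasks.md
-   sed -i 's/^- \[ \] T012/- [X] T012/' "$FEATURE_DIR/tasks.md"
-   ```
-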
-9. Completion validation:
- - Verify all required tasks are completed
- - Check that implemented features match the original specification
- - Validate that tests pass and coverage meets requirements
- - Confirm the implementation follows the technical plan
- - Report final status with summary of completed work
-
-Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/tasks` first to regenerate the task list.
diff --git a/.claude/commands/speckit.plan.md b/.claude/commands/speckit.plan.md
deleted file mode 100644
index fc405f0..0000000
--- a/.claude/commands/speckit.plan.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-description: Execute the implementation planning workflow using the plan template to generate design artifacts.
----
-
-## User Input
-
-```text
-$ARGUMENTS
-```
-
-You **MUST** consider the user input before proceeding (if not empty).
-
-## Outline
-
-1. **Setup**: Run `.specify/scripts/bash/setup-plan.sh --json` from repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
-
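-   A rough sketch of this setup step (assuming the script emits a single JSON object with exactly these keys and that `jq` is available):
-
-   ```sh
-   json="$(.specify/scripts/bash/setup-plan.sh --json)"
-   FEATURE_SPEC="$(echo "$json" | jq -r '.FEATURE_SPEC')"
-   IMPL_PLAN="$(echo "$json" | jq -r '.IMPL_PLAN')"
-   SPECS_DIR="$(echo "$json" | jq -r '.SPECS_DIR')"
-   BRANCH="$(echo "$json" | jq -r '.BRANCH')"
-   ```
-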
-2. **Load context**: Read FEATURE_SPEC and `.specify/memory/constitution.md`. Load IMPL_PLAN template (already copied).
-
-3. **Execute plan workflow**: Follow the structure in IMPL_PLAN template to:
- - Fill Technical Context (mark unknowns as "NEEDS CLARIFICATION")
- - Fill Constitution Check section from constitution
- - Evaluate gates (ERROR if violations unjustified)
- - Phase 0: Generate research.md (resolve all NEEDS CLARIFICATION)
- - Phase 1: Generate data-model.md, contracts/, quickstart.md
- - Phase 1: Update agent context by running the agent script
- - Re-evaluate Constitution Check post-design
-
-4. **Stop and report**: Command ends after Phase 2 planning. Report branch, IMPL_PLAN path, and generated artifacts.
-
-## Phases
-
-### Phase 0: Outline & Research
-
-1. **Extract unknowns from Technical Context** above:
- - For each NEEDS CLARIFICATION → research task
- - For each dependency → best practices task
- - For each integration → patterns task
-
-2. **Generate and dispatch research agents**:
- ```
- For each unknown in Technical Context:
- Task: "Research {unknown} for {feature context}"
- For each technology choice:
- Task: "Find best practices for {tech} in {domain}"
- ```
-
-3. **Consolidate findings** in `research.md` using format:
- - Decision: [what was chosen]
- - Rationale: [why chosen]
- - Alternatives considered: [what else evaluated]
-
-**Output**: research.md with all NEEDS CLARIFICATION resolved
-
-### Phase 1: Design & Contracts
-
-**Prerequisites:** `research.md` complete
-
-1. **Extract entities from feature spec** → `data-model.md`:
- - Entity name, fields, relationships
- - Validation rules from requirements
- - State transitions if applicable
-
-2. **Generate API contracts** from functional requirements:
- - For each user action → endpoint
- - Use standard REST/GraphQL patterns
- - Output OpenAPI/GraphQL schema to `/contracts/`
-
-3. **Agent context update**:
- - Run `.specify/scripts/bash/update-agent-context.sh claude`
- - These scripts detect which AI agent is in use
- - Update the appropriate agent-specific context file
- - Add only new technology from current plan
- - Preserve manual additions between markers
-
-**Output**: data-model.md, /contracts/*, quickstart.md, agent-specific file
-
-## Key rules
-
-- Use absolute paths
-- ERROR on gate failures or unresolved clarifications
diff --git a/.claude/commands/speckit.specify.md b/.claude/commands/speckit.specify.md
deleted file mode 100644
index 5cc973d..0000000
--- a/.claude/commands/speckit.specify.md
+++ /dev/null
@@ -1,229 +0,0 @@
----
-description: Create or update the feature specification from a natural language feature description.
----
-
-## User Input
-
-```text
-$ARGUMENTS
-```
-
-You **MUST** consider the user input before proceeding (if not empty).
-
-## Outline
-
-The text the user typed after `/speckit.specify` in the triggering message **is** the feature description. Assume you always have it available in this conversation even if `$ARGUMENTS` appears literally below. Do not ask the user to repeat it unless they provided an empty command.
-
-Given that feature description, do this:
-
-1. **Generate a concise short name** (2-4 words) for the branch:
- - Analyze the feature description and extract the most meaningful keywords
- - Create a 2-4 word short name that captures the essence of the feature
- - Use action-noun format when possible (e.g., "add-user-auth", "fix-payment-bug")
- - Preserve technical terms and acronyms (OAuth2, API, JWT, etc.)
- - Keep it concise but descriptive enough to understand the feature at a glance
- - Examples:
- - "I want to add user authentication" → "user-auth"
- - "Implement OAuth2 integration for the API" → "oauth2-api-integration"
- - "Create a dashboard for analytics" → "analytics-dashboard"
- - "Fix payment processing timeout bug" → "fix-payment-timeout"
-
-2. Run the script `.specify/scripts/bash/create-new-feature.sh --json "$ARGUMENTS"` from repo root **with the short-name argument** and parse its JSON output for BRANCH_NAME and SPEC_FILE. All file paths must be absolute.
-
- **IMPORTANT**:
-
- - Append the short-name argument to the `.specify/scripts/bash/create-new-feature.sh --json "$ARGUMENTS"` command with the 2-4 word short name you created in step 1
- - Bash: `--short-name "your-generated-short-name"`
- - PowerShell: `-ShortName "your-generated-short-name"`
-   - For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot")
- - You must only ever run this script once
- - The JSON is provided in the terminal as output - always refer to it to get the actual content you're looking for
-
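-   As a rough illustration (assuming `jq` is available; the short name here is just the example from step 1):
-
-   ```sh
-   json="$(.specify/scripts/bash/create-new-feature.sh --json "$ARGUMENTS" --short-name "user-auth")"
-   BRANCH_NAME="$(echo "$json" | jq -r '.BRANCH_NAME')"
-   SPEC_FILE="$(echo "$json" | jq -r '.SPEC_FILE')"
-   ```
-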
-3. Load `.specify/templates/spec-template.md` to understand required sections.
-
-4. Follow this execution flow:
-
- 1. Parse user description from Input
- If empty: ERROR "No feature description provided"
- 2. Extract key concepts from description
- Identify: actors, actions, data, constraints
- 3. For unclear aspects:
- - Make informed guesses based on context and industry standards
- - Only mark with [NEEDS CLARIFICATION: specific question] if:
- - The choice significantly impacts feature scope or user experience
- - Multiple reasonable interpretations exist with different implications
- - No reasonable default exists
- - **LIMIT: Maximum 3 [NEEDS CLARIFICATION] markers total**
- - Prioritize clarifications by impact: scope > security/privacy > user experience > technical details
- 4. Fill User Scenarios & Testing section
- If no clear user flow: ERROR "Cannot determine user scenarios"
- 5. Generate Functional Requirements
- Each requirement must be testable
- Use reasonable defaults for unspecified details (document assumptions in Assumptions section)
- 6. Define Success Criteria
- Create measurable, technology-agnostic outcomes
- Include both quantitative metrics (time, performance, volume) and qualitative measures (user satisfaction, task completion)
- Each criterion must be verifiable without implementation details
- 7. Identify Key Entities (if data involved)
- 8. Return: SUCCESS (spec ready for planning)
-
-5. Write the specification to SPEC_FILE using the template structure, replacing placeholders with concrete details derived from the feature description (arguments) while preserving section order and headings.
-
-6. **Specification Quality Validation**: After writing the initial spec, validate it against quality criteria:
-
- a. **Create Spec Quality Checklist**: Generate a checklist file at `FEATURE_DIR/checklists/requirements.md` using the checklist template structure with these validation items:
-
- ```markdown
- # Specification Quality Checklist: [FEATURE NAME]
-
- **Purpose**: Validate specification completeness and quality before proceeding to planning
- **Created**: [DATE]
- **Feature**: [Link to spec.md]
-
- ## Content Quality
-
- - [ ] No implementation details (languages, frameworks, APIs)
- - [ ] Focused on user value and business needs
- - [ ] Written for non-technical stakeholders
- - [ ] All mandatory sections completed
-
- ## Requirement Completeness
-
- - [ ] No [NEEDS CLARIFICATION] markers remain
- - [ ] Requirements are testable and unambiguous
- - [ ] Success criteria are measurable
- - [ ] Success criteria are technology-agnostic (no implementation details)
- - [ ] All acceptance scenarios are defined
- - [ ] Edge cases are identified
- - [ ] Scope is clearly bounded
- - [ ] Dependencies and assumptions identified
-
- ## Feature Readiness
-
- - [ ] All functional requirements have clear acceptance criteria
- - [ ] User scenarios cover primary flows
- - [ ] Feature meets measurable outcomes defined in Success Criteria
- - [ ] No implementation details leak into specification
-
- ## Notes
-
- - Items marked incomplete require spec updates before `/speckit.clarify` or `/speckit.plan`
- ```
-
- b. **Run Validation Check**: Review the spec against each checklist item:
- - For each item, determine if it passes or fails
- - Document specific issues found (quote relevant spec sections)
-
- c. **Handle Validation Results**:
-
- - **If all items pass**: Mark checklist complete and proceed to step 7
-
- - **If items fail (excluding [NEEDS CLARIFICATION])**:
- 1. List the failing items and specific issues
- 2. Update the spec to address each issue
- 3. Re-run validation until all items pass (max 3 iterations)
- 4. If still failing after 3 iterations, document remaining issues in checklist notes and warn user
-
- - **If [NEEDS CLARIFICATION] markers remain**:
- 1. Extract all [NEEDS CLARIFICATION: ...] markers from the spec
- 2. **LIMIT CHECK**: If more than 3 markers exist, keep only the 3 most critical (by scope/security/UX impact) and make informed guesses for the rest
- 3. For each clarification needed (max 3), present options to user in this format:
-
- ```markdown
- ## Question [N]: [Topic]
-
- **Context**: [Quote relevant spec section]
-
- **What we need to know**: [Specific question from NEEDS CLARIFICATION marker]
-
- **Suggested Answers**:
-
- | Option | Answer | Implications |
- |--------|--------|--------------|
- | A | [First suggested answer] | [What this means for the feature] |
- | B | [Second suggested answer] | [What this means for the feature] |
- | C | [Third suggested answer] | [What this means for the feature] |
- | Custom | Provide your own answer | [Explain how to provide custom input] |
-
- **Your choice**: _[Wait for user response]_
- ```
-
- 4. **CRITICAL - Table Formatting**: Ensure markdown tables are properly formatted:
- - Use consistent spacing with pipes aligned
- - Each cell should have spaces around content: `| Content |` not `|Content|`
- - Header separator must have at least 3 dashes: `|--------|`
- - Test that the table renders correctly in markdown preview
- 5. Number questions sequentially (Q1, Q2, Q3 - max 3 total)
- 6. Present all questions together before waiting for responses
- 7. Wait for user to respond with their choices for all questions (e.g., "Q1: A, Q2: Custom - [details], Q3: B")
- 8. Update the spec by replacing each [NEEDS CLARIFICATION] marker with the user's selected or provided answer
- 9. Re-run validation after all clarifications are resolved
-
- d. **Update Checklist**: After each validation iteration, update the checklist file with current pass/fail status
-
-7. Report completion with branch name, spec file path, checklist results, and readiness for the next phase (`/speckit.clarify` or `/speckit.plan`).
-
-**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.
-
-## Quick Guidelines
-
-- Focus on **WHAT** users need and **WHY**.
-- Avoid HOW to implement (no tech stack, APIs, code structure).
-- Written for business stakeholders, not developers.
-- DO NOT create any checklists that are embedded in the spec. That will be a separate command.
-
-### Section Requirements
-
-- **Mandatory sections**: Must be completed for every feature
-- **Optional sections**: Include only when relevant to the feature
-- When a section doesn't apply, remove it entirely (don't leave as "N/A")
-
-### For AI Generation
-
-When creating this spec from a user prompt:
-
-1. **Make informed guesses**: Use context, industry standards, and common patterns to fill gaps
-2. **Document assumptions**: Record reasonable defaults in the Assumptions section
-3. **Limit clarifications**: Maximum 3 [NEEDS CLARIFICATION] markers - use only for critical decisions that:
- - Significantly impact feature scope or user experience
- - Have multiple reasonable interpretations with different implications
- - Lack any reasonable default
-4. **Prioritize clarifications**: scope > security/privacy > user experience > technical details
-5. **Think like a tester**: Every vague requirement should fail the "testable and unambiguous" checklist item
-6. **Common areas needing clarification** (only if no reasonable default exists):
- - Feature scope and boundaries (include/exclude specific use cases)
- - User types and permissions (if multiple conflicting interpretations possible)
- - Security/compliance requirements (when legally/financially significant)
-
-**Examples of reasonable defaults** (don't ask about these):
-
-- Data retention: Industry-standard practices for the domain
-- Performance targets: Standard web/mobile app expectations unless specified
-- Error handling: User-friendly messages with appropriate fallbacks
-- Authentication method: Standard session-based or OAuth2 for web apps
-- Integration patterns: RESTful APIs unless specified otherwise
-
-### Success Criteria Guidelines
-
-Success criteria must be:
-
-1. **Measurable**: Include specific metrics (time, percentage, count, rate)
-2. **Technology-agnostic**: No mention of frameworks, languages, databases, or tools
-3. **User-focused**: Describe outcomes from user/business perspective, not system internals
-4. **Verifiable**: Can be tested/validated without knowing implementation details
-
-**Good examples**:
-
-- "Users can complete checkout in under 3 minutes"
-- "System supports 10,000 concurrent users"
-- "95% of searches return results in under 1 second"
-- "Task completion rate improves by 40%"
-
-**Bad examples** (implementation-focused):
-
-- "API response time is under 200ms" (too technical, use "Users see results instantly")
-- "Database can handle 1000 TPS" (implementation detail, use user-facing metric)
-- "React components render efficiently" (framework-specific)
-- "Redis cache hit rate above 80%" (technology-specific)
diff --git a/.claude/commands/speckit.tasks.md b/.claude/commands/speckit.tasks.md
deleted file mode 100644
index 2c68568..0000000
--- a/.claude/commands/speckit.tasks.md
+++ /dev/null
@@ -1,128 +0,0 @@
----
-description: Generate an actionable, dependency-ordered tasks.md for the feature based on available design artifacts.
----
-
-## User Input
-
-```text
-$ARGUMENTS
-```
-
-You **MUST** consider the user input before proceeding (if not empty).
-
-## Outline
-
-1. **Setup**: Run `.specify/scripts/bash/check-prerequisites.sh --json` from repo root and parse the FEATURE_DIR and AVAILABLE_DOCS values (an example of the parsed output is shown below). All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax, e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
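-
- For illustration only - hypothetical output with placeholder paths (the real values come from the script):
-
- ```bash
- .specify/scripts/bash/check-prerequisites.sh --json
- # => {"FEATURE_DIR":"/path/to/repo/specs/001-user-auth","AVAILABLE_DOCS":["research.md","data-model.md","contracts/","quickstart.md"]}
- ```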
-
-2. **Load design documents**: Read from FEATURE_DIR:
- - **Required**: plan.md (tech stack, libraries, structure), spec.md (user stories with priorities)
- - **Optional**: data-model.md (entities), contracts/ (API endpoints), research.md (decisions), quickstart.md (test scenarios)
- - Note: Not all projects have all documents. Generate tasks based on what's available.
-
-3. **Execute task generation workflow**:
- - Load plan.md and extract tech stack, libraries, project structure
- - Load spec.md and extract user stories with their priorities (P1, P2, P3, etc.)
- - If data-model.md exists: Extract entities and map to user stories
- - If contracts/ exists: Map endpoints to user stories
- - If research.md exists: Extract decisions for setup tasks
- - Generate tasks organized by user story (see Task Generation Rules below)
- - Generate dependency graph showing user story completion order
- - Create parallel execution examples per user story
- - Validate task completeness (each user story has all needed tasks, independently testable)
-
-4. **Generate tasks.md**: Use `.specify/templates/tasks-template.md` as structure, fill with:
- - Correct feature name from plan.md
- - Phase 1: Setup tasks (project initialization)
- - Phase 2: Foundational tasks (blocking prerequisites for all user stories)
- - Phase 3+: One phase per user story (in priority order from spec.md)
- - Each phase includes: story goal, independent test criteria, tests (if requested), implementation tasks
- - Final Phase: Polish & cross-cutting concerns
- - All tasks must follow the strict checklist format (see Task Generation Rules below)
- - Clear file paths for each task
- - Dependencies section showing story completion order
- - Parallel execution examples per story
- - Implementation strategy section (MVP first, incremental delivery)
-
-5. **Report**: Output path to generated tasks.md and summary:
- - Total task count
- - Task count per user story
- - Parallel opportunities identified
- - Independent test criteria for each story
- - Suggested MVP scope (typically just User Story 1)
- - Format validation: Confirm ALL tasks follow the checklist format (checkbox, ID, labels, file paths)
-
-Context for task generation: $ARGUMENTS
-
-The tasks.md should be immediately executable - each task must be specific enough that an LLM can complete it without additional context.
-
-## Task Generation Rules
-
-**CRITICAL**: Tasks MUST be organized by user story to enable independent implementation and testing.
-
-**Tests are OPTIONAL**: Only generate test tasks if explicitly requested in the feature specification or if user requests TDD approach.
-
-### Checklist Format (REQUIRED)
-
-Every task MUST strictly follow this format:
-
-```text
-- [ ] [TaskID] [P?] [Story?] Description with file path
-```
-
-**Format Components**:
-
-1. **Checkbox**: ALWAYS start with `- [ ]` (markdown checkbox)
-2. **Task ID**: Sequential number (T001, T002, T003...) in execution order
-3. **[P] marker**: Include ONLY if task is parallelizable (different files, no dependencies on incomplete tasks)
-4. **[Story] label**: REQUIRED for user story phase tasks only
- - Format: [US1], [US2], [US3], etc. (maps to user stories from spec.md)
- - Setup phase: NO story label
- - Foundational phase: NO story label
- - User Story phases: MUST have story label
- - Polish phase: NO story label
-5. **Description**: Clear action with exact file path
-
-**Examples**:
-
-- ✅ CORRECT: `- [ ] T001 Create project structure per implementation plan`
-- ✅ CORRECT: `- [ ] T005 [P] Implement authentication middleware in src/middleware/auth.py`
-- ✅ CORRECT: `- [ ] T012 [P] [US1] Create User model in src/models/user.py`
-- ✅ CORRECT: `- [ ] T014 [US1] Implement UserService in src/services/user_service.py`
-- ❌ WRONG: `- [ ] Create User model` (missing ID and Story label)
-- ❌ WRONG: `T001 [US1] Create model` (missing checkbox)
-- ❌ WRONG: `- [ ] [US1] Create User model` (missing Task ID)
-- ❌ WRONG: `- [ ] T001 [US1] Create model` (missing file path)
-
-### Task Organization
-
-1. **From User Stories (spec.md)** - PRIMARY ORGANIZATION:
- - Each user story (P1, P2, P3...) gets its own phase
- - Map all related components to their story:
- - Models needed for that story
- - Services needed for that story
- - Endpoints/UI needed for that story
- - If tests requested: Tests specific to that story
- - Mark story dependencies (most stories should be independent)
-
-2. **From Contracts**:
- - Map each contract/endpoint → to the user story it serves
- - If tests requested: Each contract → contract test task [P] before implementation in that story's phase
-
-3. **From Data Model**:
- - Map each entity to the user story(ies) that need it
- - If entity serves multiple stories: Put in earliest story or Setup phase
- - Relationships → service layer tasks in appropriate story phase
-
-4. **From Setup/Infrastructure**:
- - Shared infrastructure → Setup phase (Phase 1)
- - Foundational/blocking tasks → Foundational phase (Phase 2)
- - Story-specific setup → within that story's phase
-
-### Phase Structure
-
-- **Phase 1**: Setup (project initialization)
-- **Phase 2**: Foundational (blocking prerequisites - MUST complete before user stories)
-- **Phase 3+**: User Stories in priority order (P1, P2, P3...)
- - Within each story: Tests (if requested) → Models → Services → Endpoints → Integration
- - Each phase should be a complete, independently testable increment
-- **Final Phase**: Polish & Cross-Cutting Concerns
diff --git a/.specify/memory/constitution.md b/.specify/memory/constitution.md
deleted file mode 100644
index a4670ff..0000000
--- a/.specify/memory/constitution.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# [PROJECT_NAME] Constitution
-
-
-## Core Principles
-
-### [PRINCIPLE_1_NAME]
-
-[PRINCIPLE_1_DESCRIPTION]
-
-
-### [PRINCIPLE_2_NAME]
-
-[PRINCIPLE_2_DESCRIPTION]
-
-
-### [PRINCIPLE_3_NAME]
-
-[PRINCIPLE_3_DESCRIPTION]
-
-
-### [PRINCIPLE_4_NAME]
-
-[PRINCIPLE_4_DESCRIPTION]
-
-
-### [PRINCIPLE_5_NAME]
-
-[PRINCIPLE_5_DESCRIPTION]
-
-
-## [SECTION_2_NAME]
-
-
-[SECTION_2_CONTENT]
-
-
-## [SECTION_3_NAME]
-
-
-[SECTION_3_CONTENT]
-
-
-## Governance
-
-
-[GOVERNANCE_RULES]
-
-
-**Version**: [CONSTITUTION_VERSION] | **Ratified**: [RATIFICATION_DATE] | **Last Amended**: [LAST_AMENDED_DATE]
-
diff --git a/.specify/scripts/bash/check-prerequisites.sh b/.specify/scripts/bash/check-prerequisites.sh
deleted file mode 100755
index 54f32ec..0000000
--- a/.specify/scripts/bash/check-prerequisites.sh
+++ /dev/null
@@ -1,166 +0,0 @@
-#!/usr/bin/env bash
-
-# Consolidated prerequisite checking script
-#
-# This script provides unified prerequisite checking for Spec-Driven Development workflow.
-# It replaces the functionality previously spread across multiple scripts.
-#
-# Usage: ./check-prerequisites.sh [OPTIONS]
-#
-# OPTIONS:
-# --json Output in JSON format
-# --require-tasks Require tasks.md to exist (for implementation phase)
-# --include-tasks Include tasks.md in AVAILABLE_DOCS list
-# --paths-only Only output path variables (no validation)
-# --help, -h Show help message
-#
-# OUTPUTS:
-# JSON mode: {"FEATURE_DIR":"...", "AVAILABLE_DOCS":["..."]}
-# Text mode: FEATURE_DIR:... \n AVAILABLE_DOCS: \n ✓/✗ file.md
-# Paths only: REPO_ROOT: ... \n BRANCH: ... \n FEATURE_DIR: ... etc.
-
-set -e
-
-# Parse command line arguments
-JSON_MODE=false
-REQUIRE_TASKS=false
-INCLUDE_TASKS=false
-PATHS_ONLY=false
-
-for arg in "$@"; do
- case "$arg" in
- --json)
- JSON_MODE=true
- ;;
- --require-tasks)
- REQUIRE_TASKS=true
- ;;
- --include-tasks)
- INCLUDE_TASKS=true
- ;;
- --paths-only)
- PATHS_ONLY=true
- ;;
- --help|-h)
- cat << 'EOF'
-Usage: check-prerequisites.sh [OPTIONS]
-
-Consolidated prerequisite checking for Spec-Driven Development workflow.
-
-OPTIONS:
- --json Output in JSON format
- --require-tasks Require tasks.md to exist (for implementation phase)
- --include-tasks Include tasks.md in AVAILABLE_DOCS list
- --paths-only Only output path variables (no prerequisite validation)
- --help, -h Show this help message
-
-EXAMPLES:
- # Check task prerequisites (plan.md required)
- ./check-prerequisites.sh --json
-
- # Check implementation prerequisites (plan.md + tasks.md required)
- ./check-prerequisites.sh --json --require-tasks --include-tasks
-
- # Get feature paths only (no validation)
- ./check-prerequisites.sh --paths-only
-
-EOF
- exit 0
- ;;
- *)
- echo "ERROR: Unknown option '$arg'. Use --help for usage information." >&2
- exit 1
- ;;
- esac
-done
-
-# Source common functions
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-source "$SCRIPT_DIR/common.sh"
-
-# Get feature paths and validate branch
-eval $(get_feature_paths)
-check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
-
-# If paths-only mode, output paths and exit (support JSON + paths-only combined)
-if $PATHS_ONLY; then
- if $JSON_MODE; then
- # Minimal JSON paths payload (no validation performed)
- printf '{"REPO_ROOT":"%s","BRANCH":"%s","FEATURE_DIR":"%s","FEATURE_SPEC":"%s","IMPL_PLAN":"%s","TASKS":"%s"}\n' \
- "$REPO_ROOT" "$CURRENT_BRANCH" "$FEATURE_DIR" "$FEATURE_SPEC" "$IMPL_PLAN" "$TASKS"
- else
- echo "REPO_ROOT: $REPO_ROOT"
- echo "BRANCH: $CURRENT_BRANCH"
- echo "FEATURE_DIR: $FEATURE_DIR"
- echo "FEATURE_SPEC: $FEATURE_SPEC"
- echo "IMPL_PLAN: $IMPL_PLAN"
- echo "TASKS: $TASKS"
- fi
- exit 0
-fi
-
-# Validate required directories and files
-if [[ ! -d "$FEATURE_DIR" ]]; then
- echo "ERROR: Feature directory not found: $FEATURE_DIR" >&2
- echo "Run /speckit.specify first to create the feature structure." >&2
- exit 1
-fi
-
-if [[ ! -f "$IMPL_PLAN" ]]; then
- echo "ERROR: plan.md not found in $FEATURE_DIR" >&2
- echo "Run /speckit.plan first to create the implementation plan." >&2
- exit 1
-fi
-
-# Check for tasks.md if required
-if $REQUIRE_TASKS && [[ ! -f "$TASKS" ]]; then
- echo "ERROR: tasks.md not found in $FEATURE_DIR" >&2
- echo "Run /speckit.tasks first to create the task list." >&2
- exit 1
-fi
-
-# Build list of available documents
-docs=()
-
-# Always check these optional docs
-[[ -f "$RESEARCH" ]] && docs+=("research.md")
-[[ -f "$DATA_MODEL" ]] && docs+=("data-model.md")
-
-# Check contracts directory (only if it exists and has files)
-if [[ -d "$CONTRACTS_DIR" ]] && [[ -n "$(ls -A "$CONTRACTS_DIR" 2>/dev/null)" ]]; then
- docs+=("contracts/")
-fi
-
-[[ -f "$QUICKSTART" ]] && docs+=("quickstart.md")
-
-# Include tasks.md if requested and it exists
-if $INCLUDE_TASKS && [[ -f "$TASKS" ]]; then
- docs+=("tasks.md")
-fi
-
-# Output results
-if $JSON_MODE; then
- # Build JSON array of documents
- if [[ ${#docs[@]} -eq 0 ]]; then
- json_docs="[]"
- else
- json_docs=$(printf '"%s",' "${docs[@]}")
- json_docs="[${json_docs%,}]"
- fi
-
- printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$FEATURE_DIR" "$json_docs"
-else
- # Text output
- echo "FEATURE_DIR:$FEATURE_DIR"
- echo "AVAILABLE_DOCS:"
-
- # Show status of each potential document
- check_file "$RESEARCH" "research.md"
- check_file "$DATA_MODEL" "data-model.md"
- check_dir "$CONTRACTS_DIR" "contracts/"
- check_file "$QUICKSTART" "quickstart.md"
-
- if $INCLUDE_TASKS; then
- check_file "$TASKS" "tasks.md"
- fi
-fi
diff --git a/.specify/scripts/bash/common.sh b/.specify/scripts/bash/common.sh
deleted file mode 100755
index 6931ecc..0000000
--- a/.specify/scripts/bash/common.sh
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/usr/bin/env bash
-# Common functions and variables for all scripts
-
-# Get repository root, with fallback for non-git repositories
-get_repo_root() {
- if git rev-parse --show-toplevel >/dev/null 2>&1; then
- git rev-parse --show-toplevel
- else
- # Fall back to script location for non-git repos
- local script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
- (cd "$script_dir/../../.." && pwd)
- fi
-}
-
-# Get current branch, with fallback for non-git repositories
-get_current_branch() {
- # First check if SPECIFY_FEATURE environment variable is set
- if [[ -n "${SPECIFY_FEATURE:-}" ]]; then
- echo "$SPECIFY_FEATURE"
- return
- fi
-
- # Then check git if available
- if git rev-parse --abbrev-ref HEAD >/dev/null 2>&1; then
- git rev-parse --abbrev-ref HEAD
- return
- fi
-
- # For non-git repos, try to find the latest feature directory
- local repo_root=$(get_repo_root)
- local specs_dir="$repo_root/specs"
-
- if [[ -d "$specs_dir" ]]; then
- local latest_feature=""
- local highest=0
-
- for dir in "$specs_dir"/*; do
- if [[ -d "$dir" ]]; then
- local dirname=$(basename "$dir")
- if [[ "$dirname" =~ ^([0-9]{3})- ]]; then
- local number=${BASH_REMATCH[1]}
- number=$((10#$number))
- if [[ "$number" -gt "$highest" ]]; then
- highest=$number
- latest_feature=$dirname
- fi
- fi
- fi
- done
-
- if [[ -n "$latest_feature" ]]; then
- echo "$latest_feature"
- return
- fi
- fi
-
- echo "main" # Final fallback
-}
-
-# Check if we have git available
-has_git() {
- git rev-parse --show-toplevel >/dev/null 2>&1
-}
-
-check_feature_branch() {
- local branch="$1"
- local has_git_repo="$2"
-
- # For non-git repos, we can't enforce branch naming but still provide output
- if [[ "$has_git_repo" != "true" ]]; then
- echo "[specify] Warning: Git repository not detected; skipped branch validation" >&2
- return 0
- fi
-
- if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
- echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
- echo "Feature branches should be named like: 001-feature-name" >&2
- return 1
- fi
-
- return 0
-}
-
-get_feature_dir() { echo "$1/specs/$2"; }
-
-# Find feature directory by numeric prefix instead of exact branch match
-# This allows multiple branches to work on the same spec (e.g., 004-fix-bug, 004-add-feature)
-find_feature_dir_by_prefix() {
- local repo_root="$1"
- local branch_name="$2"
- local specs_dir="$repo_root/specs"
-
- # Extract numeric prefix from branch (e.g., "004" from "004-whatever")
- if [[ ! "$branch_name" =~ ^([0-9]{3})- ]]; then
- # If branch doesn't have numeric prefix, fall back to exact match
- echo "$specs_dir/$branch_name"
- return
- fi
-
- local prefix="${BASH_REMATCH[1]}"
-
- # Search for directories in specs/ that start with this prefix
- local matches=()
- if [[ -d "$specs_dir" ]]; then
- for dir in "$specs_dir"/"$prefix"-*; do
- if [[ -d "$dir" ]]; then
- matches+=("$(basename "$dir")")
- fi
- done
- fi
-
- # Handle results
- if [[ ${#matches[@]} -eq 0 ]]; then
- # No match found - return the branch name path (will fail later with clear error)
- echo "$specs_dir/$branch_name"
- elif [[ ${#matches[@]} -eq 1 ]]; then
- # Exactly one match - perfect!
- echo "$specs_dir/${matches[0]}"
- else
- # Multiple matches - this shouldn't happen with proper naming convention
- echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
- echo "Please ensure only one spec directory exists per numeric prefix." >&2
- echo "$specs_dir/$branch_name" # Return something to avoid breaking the script
- fi
-}
-
-get_feature_paths() {
- local repo_root=$(get_repo_root)
- local current_branch=$(get_current_branch)
- local has_git_repo="false"
-
- if has_git; then
- has_git_repo="true"
- fi
-
- # Use prefix-based lookup to support multiple branches per spec
- local feature_dir=$(find_feature_dir_by_prefix "$repo_root" "$current_branch")
-
- cat <<EOF
-REPO_ROOT='$repo_root'
-CURRENT_BRANCH='$current_branch'
-HAS_GIT='$has_git_repo'
-FEATURE_DIR='$feature_dir'
-FEATURE_SPEC='$feature_dir/spec.md'
-IMPL_PLAN='$feature_dir/plan.md'
-TASKS='$feature_dir/tasks.md'
-RESEARCH='$feature_dir/research.md'
-DATA_MODEL='$feature_dir/data-model.md'
-QUICKSTART='$feature_dir/quickstart.md'
-CONTRACTS_DIR='$feature_dir/contracts'
-EOF
-}
-
-check_file() { [[ -f "$1" ]] && echo " ✓ $2" || echo " ✗ $2"; }
-check_dir() { [[ -d "$1" ]] && [[ -n $(ls -A "$1" 2>/dev/null) ]] && echo " ✓ $2" || echo " ✗ $2"; }
-
diff --git a/.specify/scripts/bash/create-new-feature.sh b/.specify/scripts/bash/create-new-feature.sh
deleted file mode 100755
index 510f620..0000000
--- a/.specify/scripts/bash/create-new-feature.sh
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-JSON_MODE=false
-SHORT_NAME=""
-ARGS=()
-i=1
-while [ "$i" -le "$#" ]; do
- arg="${!i}"
- case "$arg" in
- --json)
- JSON_MODE=true
- ;;
- --short-name)
- if [ $((i + 1)) -gt "$#" ]; then
- echo 'Error: --short-name requires a value' >&2
- exit 1
- fi
- i=$((i + 1))
- SHORT_NAME="${!i}"
- ;;
- --help|-h)
- echo "Usage: $0 [--json] [--short-name ] "
- echo ""
- echo "Options:"
- echo " --json Output in JSON format"
- echo " --short-name Provide a custom short name (2-4 words) for the branch"
- echo " --help, -h Show this help message"
- echo ""
- echo "Examples:"
- echo " $0 'Add user authentication system' --short-name 'user-auth'"
- echo " $0 'Implement OAuth2 integration for API'"
- exit 0
- ;;
- *)
- ARGS+=("$arg")
- ;;
- esac
- i=$((i + 1))
-done
-
-FEATURE_DESCRIPTION="${ARGS[*]}"
-if [ -z "$FEATURE_DESCRIPTION" ]; then
- echo "Usage: $0 [--json] [--short-name ] " >&2
- exit 1
-fi
-
-# Function to find the repository root by searching for existing project markers
-find_repo_root() {
- local dir="$1"
- while [ "$dir" != "/" ]; do
- if [ -d "$dir/.git" ] || [ -d "$dir/.specify" ]; then
- echo "$dir"
- return 0
- fi
- dir="$(dirname "$dir")"
- done
- return 1
-}
-
-# Resolve repository root. Prefer git information when available, but fall back
-# to searching for repository markers so the workflow still functions in repositories that
-# were initialised with --no-git.
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-
-if git rev-parse --show-toplevel >/dev/null 2>&1; then
- REPO_ROOT=$(git rev-parse --show-toplevel)
- HAS_GIT=true
-else
- REPO_ROOT="$(find_repo_root "$SCRIPT_DIR")"
- if [ -z "$REPO_ROOT" ]; then
- echo "Error: Could not determine repository root. Please run this script from within the repository." >&2
- exit 1
- fi
- HAS_GIT=false
-fi
-
-cd "$REPO_ROOT"
-
-SPECS_DIR="$REPO_ROOT/specs"
-mkdir -p "$SPECS_DIR"
-
-HIGHEST=0
-if [ -d "$SPECS_DIR" ]; then
- for dir in "$SPECS_DIR"/*; do
- [ -d "$dir" ] || continue
- dirname=$(basename "$dir")
- number=$(echo "$dirname" | grep -o '^[0-9]\+' || echo "0")
- number=$((10#$number))
- if [ "$number" -gt "$HIGHEST" ]; then HIGHEST=$number; fi
- done
-fi
-
-NEXT=$((HIGHEST + 1))
-FEATURE_NUM=$(printf "%03d" "$NEXT")
-
-# Function to generate branch name with stop word filtering and length filtering
-generate_branch_name() {
- local description="$1"
-
- # Common stop words to filter out
- local stop_words="^(i|a|an|the|to|for|of|in|on|at|by|with|from|is|are|was|were|be|been|being|have|has|had|do|does|did|will|would|should|could|can|may|might|must|shall|this|that|these|those|my|your|our|their|want|need|add|get|set)$"
-
- # Convert to lowercase and split into words
- local clean_name=$(echo "$description" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/ /g')
-
- # Filter words: remove stop words and words shorter than 3 chars (unless they're uppercase acronyms in original)
- local meaningful_words=()
- for word in $clean_name; do
- # Skip empty words
- [ -z "$word" ] && continue
-
- # Keep words that are NOT stop words AND (length >= 3 OR are potential acronyms)
- if ! echo "$word" | grep -qiE "$stop_words"; then
- if [ ${#word} -ge 3 ]; then
- meaningful_words+=("$word")
- elif echo "$description" | grep -q "\b${word^^}\b"; then
- # Keep short words if they appear as uppercase in original (likely acronyms)
- meaningful_words+=("$word")
- fi
- fi
- done
-
- # If we have meaningful words, use first 3-4 of them
- if [ ${#meaningful_words[@]} -gt 0 ]; then
- local max_words=3
- if [ ${#meaningful_words[@]} -eq 4 ]; then max_words=4; fi
-
- local result=""
- local count=0
- for word in "${meaningful_words[@]}"; do
- if [ $count -ge $max_words ]; then break; fi
- if [ -n "$result" ]; then result="$result-"; fi
- result="$result$word"
- count=$((count + 1))
- done
- echo "$result"
- else
- # Fallback to original logic if no meaningful words found
- echo "$description" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//' | tr '-' '\n' | grep -v '^$' | head -3 | tr '\n' '-' | sed 's/-$//'
- fi
-}
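-
-# Example (illustrative): "Implement OAuth2 integration for the API" would yield
-# "implement-oauth2-integration-api" ("for" and "the" are dropped as stop words).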
-
-# Generate branch name
-if [ -n "$SHORT_NAME" ]; then
- # Use provided short name, just clean it up
- BRANCH_SUFFIX=$(echo "$SHORT_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//')
-else
- # Generate from description with smart filtering
- BRANCH_SUFFIX=$(generate_branch_name "$FEATURE_DESCRIPTION")
-fi
-
-BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
-
-# GitHub enforces a 244-byte limit on branch names
-# Validate and truncate if necessary
-MAX_BRANCH_LENGTH=244
-if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then
- # Calculate how much we need to trim from suffix
- # Account for: feature number (3) + hyphen (1) = 4 chars
- MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - 4))
-
- # Truncate suffix at word boundary if possible
- TRUNCATED_SUFFIX=$(echo "$BRANCH_SUFFIX" | cut -c1-$MAX_SUFFIX_LENGTH)
- # Remove trailing hyphen if truncation created one
- TRUNCATED_SUFFIX=$(echo "$TRUNCATED_SUFFIX" | sed 's/-$//')
-
- ORIGINAL_BRANCH_NAME="$BRANCH_NAME"
- BRANCH_NAME="${FEATURE_NUM}-${TRUNCATED_SUFFIX}"
-
- >&2 echo "[specify] Warning: Branch name exceeded GitHub's 244-byte limit"
- >&2 echo "[specify] Original: $ORIGINAL_BRANCH_NAME (${#ORIGINAL_BRANCH_NAME} bytes)"
- >&2 echo "[specify] Truncated to: $BRANCH_NAME (${#BRANCH_NAME} bytes)"
-fi
-
-if [ "$HAS_GIT" = true ]; then
- git checkout -b "$BRANCH_NAME"
-else
- >&2 echo "[specify] Warning: Git repository not detected; skipped branch creation for $BRANCH_NAME"
-fi
-
-FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
-mkdir -p "$FEATURE_DIR"
-
-TEMPLATE="$REPO_ROOT/.specify/templates/spec-template.md"
-SPEC_FILE="$FEATURE_DIR/spec.md"
-if [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
-
-# Set the SPECIFY_FEATURE environment variable for the current session
-export SPECIFY_FEATURE="$BRANCH_NAME"
-
-if $JSON_MODE; then
- printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$BRANCH_NAME" "$SPEC_FILE" "$FEATURE_NUM"
-else
- echo "BRANCH_NAME: $BRANCH_NAME"
- echo "SPEC_FILE: $SPEC_FILE"
- echo "FEATURE_NUM: $FEATURE_NUM"
- echo "SPECIFY_FEATURE environment variable set to: $BRANCH_NAME"
-fi
\ No newline at end of file
diff --git a/.specify/scripts/bash/setup-plan.sh b/.specify/scripts/bash/setup-plan.sh
deleted file mode 100755
index 740a143..0000000
--- a/.specify/scripts/bash/setup-plan.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-# Parse command line arguments
-JSON_MODE=false
-ARGS=()
-
-for arg in "$@"; do
- case "$arg" in
- --json)
- JSON_MODE=true
- ;;
- --help|-h)
- echo "Usage: $0 [--json]"
- echo " --json Output results in JSON format"
- echo " --help Show this help message"
- exit 0
- ;;
- *)
- ARGS+=("$arg")
- ;;
- esac
-done
-
-# Get script directory and load common functions
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-source "$SCRIPT_DIR/common.sh"
-
-# Get all paths and variables from common functions
-eval $(get_feature_paths)
-
-# Check if we're on a proper feature branch (only for git repos)
-check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
-
-# Ensure the feature directory exists
-mkdir -p "$FEATURE_DIR"
-
-# Copy plan template if it exists
-TEMPLATE="$REPO_ROOT/.specify/templates/plan-template.md"
-if [[ -f "$TEMPLATE" ]]; then
- cp "$TEMPLATE" "$IMPL_PLAN"
- echo "Copied plan template to $IMPL_PLAN"
-else
- echo "Warning: Plan template not found at $TEMPLATE"
- # Create a basic plan file if template doesn't exist
- touch "$IMPL_PLAN"
-fi
-
-# Output results
-if $JSON_MODE; then
- printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s","HAS_GIT":"%s"}\n' \
- "$FEATURE_SPEC" "$IMPL_PLAN" "$FEATURE_DIR" "$CURRENT_BRANCH" "$HAS_GIT"
-else
- echo "FEATURE_SPEC: $FEATURE_SPEC"
- echo "IMPL_PLAN: $IMPL_PLAN"
- echo "SPECS_DIR: $FEATURE_DIR"
- echo "BRANCH: $CURRENT_BRANCH"
- echo "HAS_GIT: $HAS_GIT"
-fi
-
diff --git a/.specify/scripts/bash/update-agent-context.sh b/.specify/scripts/bash/update-agent-context.sh
deleted file mode 100755
index ba10ec2..0000000
--- a/.specify/scripts/bash/update-agent-context.sh
+++ /dev/null
@@ -1,739 +0,0 @@
-#!/usr/bin/env bash
-
-# Update agent context files with information from plan.md
-#
-# This script maintains AI agent context files by parsing feature specifications
-# and updating agent-specific configuration files with project information.
-#
-# MAIN FUNCTIONS:
-# 1. Environment Validation
-# - Verifies git repository structure and branch information
-# - Checks for required plan.md files and templates
-# - Validates file permissions and accessibility
-#
-# 2. Plan Data Extraction
-# - Parses plan.md files to extract project metadata
-# - Identifies language/version, frameworks, databases, and project types
-# - Handles missing or incomplete specification data gracefully
-#
-# 3. Agent File Management
-# - Creates new agent context files from templates when needed
-# - Updates existing agent files with new project information
-# - Preserves manual additions and custom configurations
-# - Supports multiple AI agent formats and directory structures
-#
-# 4. Content Generation
-# - Generates language-specific build/test commands
-# - Creates appropriate project directory structures
-# - Updates technology stacks and recent changes sections
-# - Maintains consistent formatting and timestamps
-#
-# 5. Multi-Agent Support
-# - Handles agent-specific file paths and naming conventions
-# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, or Amazon Q Developer CLI
-# - Can update single agents or all existing agent files
-# - Creates default Claude file if no agent files exist
-#
-# Usage: ./update-agent-context.sh [agent_type]
-# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|q
-# Leave empty to update all existing agent files
-
-set -e
-
-# Enable strict error handling
-set -u
-set -o pipefail
-
-#==============================================================================
-# Configuration and Global Variables
-#==============================================================================
-
-# Get script directory and load common functions
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-source "$SCRIPT_DIR/common.sh"
-
-# Get all paths and variables from common functions
-eval $(get_feature_paths)
-
-NEW_PLAN="$IMPL_PLAN" # Alias for compatibility with existing code
-AGENT_TYPE="${1:-}"
-
-# Agent-specific file paths
-CLAUDE_FILE="$REPO_ROOT/CLAUDE.md"
-GEMINI_FILE="$REPO_ROOT/GEMINI.md"
-COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md"
-CURSOR_FILE="$REPO_ROOT/.cursor/rules/specify-rules.mdc"
-QWEN_FILE="$REPO_ROOT/QWEN.md"
-AGENTS_FILE="$REPO_ROOT/AGENTS.md"
-WINDSURF_FILE="$REPO_ROOT/.windsurf/rules/specify-rules.md"
-KILOCODE_FILE="$REPO_ROOT/.kilocode/rules/specify-rules.md"
-AUGGIE_FILE="$REPO_ROOT/.augment/rules/specify-rules.md"
-ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md"
-CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
-Q_FILE="$REPO_ROOT/AGENTS.md"
-
-# Template file
-TEMPLATE_FILE="$REPO_ROOT/.specify/templates/agent-file-template.md"
-
-# Global variables for parsed plan data
-NEW_LANG=""
-NEW_FRAMEWORK=""
-NEW_DB=""
-NEW_PROJECT_TYPE=""
-
-#==============================================================================
-# Utility Functions
-#==============================================================================
-
-log_info() {
- echo "INFO: $1"
-}
-
-log_success() {
- echo "✓ $1"
-}
-
-log_error() {
- echo "ERROR: $1" >&2
-}
-
-log_warning() {
- echo "WARNING: $1" >&2
-}
-
-# Cleanup function for temporary files
-cleanup() {
- local exit_code=$?
- rm -f /tmp/agent_update_*_$$
- rm -f /tmp/manual_additions_$$
- exit $exit_code
-}
-
-# Set up cleanup trap
-trap cleanup EXIT INT TERM
-
-#==============================================================================
-# Validation Functions
-#==============================================================================
-
-validate_environment() {
- # Check if we have a current branch/feature (git or non-git)
- if [[ -z "$CURRENT_BRANCH" ]]; then
- log_error "Unable to determine current feature"
- if [[ "$HAS_GIT" == "true" ]]; then
- log_info "Make sure you're on a feature branch"
- else
- log_info "Set SPECIFY_FEATURE environment variable or create a feature first"
- fi
- exit 1
- fi
-
- # Check if plan.md exists
- if [[ ! -f "$NEW_PLAN" ]]; then
- log_error "No plan.md found at $NEW_PLAN"
- log_info "Make sure you're working on a feature with a corresponding spec directory"
- if [[ "$HAS_GIT" != "true" ]]; then
- log_info "Use: export SPECIFY_FEATURE=your-feature-name or create a new feature first"
- fi
- exit 1
- fi
-
- # Check if template exists (needed for new files)
- if [[ ! -f "$TEMPLATE_FILE" ]]; then
- log_warning "Template file not found at $TEMPLATE_FILE"
- log_warning "Creating new agent files will fail"
- fi
-}
-
-#==============================================================================
-# Plan Parsing Functions
-#==============================================================================
-
-extract_plan_field() {
- local field_pattern="$1"
- local plan_file="$2"
-
- grep "^\*\*${field_pattern}\*\*: " "$plan_file" 2>/dev/null | \
- head -1 | \
- sed "s|^\*\*${field_pattern}\*\*: ||" | \
- sed 's/^[ \t]*//;s/[ \t]*$//' | \
- grep -v "NEEDS CLARIFICATION" | \
- grep -v "^N/A$" || echo ""
-}
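-
-# Example (illustrative): a plan.md line such as "**Language/Version**: Python 3.11"
-# yields "Python 3.11"; values containing "NEEDS CLARIFICATION" or exactly "N/A" yield "".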
-
-parse_plan_data() {
- local plan_file="$1"
-
- if [[ ! -f "$plan_file" ]]; then
- log_error "Plan file not found: $plan_file"
- return 1
- fi
-
- if [[ ! -r "$plan_file" ]]; then
- log_error "Plan file is not readable: $plan_file"
- return 1
- fi
-
- log_info "Parsing plan data from $plan_file"
-
- NEW_LANG=$(extract_plan_field "Language/Version" "$plan_file")
- NEW_FRAMEWORK=$(extract_plan_field "Primary Dependencies" "$plan_file")
- NEW_DB=$(extract_plan_field "Storage" "$plan_file")
- NEW_PROJECT_TYPE=$(extract_plan_field "Project Type" "$plan_file")
-
- # Log what we found
- if [[ -n "$NEW_LANG" ]]; then
- log_info "Found language: $NEW_LANG"
- else
- log_warning "No language information found in plan"
- fi
-
- if [[ -n "$NEW_FRAMEWORK" ]]; then
- log_info "Found framework: $NEW_FRAMEWORK"
- fi
-
- if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]]; then
- log_info "Found database: $NEW_DB"
- fi
-
- if [[ -n "$NEW_PROJECT_TYPE" ]]; then
- log_info "Found project type: $NEW_PROJECT_TYPE"
- fi
-}
-
-format_technology_stack() {
- local lang="$1"
- local framework="$2"
- local parts=()
-
- # Add non-empty parts
- [[ -n "$lang" && "$lang" != "NEEDS CLARIFICATION" ]] && parts+=("$lang")
- [[ -n "$framework" && "$framework" != "NEEDS CLARIFICATION" && "$framework" != "N/A" ]] && parts+=("$framework")
-
- # Join with proper formatting
- if [[ ${#parts[@]} -eq 0 ]]; then
- echo ""
- elif [[ ${#parts[@]} -eq 1 ]]; then
- echo "${parts[0]}"
- else
- # Join multiple parts with " + "
- local result="${parts[0]}"
- for ((i=1; i<${#parts[@]}; i++)); do
- result="$result + ${parts[i]}"
- done
- echo "$result"
- fi
-}
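-
-# Example (illustrative): format_technology_stack "Python 3.11" "FastAPI" -> "Python 3.11 + FastAPI";
-# a single non-empty part (e.g. just "Rust 1.75") is returned as-is.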
-
-#==============================================================================
-# Template and Content Generation Functions
-#==============================================================================
-
-get_project_structure() {
- local project_type="$1"
-
- if [[ "$project_type" == *"web"* ]]; then
- echo "backend/\\nfrontend/\\ntests/"
- else
- echo "src/\\ntests/"
- fi
-}
-
-get_commands_for_language() {
- local lang="$1"
-
- case "$lang" in
- *"Python"*)
- echo "cd src && pytest && ruff check ."
- ;;
- *"Rust"*)
- echo "cargo test && cargo clippy"
- ;;
- *"JavaScript"*|*"TypeScript"*)
- echo "npm test \&\& npm run lint"
- ;;
- *)
- echo "# Add commands for $lang"
- ;;
- esac
-}
-
-get_language_conventions() {
- local lang="$1"
- echo "$lang: Follow standard conventions"
-}
-
-create_new_agent_file() {
- local target_file="$1"
- local temp_file="$2"
- local project_name="$3"
- local current_date="$4"
-
- if [[ ! -f "$TEMPLATE_FILE" ]]; then
- log_error "Template not found at $TEMPLATE_FILE"
- return 1
- fi
-
- if [[ ! -r "$TEMPLATE_FILE" ]]; then
- log_error "Template file is not readable: $TEMPLATE_FILE"
- return 1
- fi
-
- log_info "Creating new agent context file from template..."
-
- if ! cp "$TEMPLATE_FILE" "$temp_file"; then
- log_error "Failed to copy template file"
- return 1
- fi
-
- # Replace template placeholders
- local project_structure
- project_structure=$(get_project_structure "$NEW_PROJECT_TYPE")
-
- local commands
- commands=$(get_commands_for_language "$NEW_LANG")
-
- local language_conventions
- language_conventions=$(get_language_conventions "$NEW_LANG")
-
- # Perform substitutions with error checking using safer approach
- # Escape special characters for sed by using a different delimiter or escaping
- local escaped_lang=$(printf '%s\n' "$NEW_LANG" | sed 's/[\[\.*^$()+{}|]/\\&/g')
- local escaped_framework=$(printf '%s\n' "$NEW_FRAMEWORK" | sed 's/[\[\.*^$()+{}|]/\\&/g')
- local escaped_branch=$(printf '%s\n' "$CURRENT_BRANCH" | sed 's/[\[\.*^$()+{}|]/\\&/g')
-
- # Build technology stack and recent change strings conditionally
- local tech_stack
- if [[ -n "$escaped_lang" && -n "$escaped_framework" ]]; then
- tech_stack="- $escaped_lang + $escaped_framework ($escaped_branch)"
- elif [[ -n "$escaped_lang" ]]; then
- tech_stack="- $escaped_lang ($escaped_branch)"
- elif [[ -n "$escaped_framework" ]]; then
- tech_stack="- $escaped_framework ($escaped_branch)"
- else
- tech_stack="- ($escaped_branch)"
- fi
-
- local recent_change
- if [[ -n "$escaped_lang" && -n "$escaped_framework" ]]; then
- recent_change="- $escaped_branch: Added $escaped_lang + $escaped_framework"
- elif [[ -n "$escaped_lang" ]]; then
- recent_change="- $escaped_branch: Added $escaped_lang"
- elif [[ -n "$escaped_framework" ]]; then
- recent_change="- $escaped_branch: Added $escaped_framework"
- else
- recent_change="- $escaped_branch: Added"
- fi
-
- local substitutions=(
- "s|\[PROJECT NAME\]|$project_name|"
- "s|\[DATE\]|$current_date|"
- "s|\[EXTRACTED FROM ALL PLAN.MD FILES\]|$tech_stack|"
- "s|\[ACTUAL STRUCTURE FROM PLANS\]|$project_structure|g"
- "s|\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]|$commands|"
- "s|\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE\]|$language_conventions|"
- "s|\[LAST 3 FEATURES AND WHAT THEY ADDED\]|$recent_change|"
- )
-
- for substitution in "${substitutions[@]}"; do
- if ! sed -i.bak -e "$substitution" "$temp_file"; then
- log_error "Failed to perform substitution: $substitution"
- rm -f "$temp_file" "$temp_file.bak"
- return 1
- fi
- done
-
- # Convert \n sequences to actual newlines
- # Use ANSI-C quoting: $(printf '\n') would be stripped to an empty string by command substitution
- newline=$'\n'
- sed -i.bak2 "s/\\\\n/\\${newline}/g" "$temp_file"
-
- # Clean up backup files
- rm -f "$temp_file.bak" "$temp_file.bak2"
-
- return 0
-}
-
-
-
-
-update_existing_agent_file() {
- local target_file="$1"
- local current_date="$2"
-
- log_info "Updating existing agent context file..."
-
- # Use a single temporary file for atomic update
- local temp_file
- temp_file=$(mktemp) || {
- log_error "Failed to create temporary file"
- return 1
- }
-
- # Process the file in one pass
- local tech_stack=$(format_technology_stack "$NEW_LANG" "$NEW_FRAMEWORK")
- local new_tech_entries=()
- local new_change_entry=""
-
- # Prepare new technology entries
- if [[ -n "$tech_stack" ]] && ! grep -q "$tech_stack" "$target_file"; then
- new_tech_entries+=("- $tech_stack ($CURRENT_BRANCH)")
- fi
-
- if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]] && [[ "$NEW_DB" != "NEEDS CLARIFICATION" ]] && ! grep -q "$NEW_DB" "$target_file"; then
- new_tech_entries+=("- $NEW_DB ($CURRENT_BRANCH)")
- fi
-
- # Prepare new change entry
- if [[ -n "$tech_stack" ]]; then
- new_change_entry="- $CURRENT_BRANCH: Added $tech_stack"
- elif [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]] && [[ "$NEW_DB" != "NEEDS CLARIFICATION" ]]; then
- new_change_entry="- $CURRENT_BRANCH: Added $NEW_DB"
- fi
-
- # Process file line by line
- local in_tech_section=false
- local in_changes_section=false
- local tech_entries_added=false
- local changes_entries_added=false
- local existing_changes_count=0
-
- while IFS= read -r line || [[ -n "$line" ]]; do
- # Handle Active Technologies section
- if [[ "$line" == "## Active Technologies" ]]; then
- echo "$line" >> "$temp_file"
- in_tech_section=true
- continue
- elif [[ $in_tech_section == true ]] && [[ "$line" =~ ^##[[:space:]] ]]; then
- # Add new tech entries before closing the section
- if [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
- printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
- tech_entries_added=true
- fi
- echo "$line" >> "$temp_file"
- in_tech_section=false
- continue
- elif [[ $in_tech_section == true ]] && [[ -z "$line" ]]; then
- # Add new tech entries before empty line in tech section
- if [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
- printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
- tech_entries_added=true
- fi
- echo "$line" >> "$temp_file"
- continue
- fi
-
- # Handle Recent Changes section
- if [[ "$line" == "## Recent Changes" ]]; then
- echo "$line" >> "$temp_file"
- # Add new change entry right after the heading
- if [[ -n "$new_change_entry" ]]; then
- echo "$new_change_entry" >> "$temp_file"
- fi
- in_changes_section=true
- changes_entries_added=true
- continue
- elif [[ $in_changes_section == true ]] && [[ "$line" =~ ^##[[:space:]] ]]; then
- echo "$line" >> "$temp_file"
- in_changes_section=false
- continue
- elif [[ $in_changes_section == true ]] && [[ "$line" == "- "* ]]; then
- # Keep only first 2 existing changes
- if [[ $existing_changes_count -lt 2 ]]; then
- echo "$line" >> "$temp_file"
- ((existing_changes_count++))
- fi
- continue
- fi
-
- # Update timestamp
- if [[ "$line" =~ \*\*Last\ updated\*\*:.*[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] ]]; then
- echo "$line" | sed "s/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/$current_date/" >> "$temp_file"
- else
- echo "$line" >> "$temp_file"
- fi
- done < "$target_file"
-
- # Post-loop check: if we're still in the Active Technologies section and haven't added new entries
- if [[ $in_tech_section == true ]] && [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
- printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
- fi
-
- # Move temp file to target atomically
- if ! mv "$temp_file" "$target_file"; then
- log_error "Failed to update target file"
- rm -f "$temp_file"
- return 1
- fi
-
- return 0
-}
-#==============================================================================
-# Main Agent File Update Function
-#==============================================================================
-
-update_agent_file() {
- local target_file="$1"
- local agent_name="$2"
-
- if [[ -z "$target_file" ]] || [[ -z "$agent_name" ]]; then
- log_error "update_agent_file requires target_file and agent_name parameters"
- return 1
- fi
-
- log_info "Updating $agent_name context file: $target_file"
-
- local project_name
- project_name=$(basename "$REPO_ROOT")
- local current_date
- current_date=$(date +%Y-%m-%d)
-
- # Create directory if it doesn't exist
- local target_dir
- target_dir=$(dirname "$target_file")
- if [[ ! -d "$target_dir" ]]; then
- if ! mkdir -p "$target_dir"; then
- log_error "Failed to create directory: $target_dir"
- return 1
- fi
- fi
-
- if [[ ! -f "$target_file" ]]; then
- # Create new file from template
- local temp_file
- temp_file=$(mktemp) || {
- log_error "Failed to create temporary file"
- return 1
- }
-
- if create_new_agent_file "$target_file" "$temp_file" "$project_name" "$current_date"; then
- if mv "$temp_file" "$target_file"; then
- log_success "Created new $agent_name context file"
- else
- log_error "Failed to move temporary file to $target_file"
- rm -f "$temp_file"
- return 1
- fi
- else
- log_error "Failed to create new agent file"
- rm -f "$temp_file"
- return 1
- fi
- else
- # Update existing file
- if [[ ! -r "$target_file" ]]; then
- log_error "Cannot read existing file: $target_file"
- return 1
- fi
-
- if [[ ! -w "$target_file" ]]; then
- log_error "Cannot write to existing file: $target_file"
- return 1
- fi
-
- if update_existing_agent_file "$target_file" "$current_date"; then
- log_success "Updated existing $agent_name context file"
- else
- log_error "Failed to update existing agent file"
- return 1
- fi
- fi
-
- return 0
-}
-
-#==============================================================================
-# Agent Selection and Processing
-#==============================================================================
-
-update_specific_agent() {
- local agent_type="$1"
-
- case "$agent_type" in
- claude)
- update_agent_file "$CLAUDE_FILE" "Claude Code"
- ;;
- gemini)
- update_agent_file "$GEMINI_FILE" "Gemini CLI"
- ;;
- copilot)
- update_agent_file "$COPILOT_FILE" "GitHub Copilot"
- ;;
- cursor-agent)
- update_agent_file "$CURSOR_FILE" "Cursor IDE"
- ;;
- qwen)
- update_agent_file "$QWEN_FILE" "Qwen Code"
- ;;
- opencode)
- update_agent_file "$AGENTS_FILE" "opencode"
- ;;
- codex)
- update_agent_file "$AGENTS_FILE" "Codex CLI"
- ;;
- windsurf)
- update_agent_file "$WINDSURF_FILE" "Windsurf"
- ;;
- kilocode)
- update_agent_file "$KILOCODE_FILE" "Kilo Code"
- ;;
- auggie)
- update_agent_file "$AUGGIE_FILE" "Auggie CLI"
- ;;
- roo)
- update_agent_file "$ROO_FILE" "Roo Code"
- ;;
- codebuddy)
- update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
- ;;
- q)
- update_agent_file "$Q_FILE" "Amazon Q Developer CLI"
- ;;
- *)
- log_error "Unknown agent type '$agent_type'"
- log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|q"
- exit 1
- ;;
- esac
-}
-
-update_all_existing_agents() {
- local found_agent=false
-
- # Check each possible agent file and update if it exists
- if [[ -f "$CLAUDE_FILE" ]]; then
- update_agent_file "$CLAUDE_FILE" "Claude Code"
- found_agent=true
- fi
-
- if [[ -f "$GEMINI_FILE" ]]; then
- update_agent_file "$GEMINI_FILE" "Gemini CLI"
- found_agent=true
- fi
-
- if [[ -f "$COPILOT_FILE" ]]; then
- update_agent_file "$COPILOT_FILE" "GitHub Copilot"
- found_agent=true
- fi
-
- if [[ -f "$CURSOR_FILE" ]]; then
- update_agent_file "$CURSOR_FILE" "Cursor IDE"
- found_agent=true
- fi
-
- if [[ -f "$QWEN_FILE" ]]; then
- update_agent_file "$QWEN_FILE" "Qwen Code"
- found_agent=true
- fi
-
- if [[ -f "$AGENTS_FILE" ]]; then
- update_agent_file "$AGENTS_FILE" "Codex/opencode"
- found_agent=true
- fi
-
- if [[ -f "$WINDSURF_FILE" ]]; then
- update_agent_file "$WINDSURF_FILE" "Windsurf"
- found_agent=true
- fi
-
- if [[ -f "$KILOCODE_FILE" ]]; then
- update_agent_file "$KILOCODE_FILE" "Kilo Code"
- found_agent=true
- fi
-
- if [[ -f "$AUGGIE_FILE" ]]; then
- update_agent_file "$AUGGIE_FILE" "Auggie CLI"
- found_agent=true
- fi
-
- if [[ -f "$ROO_FILE" ]]; then
- update_agent_file "$ROO_FILE" "Roo Code"
- found_agent=true
- fi
-
- if [[ -f "$CODEBUDDY_FILE" ]]; then
- update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
- found_agent=true
- fi
-
- if [[ -f "$Q_FILE" ]]; then
- update_agent_file "$Q_FILE" "Amazon Q Developer CLI"
- found_agent=true
- fi
-
- # If no agent files exist, create a default Claude file
- if [[ "$found_agent" == false ]]; then
- log_info "No existing agent files found, creating default Claude file..."
- update_agent_file "$CLAUDE_FILE" "Claude Code"
- fi
-}
-print_summary() {
- echo
- log_info "Summary of changes:"
-
- if [[ -n "$NEW_LANG" ]]; then
- echo " - Added language: $NEW_LANG"
- fi
-
- if [[ -n "$NEW_FRAMEWORK" ]]; then
- echo " - Added framework: $NEW_FRAMEWORK"
- fi
-
- if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]]; then
- echo " - Added database: $NEW_DB"
- fi
-
- echo
-
- log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|codebuddy|q]"
-}
-
-#==============================================================================
-# Main Execution
-#==============================================================================
-
-main() {
- # Validate environment before proceeding
- validate_environment
-
- log_info "=== Updating agent context files for feature $CURRENT_BRANCH ==="
-
- # Parse the plan file to extract project information
- if ! parse_plan_data "$NEW_PLAN"; then
- log_error "Failed to parse plan data"
- exit 1
- fi
-
- # Process based on agent type argument
- local success=true
-
- if [[ -z "$AGENT_TYPE" ]]; then
- # No specific agent provided - update all existing agent files
- log_info "No agent specified, updating all existing agent files..."
- if ! update_all_existing_agents; then
- success=false
- fi
- else
- # Specific agent provided - update only that agent
- log_info "Updating specific agent: $AGENT_TYPE"
- if ! update_specific_agent "$AGENT_TYPE"; then
- success=false
- fi
- fi
-
- # Print summary
- print_summary
-
- if [[ "$success" == true ]]; then
- log_success "Agent context update completed successfully"
- exit 0
- else
- log_error "Agent context update completed with errors"
- exit 1
- fi
-}
-
-# Execute main function if script is run directly
-if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
- main "$@"
-fi
-
diff --git a/.specify/templates/agent-file-template.md b/.specify/templates/agent-file-template.md
deleted file mode 100644
index f734997..0000000
--- a/.specify/templates/agent-file-template.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# [PROJECT NAME] Development Guidelines
-
-Auto-generated from all feature plans. Last updated: [DATE]
-
-## Active Technologies
-[EXTRACTED FROM ALL PLAN.MD FILES]
-
-## Project Structure
-```
-[ACTUAL STRUCTURE FROM PLANS]
-```
-
-## Commands
-[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES]
-
-## Code Style
-[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE]
-
-## Recent Changes
-[LAST 3 FEATURES AND WHAT THEY ADDED]
-
-
-
diff --git a/.specify/templates/checklist-template.md b/.specify/templates/checklist-template.md
deleted file mode 100644
index 1c8b11a..0000000
--- a/.specify/templates/checklist-template.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# [CHECKLIST TYPE] Checklist: [FEATURE NAME]
-
-**Purpose**: [Brief description of what this checklist covers]
-**Created**: [DATE]
-**Feature**: [Link to spec.md or relevant documentation]
-
-**Note**: This checklist is generated by the `/speckit.checklist` command based on feature context and requirements.
-
-
-
-## [Category 1]
-
-- [ ] CHK001 First checklist item with clear action
-- [ ] CHK002 Second checklist item
-- [ ] CHK003 Third checklist item
-
-## [Category 2]
-
-- [ ] CHK004 Another category item
-- [ ] CHK005 Item with specific criteria
-- [ ] CHK006 Final item in this category
-
-## Notes
-
-- Check items off as completed: `[x]`
-- Add comments or findings inline
-- Link to relevant resources or documentation
-- Items are numbered sequentially for easy reference
-
diff --git a/.specify/templates/plan-template.md b/.specify/templates/plan-template.md
deleted file mode 100644
index 43460c3..0000000
--- a/.specify/templates/plan-template.md
+++ /dev/null
@@ -1,105 +0,0 @@
-# Implementation Plan: [FEATURE]
-
-**Branch**: `[###-feature-name]` | **Date**: [DATE] | **Spec**: [link]
-**Input**: Feature specification from `/specs/[###-feature-name]/spec.md`
-
-**Note**: This template is filled in by the `/speckit.plan` command. See `.claude/commands/speckit.plan.md` for the execution workflow.
-
-## Summary
-
-[Extract from feature spec: primary requirement + technical approach from research]
-
-## Technical Context
-
-
-
-**Language/Version**: [e.g., Python 3.11, Swift 5.9, Rust 1.75 or NEEDS CLARIFICATION]
-**Primary Dependencies**: [e.g., FastAPI, UIKit, LLVM or NEEDS CLARIFICATION]
-**Storage**: [if applicable, e.g., PostgreSQL, CoreData, files or N/A]
-**Testing**: [e.g., pytest, XCTest, cargo test or NEEDS CLARIFICATION]
-**Target Platform**: [e.g., Linux server, iOS 15+, WASM or NEEDS CLARIFICATION]
-**Project Type**: [single/web/mobile - determines source structure]
-**Performance Goals**: [domain-specific, e.g., 1000 req/s, 10k lines/sec, 60 fps or NEEDS CLARIFICATION]
-**Constraints**: [domain-specific, e.g., <200ms p95, <100MB memory, offline-capable or NEEDS CLARIFICATION]
-**Scale/Scope**: [domain-specific, e.g., 10k users, 1M LOC, 50 screens or NEEDS CLARIFICATION]
-
-## Constitution Check
-
-*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.*
-
-[Gates determined based on constitution file]
-
-## Project Structure
-
-### Documentation (this feature)
-
-```
-specs/[###-feature]/
-├── plan.md # This file (/speckit.plan command output)
-├── research.md # Phase 0 output (/speckit.plan command)
-├── data-model.md # Phase 1 output (/speckit.plan command)
-├── quickstart.md # Phase 1 output (/speckit.plan command)
-├── contracts/ # Phase 1 output (/speckit.plan command)
-└── tasks.md # Phase 2 output (/speckit.tasks command - NOT created by /speckit.plan)
-```
-
-### Source Code (repository root)
-
-
-```
-# [REMOVE IF UNUSED] Option 1: Single project (DEFAULT)
-src/
-├── models/
-├── services/
-├── cli/
-└── lib/
-
-tests/
-├── contract/
-├── integration/
-└── unit/
-
-# [REMOVE IF UNUSED] Option 2: Web application (when "frontend" + "backend" detected)
-backend/
-├── src/
-│ ├── models/
-│ ├── services/
-│ └── api/
-└── tests/
-
-frontend/
-├── src/
-│ ├── components/
-│ ├── pages/
-│ └── services/
-└── tests/
-
-# [REMOVE IF UNUSED] Option 3: Mobile + API (when "iOS/Android" detected)
-api/
-└── [same as backend above]
-
-ios/ or android/
-└── [platform-specific structure: feature modules, UI flows, platform tests]
-```
-
-**Structure Decision**: [Document the selected structure and reference the real
-directories captured above]
-
-## Complexity Tracking
-
-*Fill ONLY if Constitution Check has violations that must be justified*
-
-| Violation | Why Needed | Simpler Alternative Rejected Because |
-|-----------|------------|-------------------------------------|
-| [e.g., 4th project] | [current need] | [why 3 projects insufficient] |
-| [e.g., Repository pattern] | [specific problem] | [why direct DB access insufficient] |
-
diff --git a/.specify/templates/spec-template.md b/.specify/templates/spec-template.md
deleted file mode 100644
index 9a83ac6..0000000
--- a/.specify/templates/spec-template.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# Feature Specification: [FEATURE NAME]
-
-**Feature Branch**: `[###-feature-name]`
-**Created**: [DATE]
-**Status**: Draft
-**Input**: User description: "$ARGUMENTS"
-
-## User Scenarios & Testing *(mandatory)*
-
-
-
-### User Story 1 - [Brief Title] (Priority: P1)
-
-[Describe this user journey in plain language]
-
-**Why this priority**: [Explain the value and why it has this priority level]
-
-**Independent Test**: [Describe how this can be tested independently - e.g., "Can be fully tested by [specific action] and delivers [specific value]"]
-
-**Acceptance Scenarios**:
-
-1. **Given** [initial state], **When** [action], **Then** [expected outcome]
-2. **Given** [initial state], **When** [action], **Then** [expected outcome]
-
----
-
-### User Story 2 - [Brief Title] (Priority: P2)
-
-[Describe this user journey in plain language]
-
-**Why this priority**: [Explain the value and why it has this priority level]
-
-**Independent Test**: [Describe how this can be tested independently]
-
-**Acceptance Scenarios**:
-
-1. **Given** [initial state], **When** [action], **Then** [expected outcome]
-
----
-
-### User Story 3 - [Brief Title] (Priority: P3)
-
-[Describe this user journey in plain language]
-
-**Why this priority**: [Explain the value and why it has this priority level]
-
-**Independent Test**: [Describe how this can be tested independently]
-
-**Acceptance Scenarios**:
-
-1. **Given** [initial state], **When** [action], **Then** [expected outcome]
-
----
-
-[Add more user stories as needed, each with an assigned priority]
-
-### Edge Cases
-
-
-
-- What happens when [boundary condition]?
-- How does system handle [error scenario]?
-
-## Requirements *(mandatory)*
-
-
-
-### Functional Requirements
-
-- **FR-001**: System MUST [specific capability, e.g., "allow users to create accounts"]
-- **FR-002**: System MUST [specific capability, e.g., "validate email addresses"]
-- **FR-003**: Users MUST be able to [key interaction, e.g., "reset their password"]
-- **FR-004**: System MUST [data requirement, e.g., "persist user preferences"]
-- **FR-005**: System MUST [behavior, e.g., "log all security events"]
-
-*Example of marking unclear requirements:*
-
-- **FR-006**: System MUST authenticate users via [NEEDS CLARIFICATION: auth method not specified - email/password, SSO, OAuth?]
-- **FR-007**: System MUST retain user data for [NEEDS CLARIFICATION: retention period not specified]
-
-### Key Entities *(include if feature involves data)*
-
-- **[Entity 1]**: [What it represents, key attributes without implementation]
-- **[Entity 2]**: [What it represents, relationships to other entities]
-
-## Success Criteria *(mandatory)*
-
-
-
-### Measurable Outcomes
-
-- **SC-001**: [Measurable metric, e.g., "Users can complete account creation in under 2 minutes"]
-- **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"]
-- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"]
-- **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"]
-
diff --git a/.specify/templates/tasks-template.md b/.specify/templates/tasks-template.md
deleted file mode 100644
index eea131f..0000000
--- a/.specify/templates/tasks-template.md
+++ /dev/null
@@ -1,251 +0,0 @@
----
-description: "Task list template for feature implementation"
----
-
-# Tasks: [FEATURE NAME]
-
-**Input**: Design documents from `/specs/[###-feature-name]/`
-**Prerequisites**: plan.md (required), spec.md (required for user stories), research.md, data-model.md, contracts/
-
-**Tests**: The examples below include test tasks. Tests are OPTIONAL - only include them if explicitly requested in the feature specification.
-
-**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story.
-
-## Format: `[ID] [P?] [Story] Description`
-- **[P]**: Can run in parallel (different files, no dependencies)
-- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3)
-- Include exact file paths in descriptions
-
-## Path Conventions
-- **Single project**: `src/`, `tests/` at repository root
-- **Web app**: `backend/src/`, `frontend/src/`
-- **Mobile**: `api/src/`, `ios/src/` or `android/src/`
-- Paths shown below assume single project - adjust based on plan.md structure
-
-
-
-## Phase 1: Setup (Shared Infrastructure)
-
-**Purpose**: Project initialization and basic structure
-
-- [ ] T001 Create project structure per implementation plan
-- [ ] T002 Initialize [language] project with [framework] dependencies
-- [ ] T003 [P] Configure linting and formatting tools
-
----
-
-## Phase 2: Foundational (Blocking Prerequisites)
-
-**Purpose**: Core infrastructure that MUST be complete before ANY user story can be implemented
-
-**⚠️ CRITICAL**: No user story work can begin until this phase is complete
-
-Examples of foundational tasks (adjust based on your project):
-
-- [ ] T004 Setup database schema and migrations framework
-- [ ] T005 [P] Implement authentication/authorization framework
-- [ ] T006 [P] Setup API routing and middleware structure
-- [ ] T007 Create base models/entities that all stories depend on
-- [ ] T008 Configure error handling and logging infrastructure
-- [ ] T009 Setup environment configuration management
-
-**Checkpoint**: Foundation ready - user story implementation can now begin in parallel
-
----
-
-## Phase 3: User Story 1 - [Title] (Priority: P1) 🎯 MVP
-
-**Goal**: [Brief description of what this story delivers]
-
-**Independent Test**: [How to verify this story works on its own]
-
-### Tests for User Story 1 (OPTIONAL - only if tests requested) ⚠️
-
-**NOTE: Write these tests FIRST, ensure they FAIL before implementation**
-
-- [ ] T010 [P] [US1] Contract test for [endpoint] in tests/contract/test_[name].py
-- [ ] T011 [P] [US1] Integration test for [user journey] in tests/integration/test_[name].py
-
-### Implementation for User Story 1
-
-- [ ] T012 [P] [US1] Create [Entity1] model in src/models/[entity1].py
-- [ ] T013 [P] [US1] Create [Entity2] model in src/models/[entity2].py
-- [ ] T014 [US1] Implement [Service] in src/services/[service].py (depends on T012, T013)
-- [ ] T015 [US1] Implement [endpoint/feature] in src/[location]/[file].py
-- [ ] T016 [US1] Add validation and error handling
-- [ ] T017 [US1] Add logging for user story 1 operations
-
-**Checkpoint**: At this point, User Story 1 should be fully functional and testable independently
-
----
-
-## Phase 4: User Story 2 - [Title] (Priority: P2)
-
-**Goal**: [Brief description of what this story delivers]
-
-**Independent Test**: [How to verify this story works on its own]
-
-### Tests for User Story 2 (OPTIONAL - only if tests requested) ⚠️
-
-- [ ] T018 [P] [US2] Contract test for [endpoint] in tests/contract/test_[name].py
-- [ ] T019 [P] [US2] Integration test for [user journey] in tests/integration/test_[name].py
-
-### Implementation for User Story 2
-
-- [ ] T020 [P] [US2] Create [Entity] model in src/models/[entity].py
-- [ ] T021 [US2] Implement [Service] in src/services/[service].py
-- [ ] T022 [US2] Implement [endpoint/feature] in src/[location]/[file].py
-- [ ] T023 [US2] Integrate with User Story 1 components (if needed)
-
-**Checkpoint**: At this point, User Stories 1 AND 2 should both work independently
-
----
-
-## Phase 5: User Story 3 - [Title] (Priority: P3)
-
-**Goal**: [Brief description of what this story delivers]
-
-**Independent Test**: [How to verify this story works on its own]
-
-### Tests for User Story 3 (OPTIONAL - only if tests requested) ⚠️
-
-- [ ] T024 [P] [US3] Contract test for [endpoint] in tests/contract/test_[name].py
-- [ ] T025 [P] [US3] Integration test for [user journey] in tests/integration/test_[name].py
-
-### Implementation for User Story 3
-
-- [ ] T026 [P] [US3] Create [Entity] model in src/models/[entity].py
-- [ ] T027 [US3] Implement [Service] in src/services/[service].py
-- [ ] T028 [US3] Implement [endpoint/feature] in src/[location]/[file].py
-
-**Checkpoint**: All user stories should now be independently functional
-
----
-
-[Add more user story phases as needed, following the same pattern]
-
----
-
-## Phase N: Polish & Cross-Cutting Concerns
-
-**Purpose**: Improvements that affect multiple user stories
-
-- [ ] TXXX [P] Documentation updates in docs/
-- [ ] TXXX Code cleanup and refactoring
-- [ ] TXXX Performance optimization across all stories
-- [ ] TXXX [P] Additional unit tests (if requested) in tests/unit/
-- [ ] TXXX Security hardening
-- [ ] TXXX Run quickstart.md validation
-
----
-
-## Dependencies & Execution Order
-
-### Phase Dependencies
-
-- **Setup (Phase 1)**: No dependencies - can start immediately
-- **Foundational (Phase 2)**: Depends on Setup completion - BLOCKS all user stories
-- **User Stories (Phase 3+)**: All depend on Foundational phase completion
- - User stories can then proceed in parallel (if staffed)
- - Or sequentially in priority order (P1 → P2 → P3)
-- **Polish (Final Phase)**: Depends on all desired user stories being complete
-
-### User Story Dependencies
-
-- **User Story 1 (P1)**: Can start after Foundational (Phase 2) - No dependencies on other stories
-- **User Story 2 (P2)**: Can start after Foundational (Phase 2) - May integrate with US1 but should be independently testable
-- **User Story 3 (P3)**: Can start after Foundational (Phase 2) - May integrate with US1/US2 but should be independently testable
-
-### Within Each User Story
-
-- Tests (if included) MUST be written and FAIL before implementation
-- Models before services
-- Services before endpoints
-- Core implementation before integration
-- Story complete before moving to next priority
-
-### Parallel Opportunities
-
-- All Setup tasks marked [P] can run in parallel
-- All Foundational tasks marked [P] can run in parallel (within Phase 2)
-- Once Foundational phase completes, all user stories can start in parallel (if team capacity allows)
-- All tests for a user story marked [P] can run in parallel
-- Models within a story marked [P] can run in parallel
-- Different user stories can be worked on in parallel by different team members
-
----
-
-## Parallel Example: User Story 1
-
-```bash
-# Launch all tests for User Story 1 together (if tests requested):
-Task: "Contract test for [endpoint] in tests/contract/test_[name].py"
-Task: "Integration test for [user journey] in tests/integration/test_[name].py"
-
-# Launch all models for User Story 1 together:
-Task: "Create [Entity1] model in src/models/[entity1].py"
-Task: "Create [Entity2] model in src/models/[entity2].py"
-```
-
----
-
-## Implementation Strategy
-
-### MVP First (User Story 1 Only)
-
-1. Complete Phase 1: Setup
-2. Complete Phase 2: Foundational (CRITICAL - blocks all stories)
-3. Complete Phase 3: User Story 1
-4. **STOP and VALIDATE**: Test User Story 1 independently
-5. Deploy/demo if ready
-
-### Incremental Delivery
-
-1. Complete Setup + Foundational → Foundation ready
-2. Add User Story 1 → Test independently → Deploy/Demo (MVP!)
-3. Add User Story 2 → Test independently → Deploy/Demo
-4. Add User Story 3 → Test independently → Deploy/Demo
-5. Each story adds value without breaking previous stories
-
-### Parallel Team Strategy
-
-With multiple developers:
-
-1. Team completes Setup + Foundational together
-2. Once Foundational is done:
- - Developer A: User Story 1
- - Developer B: User Story 2
- - Developer C: User Story 3
-3. Stories complete and integrate independently
-
----
-
-## Notes
-
-- [P] tasks = different files, no dependencies
-- [Story] label maps task to specific user story for traceability
-- Each user story should be independently completable and testable
-- Verify tests fail before implementing
-- Commit after each task or logical group
-- Stop at any checkpoint to validate story independently
-- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence
-
-
-
From 974d87daf7203f95f23b64c6dfb141e89d805e0b Mon Sep 17 00:00:00 2001
From: d-kimsuon
Date: Sat, 25 Oct 2025 01:32:42 +0900
Subject: [PATCH 2/4] implement BE for scheduler feat
---
src/app/api/[[...route]]/route.ts | 4 +
src/server/core/scheduler/config.test.ts | 124 +++++++
src/server/core/scheduler/config.ts | 94 ++++++
src/server/core/scheduler/domain/Job.test.ts | 167 ++++++++++
src/server/core/scheduler/domain/Job.ts | 74 +++++
.../core/scheduler/domain/Scheduler.test.ts | 232 +++++++++++++
src/server/core/scheduler/domain/Scheduler.ts | 313 ++++++++++++++++++
.../presentation/SchedulerController.ts | 106 ++++++
src/server/core/scheduler/schema.ts | 85 +++++
src/server/hono/route.ts | 8 +
10 files changed, 1207 insertions(+)
create mode 100644 src/server/core/scheduler/config.test.ts
create mode 100644 src/server/core/scheduler/config.ts
create mode 100644 src/server/core/scheduler/domain/Job.test.ts
create mode 100644 src/server/core/scheduler/domain/Job.ts
create mode 100644 src/server/core/scheduler/domain/Scheduler.test.ts
create mode 100644 src/server/core/scheduler/domain/Scheduler.ts
create mode 100644 src/server/core/scheduler/presentation/SchedulerController.ts
create mode 100644 src/server/core/scheduler/schema.ts
diff --git a/src/app/api/[[...route]]/route.ts b/src/app/api/[[...route]]/route.ts
index f6fbe2c..3d4d922 100644
--- a/src/app/api/[[...route]]/route.ts
+++ b/src/app/api/[[...route]]/route.ts
@@ -16,6 +16,8 @@ import { GitService } from "../../../server/core/git/services/GitService";
import { ProjectRepository } from "../../../server/core/project/infrastructure/ProjectRepository";
import { ProjectController } from "../../../server/core/project/presentation/ProjectController";
import { ProjectMetaService } from "../../../server/core/project/services/ProjectMetaService";
+import { SchedulerService } from "../../../server/core/scheduler/domain/Scheduler";
+import { SchedulerController } from "../../../server/core/scheduler/presentation/SchedulerController";
import { SessionRepository } from "../../../server/core/session/infrastructure/SessionRepository";
import { VirtualConversationDatabase } from "../../../server/core/session/infrastructure/VirtualConversationDatabase";
import { SessionController } from "../../../server/core/session/presentation/SessionController";
@@ -40,6 +42,7 @@ await Effect.runPromise(
Effect.provide(ClaudeCodePermissionController.Live),
Effect.provide(FileSystemController.Live),
Effect.provide(SSEController.Live),
+ Effect.provide(SchedulerController.Live),
)
.pipe(
/** Application */
@@ -53,6 +56,7 @@ await Effect.runPromise(
Effect.provide(ClaudeCodeSessionProcessService.Live),
Effect.provide(ClaudeCodeService.Live),
Effect.provide(GitService.Live),
+ Effect.provide(SchedulerService.Live),
)
.pipe(
/** Infrastructure */
diff --git a/src/server/core/scheduler/config.test.ts b/src/server/core/scheduler/config.test.ts
new file mode 100644
index 0000000..b483041
--- /dev/null
+++ b/src/server/core/scheduler/config.test.ts
@@ -0,0 +1,124 @@
+import { mkdir, rm } from "node:fs/promises";
+import { tmpdir } from "node:os";
+import { join } from "node:path";
+import { FileSystem, Path } from "@effect/platform";
+import { NodeFileSystem, NodePath } from "@effect/platform-node";
+import { Effect, Layer } from "effect";
+import { afterEach, beforeEach, describe, expect, test } from "vitest";
+import {
+ getConfigPath,
+ initializeConfig,
+ readConfig,
+ writeConfig,
+} from "./config";
+import type { SchedulerConfig } from "./schema";
+
+describe("scheduler config", () => {
+ let testDir: string;
+ const testLayer = Layer.mergeAll(NodeFileSystem.layer, NodePath.layer);
+
+ beforeEach(async () => {
+ testDir = join(tmpdir(), `scheduler-test-${Date.now()}`);
+ await mkdir(testDir, { recursive: true });
+ });
+
+ afterEach(async () => {
+ await rm(testDir, { recursive: true, force: true });
+ });
+
+ test("getConfigPath returns correct path", async () => {
+ const result = await Effect.runPromise(
+ getConfigPath.pipe(Effect.provide(testLayer)),
+ );
+
+ expect(result).toContain(".claude-code-viewer/scheduler/config.json");
+ });
+
+ test("writeConfig and readConfig work correctly", async () => {
+ const config: SchedulerConfig = {
+ jobs: [
+ {
+ id: "test-job-1",
+ name: "Test Job",
+ schedule: {
+ type: "cron",
+ expression: "0 0 * * *",
+ },
+ message: {
+ content: "test message",
+ projectId: "project-1",
+ baseSessionId: null,
+ },
+ enabled: true,
+ concurrencyPolicy: "skip",
+ createdAt: "2025-10-25T00:00:00Z",
+ lastRunAt: null,
+ lastRunStatus: null,
+ },
+ ],
+ };
+
+ const result = await Effect.runPromise(
+ Effect.gen(function* () {
+ yield* writeConfig(config);
+ return yield* readConfig;
+ }).pipe(Effect.provide(testLayer)),
+ );
+
+ expect(result).toEqual(config);
+ });
+
+ test("initializeConfig creates file if not exists", async () => {
+ const result = await Effect.runPromise(
+ Effect.gen(function* () {
+ const configPath = yield* getConfigPath;
+ const fs = yield* FileSystem.FileSystem;
+
+ const exists = yield* fs.exists(configPath);
+ if (exists) {
+ yield* fs.remove(configPath);
+ }
+
+ return yield* initializeConfig;
+ }).pipe(Effect.provide(testLayer)),
+ );
+
+ expect(result).toEqual({ jobs: [] });
+ });
+
+ test("readConfig fails with ConfigFileNotFoundError when file does not exist", async () => {
+ const result = await Effect.runPromise(
+ Effect.gen(function* () {
+ const fs = yield* FileSystem.FileSystem;
+ const configPath = yield* getConfigPath;
+
+ const exists = yield* fs.exists(configPath);
+ if (exists) {
+ yield* fs.remove(configPath);
+ }
+
+ return yield* readConfig;
+ }).pipe(Effect.provide(testLayer), Effect.flip),
+ );
+
+ expect(result._tag).toBe("ConfigFileNotFoundError");
+ });
+
+ test("readConfig fails with ConfigParseError for invalid JSON", async () => {
+ const result = await Effect.runPromise(
+ Effect.gen(function* () {
+ const fs = yield* FileSystem.FileSystem;
+ const path = yield* Path.Path;
+ const configPath = yield* getConfigPath;
+ const configDir = path.dirname(configPath);
+
+ yield* fs.makeDirectory(configDir, { recursive: true });
+ yield* fs.writeFileString(configPath, "{ invalid json }");
+
+ return yield* readConfig;
+ }).pipe(Effect.provide(testLayer), Effect.flip),
+ );
+
+ expect(result._tag).toBe("ConfigParseError");
+ });
+});
diff --git a/src/server/core/scheduler/config.ts b/src/server/core/scheduler/config.ts
new file mode 100644
index 0000000..69a3005
--- /dev/null
+++ b/src/server/core/scheduler/config.ts
@@ -0,0 +1,94 @@
+import { homedir } from "node:os";
+import { FileSystem, Path } from "@effect/platform";
+import { Data, Effect } from "effect";
+import { type SchedulerConfig, schedulerConfigSchema } from "./schema";
+
+class ConfigFileNotFoundError extends Data.TaggedError(
+ "ConfigFileNotFoundError",
+)<{
+ readonly path: string;
+}> {}
+
+class ConfigParseError extends Data.TaggedError("ConfigParseError")<{
+ readonly path: string;
+ readonly cause: unknown;
+}> {}
+
+const CONFIG_DIR = "scheduler";
+const CONFIG_FILE = "config.json";
+
+export const getConfigPath = Effect.gen(function* () {
+ const path = yield* Path.Path;
+ const baseDir = path.resolve(homedir(), ".claude-code-viewer");
+ return path.join(baseDir, CONFIG_DIR, CONFIG_FILE);
+});
+
+export const readConfig = Effect.gen(function* () {
+ const fs = yield* FileSystem.FileSystem;
+ const configPath = yield* getConfigPath;
+
+ const exists = yield* fs.exists(configPath);
+ if (!exists) {
+ return yield* Effect.fail(
+ new ConfigFileNotFoundError({ path: configPath }),
+ );
+ }
+
+ const content = yield* fs.readFileString(configPath);
+
+ const jsonResult = yield* Effect.try({
+ try: () => JSON.parse(content),
+ catch: (error) =>
+ new ConfigParseError({
+ path: configPath,
+ cause: error,
+ }),
+ });
+
+ const parsed = schedulerConfigSchema.safeParse(jsonResult);
+
+ if (!parsed.success) {
+ return yield* Effect.fail(
+ new ConfigParseError({
+ path: configPath,
+ cause: parsed.error,
+ }),
+ );
+ }
+
+ return parsed.data;
+});
+
+export const writeConfig = (config: SchedulerConfig) =>
+ Effect.gen(function* () {
+ const fs = yield* FileSystem.FileSystem;
+ const path = yield* Path.Path;
+ const configPath = yield* getConfigPath;
+ const configDir = path.dirname(configPath);
+
+ yield* fs.makeDirectory(configDir, { recursive: true });
+
+ const content = JSON.stringify(config, null, 2);
+ yield* fs.writeFileString(configPath, content);
+ });
+
+export const initializeConfig = Effect.gen(function* () {
+ const result = yield* readConfig.pipe(
+ Effect.catchTags({
+ ConfigFileNotFoundError: () =>
+ Effect.gen(function* () {
+ const initialConfig: SchedulerConfig = { jobs: [] };
+ yield* writeConfig(initialConfig);
+ return initialConfig;
+ }),
+ ConfigParseError: () =>
+ Effect.gen(function* () {
+ const initialConfig: SchedulerConfig = { jobs: [] };
+ yield* writeConfig(initialConfig);
+ return initialConfig;
+ }),
+ }),
+ );
+
+ return result;
+});
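For reference, a minimal sketch of using these helpers on their own, assuming only the `NodeFileSystem` and `NodePath` layers that the tests above already rely on:

```ts
import { NodeFileSystem, NodePath } from "@effect/platform-node";
import { Effect, Layer } from "effect";
import { initializeConfig, readConfig } from "./config";

// File-system and path capabilities required by getConfigPath/readConfig.
const layer = Layer.mergeAll(NodeFileSystem.layer, NodePath.layer);

const listJobNames = Effect.gen(function* () {
  // Creates ~/.claude-code-viewer/scheduler/config.json with { jobs: [] } if missing.
  yield* initializeConfig;
  const config = yield* readConfig;
  return config.jobs.map((job) => job.name);
});

Effect.runPromise(listJobNames.pipe(Effect.provide(layer))).then(console.log);
```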
diff --git a/src/server/core/scheduler/domain/Job.test.ts b/src/server/core/scheduler/domain/Job.test.ts
new file mode 100644
index 0000000..7bb31ce
--- /dev/null
+++ b/src/server/core/scheduler/domain/Job.test.ts
@@ -0,0 +1,167 @@
+import { describe, expect, test } from "vitest";
+import type { SchedulerJob } from "../schema";
+import { calculateFixedDelay, shouldExecuteJob } from "./Job";
+
+describe("shouldExecuteJob", () => {
+ test("returns false when job is disabled", () => {
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: { type: "cron", expression: "* * * * *" },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: false,
+ concurrencyPolicy: "skip",
+ createdAt: "2025-10-25T00:00:00Z",
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ expect(shouldExecuteJob(job, new Date())).toBe(false);
+ });
+
+ test("returns true for cron job when enabled", () => {
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: { type: "cron", expression: "* * * * *" },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: true,
+ concurrencyPolicy: "skip",
+ createdAt: "2025-10-25T00:00:00Z",
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ expect(shouldExecuteJob(job, new Date())).toBe(true);
+ });
+
+ test("returns false for oneTime fixed job that has already run", () => {
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: { type: "fixed", delayMs: 60000, oneTime: true },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: true,
+ concurrencyPolicy: "skip",
+ createdAt: "2025-10-25T00:00:00Z",
+ lastRunAt: "2025-10-25T00:01:00Z",
+ lastRunStatus: "success",
+ };
+
+ expect(shouldExecuteJob(job, new Date())).toBe(false);
+ });
+
+ test("returns false for oneTime fixed job when scheduled time has not arrived", () => {
+ const createdAt = new Date("2025-10-25T00:00:00Z");
+ const now = new Date("2025-10-25T00:00:30Z");
+
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: { type: "fixed", delayMs: 60000, oneTime: true },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: true,
+ concurrencyPolicy: "skip",
+ createdAt: createdAt.toISOString(),
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ expect(shouldExecuteJob(job, now)).toBe(false);
+ });
+
+ test("returns true for oneTime fixed job when scheduled time has arrived", () => {
+ const createdAt = new Date("2025-10-25T00:00:00Z");
+ const now = new Date("2025-10-25T00:01:01Z");
+
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: { type: "fixed", delayMs: 60000, oneTime: true },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: true,
+ concurrencyPolicy: "skip",
+ createdAt: createdAt.toISOString(),
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ expect(shouldExecuteJob(job, now)).toBe(true);
+ });
+
+ test("returns true for recurring fixed job", () => {
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: { type: "fixed", delayMs: 60000, oneTime: false },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: true,
+ concurrencyPolicy: "skip",
+ createdAt: "2025-10-25T00:00:00Z",
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ expect(shouldExecuteJob(job, new Date())).toBe(true);
+ });
+});
+
+describe("calculateFixedDelay", () => {
+ test("calculates delay correctly for future scheduled time", () => {
+ const createdAt = new Date("2025-10-25T00:00:00Z");
+ const now = new Date("2025-10-25T00:00:30Z");
+
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: { type: "fixed", delayMs: 60000, oneTime: true },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: true,
+ concurrencyPolicy: "skip",
+ createdAt: createdAt.toISOString(),
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ const delay = calculateFixedDelay(job, now);
+ expect(delay).toBe(30000);
+ });
+
+ test("returns 0 for past scheduled time", () => {
+ const createdAt = new Date("2025-10-25T00:00:00Z");
+ const now = new Date("2025-10-25T00:02:00Z");
+
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: { type: "fixed", delayMs: 60000, oneTime: true },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: true,
+ concurrencyPolicy: "skip",
+ createdAt: createdAt.toISOString(),
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ const delay = calculateFixedDelay(job, now);
+ expect(delay).toBe(0);
+ });
+
+ test("throws error for non-fixed schedule type", () => {
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: { type: "cron", expression: "* * * * *" },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: true,
+ concurrencyPolicy: "skip",
+ createdAt: "2025-10-25T00:00:00Z",
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ expect(() => calculateFixedDelay(job, new Date())).toThrow(
+ "Job schedule type must be fixed",
+ );
+ });
+});
diff --git a/src/server/core/scheduler/domain/Job.ts b/src/server/core/scheduler/domain/Job.ts
new file mode 100644
index 0000000..4537ae7
--- /dev/null
+++ b/src/server/core/scheduler/domain/Job.ts
@@ -0,0 +1,74 @@
+import { Effect } from "effect";
+import { ClaudeCodeLifeCycleService } from "../../claude-code/services/ClaudeCodeLifeCycleService";
+import { UserConfigService } from "../../platform/services/UserConfigService";
+import { ProjectRepository } from "../../project/infrastructure/ProjectRepository";
+import type { SchedulerJob } from "../schema";
+
+export const executeJob = (job: SchedulerJob) =>
+ Effect.gen(function* () {
+ const lifeCycleService = yield* ClaudeCodeLifeCycleService;
+ const projectRepository = yield* ProjectRepository;
+ const userConfigService = yield* UserConfigService;
+
+ const { message } = job;
+ const { project } = yield* projectRepository.getProject(message.projectId);
+ const userConfig = yield* userConfigService.getUserConfig();
+
+ if (project.meta.projectPath === null) {
+ return yield* Effect.fail(
+ new Error(`Project path not found for projectId: ${message.projectId}`),
+ );
+ }
+
+ if (message.baseSessionId === null) {
+ yield* lifeCycleService.startTask({
+ baseSession: {
+ cwd: project.meta.projectPath,
+ projectId: message.projectId,
+ sessionId: undefined,
+ },
+ userConfig,
+ message: message.content,
+ });
+ } else {
+ yield* lifeCycleService.continueTask({
+ sessionProcessId: message.baseSessionId,
+ message: message.content,
+ baseSessionId: message.baseSessionId,
+ });
+ }
+ });
+
+export const shouldExecuteJob = (job: SchedulerJob, now: Date): boolean => {
+ if (!job.enabled) {
+ return false;
+ }
+
+ if (job.schedule.type === "cron") {
+ return true;
+ }
+
+ if (job.schedule.type === "fixed" && job.schedule.oneTime) {
+ if (job.lastRunStatus !== null) {
+ return false;
+ }
+
+ const createdAt = new Date(job.createdAt);
+ const scheduledTime = new Date(createdAt.getTime() + job.schedule.delayMs);
+ return now >= scheduledTime;
+ }
+
+ return true;
+};
+
+export const calculateFixedDelay = (job: SchedulerJob, now: Date): number => {
+ if (job.schedule.type !== "fixed") {
+ throw new Error("Job schedule type must be fixed");
+ }
+
+ const createdAt = new Date(job.createdAt);
+ const scheduledTime = new Date(createdAt.getTime() + job.schedule.delayMs);
+ const delay = scheduledTime.getTime() - now.getTime();
+
+ return Math.max(0, delay);
+};
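Both helpers are pure, so they can be exercised directly; a small sketch with a hypothetical one-time job that becomes due five minutes after creation:

```ts
import type { SchedulerJob } from "../schema";
import { calculateFixedDelay, shouldExecuteJob } from "./Job";

// Hypothetical one-time job created just now, scheduled to run in five minutes.
const job: SchedulerJob = {
  id: "example-job",
  name: "Example Job",
  schedule: { type: "fixed", delayMs: 5 * 60_000, oneTime: true },
  message: { content: "run nightly report", projectId: "project-1", baseSessionId: null },
  enabled: true,
  concurrencyPolicy: "skip",
  createdAt: new Date().toISOString(),
  lastRunAt: null,
  lastRunStatus: null,
};

const now = new Date();
console.log(shouldExecuteJob(job, now)); // false: createdAt + delayMs has not arrived yet
console.log(calculateFixedDelay(job, now)); // ~300000 ms remaining, clamped to 0 once overdue
```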
diff --git a/src/server/core/scheduler/domain/Scheduler.test.ts b/src/server/core/scheduler/domain/Scheduler.test.ts
new file mode 100644
index 0000000..4059771
--- /dev/null
+++ b/src/server/core/scheduler/domain/Scheduler.test.ts
@@ -0,0 +1,232 @@
+import { mkdir, rm, unlink } from "node:fs/promises";
+import { homedir, tmpdir } from "node:os";
+import { join } from "node:path";
+import { NodeContext, NodeFileSystem, NodePath } from "@effect/platform-node";
+import { Effect, Layer } from "effect";
+import { afterEach, beforeEach, describe, expect, test } from "vitest";
+import { ClaudeCodeLifeCycleService } from "../../claude-code/services/ClaudeCodeLifeCycleService";
+import { ClaudeCodeSessionProcessService } from "../../claude-code/services/ClaudeCodeSessionProcessService";
+import { EnvService } from "../../platform/services/EnvService";
+import { UserConfigService } from "../../platform/services/UserConfigService";
+import { ProjectRepository } from "../../project/infrastructure/ProjectRepository";
+import type { NewSchedulerJob } from "../schema";
+import { SchedulerService } from "./Scheduler";
+
+describe("SchedulerService", () => {
+ let testDir: string;
+
+ const mockSessionProcessService = Layer.succeed(
+ ClaudeCodeSessionProcessService,
+ {
+ startSessionProcess: () =>
+ Effect.succeed({ sessionProcess: {} as never, task: {} as never }),
+ continueSessionProcess: () =>
+ Effect.succeed({ sessionProcess: {} as never, task: {} as never }),
+ toNotInitializedState: () =>
+ Effect.succeed({ sessionProcess: {} as never, task: {} as never }),
+ toInitializedState: () => Effect.succeed({ sessionProcess: {} as never }),
+ toFileCreatedState: () => Effect.succeed({ sessionProcess: {} as never }),
+ toPausedState: () => Effect.succeed({ sessionProcess: {} as never }),
+ toCompletedState: () =>
+ Effect.succeed({ sessionProcess: {} as never, task: undefined }),
+ dangerouslyChangeProcessState: () => Effect.succeed({} as never),
+ getSessionProcesses: () => Effect.succeed([]),
+ getSessionProcess: () => Effect.succeed({} as never),
+ getTask: () => Effect.succeed({} as never),
+ changeTaskState: () => Effect.succeed({} as never),
+ },
+ );
+
+ const mockLifeCycleService = Layer.succeed(ClaudeCodeLifeCycleService, {
+ startTask: () => Effect.void,
+ continueTask: () => Effect.void,
+ } as never);
+
+ const mockProjectRepository = Layer.succeed(ProjectRepository, {
+ getProject: () =>
+ Effect.succeed({
+ project: {
+ meta: { projectPath: "/tmp/test-project" },
+ },
+ } as never),
+ } as never);
+
+ const mockUserConfigService = Layer.succeed(UserConfigService, {
+ getUserConfig: () =>
+ Effect.succeed({
+ hideNoUserMessageSession: true,
+ unifySameTitleSession: true,
+ enterKeyBehavior: "shift-enter-send",
+ permissionMode: "default",
+ locale: "ja",
+ }),
+ } as never);
+
+ const mockEnvService = Layer.succeed(EnvService, {
+ getEnv: () => Effect.succeed(undefined),
+ } as never);
+
+ const baseLayers = Layer.mergeAll(
+ NodeFileSystem.layer,
+ NodePath.layer,
+ NodeContext.layer,
+ mockSessionProcessService,
+ mockLifeCycleService,
+ mockProjectRepository,
+ mockUserConfigService,
+ mockEnvService,
+ );
+
+ const testLayer = Layer.mergeAll(SchedulerService.Live, baseLayers).pipe(
+ Layer.provide(baseLayers),
+ );
+
+ beforeEach(async () => {
+ testDir = join(tmpdir(), `scheduler-test-${Date.now()}`);
+ await mkdir(testDir, { recursive: true });
+
+ // Clean up existing config file
+ const configPath = join(
+ homedir(),
+ ".claude-code-viewer",
+ "scheduler",
+ "config.json",
+ );
+ try {
+ await unlink(configPath);
+ } catch {
+ // Ignore if file doesn't exist
+ }
+ });
+
+ afterEach(async () => {
+ await rm(testDir, { recursive: true, force: true });
+ });
+
+ test("addJob creates a new job with generated id", async () => {
+ const newJob: NewSchedulerJob = {
+ name: "Test Job",
+ schedule: { type: "cron", expression: "0 0 * * *" },
+ message: {
+ content: "test message",
+ projectId: "project-1",
+ baseSessionId: null,
+ },
+ enabled: false,
+ concurrencyPolicy: "skip",
+ };
+
+ const result = await Effect.runPromise(
+ Effect.gen(function* () {
+ const service = yield* SchedulerService;
+ const job = yield* service.addJob(newJob);
+ return job;
+ }).pipe(Effect.provide(testLayer)),
+ );
+
+ expect(result.id).toBeDefined();
+ expect(result.name).toBe("Test Job");
+ expect(result.createdAt).toBeDefined();
+ expect(result.lastRunAt).toBe(null);
+ expect(result.lastRunStatus).toBe(null);
+ });
+
+ test("getJobs returns all jobs", async () => {
+ const newJob: NewSchedulerJob = {
+ name: "Test Job",
+ schedule: { type: "cron", expression: "0 0 * * *" },
+ message: {
+ content: "test message",
+ projectId: "project-1",
+ baseSessionId: null,
+ },
+ enabled: false,
+ concurrencyPolicy: "skip",
+ };
+
+ const result = await Effect.runPromise(
+ Effect.gen(function* () {
+ const service = yield* SchedulerService;
+ yield* service.addJob(newJob);
+ yield* service.addJob(newJob);
+ return yield* service.getJobs();
+ }).pipe(Effect.provide(testLayer)),
+ );
+
+ expect(result).toHaveLength(2);
+ });
+
+ test("updateJob modifies an existing job", async () => {
+ const newJob: NewSchedulerJob = {
+ name: "Test Job",
+ schedule: { type: "cron", expression: "0 0 * * *" },
+ message: {
+ content: "test message",
+ projectId: "project-1",
+ baseSessionId: null,
+ },
+ enabled: false,
+ concurrencyPolicy: "skip",
+ };
+
+ const result = await Effect.runPromise(
+ Effect.gen(function* () {
+ const service = yield* SchedulerService;
+ const job = yield* service.addJob(newJob);
+ const updated = yield* service.updateJob(job.id, {
+ name: "Updated Job",
+ });
+ return updated;
+ }).pipe(Effect.provide(testLayer)),
+ );
+
+ expect(result.name).toBe("Updated Job");
+ });
+
+ test("deleteJob removes a job", async () => {
+ const newJob: NewSchedulerJob = {
+ name: "Test Job",
+ schedule: { type: "cron", expression: "0 0 * * *" },
+ message: {
+ content: "test message",
+ projectId: "project-1",
+ baseSessionId: null,
+ },
+ enabled: false,
+ concurrencyPolicy: "skip",
+ };
+
+ const result = await Effect.runPromise(
+ Effect.gen(function* () {
+ const service = yield* SchedulerService;
+ const job = yield* service.addJob(newJob);
+ yield* service.deleteJob(job.id);
+ return yield* service.getJobs();
+ }).pipe(Effect.provide(testLayer)),
+ );
+
+ expect(result).toHaveLength(0);
+ });
+
+ test("updateJob fails with SchedulerJobNotFoundError for non-existent job", async () => {
+ const result = await Effect.runPromise(
+ Effect.gen(function* () {
+ const service = yield* SchedulerService;
+ return yield* service.updateJob("non-existent-id", { name: "Updated" });
+ }).pipe(Effect.provide(testLayer), Effect.flip),
+ );
+
+ expect(result._tag).toBe("SchedulerJobNotFoundError");
+ });
+
+ test("deleteJob fails with SchedulerJobNotFoundError for non-existent job", async () => {
+ const result = await Effect.runPromise(
+ Effect.gen(function* () {
+ const service = yield* SchedulerService;
+ return yield* service.deleteJob("non-existent-id");
+ }).pipe(Effect.provide(testLayer), Effect.flip),
+ );
+
+ expect(result._tag).toBe("SchedulerJobNotFoundError");
+ });
+});
diff --git a/src/server/core/scheduler/domain/Scheduler.ts b/src/server/core/scheduler/domain/Scheduler.ts
new file mode 100644
index 0000000..32b3f5e
--- /dev/null
+++ b/src/server/core/scheduler/domain/Scheduler.ts
@@ -0,0 +1,313 @@
+import { randomUUID } from "node:crypto";
+import {
+ Context,
+ Cron,
+ Data,
+ Duration,
+ Effect,
+ Fiber,
+ Layer,
+ Ref,
+ Schedule,
+} from "effect";
+import type { InferEffect } from "../../../lib/effect/types";
+import { initializeConfig, readConfig, writeConfig } from "../config";
+import type {
+ NewSchedulerJob,
+ SchedulerConfig,
+ SchedulerJob,
+ UpdateSchedulerJob,
+} from "../schema";
+import { calculateFixedDelay, executeJob, shouldExecuteJob } from "./Job";
+
+class SchedulerJobNotFoundError extends Data.TaggedError(
+ "SchedulerJobNotFoundError",
+)<{
+ readonly jobId: string;
+}> {}
+
+class InvalidCronExpressionError extends Data.TaggedError(
+ "InvalidCronExpressionError",
+)<{
+ readonly expression: string;
+ readonly cause: unknown;
+}> {}
+
+const LayerImpl = Effect.gen(function* () {
+  const fibersRef = yield* Ref.make<
+    Map<string, Fiber.RuntimeFiber<unknown, unknown>>
+  >(new Map());
+  const runningJobsRef = yield* Ref.make<Set<string>>(new Set());
+
+ const startJob = (job: SchedulerJob) =>
+ Effect.gen(function* () {
+ const now = new Date();
+
+ if (job.schedule.type === "cron") {
+ const cronResult = Cron.parse(job.schedule.expression);
+
+ if (cronResult._tag === "Left") {
+ return yield* Effect.fail(
+ new InvalidCronExpressionError({
+ expression: job.schedule.expression,
+ cause: cronResult.left,
+ }),
+ );
+ }
+
+ const schedule = Schedule.cron(cronResult.right);
+
+ const fiber = yield* Effect.repeat(
+ runJobWithConcurrencyControl(job),
+ schedule,
+ ).pipe(Effect.forkDaemon);
+
+ yield* Ref.update(fibersRef, (fibers) =>
+ new Map(fibers).set(job.id, fiber),
+ );
+ } else if (job.schedule.type === "fixed") {
+ if (!shouldExecuteJob(job, now)) {
+ return;
+ }
+
+ const delay = calculateFixedDelay(job, now);
+ const delayDuration = Duration.millis(delay);
+
+ if (job.schedule.oneTime) {
+ const fiber = yield* Effect.delay(
+ runJobWithConcurrencyControl(job),
+ delayDuration,
+ ).pipe(Effect.forkDaemon);
+
+ yield* Ref.update(fibersRef, (fibers) =>
+ new Map(fibers).set(job.id, fiber),
+ );
+ } else {
+ const schedule = Schedule.spaced(delayDuration);
+
+ const fiber = yield* Effect.repeat(
+ runJobWithConcurrencyControl(job),
+ schedule,
+ ).pipe(Effect.forkDaemon);
+
+ yield* Ref.update(fibersRef, (fibers) =>
+ new Map(fibers).set(job.id, fiber),
+ );
+ }
+ }
+ });
+
+ const runJobWithConcurrencyControl = (job: SchedulerJob) =>
+ Effect.gen(function* () {
+ if (job.concurrencyPolicy === "skip") {
+ const runningJobs = yield* Ref.get(runningJobsRef);
+ if (runningJobs.has(job.id)) {
+ return;
+ }
+ }
+
+ yield* Ref.update(runningJobsRef, (jobs) => new Set(jobs).add(job.id));
+
+ const result = yield* executeJob(job).pipe(
+ Effect.matchEffect({
+ onSuccess: () =>
+ updateJobStatus(job.id, "success", new Date().toISOString()),
+ onFailure: () =>
+ updateJobStatus(job.id, "failed", new Date().toISOString()),
+ }),
+ );
+
+ yield* Ref.update(runningJobsRef, (jobs) => {
+ const newJobs = new Set(jobs);
+ newJobs.delete(job.id);
+ return newJobs;
+ });
+
+ return result;
+ });
+
+ const updateJobStatus = (
+ jobId: string,
+ status: "success" | "failed",
+ runAt: string,
+ ) =>
+ Effect.gen(function* () {
+ const config = yield* readConfig;
+ const job = config.jobs.find((j) => j.id === jobId);
+
+ if (job === undefined) {
+ return;
+ }
+
+ const updatedJob: SchedulerJob = {
+ ...job,
+ lastRunAt: runAt,
+ lastRunStatus: status,
+ };
+
+ const updatedConfig: SchedulerConfig = {
+ jobs: config.jobs.map((j) => (j.id === jobId ? updatedJob : j)),
+ };
+
+ yield* writeConfig(updatedConfig);
+ });
+
+ const stopJob = (jobId: string) =>
+ Effect.gen(function* () {
+ const fibers = yield* Ref.get(fibersRef);
+ const fiber = fibers.get(jobId);
+
+ if (fiber !== undefined) {
+ yield* Fiber.interrupt(fiber);
+ yield* Ref.update(fibersRef, (fibers) => {
+ const newFibers = new Map(fibers);
+ newFibers.delete(jobId);
+ return newFibers;
+ });
+ }
+ });
+
+ const startScheduler = Effect.gen(function* () {
+ yield* initializeConfig;
+ const config = yield* readConfig;
+
+ for (const job of config.jobs) {
+ if (job.enabled) {
+ yield* startJob(job);
+ }
+ }
+ });
+
+ const stopScheduler = Effect.gen(function* () {
+ const fibers = yield* Ref.get(fibersRef);
+
+ for (const fiber of fibers.values()) {
+ yield* Fiber.interrupt(fiber);
+ }
+
+ yield* Ref.set(fibersRef, new Map());
+ });
+
+ const getJobs = () =>
+ Effect.gen(function* () {
+ const config = yield* readConfig.pipe(
+ Effect.catchTags({
+ ConfigFileNotFoundError: () =>
+ initializeConfig.pipe(Effect.map(() => ({ jobs: [] }))),
+ ConfigParseError: () =>
+ initializeConfig.pipe(Effect.map(() => ({ jobs: [] }))),
+ }),
+ );
+ return config.jobs;
+ });
+
+ const addJob = (newJob: NewSchedulerJob) =>
+ Effect.gen(function* () {
+ const config = yield* readConfig.pipe(
+ Effect.catchTags({
+ ConfigFileNotFoundError: () =>
+ initializeConfig.pipe(Effect.map(() => ({ jobs: [] }))),
+ ConfigParseError: () =>
+ initializeConfig.pipe(Effect.map(() => ({ jobs: [] }))),
+ }),
+ );
+ const job: SchedulerJob = {
+ ...newJob,
+ id: randomUUID(),
+ createdAt: new Date().toISOString(),
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ const updatedConfig: SchedulerConfig = {
+ jobs: [...config.jobs, job],
+ };
+
+ yield* writeConfig(updatedConfig);
+
+ if (job.enabled) {
+ yield* startJob(job);
+ }
+
+ return job;
+ });
+
+ const updateJob = (jobId: string, updates: UpdateSchedulerJob) =>
+ Effect.gen(function* () {
+ const config = yield* readConfig.pipe(
+ Effect.catchTags({
+ ConfigFileNotFoundError: () =>
+ initializeConfig.pipe(Effect.map(() => ({ jobs: [] }))),
+ ConfigParseError: () =>
+ initializeConfig.pipe(Effect.map(() => ({ jobs: [] }))),
+ }),
+ );
+ const job = config.jobs.find((j) => j.id === jobId);
+
+ if (job === undefined) {
+ return yield* Effect.fail(new SchedulerJobNotFoundError({ jobId }));
+ }
+
+ yield* stopJob(jobId);
+
+ const updatedJob: SchedulerJob = {
+ ...job,
+ ...updates,
+ };
+
+ const updatedConfig: SchedulerConfig = {
+ jobs: config.jobs.map((j) => (j.id === jobId ? updatedJob : j)),
+ };
+
+ yield* writeConfig(updatedConfig);
+
+ if (updatedJob.enabled) {
+ yield* startJob(updatedJob);
+ }
+
+ return updatedJob;
+ });
+
+ const deleteJob = (jobId: string) =>
+ Effect.gen(function* () {
+ const config = yield* readConfig.pipe(
+ Effect.catchTags({
+ ConfigFileNotFoundError: () =>
+ initializeConfig.pipe(Effect.map(() => ({ jobs: [] }))),
+ ConfigParseError: () =>
+ initializeConfig.pipe(Effect.map(() => ({ jobs: [] }))),
+ }),
+ );
+ const job = config.jobs.find((j) => j.id === jobId);
+
+ if (job === undefined) {
+ return yield* Effect.fail(new SchedulerJobNotFoundError({ jobId }));
+ }
+
+ yield* stopJob(jobId);
+
+ const updatedConfig: SchedulerConfig = {
+ jobs: config.jobs.filter((j) => j.id !== jobId),
+ };
+
+ yield* writeConfig(updatedConfig);
+ });
+
+ return {
+ startScheduler,
+ stopScheduler,
+ getJobs,
+ addJob,
+ updateJob,
+ deleteJob,
+ };
+});
+
+export type ISchedulerService = InferEffect<typeof LayerImpl>;
+
+export class SchedulerService extends Context.Tag("SchedulerService")<
+ SchedulerService,
+ ISchedulerService
+>() {
+ static Live = Layer.effect(this, LayerImpl);
+}
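Cron expressions are validated with `Cron.parse` before a repeat `Schedule` is built, exactly as `startJob` does above; a standalone sketch of that check:

```ts
import { Cron, Schedule } from "effect";

// Cron.parse returns an Either-style result: Left carries the parse error,
// Right carries the parsed Cron used to build the repeat schedule.
const result = Cron.parse("0 9 * * 1-5"); // weekdays at 09:00

if (result._tag === "Right") {
  const schedule = Schedule.cron(result.right); // policy passed to Effect.repeat
  console.log("valid cron expression", schedule);
} else {
  console.error("invalid cron expression", result.left);
}
```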
diff --git a/src/server/core/scheduler/presentation/SchedulerController.ts b/src/server/core/scheduler/presentation/SchedulerController.ts
new file mode 100644
index 0000000..27d6aae
--- /dev/null
+++ b/src/server/core/scheduler/presentation/SchedulerController.ts
@@ -0,0 +1,106 @@
+import type { FileSystem, Path } from "@effect/platform";
+import type { CommandExecutor } from "@effect/platform/CommandExecutor";
+import { Context, Effect, Layer, Runtime } from "effect";
+import { Hono, type Context as HonoContext } from "hono";
+import type { InferEffect } from "../../../lib/effect/types";
+import type { ClaudeCodeLifeCycleService } from "../../claude-code/services/ClaudeCodeLifeCycleService";
+import type { EnvService } from "../../platform/services/EnvService";
+import type { UserConfigService } from "../../platform/services/UserConfigService";
+import type { ProjectRepository } from "../../project/infrastructure/ProjectRepository";
+import { SchedulerService } from "../domain/Scheduler";
+import { newSchedulerJobSchema, updateSchedulerJobSchema } from "../schema";
+
+const LayerImpl = Effect.gen(function* () {
+ const schedulerService = yield* SchedulerService;
+
+ const runtime = yield* Effect.runtime<
+ | FileSystem.FileSystem
+ | Path.Path
+ | CommandExecutor
+ | EnvService
+ | ProjectRepository
+ | UserConfigService
+ | ClaudeCodeLifeCycleService
+ >();
+
+ const app = new Hono()
+ .get("/jobs", async (c: HonoContext) => {
+ const result = await Runtime.runPromise(runtime)(
+ schedulerService.getJobs(),
+ );
+ return c.json(result);
+ })
+ .post("/jobs", async (c: HonoContext) => {
+ const body = await c.req.json();
+ const parsed = newSchedulerJobSchema.safeParse(body);
+
+ if (!parsed.success) {
+ return c.json(
+ { error: "Invalid request body", details: parsed.error },
+ 400,
+ );
+ }
+
+ const result = await Runtime.runPromise(runtime)(
+ schedulerService.addJob(parsed.data),
+ );
+ return c.json(result, 201);
+ })
+ .patch("/jobs/:id", async (c: HonoContext) => {
+ const id = c.req.param("id");
+ const body = await c.req.json();
+ const parsed = updateSchedulerJobSchema.safeParse(body);
+
+ if (!parsed.success) {
+ return c.json(
+ { error: "Invalid request body", details: parsed.error },
+ 400,
+ );
+ }
+
+ const result = await Runtime.runPromise(runtime)(
+ schedulerService
+ .updateJob(id, parsed.data)
+ .pipe(
+ Effect.catchTag("SchedulerJobNotFoundError", () =>
+ Effect.succeed(null),
+ ),
+ ),
+ );
+
+ if (result === null) {
+ return c.json({ error: "Job not found" }, 404);
+ }
+
+ return c.json(result);
+ })
+ .delete("/jobs/:id", async (c: HonoContext) => {
+ const id = c.req.param("id");
+
+ const result = await Runtime.runPromise(runtime)(
+      schedulerService.deleteJob(id).pipe(
+        Effect.map(() => true),
+        Effect.catchTag("SchedulerJobNotFoundError", () =>
+          Effect.succeed(false),
+        ),
+ ),
+ );
+
+ if (!result) {
+ return c.json({ error: "Job not found" }, 404);
+ }
+
+ return c.json({ success: true }, 200);
+ });
+
+ return { app };
+});
+
+export type ISchedulerController = InferEffect<typeof LayerImpl>;
+
+export class SchedulerController extends Context.Tag("SchedulerController")<
+ SchedulerController,
+ ISchedulerController
+>() {
+ static Live = Layer.effect(this, LayerImpl);
+}
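A client-side sketch of calling these endpoints; the `/api/scheduler` prefix is an assumption based on the Next.js catch-all API route and the `/scheduler` mount added to `route.ts` later in this patch, so adjust it to the actual deployment:

```ts
// Hypothetical client helper; the "/api/scheduler" base path is an assumption.
export const createSchedulerJob = async () => {
  const res = await fetch("/api/scheduler/jobs", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      name: "Nightly summary",
      schedule: { type: "cron", expression: "0 0 * * *" },
      message: {
        content: "Summarize today's sessions",
        projectId: "project-1",
        baseSessionId: null,
      },
      enabled: true,
      concurrencyPolicy: "skip",
    }),
  });
  if (!res.ok) throw new Error(`Failed to create job: ${res.status}`);
  return res.json();
};
```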
diff --git a/src/server/core/scheduler/schema.ts b/src/server/core/scheduler/schema.ts
new file mode 100644
index 0000000..7b9b5a5
--- /dev/null
+++ b/src/server/core/scheduler/schema.ts
@@ -0,0 +1,85 @@
+import { z } from "zod";
+
+// Schedule type discriminated union
+export const cronScheduleSchema = z.object({
+ type: z.literal("cron"),
+ expression: z.string(),
+});
+
+export const fixedScheduleSchema = z.object({
+ type: z.literal("fixed"),
+ delayMs: z.number().int().positive(),
+ oneTime: z.boolean(),
+});
+
+export const scheduleSchema = z.discriminatedUnion("type", [
+ cronScheduleSchema,
+ fixedScheduleSchema,
+]);
+
+// Message configuration
+export const messageConfigSchema = z.object({
+ content: z.string(),
+ projectId: z.string(),
+ baseSessionId: z.string().nullable(),
+});
+
+// Job status
+export const jobStatusSchema = z.enum(["success", "failed"]);
+
+// Concurrency policy
+export const concurrencyPolicySchema = z.enum(["skip", "run"]);
+
+// Scheduler job
+export const schedulerJobSchema = z.object({
+ id: z.string(),
+ name: z.string(),
+ schedule: scheduleSchema,
+ message: messageConfigSchema,
+ enabled: z.boolean(),
+ concurrencyPolicy: concurrencyPolicySchema,
+ createdAt: z.string().datetime(),
+ lastRunAt: z.string().datetime().nullable(),
+ lastRunStatus: jobStatusSchema.nullable(),
+});
+
+// Config file schema
+export const schedulerConfigSchema = z.object({
+ jobs: z.array(schedulerJobSchema),
+});
+
+// Type exports
+export type CronSchedule = z.infer<typeof cronScheduleSchema>;
+export type FixedSchedule = z.infer<typeof fixedScheduleSchema>;
+export type Schedule = z.infer<typeof scheduleSchema>;
+export type MessageConfig = z.infer<typeof messageConfigSchema>;
+export type JobStatus = z.infer<typeof jobStatusSchema>;
+export type ConcurrencyPolicy = z.infer<typeof concurrencyPolicySchema>;
+export type SchedulerJob = z.infer<typeof schedulerJobSchema>;
+export type SchedulerConfig = z.infer<typeof schedulerConfigSchema>;
+
+// New job creation schema (without runtime fields)
+export const newSchedulerJobSchema = schedulerJobSchema
+ .omit({
+ id: true,
+ createdAt: true,
+ lastRunAt: true,
+ lastRunStatus: true,
+ })
+ .extend({
+ enabled: z.boolean().default(true),
+ concurrencyPolicy: concurrencyPolicySchema.default("skip"),
+ });
+
+export type NewSchedulerJob = z.infer<typeof newSchedulerJobSchema>;
+
+// Job update schema (partial fields)
+export const updateSchedulerJobSchema = schedulerJobSchema.partial().pick({
+ name: true,
+ schedule: true,
+ message: true,
+ enabled: true,
+ concurrencyPolicy: true,
+});
+
+export type UpdateSchedulerJob = z.infer<typeof updateSchedulerJobSchema>;
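A quick sketch of how the creation schema behaves, showing the defaults applied when optional fields are omitted:

```ts
import { newSchedulerJobSchema } from "./schema";

// Validate an incoming payload the same way the controller does.
const parsed = newSchedulerJobSchema.safeParse({
  name: "Nightly summary",
  schedule: { type: "cron", expression: "0 0 * * *" },
  message: {
    content: "Summarize today's sessions",
    projectId: "project-1",
    baseSessionId: null,
  },
});

if (parsed.success) {
  console.log(parsed.data.enabled); // true (schema default)
  console.log(parsed.data.concurrencyPolicy); // "skip" (schema default)
} else {
  console.error(parsed.error.issues);
}
```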
diff --git a/src/server/hono/route.ts b/src/server/hono/route.ts
index 9b4715f..0136be4 100644
--- a/src/server/hono/route.ts
+++ b/src/server/hono/route.ts
@@ -18,6 +18,7 @@ import { CommitRequestSchema, PushRequestSchema } from "../core/git/schema";
import { EnvService } from "../core/platform/services/EnvService";
import { UserConfigService } from "../core/platform/services/UserConfigService";
import { ProjectController } from "../core/project/presentation/ProjectController";
+import { SchedulerController } from "../core/scheduler/presentation/SchedulerController";
import type { VirtualConversationDatabase } from "../core/session/infrastructure/VirtualConversationDatabase";
import { SessionController } from "../core/session/presentation/SessionController";
import type { SessionMetaService } from "../core/session/services/SessionMetaService";
@@ -40,6 +41,7 @@ export const routes = (app: HonoAppType) =>
const sseController = yield* SSEController;
const fileSystemController = yield* FileSystemController;
const claudeCodeController = yield* ClaudeCodeController;
+ const schedulerController = yield* SchedulerController;
// services
const envService = yield* EnvService;
@@ -440,6 +442,12 @@ export const routes = (app: HonoAppType) =>
);
})
+ /**
+ * SchedulerController Routes
+ */
+
+ .route("/scheduler", schedulerController.app)
+
/**
* FileSystemController Routes
*/
From ef4521750fd784fd5921cdeef115c3e91dbe6f08 Mon Sep 17 00:00:00 2001
From: d-kimsuon
Date: Sat, 25 Oct 2025 14:40:44 +0900
Subject: [PATCH 3/4] implement frontend
---
.../[projectId]/components/chatForm/index.ts | 2 +
.../chatForm/useMessageCompletion.ts | 161 +++++++
.../sessionSidebar/SchedulerTab.tsx | 364 +++++++++++++++
.../sessionSidebar/SessionSidebar.tsx | 16 +-
.../scheduler/CronExpressionBuilder.tsx | 351 +++++++++++++++
.../scheduler/SchedulerJobDialog.tsx | 421 ++++++++++++++++++
src/components/ui/label.tsx | 24 +
src/components/ui/switch.tsx | 50 +++
src/hooks/useScheduler.ts | 183 ++++++++
src/lib/api/queries.ts | 13 +
src/lib/i18n/locales/en/messages.json | 244 +++++++---
src/lib/i18n/locales/en/messages.ts | 2 +-
src/lib/i18n/locales/ja/messages.json | 244 +++++++---
src/lib/i18n/locales/ja/messages.ts | 2 +-
src/server/core/scheduler/domain/Scheduler.ts | 24 +-
.../presentation/SchedulerController.ts | 133 +++---
src/server/hono/route.ts | 107 ++++-
17 files changed, 2156 insertions(+), 185 deletions(-)
create mode 100644 src/app/projects/[projectId]/components/chatForm/useMessageCompletion.ts
create mode 100644 src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SchedulerTab.tsx
create mode 100644 src/components/scheduler/CronExpressionBuilder.tsx
create mode 100644 src/components/scheduler/SchedulerJobDialog.tsx
create mode 100644 src/components/ui/label.tsx
create mode 100644 src/components/ui/switch.tsx
create mode 100644 src/hooks/useScheduler.ts
diff --git a/src/app/projects/[projectId]/components/chatForm/index.ts b/src/app/projects/[projectId]/components/chatForm/index.ts
index ca75a3b..bb31081 100644
--- a/src/app/projects/[projectId]/components/chatForm/index.ts
+++ b/src/app/projects/[projectId]/components/chatForm/index.ts
@@ -8,3 +8,5 @@ export {
useContinueSessionProcessMutation,
useCreateSessionProcessMutation,
} from "./useChatMutations";
+export type { UseMessageCompletionResult } from "./useMessageCompletion";
+export { useMessageCompletion } from "./useMessageCompletion";
diff --git a/src/app/projects/[projectId]/components/chatForm/useMessageCompletion.ts b/src/app/projects/[projectId]/components/chatForm/useMessageCompletion.ts
new file mode 100644
index 0000000..fdb86c0
--- /dev/null
+++ b/src/app/projects/[projectId]/components/chatForm/useMessageCompletion.ts
@@ -0,0 +1,161 @@
+import { useCallback, useRef, useState } from "react";
+import type { CommandCompletionRef } from "./CommandCompletion";
+import type { FileCompletionRef } from "./FileCompletion";
+
+export interface UseMessageCompletionResult {
+ cursorPosition: {
+ relative: { top: number; left: number };
+ absolute: { top: number; left: number };
+ };
+  containerRef: React.RefObject<HTMLDivElement | null>;
+  textareaRef: React.RefObject<HTMLTextAreaElement | null>;
+  commandCompletionRef: React.RefObject<CommandCompletionRef | null>;
+  fileCompletionRef: React.RefObject<FileCompletionRef | null>;
+ getCursorPosition: () =>
+ | {
+ relative: { top: number; left: number };
+ absolute: { top: number; left: number };
+ }
+ | undefined;
+ handleChange: (value: string, onChange: (value: string) => void) => void;
+ handleKeyDown: (e: React.KeyboardEvent) => boolean;
+ handleCommandSelect: (
+ command: string,
+ onSelect: (command: string) => void,
+ ) => void;
+ handleFileSelect: (
+ filePath: string,
+ onSelect: (filePath: string) => void,
+ ) => void;
+}
+
+/**
+ * Message input with command and file completion support
+ */
+export function useMessageCompletion(): UseMessageCompletionResult {
+ const [cursorPosition, setCursorPosition] = useState<{
+ relative: { top: number; left: number };
+ absolute: { top: number; left: number };
+ }>({ relative: { top: 0, left: 0 }, absolute: { top: 0, left: 0 } });
+
+  const containerRef = useRef<HTMLDivElement>(null);
+  const textareaRef = useRef<HTMLTextAreaElement>(null);
+  const commandCompletionRef = useRef<CommandCompletionRef>(null);
+  const fileCompletionRef = useRef<FileCompletionRef>(null);
+
+ const getCursorPosition = useCallback(() => {
+ const textarea = textareaRef.current;
+ const container = containerRef.current;
+ if (textarea === null || container === null) return undefined;
+
+ const cursorPos = textarea.selectionStart;
+ const textBeforeCursor = textarea.value.substring(0, cursorPos);
+ const textAfterCursor = textarea.value.substring(cursorPos);
+
+ const pre = document.createTextNode(textBeforeCursor);
+ const post = document.createTextNode(textAfterCursor);
+ const caret = document.createElement("span");
+ caret.innerHTML = " ";
+
+ const mirrored = document.createElement("div");
+
+ mirrored.innerHTML = "";
+ mirrored.append(pre, caret, post);
+
+ const textareaStyles = window.getComputedStyle(textarea);
+ for (const property of [
+ "border",
+ "boxSizing",
+ "fontFamily",
+ "fontSize",
+ "fontWeight",
+ "letterSpacing",
+ "lineHeight",
+ "padding",
+ "textDecoration",
+ "textIndent",
+ "textTransform",
+ "whiteSpace",
+ "wordSpacing",
+ "wordWrap",
+ ] as const) {
+ mirrored.style[property] = textareaStyles[property];
+ }
+
+ mirrored.style.visibility = "hidden";
+ container.prepend(mirrored);
+
+ const caretRect = caret.getBoundingClientRect();
+ const containerRect = container.getBoundingClientRect();
+
+ container.removeChild(mirrored);
+
+ return {
+ relative: {
+ top: caretRect.top - containerRect.top - textarea.scrollTop,
+ left: caretRect.left - containerRect.left - textarea.scrollLeft,
+ },
+ absolute: {
+ top: caretRect.top - textarea.scrollTop,
+ left: caretRect.left - textarea.scrollLeft,
+ },
+ };
+ }, []);
+
+ const handleChange = useCallback(
+ (value: string, onChange: (value: string) => void) => {
+ if (value.endsWith("@") || value.endsWith("/")) {
+ const position = getCursorPosition();
+ if (position) {
+ setCursorPosition(position);
+ }
+ }
+ onChange(value);
+ },
+ [getCursorPosition],
+ );
+
+ const handleKeyDown = useCallback(
+ (e: React.KeyboardEvent): boolean => {
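+      // Give the file-completion popup first chance to consume the key event,
+      // then the command-completion popup; return true when either handled it.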
+ if (fileCompletionRef.current?.handleKeyDown(e)) {
+ return true;
+ }
+
+ if (commandCompletionRef.current?.handleKeyDown(e)) {
+ return true;
+ }
+
+ return false;
+ },
+ [],
+ );
+
+ const handleCommandSelect = useCallback(
+ (command: string, onSelect: (command: string) => void) => {
+ onSelect(command);
+ textareaRef.current?.focus();
+ },
+ [],
+ );
+
+ const handleFileSelect = useCallback(
+ (filePath: string, onSelect: (filePath: string) => void) => {
+ onSelect(filePath);
+ textareaRef.current?.focus();
+ },
+ [],
+ );
+
+ return {
+ cursorPosition,
+ containerRef,
+ textareaRef,
+ commandCompletionRef,
+ fileCompletionRef,
+ getCursorPosition,
+ handleChange,
+ handleKeyDown,
+ handleCommandSelect,
+ handleFileSelect,
+ };
+}
diff --git a/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SchedulerTab.tsx b/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SchedulerTab.tsx
new file mode 100644
index 0000000..ad64133
--- /dev/null
+++ b/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SchedulerTab.tsx
@@ -0,0 +1,364 @@
+"use client";
+
+import { Trans, useLingui } from "@lingui/react";
+import { EditIcon, PlusIcon, RefreshCwIcon, TrashIcon } from "lucide-react";
+import { type FC, useState } from "react";
+import { toast } from "sonner";
+import { SchedulerJobDialog } from "@/components/scheduler/SchedulerJobDialog";
+import { Badge } from "@/components/ui/badge";
+import { Button } from "@/components/ui/button";
+import {
+ Dialog,
+ DialogContent,
+ DialogDescription,
+ DialogFooter,
+ DialogHeader,
+ DialogTitle,
+} from "@/components/ui/dialog";
+import {
+ type NewSchedulerJob,
+ type SchedulerJob,
+ useCreateSchedulerJob,
+ useDeleteSchedulerJob,
+ useSchedulerJobs,
+ useUpdateSchedulerJob,
+} from "@/hooks/useScheduler";
+
+export const SchedulerTab: FC<{ projectId: string; sessionId: string }> = ({
+ projectId,
+ sessionId,
+}) => {
+ const { i18n } = useLingui();
+ const { data: jobs, isLoading, error, refetch } = useSchedulerJobs();
+ const createJob = useCreateSchedulerJob();
+ const updateJob = useUpdateSchedulerJob();
+ const deleteJob = useDeleteSchedulerJob();
+
+ const [dialogOpen, setDialogOpen] = useState(false);
+  const [editingJob, setEditingJob] = useState<SchedulerJob | null>(null);
+ const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
+  const [deletingJobId, setDeletingJobId] = useState<string | null>(null);
+
+ const handleCreateJob = (job: NewSchedulerJob) => {
+ createJob.mutate(job, {
+ onSuccess: () => {
+ toast.success(
+ i18n._({
+ id: "scheduler.job.created",
+ message: "Job created successfully",
+ }),
+ );
+ setDialogOpen(false);
+ },
+ onError: (error) => {
+ toast.error(
+ i18n._({
+ id: "scheduler.job.create_failed",
+ message: "Failed to create job",
+ }),
+ {
+ description: error.message,
+ },
+ );
+ },
+ });
+ };
+
+ const handleUpdateJob = (job: NewSchedulerJob) => {
+ if (!editingJob) return;
+
+ updateJob.mutate(
+ {
+ id: editingJob.id,
+ updates: job,
+ },
+ {
+ onSuccess: () => {
+ toast.success(
+ i18n._({
+ id: "scheduler.job.updated",
+ message: "Job updated successfully",
+ }),
+ );
+ setDialogOpen(false);
+ setEditingJob(null);
+ },
+ onError: (error) => {
+ toast.error(
+ i18n._({
+ id: "scheduler.job.update_failed",
+ message: "Failed to update job",
+ }),
+ {
+ description: error.message,
+ },
+ );
+ },
+ },
+ );
+ };
+
+ const handleDeleteConfirm = () => {
+ if (!deletingJobId) return;
+
+ deleteJob.mutate(deletingJobId, {
+ onSuccess: () => {
+ toast.success(
+ i18n._({
+ id: "scheduler.job.deleted",
+ message: "Job deleted successfully",
+ }),
+ );
+ setDeleteDialogOpen(false);
+ setDeletingJobId(null);
+ },
+ onError: (error) => {
+ toast.error(
+ i18n._({
+ id: "scheduler.job.delete_failed",
+ message: "Failed to delete job",
+ }),
+ {
+ description: error.message,
+ },
+ );
+ },
+ });
+ };
+
+ const handleEditClick = (job: SchedulerJob) => {
+ setEditingJob(job);
+ setDialogOpen(true);
+ };
+
+ const handleDeleteClick = (jobId: string) => {
+ setDeletingJobId(jobId);
+ setDeleteDialogOpen(true);
+ };
+
+ const formatSchedule = (job: SchedulerJob) => {
+ if (job.schedule.type === "cron") {
+ return `Cron: ${job.schedule.expression}`;
+ }
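+    // Fixed delays are stored in milliseconds: 3,600,000 ms = 1 hour and
+    // 60,000 ms = 1 minute.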
+ const hours = Math.floor(job.schedule.delayMs / 3600000);
+ const minutes = Math.floor((job.schedule.delayMs % 3600000) / 60000);
+ const timeStr =
+ hours > 0
+ ? `${hours}h ${minutes}m`
+ : minutes > 0
+ ? `${minutes}m`
+ : `${job.schedule.delayMs}ms`;
+ return `${job.schedule.oneTime ? "Once" : "Recurring"}: ${timeStr}`;
+ };
+
+ const formatLastRun = (lastRunAt: string | null) => {
+ if (!lastRunAt) return "Never";
+ const date = new Date(lastRunAt);
+ return date.toLocaleString();
+ };
+
+ return (
+
+
+
+
+
+
+
+
refetch()}
+ variant="ghost"
+ size="sm"
+ className="h-7 w-7 p-0"
+ disabled={isLoading}
+ title={i18n._({ id: "common.reload", message: "Reload" })}
+ >
+
+
+
{
+ setEditingJob(null);
+ setDialogOpen(true);
+ }}
+ variant="ghost"
+ size="sm"
+ className="h-7 w-7 p-0"
+ title={i18n._({
+ id: "scheduler.create_job",
+ message: "Create Job",
+ })}
+ >
+
+
+
+
+
+
+
+ {isLoading && (
+
+ )}
+
+ {error && (
+
+
+
+ )}
+
+ {jobs && jobs.length === 0 && (
+
+
+
+ )}
+
+ {jobs && jobs.length > 0 && (
+
+ {jobs.map((job) => (
+
+
+
+
+
+ {job.name}
+
+
+ {job.enabled ? (
+
+ ) : (
+
+ )}
+
+
+
+ {formatSchedule(job)}
+
+
+
+ handleEditClick(job)}
+ >
+
+
+ handleDeleteClick(job.id)}
+ >
+
+
+
+
+
+ {job.lastRunAt && (
+
+
+
+
+ {formatLastRun(job.lastRunAt)}
+
+ {job.lastRunStatus && (
+
+ {job.lastRunStatus}
+
+ )}
+
+
+ )}
+
+ ))}
+
+ )}
+
+
+ {/* Create/Edit Dialog */}
+
{
+ setDialogOpen(open);
+ if (!open) setEditingJob(null);
+ }}
+ job={editingJob}
+ projectId={projectId}
+ currentSessionId={sessionId}
+ onSubmit={editingJob ? handleUpdateJob : handleCreateJob}
+ isSubmitting={createJob.isPending || updateJob.isPending}
+ />
+
+ {/* Delete Confirmation Dialog */}
+
+
+
+
+
+
+
+
+
+
+
+ {
+ setDeleteDialogOpen(false);
+ setDeletingJobId(null);
+ }}
+ disabled={deleteJob.isPending}
+ >
+
+
+
+ {deleteJob.isPending ? (
+
+ ) : (
+
+ )}
+
+
+
+
+
+ );
+};
diff --git a/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SessionSidebar.tsx b/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SessionSidebar.tsx
index 13bf3f7..2bf5eb2 100644
--- a/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SessionSidebar.tsx
+++ b/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SessionSidebar.tsx
@@ -1,7 +1,12 @@
"use client";
import { Trans } from "@lingui/react";
-import { ArrowLeftIcon, MessageSquareIcon, PlugIcon } from "lucide-react";
+import {
+ ArrowLeftIcon,
+ CalendarClockIcon,
+ MessageSquareIcon,
+ PlugIcon,
+} from "lucide-react";
import Link from "next/link";
import { type FC, useMemo } from "react";
import type { SidebarTab } from "@/components/GlobalSidebar";
@@ -16,6 +21,7 @@ import { cn } from "@/lib/utils";
import { useProject } from "../../../../hooks/useProject";
import { McpTab } from "./McpTab";
import { MobileSidebar } from "./MobileSidebar";
+import { SchedulerTab } from "./SchedulerTab";
import { SessionsTab } from "./SessionsTab";
export const SessionSidebar: FC<{
@@ -65,6 +71,14 @@ export const SessionSidebar: FC<{
title: "Show MCP server settings",
content: ,
},
+ {
+ id: "scheduler",
+ icon: CalendarClockIcon,
+ title: "Show scheduler jobs",
+ content: (
+
+ ),
+ },
],
[
sessions,
diff --git a/src/components/scheduler/CronExpressionBuilder.tsx b/src/components/scheduler/CronExpressionBuilder.tsx
new file mode 100644
index 0000000..eb76cd0
--- /dev/null
+++ b/src/components/scheduler/CronExpressionBuilder.tsx
@@ -0,0 +1,351 @@
+"use client";
+
+import { Trans } from "@lingui/react";
+import { useEffect, useState } from "react";
+import { Input } from "@/components/ui/input";
+import { Label } from "@/components/ui/label";
+import {
+ Select,
+ SelectContent,
+ SelectItem,
+ SelectTrigger,
+ SelectValue,
+} from "@/components/ui/select";
+
+type CronMode = "hourly" | "daily" | "weekly" | "custom";
+
+interface CronExpressionBuilderProps {
+ value: string;
+ onChange: (expression: string) => void;
+}
+
+interface ParsedCron {
+ mode: CronMode;
+ hour: number;
+ minute: number;
+ dayOfWeek: number;
+}
+
+const WEEKDAYS = [
+  {
+    value: 0,
+    labelKey: <Trans id="cron_builder.sunday" message="Sunday" />,
+  },
+  {
+    value: 1,
+    labelKey: <Trans id="cron_builder.monday" message="Monday" />,
+  },
+  {
+    value: 2,
+    labelKey: <Trans id="cron_builder.tuesday" message="Tuesday" />,
+  },
+  {
+    value: 3,
+    labelKey: <Trans id="cron_builder.wednesday" message="Wednesday" />,
+  },
+  {
+    value: 4,
+    labelKey: <Trans id="cron_builder.thursday" message="Thursday" />,
+  },
+  {
+    value: 5,
+    labelKey: <Trans id="cron_builder.friday" message="Friday" />,
+  },
+  {
+    value: 6,
+    labelKey: <Trans id="cron_builder.saturday" message="Saturday" />,
+  },
+];
+
+function parseCronExpression(expression: string): ParsedCron | null {
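+  // A standard 5-field cron expression: "minute hour day-of-month month day-of-week".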
+ const parts = expression.trim().split(/\s+/);
+ if (parts.length !== 5) return null;
+
+ const minute = parts[0];
+ const hour = parts[1];
+ const dayOfWeek = parts[4];
+
+ if (!minute || !hour || !dayOfWeek) return null;
+
+ // Hourly: "0 * * * *"
+ if (hour === "*" && minute === "0") {
+ return { mode: "hourly", hour: 0, minute: 0, dayOfWeek: 0 };
+ }
+
+ // Daily: "0 9 * * *"
+ if (dayOfWeek === "*" && hour !== "*") {
+ const h = Number.parseInt(hour, 10);
+ const m = Number.parseInt(minute, 10);
+ if (!Number.isNaN(h) && !Number.isNaN(m)) {
+ return { mode: "daily", hour: h, minute: m, dayOfWeek: 0 };
+ }
+ }
+
+ // Weekly: "0 9 * * 1"
+ if (dayOfWeek !== "*" && hour !== "*") {
+ const h = Number.parseInt(hour, 10);
+ const m = Number.parseInt(minute, 10);
+ const dow = Number.parseInt(dayOfWeek, 10);
+ if (!Number.isNaN(h) && !Number.isNaN(m) && !Number.isNaN(dow)) {
+ return { mode: "weekly", hour: h, minute: m, dayOfWeek: dow };
+ }
+ }
+
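+  // Anything that does not match the hourly/daily/weekly shapes is treated as
+  // a custom expression and edited as raw text.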
+ return { mode: "custom", hour: 0, minute: 0, dayOfWeek: 0 };
+}
+
+function buildCronExpression(
+ mode: CronMode,
+ hour: number,
+ minute: number,
+ dayOfWeek: number,
+): string {
+ switch (mode) {
+ case "hourly":
+ return "0 * * * *";
+ case "daily":
+ return `${minute} ${hour} * * *`;
+ case "weekly":
+ return `${minute} ${hour} * * ${dayOfWeek}`;
+ case "custom":
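+      // Custom expressions are edited as raw text elsewhere; this default
+      // (daily at midnight) only keeps the switch exhaustive.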
+ return "0 0 * * *";
+ }
+}
+
+function validateCronExpression(expression: string): boolean {
+ const parts = expression.trim().split(/\s+/);
+ if (parts.length !== 5) return false;
+
+ const minute = parts[0];
+ const hour = parts[1];
+ const dayOfMonth = parts[2];
+ const month = parts[3];
+ const dayOfWeek = parts[4];
+
+ if (!minute || !hour || !dayOfMonth || !month || !dayOfWeek) return false;
+
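+  // Only "*" and plain numeric values are accepted here; ranges ("1-5"),
+  // lists ("1,3"), and step values ("*/5") are treated as invalid.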
+ const isValidField = (field: string, min: number, max: number): boolean => {
+ if (field === "*") return true;
+ const num = Number.parseInt(field, 10);
+ return !Number.isNaN(num) && num >= min && num <= max;
+ };
+
+ return (
+ isValidField(minute, 0, 59) &&
+ (hour === "*" || isValidField(hour, 0, 23)) &&
+ (dayOfMonth === "*" || isValidField(dayOfMonth, 1, 31)) &&
+ (month === "*" || isValidField(month, 1, 12)) &&
+ (dayOfWeek === "*" || isValidField(dayOfWeek, 0, 6))
+ );
+}
+
+function getNextExecutionPreview(expression: string): React.ReactNode {
+ const parts = expression.trim().split(/\s+/);
+ if (parts.length !== 5) return "Invalid cron expression";
+
+ const minute = parts[0];
+ const hour = parts[1];
+ const dayOfWeek = parts[4];
+
+ if (!minute || !hour || !dayOfWeek) return "Invalid cron expression";
+
+ if (hour === "*") {
+ return `Every hour at ${minute} minute(s)`;
+ }
+
+ const timeStr = `${hour.padStart(2, "0")}:${minute.padStart(2, "0")}`;
+
+ if (dayOfWeek === "*") {
+ return `Every day at ${timeStr}`;
+ }
+
+ const dow = Number.parseInt(dayOfWeek, 10);
+ const dayName = WEEKDAYS.find((d) => d.value === dow);
+ return (
+ <>
+ Every {dayName ? dayName.labelKey : "unknown"} at {timeStr}
+    </>
+ );
+}
+
+export function CronExpressionBuilder({
+ value,
+ onChange,
+}: CronExpressionBuilderProps) {
+ const parsed = parseCronExpression(value);
+
+ const [mode, setMode] = useState(parsed?.mode || "daily");
+ const [hour, setHour] = useState(parsed?.hour || 9);
+ const [minute, setMinute] = useState(parsed?.minute || 0);
+ const [dayOfWeek, setDayOfWeek] = useState(parsed?.dayOfWeek || 1);
+ const [customExpression, setCustomExpression] = useState(value);
+  const [error, setError] = useState<string | null>(null);
+
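+  // Keep the parent in sync: rebuild the expression whenever the builder
+  // fields change, and validate the raw text before propagating it in custom mode.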
+ useEffect(() => {
+ if (mode === "custom") {
+ if (validateCronExpression(customExpression)) {
+ onChange(customExpression);
+ setError(null);
+ } else {
+ setError("Invalid cron expression");
+ }
+ } else {
+ const expr = buildCronExpression(mode, hour, minute, dayOfWeek);
+ onChange(expr);
+ setCustomExpression(expr);
+ setError(null);
+ }
+ }, [mode, hour, minute, dayOfWeek, customExpression, onChange]);
+
+ const handleModeChange = (newMode: CronMode) => {
+ setMode(newMode);
+ if (newMode !== "custom") {
+ const expr = buildCronExpression(newMode, hour, minute, dayOfWeek);
+ setCustomExpression(expr);
+ }
+ };
+
+ return (
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {mode === "daily" && (
+
+ )}
+
+ {mode === "weekly" && (
+
+
+
+
+
+ setDayOfWeek(Number.parseInt(v, 10))}
+ >
+
+
+
+
+ {WEEKDAYS.map((day) => (
+
+ {day.labelKey}
+
+ ))}
+
+
+
+
+
+ )}
+
+ {mode === "custom" && (
+
+
+
+
+ setCustomExpression(e.target.value)}
+ placeholder="0 9 * * *"
+ />
+
+ )}
+
+
+
+
+
+
+ {error ? (
+ {error}
+ ) : (
+ getNextExecutionPreview(
+ mode === "custom" ? customExpression : value,
+ )
+ )}
+
+
+ :{" "}
+ {mode === "custom" ? customExpression : value}
+
+
+
+ );
+}
diff --git a/src/components/scheduler/SchedulerJobDialog.tsx b/src/components/scheduler/SchedulerJobDialog.tsx
new file mode 100644
index 0000000..d044c00
--- /dev/null
+++ b/src/components/scheduler/SchedulerJobDialog.tsx
@@ -0,0 +1,421 @@
+"use client";
+
+import { Trans, useLingui } from "@lingui/react";
+import { type FC, useCallback, useEffect, useState } from "react";
+import { InlineCompletion } from "@/app/projects/[projectId]/components/chatForm/InlineCompletion";
+import { useMessageCompletion } from "@/app/projects/[projectId]/components/chatForm/useMessageCompletion";
+import { Button } from "@/components/ui/button";
+import {
+ Dialog,
+ DialogContent,
+ DialogDescription,
+ DialogFooter,
+ DialogHeader,
+ DialogTitle,
+} from "@/components/ui/dialog";
+import { Input } from "@/components/ui/input";
+import { Label } from "@/components/ui/label";
+import {
+ Select,
+ SelectContent,
+ SelectItem,
+ SelectTrigger,
+ SelectValue,
+} from "@/components/ui/select";
+import { Switch } from "@/components/ui/switch";
+import { Textarea } from "@/components/ui/textarea";
+import type {
+ NewSchedulerJob,
+ SchedulerJob,
+} from "@/server/core/scheduler/schema";
+import { CronExpressionBuilder } from "./CronExpressionBuilder";
+
+export interface SchedulerJobDialogProps {
+ open: boolean;
+ onOpenChange: (open: boolean) => void;
+ job: SchedulerJob | null;
+ projectId: string;
+ currentSessionId: string;
+ onSubmit: (job: NewSchedulerJob) => void;
+ isSubmitting?: boolean;
+}
+
+type DelayUnit = "minutes" | "hours" | "days";
+
+export const SchedulerJobDialog: FC<SchedulerJobDialogProps> = ({
+ open,
+ onOpenChange,
+ job,
+ projectId,
+ onSubmit,
+ isSubmitting = false,
+}) => {
+ const { _, i18n } = useLingui();
+
+ const [name, setName] = useState("");
+ const [scheduleType, setScheduleType] = useState<"cron" | "fixed">("cron");
+ const [cronExpression, setCronExpression] = useState("0 9 * * *");
+ const [delayValue, setDelayValue] = useState(60); // 60 minutes default
+  const [delayUnit, setDelayUnit] = useState<DelayUnit>("minutes");
+ const [messageContent, setMessageContent] = useState("");
+ const [enabled, setEnabled] = useState(true);
+ const [concurrencyPolicy, setConcurrencyPolicy] = useState<"skip" | "run">(
+ "skip",
+ );
+
+ // Message completion hook
+ const completion = useMessageCompletion();
+
+ // Convert delay value and unit to milliseconds
+ const delayToMs = useCallback((value: number, unit: DelayUnit): number => {
+ switch (unit) {
+ case "minutes":
+ return value * 60 * 1000;
+ case "hours":
+ return value * 60 * 60 * 1000;
+ case "days":
+ return value * 24 * 60 * 60 * 1000;
+ }
+ }, []);
+
+ // Convert milliseconds to delay value and unit
+ const msToDelay = useCallback(
+ (ms: number): { value: number; unit: DelayUnit } => {
+ const minutes = ms / (60 * 1000);
+ const hours = ms / (60 * 60 * 1000);
+ const days = ms / (24 * 60 * 60 * 1000);
+
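+      // Prefer the largest unit that divides the delay evenly (days, then
+      // hours), otherwise fall back to minutes.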
+ if (days >= 1 && days === Math.floor(days)) {
+ return { value: days, unit: "days" };
+ }
+ if (hours >= 1 && hours === Math.floor(hours)) {
+ return { value: hours, unit: "hours" };
+ }
+ return { value: minutes, unit: "minutes" };
+ },
+ [],
+ );
+
+ // Initialize form with job data when editing
+ useEffect(() => {
+ if (job) {
+ setName(job.name);
+ setScheduleType(job.schedule.type);
+ if (job.schedule.type === "cron") {
+ setCronExpression(job.schedule.expression);
+ } else {
+ const { value, unit } = msToDelay(job.schedule.delayMs);
+ setDelayValue(value);
+ setDelayUnit(unit);
+ }
+ setMessageContent(job.message.content);
+ setEnabled(job.enabled);
+ setConcurrencyPolicy(job.concurrencyPolicy);
+ } else {
+ // Reset form for new job
+ setName("");
+ setScheduleType("cron");
+ setCronExpression("0 9 * * *");
+ setDelayValue(60);
+ setDelayUnit("minutes");
+ setMessageContent("");
+ setEnabled(true);
+ setConcurrencyPolicy("skip");
+ }
+ }, [job, msToDelay]);
+
+ const handleSubmit = () => {
+ const delayMs = delayToMs(delayValue, delayUnit);
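+    // Fixed-delay schedules are submitted as one-time jobs (oneTime: true),
+    // while cron schedules recur according to their expression.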
+ const newJob: NewSchedulerJob = {
+ name,
+ schedule:
+ scheduleType === "cron"
+ ? { type: "cron", expression: cronExpression }
+ : { type: "fixed", delayMs, oneTime: true },
+ message: {
+ content: messageContent,
+ projectId,
+ baseSessionId: null,
+ },
+ enabled,
+ concurrencyPolicy,
+ };
+
+ onSubmit(newJob);
+ };
+
+ const isFormValid = name.trim() !== "" && messageContent.trim() !== "";
+
+ return (
+
+
+
+
+ {job ? (
+
+ ) : (
+
+ )}
+
+
+
+
+
+
+
+ {/* Enabled Toggle */}
+
+
+ {/* Job Name */}
+
+
+
+
+ setName(e.target.value)}
+ placeholder={_({
+ id: "scheduler.form.name.placeholder",
+                message: "e.g. Daily Report",
+ })}
+ disabled={isSubmitting}
+ />
+
+
+ {/* Schedule Type */}
+
+
+
+
+
+ setScheduleType(value)
+ }
+ disabled={isSubmitting}
+ >
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {/* Schedule Configuration */}
+ {scheduleType === "cron" ? (
+
+ ) : (
+
+
+
+
+
+
+ setDelayValue(Number.parseInt(e.target.value, 10))
+ }
+ disabled={isSubmitting}
+ className="flex-1"
+ placeholder="60"
+ />
+ setDelayUnit(value)}
+ disabled={isSubmitting}
+ >
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ )}
+
+ {/* Message Content */}
+
+
+ {/* Concurrency Policy */}
+
+
+
+
+
+ setConcurrencyPolicy(value)
+ }
+ disabled={isSubmitting}
+ >
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ onOpenChange(false)}
+ disabled={isSubmitting}
+ >
+
+
+
+ {isSubmitting ? (
+
+ ) : job ? (
+
+ ) : (
+
+ )}
+
+
+
+
+ );
+};
diff --git a/src/components/ui/label.tsx b/src/components/ui/label.tsx
new file mode 100644
index 0000000..1c46055
--- /dev/null
+++ b/src/components/ui/label.tsx
@@ -0,0 +1,24 @@
+import * as React from "react";
+import { cn } from "@/lib/utils";
+
+export interface LabelProps
+  extends React.LabelHTMLAttributes<HTMLLabelElement> {}
+
+const Label = React.forwardRef<HTMLLabelElement, LabelProps>(
+ ({ className, children, ...props }, ref) => (
+ // biome-ignore lint/a11y/noLabelWithoutControl: Label is used with htmlFor prop or wraps input elements
+
+ {children}
+
+ ),
+);
+Label.displayName = "Label";
+
+export { Label };
diff --git a/src/components/ui/switch.tsx b/src/components/ui/switch.tsx
new file mode 100644
index 0000000..bd741f9
--- /dev/null
+++ b/src/components/ui/switch.tsx
@@ -0,0 +1,50 @@
+"use client";
+
+import * as React from "react";
+import { cn } from "@/lib/utils";
+
+interface SwitchProps
+  extends Omit<React.InputHTMLAttributes<HTMLInputElement>, "type"> {
+ checked?: boolean;
+ onCheckedChange?: (checked: boolean) => void;
+}
+
+const Switch = React.forwardRef<HTMLInputElement, SwitchProps>(
+ ({ className, checked, onCheckedChange, disabled, ...props }, ref) => {
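+    // Exposes a `checked` / `onCheckedChange` API on top of a native checkbox
+    // input while still forwarding the original onChange handler.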
+    const handleChange = (e: React.ChangeEvent<HTMLInputElement>) => {
+ onCheckedChange?.(e.target.checked);
+ props.onChange?.(e);
+ };
+
+ return (
+
+
+
+
+ );
+ },
+);
+Switch.displayName = "Switch";
+
+export { Switch };
diff --git a/src/hooks/useScheduler.ts b/src/hooks/useScheduler.ts
new file mode 100644
index 0000000..ec37fcc
--- /dev/null
+++ b/src/hooks/useScheduler.ts
@@ -0,0 +1,183 @@
+"use client";
+
+import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query";
+import type {
+ NewSchedulerJob,
+ SchedulerJob,
+ UpdateSchedulerJob,
+} from "@/server/core/scheduler/schema";
+import { honoClient } from "../lib/api/client";
+
+/**
+ * Query key factory for scheduler-related queries
+ */
+const schedulerKeys = {
+ all: ["scheduler"] as const,
+ jobs: () => [...schedulerKeys.all, "jobs"] as const,
+ job: (id: string) => [...schedulerKeys.all, "job", id] as const,
+};
+
+/**
+ * Hook to fetch all scheduler jobs
+ *
+ * @example
+ * const { data: jobs, isLoading, error } = useSchedulerJobs();
+ *
+ * @returns Query result containing array of SchedulerJob
+ */
+export const useSchedulerJobs = () => {
+ return useQuery({
+ queryKey: schedulerKeys.jobs(),
+    queryFn: async (): Promise<SchedulerJob[]> => {
+ const response = await honoClient.api.scheduler.jobs.$get();
+ if (!response.ok) {
+ throw new Error("Failed to fetch scheduler jobs");
+ }
+ return response.json();
+ },
+ });
+};
+
+/**
+ * Hook to create a new scheduler job
+ *
+ * @example
+ * const createJob = useCreateSchedulerJob();
+ *
+ * createJob.mutate({
+ * name: "Daily Report",
+ * schedule: { type: "cron", expression: "0 9 * * *" },
+ * message: {
+ * content: "Generate daily report",
+ * projectId: "project-123",
+ * baseSessionId: null,
+ * },
+ * enabled: true,
+ * concurrencyPolicy: "skip",
+ * });
+ *
+ * @returns Mutation result for creating a scheduler job
+ */
+export const useCreateSchedulerJob = () => {
+ const queryClient = useQueryClient();
+
+ return useMutation({
+    mutationFn: async (newJob: NewSchedulerJob): Promise<SchedulerJob> => {
+ const response = await honoClient.api.scheduler.jobs.$post({
+ json: newJob,
+ });
+
+ if (!response.ok) {
+ throw new Error("Failed to create scheduler job");
+ }
+
+ return response.json();
+ },
+ onSuccess: () => {
+ // Invalidate jobs list to refetch
+ void queryClient.invalidateQueries({ queryKey: schedulerKeys.jobs() });
+ },
+ });
+};
+
+/**
+ * Hook to update an existing scheduler job
+ *
+ * @example
+ * const updateJob = useUpdateSchedulerJob();
+ *
+ * updateJob.mutate({
+ * id: "job-123",
+ * updates: {
+ * enabled: false,
+ * name: "Updated Job Name",
+ * },
+ * });
+ *
+ * @returns Mutation result for updating a scheduler job
+ */
+export const useUpdateSchedulerJob = () => {
+ const queryClient = useQueryClient();
+
+ return useMutation({
+ mutationFn: async ({
+ id,
+ updates,
+ }: {
+ id: string;
+ updates: UpdateSchedulerJob;
+    }): Promise<SchedulerJob> => {
+ // TODO: Hono RPC type inference for nested routes (.route()) with $patch is incomplete
+ // This causes a TypeScript error even though the runtime behavior is correct
+ // Possible solutions:
+ // 1. Move scheduler routes directly to main route.ts instead of using .route()
+ // 2. Wait for Hono RPC to improve type inference for nested routes
+ // 3. Use type assertion (currently forbidden by CLAUDE.md)
+ const response = await honoClient.api.scheduler.jobs[":id"].$patch({
+ param: { id },
+ json: updates,
+ });
+
+ if (!response.ok) {
+ if (response.status === 404) {
+ throw new Error("Job not found");
+ }
+ throw new Error("Failed to update scheduler job");
+ }
+
+ return response.json();
+ },
+ onSuccess: (data) => {
+ // Invalidate specific job and jobs list
+ void queryClient.invalidateQueries({
+ queryKey: schedulerKeys.job(data.id),
+ });
+ void queryClient.invalidateQueries({ queryKey: schedulerKeys.jobs() });
+ },
+ });
+};
+
+/**
+ * Hook to delete a scheduler job
+ *
+ * @example
+ * const deleteJob = useDeleteSchedulerJob();
+ *
+ * deleteJob.mutate("job-123", {
+ * onSuccess: () => {
+ * console.log("Job deleted successfully");
+ * },
+ * });
+ *
+ * @returns Mutation result for deleting a scheduler job
+ */
+export const useDeleteSchedulerJob = () => {
+ const queryClient = useQueryClient();
+
+ return useMutation({
+ mutationFn: async (id: string): Promise<{ success: true }> => {
+ const response = await honoClient.api.scheduler.jobs[":id"].$delete({
+ param: { id },
+ });
+
+ if (!response.ok) {
+ if (response.status === 404) {
+ throw new Error("Job not found");
+ }
+ throw new Error("Failed to delete scheduler job");
+ }
+
+ return response.json();
+ },
+ onSuccess: (_, deletedId) => {
+ // Invalidate specific job and jobs list
+ void queryClient.invalidateQueries({
+ queryKey: schedulerKeys.job(deletedId),
+ });
+ void queryClient.invalidateQueries({ queryKey: schedulerKeys.jobs() });
+ },
+ });
+};
+
+// Export types for external use
+export type { SchedulerJob, NewSchedulerJob, UpdateSchedulerJob };
diff --git a/src/lib/api/queries.ts b/src/lib/api/queries.ts
index ce9e18a..e58534c 100644
--- a/src/lib/api/queries.ts
+++ b/src/lib/api/queries.ts
@@ -249,3 +249,16 @@ export const claudeCodeFeaturesQuery = {
return await response.json();
},
} as const;
+
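+// Uses the same ["scheduler", "jobs"] query key as the useSchedulerJobs hook,
+// so both read and invalidate a single cache entry.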
+export const schedulerJobsQuery = {
+ queryKey: ["scheduler", "jobs"],
+ queryFn: async () => {
+ const response = await honoClient.api.scheduler.jobs.$get();
+
+ if (!response.ok) {
+ throw new Error(`Failed to fetch scheduler jobs: ${response.statusText}`);
+ }
+
+ return await response.json();
+ },
+} as const;
diff --git a/src/lib/i18n/locales/en/messages.json b/src/lib/i18n/locales/en/messages.json
index 41314f4..38dd640 100644
--- a/src/lib/i18n/locales/en/messages.json
+++ b/src/lib/i18n/locales/en/messages.json
@@ -23,16 +23,16 @@
"origin": [["src/components/SettingsControls.tsx", 306]],
"translation": "Select theme"
},
- "Reload MCP servers": {
+ "Close sidebar": {
"placeholders": {},
"comments": [],
"origin": [
[
- "src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/McpTab.tsx",
- 42
+ "src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
+ 179
]
],
- "translation": "Reload MCP servers"
+ "translation": "Close sidebar"
},
"Type your message... (Start with / for commands, @ for files, Enter to send)": {
"placeholders": {},
@@ -79,16 +79,16 @@
],
"translation": "Type your message... (Start with / for commands, @ for files, Shift+Enter to send)"
},
- "Close sidebar": {
+ "Reload MCP servers": {
"placeholders": {},
"comments": [],
"origin": [
[
- "src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 173
+ "src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/McpTab.tsx",
+ 42
]
],
- "translation": "Close sidebar"
+ "translation": "Reload MCP servers"
},
"Type your message here... (Start with / for commands, @ for files, Enter to send)": {
"placeholders": {},
@@ -136,6 +136,14 @@
],
"translation": "Available commands"
},
+ "Message input with completion support": {
+ "placeholders": {},
+ "comments": [],
+ "origin": [
+ ["src/app/projects/[projectId]/components/chatForm/ChatInput.tsx", 210]
+ ],
+ "translation": "Message input with completion support"
+ },
"Uncommitted changes": {
"placeholders": {},
"comments": [],
@@ -153,7 +161,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 303
+ 302
]
],
"translation": "Failed to commit"
@@ -164,7 +172,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 321
+ 320
]
],
"translation": "Failed to push"
@@ -175,7 +183,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 356
+ 355
]
],
"translation": "Retry Push"
@@ -186,7 +194,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 369
+ 368
]
],
"translation": "Failed to commit and push"
@@ -197,7 +205,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 388
+ 387
]
],
"translation": "Compare from"
@@ -208,19 +216,11 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 394
+ 393
]
],
"translation": "Compare to"
},
- "Message input with completion support": {
- "placeholders": {},
- "comments": [],
- "origin": [
- ["src/app/projects/[projectId]/components/chatForm/ChatInput.tsx", 210]
- ],
- "translation": "Message input with completion support"
- },
"assistant.tool.message_count": {
"message": "{count} messages",
"placeholders": {
@@ -288,7 +288,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 197
+ 203
],
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SessionSidebar.tsx",
@@ -414,7 +414,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 567
+ 566
]
],
"translation": "Commit"
@@ -426,7 +426,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 602
+ 601
]
],
"translation": "Commit & Push"
@@ -438,7 +438,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 463
+ 462
]
],
"translation": "Commit Changes"
@@ -450,7 +450,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 535
+ 534
]
],
"translation": "Commit message"
@@ -462,7 +462,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 596
+ 595
]
],
"translation": "Committing & Pushing..."
@@ -474,7 +474,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 561
+ 560
]
],
"translation": "Committing..."
@@ -531,6 +531,13 @@
"origin": [["src/app/projects/components/CreateProjectDialog.tsx", 106]],
"translation": "Creating..."
},
+ "cron_builder.cron_expression": {
+ "translation": "Cron Expression",
+ "message": "Cron Expression",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 281]]
+ },
"directory_picker.current": {
"message": "Current:",
"placeholders": {},
@@ -538,6 +545,20 @@
"origin": [["src/app/projects/components/DirectoryPicker.tsx", 38]],
"translation": "Current:"
},
+ "cron_builder.custom": {
+ "translation": "Custom",
+ "message": "Custom",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 192]]
+ },
+ "cron_builder.daily": {
+ "translation": "Daily",
+ "message": "Daily",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 186]]
+ },
"settings.theme.dark": {
"message": "Dark",
"placeholders": {},
@@ -545,6 +566,13 @@
"origin": [["src/components/SettingsControls.tsx", 313]],
"translation": "Dark"
},
+ "cron_builder.day_of_week": {
+ "translation": "Day of Week",
+ "message": "Day of Week",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 231]]
+ },
"settings.permission.mode.default": {
"message": "Default (Ask permission)",
"placeholders": {},
@@ -559,7 +587,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 492
+ 491
]
],
"translation": "Deselect All"
@@ -592,7 +620,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 618
+ 617
]
],
"translation": "Enter a commit message"
@@ -644,6 +672,13 @@
"origin": [["src/components/SystemInfoCard.tsx", 146]],
"translation": "Executable"
},
+ "cron_builder.expression": {
+ "translation": "Expression",
+ "message": "Expression",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 303]]
+ },
"mcp.error.load_failed": {
"message": "Failed to load MCP servers: {error}",
"placeholders": {
@@ -705,6 +740,13 @@
],
"translation": "files changed"
},
+ "cron_builder.friday": {
+ "translation": "Friday",
+ "message": "Friday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 36]]
+ },
"settings.session.hide_no_user_message": {
"message": "Hide sessions without user messages",
"placeholders": {},
@@ -712,6 +754,23 @@
"origin": [["src/components/SettingsControls.tsx", 117]],
"translation": "Hide sessions without user messages"
},
+ "cron_builder.hour": {
+ "translation": "Hour (0-23)",
+ "message": "Hour (0-23)",
+ "placeholders": {},
+ "comments": [],
+ "origin": [
+ ["src/components/scheduler/CronExpressionBuilder.tsx", 202],
+ ["src/components/scheduler/CronExpressionBuilder.tsx", 252]
+ ]
+ },
+ "cron_builder.hourly": {
+ "translation": "Hourly",
+ "message": "Hourly",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 183]]
+ },
"user.content.image": {
"message": "Image",
"placeholders": {},
@@ -731,7 +790,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/conversationList/AssistantConversationContent.tsx",
- 198
+ 170
]
],
"translation": "Input Parameters"
@@ -776,7 +835,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 657
+ 656
]
],
"translation": "Loading diff..."
@@ -795,7 +854,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 115
+ 119
],
["src/components/GlobalSidebar.tsx", 67]
],
@@ -822,7 +881,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 413
+ 412
],
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/McpTab.tsx",
@@ -873,6 +932,23 @@
"origin": [["src/app/projects/components/ProjectList.tsx", 71]],
"translation": "Messages:"
},
+ "cron_builder.minute": {
+ "translation": "Minute (0-59)",
+ "message": "Minute (0-59)",
+ "placeholders": {},
+ "comments": [],
+ "origin": [
+ ["src/components/scheduler/CronExpressionBuilder.tsx", 214],
+ ["src/components/scheduler/CronExpressionBuilder.tsx", 264]
+ ]
+ },
+ "cron_builder.monday": {
+ "translation": "Monday",
+ "message": "Monday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 32]]
+ },
"sessions.new": {
"message": "New",
"placeholders": {},
@@ -946,7 +1022,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 136
+ 140
]
],
"translation": "Notifications"
@@ -998,6 +1074,13 @@
"origin": [["src/lib/notifications.tsx", 107]],
"translation": "Pop"
},
+ "cron_builder.preview": {
+ "translation": "Preview",
+ "message": "Preview",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 293]]
+ },
"chat.status.processing": {
"message": "Processing...",
"placeholders": {},
@@ -1021,7 +1104,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 582
+ 581
]
],
"translation": "Push"
@@ -1033,7 +1116,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 579
+ 578
]
],
"translation": "Pushing..."
@@ -1086,6 +1169,20 @@
],
"translation": "Running"
},
+ "cron_builder.saturday": {
+ "translation": "Saturday",
+ "message": "Saturday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 37]]
+ },
+ "cron_builder.schedule_type": {
+ "translation": "Schedule Type",
+ "message": "Schedule Type",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 175]]
+ },
"conversation.error.schema": {
"message": "Schema Error",
"placeholders": {},
@@ -1131,7 +1228,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 484
+ 483
]
],
"translation": "Select All"
@@ -1143,7 +1240,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 613
+ 612
]
],
"translation": "Select at least one file"
@@ -1188,7 +1285,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 126
+ 130
]
],
"translation": "Session Display"
@@ -1219,7 +1316,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 278
+ 284
],
["src/components/GlobalSidebar.tsx", 44]
],
@@ -1239,7 +1336,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 252
+ 258
]
],
"translation": "Show MCP server settings"
@@ -1258,11 +1355,23 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 225
+ 231
]
],
"translation": "Show session list"
},
+ "system.info.tab.title": {
+ "message": "Show system information",
+ "placeholders": {},
+ "comments": [],
+ "origin": [
+ [
+ "src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
+ 309
+ ]
+ ],
+ "translation": "Show system information"
+ },
"chat.button.start": {
"message": "Start Chat",
"placeholders": {},
@@ -1281,6 +1390,13 @@
],
"translation": "Start New Chat"
},
+ "cron_builder.sunday": {
+ "translation": "Sunday",
+ "message": "Sunday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 31]]
+ },
"settings.theme.system": {
"message": "System",
"placeholders": {},
@@ -1342,7 +1458,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/conversationList/AssistantConversationContent.tsx",
- 89
+ 64
]
],
"translation": "Thinking"
@@ -1359,17 +1475,24 @@
],
"translation": "This conversation entry failed to parse correctly. This might indicate a format change or parsing issue."
},
+ "cron_builder.thursday": {
+ "translation": "Thursday",
+ "message": "Thursday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 35]]
+ },
"assistant.tool.tool_id": {
- "translation": "Tool ID",
"message": "Tool ID",
"placeholders": {},
"comments": [],
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/conversationList/AssistantConversationContent.tsx",
- 190
+ 162
]
- ]
+ ],
+ "translation": "Tool ID"
},
"assistant.tool.result": {
"message": "Tool Result",
@@ -1378,7 +1501,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/conversationList/AssistantConversationContent.tsx",
- 215
+ 187
]
],
"translation": "Tool Result"
@@ -1409,6 +1532,13 @@
"origin": [["src/app/projects/[projectId]/error.tsx", 68]],
"translation": "Try Again"
},
+ "cron_builder.tuesday": {
+ "translation": "Tuesday",
+ "message": "Tuesday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 33]]
+ },
"settings.session.unify_same_title": {
"message": "Unify sessions with same title",
"placeholders": {},
@@ -1500,7 +1630,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/conversationList/AssistantConversationContent.tsx",
- 145
+ 120
]
],
"translation": "View Task"
@@ -1512,6 +1642,20 @@
"origin": [["src/app/projects/[projectId]/error.tsx", 40]],
"translation": "We encountered an error while loading this project"
},
+ "cron_builder.wednesday": {
+ "translation": "Wednesday",
+ "message": "Wednesday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 34]]
+ },
+ "cron_builder.weekly": {
+ "translation": "Weekly",
+ "message": "Weekly",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 189]]
+ },
"projects.page.title": {
"message": "Your Projects",
"placeholders": {},
diff --git a/src/lib/i18n/locales/en/messages.ts b/src/lib/i18n/locales/en/messages.ts
index c3b48f1..8ad72f9 100644
--- a/src/lib/i18n/locales/en/messages.ts
+++ b/src/lib/i18n/locales/en/messages.ts
@@ -1 +1 @@
-/*eslint-disable*/import type{Messages}from"@lingui/core";export const messages=JSON.parse("{\"Available commands\":[\"Available commands\"],\"Available files and directories\":[\"Available files and directories\"],\"Close sidebar\":[\"Close sidebar\"],\"Compare from\":[\"Compare from\"],\"Compare to\":[\"Compare to\"],\"Failed to commit\":[\"Failed to commit\"],\"Failed to commit and push\":[\"Failed to commit and push\"],\"Failed to push\":[\"Failed to push\"],\"Message input with completion support\":[\"Message input with completion support\"],\"Reload MCP servers\":[\"Reload MCP servers\"],\"Retry Push\":[\"Retry Push\"],\"Select enter key behavior\":[\"Select enter key behavior\"],\"Select language\":[\"Select language\"],\"Select permission mode\":[\"Select permission mode\"],\"Select theme\":[\"Select theme\"],\"Type your message here... (Start with / for commands, @ for files, Command+Enter to send)\":[\"Type your message here... (Start with / for commands, @ for files, Command+Enter to send)\"],\"Type your message here... (Start with / for commands, @ for files, Enter to send)\":[\"Type your message here... (Start with / for commands, @ for files, Enter to send)\"],\"Type your message here... (Start with / for commands, @ for files, Shift+Enter to send)\":[\"Type your message here... (Start with / for commands, @ for files, Shift+Enter to send)\"],\"Type your message... (Start with / for commands, @ for files, Command+Enter to send)\":[\"Type your message... (Start with / for commands, @ for files, Command+Enter to send)\"],\"Type your message... (Start with / for commands, @ for files, Enter to send)\":[\"Type your message... (Start with / for commands, @ for files, Enter to send)\"],\"Type your message... (Start with / for commands, @ for files, Shift+Enter to send)\":[\"Type your message... (Start with / for commands, @ for files, Shift+Enter to send)\"],\"Uncommitted changes\":[\"Uncommitted changes\"],\"assistant.thinking\":[\"Thinking\"],\"assistant.tool.input_parameters\":[\"Input Parameters\"],\"assistant.tool.message_count\":[[\"count\"],\" messages\"],\"assistant.tool.result\":[\"Tool Result\"],\"assistant.tool.task_id\":[\"Task ID\"],\"assistant.tool.tool_id\":[\"Tool ID\"],\"assistant.tool.view_task\":[\"View Task\"],\"assistant.tool.view_task_details\":[\"View Task\"],\"chat.autocomplete.active\":[\"Autocomplete active\"],\"chat.button.start\":[\"Start Chat\"],\"chat.error.send_failed\":[\"Failed to send message. Please try again.\"],\"chat.modal.title\":[\"Start New Chat\"],\"chat.resume\":[\"Resume\"],\"chat.send\":[\"Send\"],\"chat.status.processing\":[\"Processing...\"],\"common.action.cancel\":[\"Cancel\"],\"common.error\":[\"Error\"],\"common.loading\":[\"Loading...\"],\"conversation.error.raw_content\":[\"Raw Content:\"],\"conversation.error.report_issue\":[\"Report this issue\"],\"conversation.error.schema\":[\"Schema Error\"],\"conversation.error.schema_validation\":[\"Schema Validation Error\"],\"conversation.error.schema_validation.description\":[\"This conversation entry failed to parse correctly. 
This might indicate a format change or parsing issue.\"],\"diff.commit\":[\"Commit\"],\"diff.commit.changes\":[\"Commit Changes\"],\"diff.commit.message\":[\"Commit message\"],\"diff.commit.push\":[\"Commit & Push\"],\"diff.committing\":[\"Committing...\"],\"diff.committing.pushing\":[\"Committing & Pushing...\"],\"diff.deselect.all\":[\"Deselect All\"],\"diff.enter.message\":[\"Enter a commit message\"],\"diff.files\":[\"files\"],\"diff.files.changed\":[\"files changed\"],\"diff.loading\":[\"Loading diff...\"],\"diff.push\":[\"Push\"],\"diff.pushing\":[\"Pushing...\"],\"diff.select.all\":[\"Select All\"],\"diff.select.file\":[\"Select at least one file\"],\"directory_picker.current\":[\"Current:\"],\"directory_picker.loading\":[\"Loading...\"],\"directory_picker.no_directories\":[\"No directories found\"],\"directory_picker.select\":[\"Select This Directory\"],\"mcp.error.load_failed\":[\"Failed to load MCP servers: \",[\"error\"]],\"mcp.no.servers\":[\"No MCP servers found\"],\"mcp.title\":[\"MCP Servers\"],\"notification.beep\":[\"Beep\"],\"notification.chime\":[\"Chime\"],\"notification.description\":[\"Select a sound to play when a task completes\"],\"notification.none\":[\"None\"],\"notification.ping\":[\"Ping\"],\"notification.pop\":[\"Pop\"],\"notification.test\":[\"Test\"],\"project.create.action.create\":[\"Create Project\"],\"project.create.action.creating\":[\"Creating...\"],\"project.create.description\":[\"Select a directory to initialize as a Claude Code project. This will run <0>/init0> in the selected directory.\"],\"project.create.selected_directory\":[\"Selected directory:\"],\"project.create.title\":[\"Create New Project\"],\"project.error.back_to_projects\":[\"Back to Projects\"],\"project.error.description\":[\"We encountered an error while loading this project\"],\"project.error.details_title\":[\"Error Details\"],\"project.error.error_id\":[\"Error ID:\"],\"project.error.title\":[\"Failed to load project\"],\"project.error.try_again\":[\"Try Again\"],\"project.new\":[\"New Project\"],\"project.not_found.back_to_projects\":[\"Back to Projects\"],\"project.not_found.description\":[\"The project you are looking for does not exist or has been removed\"],\"project.not_found.title\":[\"Project Not Found\"],\"project_list.last_modified\":[\"Last modified:\"],\"project_list.messages\":[\"Messages:\"],\"project_list.no_projects.description\":[\"No Claude Code projects found in your ~/.claude/projects directory. 
Start a conversation with Claude Code to create your first project.\"],\"project_list.no_projects.title\":[\"No projects found\"],\"project_list.view_conversations\":[\"View Conversations\"],\"projects.page.description\":[\"Browse your Claude Code conversation history and project interactions\"],\"projects.page.loading\":[\"Loading projects...\"],\"projects.page.title\":[\"Your Projects\"],\"session.conversation.abort\":[\"Abort\"],\"session.conversation.in.progress\":[\"Conversation is in progress...\"],\"session.conversation.paused\":[\"Conversation is paused...\"],\"session.processing\":[\"Claude Code is processing...\"],\"session.status.paused\":[\"Paused\"],\"session.status.running\":[\"Running\"],\"sessions.load.more\":[\"Load More\"],\"sessions.new\":[\"New\"],\"sessions.title\":[\"Sessions\"],\"sessions.total\":[\"total\"],\"settings.description\":[\"Display and behavior preferences\"],\"settings.input.enter_key_behavior\":[\"Enter Key Behavior\"],\"settings.input.enter_key_behavior.command_enter\":[\"Command+Enter to send\"],\"settings.input.enter_key_behavior.description\":[\"Choose how the Enter key behaves in message input\"],\"settings.input.enter_key_behavior.enter\":[\"Enter to send\"],\"settings.input.enter_key_behavior.shift_enter\":[\"Shift+Enter to send (default)\"],\"settings.loading\":[\"Loading settings...\"],\"settings.locale\":[\"Language\"],\"settings.locale.description\":[\"Choose your preferred language\"],\"settings.locale.en\":[\"English\"],\"settings.locale.ja\":[\"日本語\"],\"settings.notifications\":[\"Notifications\"],\"settings.permission.mode\":[\"Permission Mode\"],\"settings.permission.mode.accept_edits\":[\"Accept Edits (Auto-approve file edits)\"],\"settings.permission.mode.bypass_permissions\":[\"Bypass Permissions (No prompts)\"],\"settings.permission.mode.default\":[\"Default (Ask permission)\"],\"settings.permission.mode.description\":[\"Control how Claude Code handles permission requests for file operations\"],\"settings.permission.mode.plan\":[\"Plan Mode (Planning only)\"],\"settings.section.notifications\":[\"Notifications\"],\"settings.section.session_display\":[\"Session Display\"],\"settings.section.system_info\":[\"System Information\"],\"settings.session.display\":[\"Session Display\"],\"settings.session.hide_no_user_message\":[\"Hide sessions without user messages\"],\"settings.session.hide_no_user_message.description\":[\"Only show sessions that contain user commands or messages\"],\"settings.session.unify_same_title\":[\"Unify sessions with same title\"],\"settings.session.unify_same_title.description\":[\"Show only the latest session when multiple sessions have the same title\"],\"settings.tab.title\":[\"Settings for display and notifications\"],\"settings.theme\":[\"Theme\"],\"settings.theme.dark\":[\"Dark\"],\"settings.theme.description\":[\"Choose your preferred color theme\"],\"settings.theme.light\":[\"Light\"],\"settings.theme.system\":[\"System\"],\"settings.title\":[\"Settings\"],\"sidebar.back.to.projects\":[\"Back to projects\"],\"sidebar.show.mcp.settings\":[\"Show MCP server settings\"],\"sidebar.show.session.list\":[\"Show session list\"],\"system_info.available_features\":[\"Available Features\"],\"system_info.claude_code\":[\"Claude Code\"],\"system_info.description\":[\"Version and feature information\"],\"system_info.executable_path\":[\"Executable\"],\"system_info.feature.agent_sdk.description\":[\"Uses Claude Agent SDK instead of Claude Code SDK (v1.0.125+)\"],\"system_info.feature.agent_sdk.title\":[\"Claude Agent 
SDK\"],\"system_info.feature.can_use_tool.description\":[\"Dynamically control tool usage permissions and request user approval before tool execution (v1.0.82+)\"],\"system_info.feature.can_use_tool.title\":[\"Tool Use Permission Control\"],\"system_info.feature.unknown.description\":[\"Feature information not available\"],\"system_info.feature.uuid_on_sdk_message.description\":[\"Adds unique identifiers to SDK messages for better tracking (v1.0.86+)\"],\"system_info.feature.uuid_on_sdk_message.title\":[\"Message UUID Support\"],\"system_info.loading\":[\"Loading system information...\"],\"system_info.title\":[\"System Information\"],\"system_info.unknown\":[\"Unknown\"],\"system_info.version_label\":[\"Version\"],\"system_info.viewer_version\":[\"Claude Code Viewer\"],\"user.content.image\":[\"Image\"],\"user.content.image.description\":[\"User uploaded image content\"],\"user.content.unsupported_media\":[\"Unsupported Media\"],\"user.content.unsupported_media.description\":[\"Media type not supported for display\"]}")as Messages;
\ No newline at end of file
+/*eslint-disable*/import type{Messages}from"@lingui/core";export const messages=JSON.parse("{\"Available commands\":[\"Available commands\"],\"Available files and directories\":[\"Available files and directories\"],\"Close sidebar\":[\"Close sidebar\"],\"Compare from\":[\"Compare from\"],\"Compare to\":[\"Compare to\"],\"Failed to commit\":[\"Failed to commit\"],\"Failed to commit and push\":[\"Failed to commit and push\"],\"Failed to push\":[\"Failed to push\"],\"Message input with completion support\":[\"Message input with completion support\"],\"Reload MCP servers\":[\"Reload MCP servers\"],\"Retry Push\":[\"Retry Push\"],\"Select enter key behavior\":[\"Select enter key behavior\"],\"Select language\":[\"Select language\"],\"Select permission mode\":[\"Select permission mode\"],\"Select theme\":[\"Select theme\"],\"Type your message here... (Start with / for commands, @ for files, Command+Enter to send)\":[\"Type your message here... (Start with / for commands, @ for files, Command+Enter to send)\"],\"Type your message here... (Start with / for commands, @ for files, Enter to send)\":[\"Type your message here... (Start with / for commands, @ for files, Enter to send)\"],\"Type your message here... (Start with / for commands, @ for files, Shift+Enter to send)\":[\"Type your message here... (Start with / for commands, @ for files, Shift+Enter to send)\"],\"Type your message... (Start with / for commands, @ for files, Command+Enter to send)\":[\"Type your message... (Start with / for commands, @ for files, Command+Enter to send)\"],\"Type your message... (Start with / for commands, @ for files, Enter to send)\":[\"Type your message... (Start with / for commands, @ for files, Enter to send)\"],\"Type your message... (Start with / for commands, @ for files, Shift+Enter to send)\":[\"Type your message... (Start with / for commands, @ for files, Shift+Enter to send)\"],\"Uncommitted changes\":[\"Uncommitted changes\"],\"assistant.thinking\":[\"Thinking\"],\"assistant.tool.input_parameters\":[\"Input Parameters\"],\"assistant.tool.message_count\":[[\"count\"],\" messages\"],\"assistant.tool.result\":[\"Tool Result\"],\"assistant.tool.task_id\":[\"Task ID\"],\"assistant.tool.tool_id\":[\"Tool ID\"],\"assistant.tool.view_task\":[\"View Task\"],\"assistant.tool.view_task_details\":[\"View Task\"],\"chat.autocomplete.active\":[\"Autocomplete active\"],\"chat.button.start\":[\"Start Chat\"],\"chat.error.send_failed\":[\"Failed to send message. Please try again.\"],\"chat.modal.title\":[\"Start New Chat\"],\"chat.resume\":[\"Resume\"],\"chat.send\":[\"Send\"],\"chat.status.processing\":[\"Processing...\"],\"common.action.cancel\":[\"Cancel\"],\"common.error\":[\"Error\"],\"common.loading\":[\"Loading...\"],\"conversation.error.raw_content\":[\"Raw Content:\"],\"conversation.error.report_issue\":[\"Report this issue\"],\"conversation.error.schema\":[\"Schema Error\"],\"conversation.error.schema_validation\":[\"Schema Validation Error\"],\"conversation.error.schema_validation.description\":[\"This conversation entry failed to parse correctly. 
This might indicate a format change or parsing issue.\"],\"cron_builder.cron_expression\":[\"Cron Expression\"],\"cron_builder.custom\":[\"Custom\"],\"cron_builder.daily\":[\"Daily\"],\"cron_builder.day_of_week\":[\"Day of Week\"],\"cron_builder.expression\":[\"Expression\"],\"cron_builder.friday\":[\"Friday\"],\"cron_builder.hour\":[\"Hour (0-23)\"],\"cron_builder.hourly\":[\"Hourly\"],\"cron_builder.minute\":[\"Minute (0-59)\"],\"cron_builder.monday\":[\"Monday\"],\"cron_builder.preview\":[\"Preview\"],\"cron_builder.saturday\":[\"Saturday\"],\"cron_builder.schedule_type\":[\"Schedule Type\"],\"cron_builder.sunday\":[\"Sunday\"],\"cron_builder.thursday\":[\"Thursday\"],\"cron_builder.tuesday\":[\"Tuesday\"],\"cron_builder.wednesday\":[\"Wednesday\"],\"cron_builder.weekly\":[\"Weekly\"],\"diff.commit\":[\"Commit\"],\"diff.commit.changes\":[\"Commit Changes\"],\"diff.commit.message\":[\"Commit message\"],\"diff.commit.push\":[\"Commit & Push\"],\"diff.committing\":[\"Committing...\"],\"diff.committing.pushing\":[\"Committing & Pushing...\"],\"diff.deselect.all\":[\"Deselect All\"],\"diff.enter.message\":[\"Enter a commit message\"],\"diff.files\":[\"files\"],\"diff.files.changed\":[\"files changed\"],\"diff.loading\":[\"Loading diff...\"],\"diff.push\":[\"Push\"],\"diff.pushing\":[\"Pushing...\"],\"diff.select.all\":[\"Select All\"],\"diff.select.file\":[\"Select at least one file\"],\"directory_picker.current\":[\"Current:\"],\"directory_picker.loading\":[\"Loading...\"],\"directory_picker.no_directories\":[\"No directories found\"],\"directory_picker.select\":[\"Select This Directory\"],\"mcp.error.load_failed\":[\"Failed to load MCP servers: \",[\"error\"]],\"mcp.no.servers\":[\"No MCP servers found\"],\"mcp.title\":[\"MCP Servers\"],\"notification.beep\":[\"Beep\"],\"notification.chime\":[\"Chime\"],\"notification.description\":[\"Select a sound to play when a task completes\"],\"notification.none\":[\"None\"],\"notification.ping\":[\"Ping\"],\"notification.pop\":[\"Pop\"],\"notification.test\":[\"Test\"],\"project.create.action.create\":[\"Create Project\"],\"project.create.action.creating\":[\"Creating...\"],\"project.create.description\":[\"Select a directory to initialize as a Claude Code project. This will run <0>/init0> in the selected directory.\"],\"project.create.selected_directory\":[\"Selected directory:\"],\"project.create.title\":[\"Create New Project\"],\"project.error.back_to_projects\":[\"Back to Projects\"],\"project.error.description\":[\"We encountered an error while loading this project\"],\"project.error.details_title\":[\"Error Details\"],\"project.error.error_id\":[\"Error ID:\"],\"project.error.title\":[\"Failed to load project\"],\"project.error.try_again\":[\"Try Again\"],\"project.new\":[\"New Project\"],\"project.not_found.back_to_projects\":[\"Back to Projects\"],\"project.not_found.description\":[\"The project you are looking for does not exist or has been removed\"],\"project.not_found.title\":[\"Project Not Found\"],\"project_list.last_modified\":[\"Last modified:\"],\"project_list.messages\":[\"Messages:\"],\"project_list.no_projects.description\":[\"No Claude Code projects found in your ~/.claude/projects directory. 
Start a conversation with Claude Code to create your first project.\"],\"project_list.no_projects.title\":[\"No projects found\"],\"project_list.view_conversations\":[\"View Conversations\"],\"projects.page.description\":[\"Browse your Claude Code conversation history and project interactions\"],\"projects.page.loading\":[\"Loading projects...\"],\"projects.page.title\":[\"Your Projects\"],\"session.conversation.abort\":[\"Abort\"],\"session.conversation.in.progress\":[\"Conversation is in progress...\"],\"session.conversation.paused\":[\"Conversation is paused...\"],\"session.processing\":[\"Claude Code is processing...\"],\"session.status.paused\":[\"Paused\"],\"session.status.running\":[\"Running\"],\"sessions.load.more\":[\"Load More\"],\"sessions.new\":[\"New\"],\"sessions.title\":[\"Sessions\"],\"sessions.total\":[\"total\"],\"settings.description\":[\"Display and behavior preferences\"],\"settings.input.enter_key_behavior\":[\"Enter Key Behavior\"],\"settings.input.enter_key_behavior.command_enter\":[\"Command+Enter to send\"],\"settings.input.enter_key_behavior.description\":[\"Choose how the Enter key behaves in message input\"],\"settings.input.enter_key_behavior.enter\":[\"Enter to send\"],\"settings.input.enter_key_behavior.shift_enter\":[\"Shift+Enter to send (default)\"],\"settings.loading\":[\"Loading settings...\"],\"settings.locale\":[\"Language\"],\"settings.locale.description\":[\"Choose your preferred language\"],\"settings.locale.en\":[\"English\"],\"settings.locale.ja\":[\"日本語\"],\"settings.notifications\":[\"Notifications\"],\"settings.permission.mode\":[\"Permission Mode\"],\"settings.permission.mode.accept_edits\":[\"Accept Edits (Auto-approve file edits)\"],\"settings.permission.mode.bypass_permissions\":[\"Bypass Permissions (No prompts)\"],\"settings.permission.mode.default\":[\"Default (Ask permission)\"],\"settings.permission.mode.description\":[\"Control how Claude Code handles permission requests for file operations\"],\"settings.permission.mode.plan\":[\"Plan Mode (Planning only)\"],\"settings.section.notifications\":[\"Notifications\"],\"settings.section.session_display\":[\"Session Display\"],\"settings.section.system_info\":[\"System Information\"],\"settings.session.display\":[\"Session Display\"],\"settings.session.hide_no_user_message\":[\"Hide sessions without user messages\"],\"settings.session.hide_no_user_message.description\":[\"Only show sessions that contain user commands or messages\"],\"settings.session.unify_same_title\":[\"Unify sessions with same title\"],\"settings.session.unify_same_title.description\":[\"Show only the latest session when multiple sessions have the same title\"],\"settings.tab.title\":[\"Settings for display and notifications\"],\"settings.theme\":[\"Theme\"],\"settings.theme.dark\":[\"Dark\"],\"settings.theme.description\":[\"Choose your preferred color theme\"],\"settings.theme.light\":[\"Light\"],\"settings.theme.system\":[\"System\"],\"settings.title\":[\"Settings\"],\"sidebar.back.to.projects\":[\"Back to projects\"],\"sidebar.show.mcp.settings\":[\"Show MCP server settings\"],\"sidebar.show.session.list\":[\"Show session list\"],\"system.info.tab.title\":[\"Show system information\"],\"system_info.available_features\":[\"Available Features\"],\"system_info.claude_code\":[\"Claude Code\"],\"system_info.description\":[\"Version and feature information\"],\"system_info.executable_path\":[\"Executable\"],\"system_info.feature.agent_sdk.description\":[\"Uses Claude Agent SDK instead of Claude Code SDK 
(v1.0.125+)\"],\"system_info.feature.agent_sdk.title\":[\"Claude Agent SDK\"],\"system_info.feature.can_use_tool.description\":[\"Dynamically control tool usage permissions and request user approval before tool execution (v1.0.82+)\"],\"system_info.feature.can_use_tool.title\":[\"Tool Use Permission Control\"],\"system_info.feature.unknown.description\":[\"Feature information not available\"],\"system_info.feature.uuid_on_sdk_message.description\":[\"Adds unique identifiers to SDK messages for better tracking (v1.0.86+)\"],\"system_info.feature.uuid_on_sdk_message.title\":[\"Message UUID Support\"],\"system_info.loading\":[\"Loading system information...\"],\"system_info.title\":[\"System Information\"],\"system_info.unknown\":[\"Unknown\"],\"system_info.version_label\":[\"Version\"],\"system_info.viewer_version\":[\"Claude Code Viewer\"],\"user.content.image\":[\"Image\"],\"user.content.image.description\":[\"User uploaded image content\"],\"user.content.unsupported_media\":[\"Unsupported Media\"],\"user.content.unsupported_media.description\":[\"Media type not supported for display\"]}")as Messages;
\ No newline at end of file
diff --git a/src/lib/i18n/locales/ja/messages.json b/src/lib/i18n/locales/ja/messages.json
index 086b989..f45c2a5 100644
--- a/src/lib/i18n/locales/ja/messages.json
+++ b/src/lib/i18n/locales/ja/messages.json
@@ -23,16 +23,16 @@
"origin": [["src/components/SettingsControls.tsx", 306]],
"translation": "テーマを選択"
},
- "Reload MCP servers": {
+ "Close sidebar": {
"placeholders": {},
"comments": [],
"origin": [
[
- "src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/McpTab.tsx",
- 42
+ "src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
+ 179
]
],
- "translation": "MCPサーバーを再読み込み"
+ "translation": "サイドバーを閉じる"
},
"Type your message... (Start with / for commands, @ for files, Enter to send)": {
"placeholders": {},
@@ -79,16 +79,16 @@
],
"translation": "メッセージを入力... (/でコマンド、@でファイル、Shift+Enterで送信)"
},
- "Close sidebar": {
+ "Reload MCP servers": {
"placeholders": {},
"comments": [],
"origin": [
[
- "src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 173
+ "src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/McpTab.tsx",
+ 42
]
],
- "translation": "サイドバーを閉じる"
+ "translation": "MCPサーバーを再読み込み"
},
"Type your message here... (Start with / for commands, @ for files, Enter to send)": {
"placeholders": {},
@@ -136,6 +136,14 @@
],
"translation": "利用可能なコマンド"
},
+ "Message input with completion support": {
+ "placeholders": {},
+ "comments": [],
+ "origin": [
+ ["src/app/projects/[projectId]/components/chatForm/ChatInput.tsx", 210]
+ ],
+ "translation": "補完機能付きメッセージ入力"
+ },
"Uncommitted changes": {
"placeholders": {},
"comments": [],
@@ -153,7 +161,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 303
+ 302
]
],
"translation": "コミットに失敗しました"
@@ -164,7 +172,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 321
+ 320
]
],
"translation": "プッシュに失敗しました"
@@ -175,7 +183,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 356
+ 355
]
],
"translation": "プッシュを再試行"
@@ -186,7 +194,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 369
+ 368
]
],
"translation": "コミットとプッシュに失敗しました"
@@ -197,7 +205,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 388
+ 387
]
],
"translation": "比較元"
@@ -208,19 +216,11 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 394
+ 393
]
],
"translation": "比較先"
},
- "Message input with completion support": {
- "placeholders": {},
- "comments": [],
- "origin": [
- ["src/app/projects/[projectId]/components/chatForm/ChatInput.tsx", 210]
- ],
- "translation": "補完機能付きメッセージ入力"
- },
"assistant.tool.message_count": {
"message": "{count} messages",
"placeholders": {
@@ -288,7 +288,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 197
+ 203
],
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SessionSidebar.tsx",
@@ -414,7 +414,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 567
+ 566
]
],
"translation": "コミット"
@@ -426,7 +426,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 602
+ 601
]
],
"translation": "コミット&プッシュ"
@@ -438,7 +438,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 463
+ 462
]
],
"translation": "変更をコミット"
@@ -450,7 +450,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 535
+ 534
]
],
"translation": "コミットメッセージ"
@@ -462,7 +462,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 596
+ 595
]
],
"translation": "コミット&プッシュ中..."
@@ -474,7 +474,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 561
+ 560
]
],
"translation": "コミット中..."
@@ -531,6 +531,13 @@
"origin": [["src/app/projects/components/CreateProjectDialog.tsx", 106]],
"translation": "作成中..."
},
+ "cron_builder.cron_expression": {
+ "translation": "Cron式",
+ "message": "Cron Expression",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 281]]
+ },
"directory_picker.current": {
"message": "Current:",
"placeholders": {},
@@ -538,6 +545,20 @@
"origin": [["src/app/projects/components/DirectoryPicker.tsx", 38]],
"translation": "現在:"
},
+ "cron_builder.custom": {
+ "translation": "カスタム",
+ "message": "Custom",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 192]]
+ },
+ "cron_builder.daily": {
+ "translation": "毎日",
+ "message": "Daily",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 186]]
+ },
"settings.theme.dark": {
"message": "Dark",
"placeholders": {},
@@ -545,6 +566,13 @@
"origin": [["src/components/SettingsControls.tsx", 313]],
"translation": "ダーク"
},
+ "cron_builder.day_of_week": {
+ "translation": "曜日",
+ "message": "Day of Week",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 231]]
+ },
"settings.permission.mode.default": {
"message": "Default (Ask permission)",
"placeholders": {},
@@ -559,7 +587,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 492
+ 491
]
],
"translation": "すべて選択解除"
@@ -592,7 +620,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 618
+ 617
]
],
"translation": "コミットメッセージを入力"
@@ -644,6 +672,13 @@
"origin": [["src/components/SystemInfoCard.tsx", 146]],
"translation": "実行ファイル"
},
+ "cron_builder.expression": {
+ "translation": "Cron式",
+ "message": "Expression",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 303]]
+ },
"mcp.error.load_failed": {
"message": "Failed to load MCP servers: {error}",
"placeholders": {
@@ -705,6 +740,13 @@
],
"translation": "ファイルが変更されました"
},
+ "cron_builder.friday": {
+ "translation": "金曜日",
+ "message": "Friday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 36]]
+ },
"settings.session.hide_no_user_message": {
"message": "Hide sessions without user messages",
"placeholders": {},
@@ -712,6 +754,23 @@
"origin": [["src/components/SettingsControls.tsx", 117]],
"translation": "ユーザーメッセージのないセッションを非表示"
},
+ "cron_builder.hour": {
+ "translation": "時 (0-23)",
+ "message": "Hour (0-23)",
+ "placeholders": {},
+ "comments": [],
+ "origin": [
+ ["src/components/scheduler/CronExpressionBuilder.tsx", 202],
+ ["src/components/scheduler/CronExpressionBuilder.tsx", 252]
+ ]
+ },
+ "cron_builder.hourly": {
+ "translation": "毎時",
+ "message": "Hourly",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 183]]
+ },
"user.content.image": {
"message": "Image",
"placeholders": {},
@@ -731,7 +790,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/conversationList/AssistantConversationContent.tsx",
- 198
+ 170
]
],
"translation": "入力パラメータ"
@@ -776,7 +835,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 657
+ 656
]
],
"translation": "差分を読み込み中..."
@@ -795,7 +854,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 115
+ 119
],
["src/components/GlobalSidebar.tsx", 67]
],
@@ -822,7 +881,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 413
+ 412
],
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/McpTab.tsx",
@@ -873,6 +932,23 @@
"origin": [["src/app/projects/components/ProjectList.tsx", 71]],
"translation": "メッセージ:"
},
+ "cron_builder.minute": {
+ "translation": "分 (0-59)",
+ "message": "Minute (0-59)",
+ "placeholders": {},
+ "comments": [],
+ "origin": [
+ ["src/components/scheduler/CronExpressionBuilder.tsx", 214],
+ ["src/components/scheduler/CronExpressionBuilder.tsx", 264]
+ ]
+ },
+ "cron_builder.monday": {
+ "translation": "月曜日",
+ "message": "Monday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 32]]
+ },
"sessions.new": {
"message": "New",
"placeholders": {},
@@ -946,7 +1022,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 136
+ 140
]
],
"translation": "通知"
@@ -998,6 +1074,13 @@
"origin": [["src/lib/notifications.tsx", 107]],
"translation": "ポップ"
},
+ "cron_builder.preview": {
+ "translation": "プレビュー",
+ "message": "Preview",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 293]]
+ },
"chat.status.processing": {
"message": "Processing...",
"placeholders": {},
@@ -1021,7 +1104,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 582
+ 581
]
],
"translation": "プッシュ"
@@ -1033,7 +1116,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 579
+ 578
]
],
"translation": "プッシュ中..."
@@ -1086,6 +1169,20 @@
],
"translation": "実行中"
},
+ "cron_builder.saturday": {
+ "translation": "土曜日",
+ "message": "Saturday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 37]]
+ },
+ "cron_builder.schedule_type": {
+ "translation": "スケジュールタイプ",
+ "message": "Schedule Type",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 175]]
+ },
"conversation.error.schema": {
"message": "Schema Error",
"placeholders": {},
@@ -1131,7 +1228,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 484
+ 483
]
],
"translation": "すべて選択"
@@ -1143,7 +1240,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/diffModal/DiffModal.tsx",
- 613
+ 612
]
],
"translation": "少なくとも1つのファイルを選択してください"
@@ -1188,7 +1285,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 126
+ 130
]
],
"translation": "セッション表示"
@@ -1219,7 +1316,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 278
+ 284
],
["src/components/GlobalSidebar.tsx", 44]
],
@@ -1239,7 +1336,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 252
+ 258
]
],
"translation": "MCPサーバー設定を表示"
@@ -1258,11 +1355,23 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
- 225
+ 231
]
],
"translation": "セッション一覧を表示"
},
+ "system.info.tab.title": {
+ "message": "Show system information",
+ "placeholders": {},
+ "comments": [],
+ "origin": [
+ [
+ "src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/MobileSidebar.tsx",
+ 309
+ ]
+ ],
+ "translation": "システム情報を表示"
+ },
"chat.button.start": {
"message": "Start Chat",
"placeholders": {},
@@ -1281,6 +1390,13 @@
],
"translation": "新しいチャットを開始"
},
+ "cron_builder.sunday": {
+ "translation": "日曜日",
+ "message": "Sunday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 31]]
+ },
"settings.theme.system": {
"message": "System",
"placeholders": {},
@@ -1342,7 +1458,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/conversationList/AssistantConversationContent.tsx",
- 89
+ 64
]
],
"translation": "思考中"
@@ -1359,17 +1475,24 @@
],
"translation": "この会話エントリの解析に失敗しました。フォーマットの変更または解析の問題が考えられます。"
},
+ "cron_builder.thursday": {
+ "translation": "木曜日",
+ "message": "Thursday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 35]]
+ },
"assistant.tool.tool_id": {
- "translation": "ツールID",
"message": "Tool ID",
"placeholders": {},
"comments": [],
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/conversationList/AssistantConversationContent.tsx",
- 190
+ 162
]
- ]
+ ],
+ "translation": "ツールID"
},
"assistant.tool.result": {
"message": "Tool Result",
@@ -1378,7 +1501,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/conversationList/AssistantConversationContent.tsx",
- 215
+ 187
]
],
"translation": "ツール実行結果"
@@ -1409,6 +1532,13 @@
"origin": [["src/app/projects/[projectId]/error.tsx", 68]],
"translation": "再試行"
},
+ "cron_builder.tuesday": {
+ "translation": "火曜日",
+ "message": "Tuesday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 33]]
+ },
"settings.session.unify_same_title": {
"message": "Unify sessions with same title",
"placeholders": {},
@@ -1500,7 +1630,7 @@
"origin": [
[
"src/app/projects/[projectId]/sessions/[sessionId]/components/conversationList/AssistantConversationContent.tsx",
- 145
+ 120
]
],
"translation": "タスクを確認"
@@ -1512,6 +1642,20 @@
"origin": [["src/app/projects/[projectId]/error.tsx", 40]],
"translation": "このプロジェクトの読み込み中にエラーが発生しました"
},
+ "cron_builder.wednesday": {
+ "translation": "水曜日",
+ "message": "Wednesday",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 34]]
+ },
+ "cron_builder.weekly": {
+ "translation": "毎週",
+ "message": "Weekly",
+ "placeholders": {},
+ "comments": [],
+ "origin": [["src/components/scheduler/CronExpressionBuilder.tsx", 189]]
+ },
"projects.page.title": {
"message": "Your Projects",
"placeholders": {},
diff --git a/src/lib/i18n/locales/ja/messages.ts b/src/lib/i18n/locales/ja/messages.ts
index e7e5dce..9ccf10e 100644
--- a/src/lib/i18n/locales/ja/messages.ts
+++ b/src/lib/i18n/locales/ja/messages.ts
@@ -1 +1 @@
-/*eslint-disable*/import type{Messages}from"@lingui/core";export const messages=JSON.parse("{\"Available commands\":[\"利用可能なコマンド\"],\"Available files and directories\":[\"利用可能なファイルとディレクトリ\"],\"Close sidebar\":[\"サイドバーを閉じる\"],\"Compare from\":[\"比較元\"],\"Compare to\":[\"比較先\"],\"Failed to commit\":[\"コミットに失敗しました\"],\"Failed to commit and push\":[\"コミットとプッシュに失敗しました\"],\"Failed to push\":[\"プッシュに失敗しました\"],\"Message input with completion support\":[\"補完機能付きメッセージ入力\"],\"Reload MCP servers\":[\"MCPサーバーを再読み込み\"],\"Retry Push\":[\"プッシュを再試行\"],\"Select enter key behavior\":[\"Enterキーの動作を選択\"],\"Select language\":[\"言語を選択\"],\"Select permission mode\":[\"権限モードを選択\"],\"Select theme\":[\"テーマを選択\"],\"Type your message here... (Start with / for commands, @ for files, Command+Enter to send)\":[\"ここにメッセージを入力... (/でコマンド、@でファイル、Command+Enterで送信)\"],\"Type your message here... (Start with / for commands, @ for files, Enter to send)\":[\"ここにメッセージを入力... (/でコマンド、@でファイル、Enterで送信)\"],\"Type your message here... (Start with / for commands, @ for files, Shift+Enter to send)\":[\"ここにメッセージを入力... (/でコマンド、@でファイル、Shift+Enterで送信)\"],\"Type your message... (Start with / for commands, @ for files, Command+Enter to send)\":[\"メッセージを入力... (/でコマンド、@でファイル、Command+Enterで送信)\"],\"Type your message... (Start with / for commands, @ for files, Enter to send)\":[\"メッセージを入力... (/でコマンド、@でファイル、Enterで送信)\"],\"Type your message... (Start with / for commands, @ for files, Shift+Enter to send)\":[\"メッセージを入力... (/でコマンド、@でファイル、Shift+Enterで送信)\"],\"Uncommitted changes\":[\"未コミットの変更\"],\"assistant.thinking\":[\"思考中\"],\"assistant.tool.input_parameters\":[\"入力パラメータ\"],\"assistant.tool.message_count\":[[\"count\"],\"件のメッセージ\"],\"assistant.tool.result\":[\"ツール実行結果\"],\"assistant.tool.task_id\":[\"タスクID\"],\"assistant.tool.tool_id\":[\"ツールID\"],\"assistant.tool.view_task\":[\"タスクを表示\"],\"assistant.tool.view_task_details\":[\"タスクを確認\"],\"chat.autocomplete.active\":[\"オートコンプリート有効\"],\"chat.button.start\":[\"チャット開始\"],\"chat.error.send_failed\":[\"メッセージの送信に失敗しました。もう一度お試しください。\"],\"chat.modal.title\":[\"新しいチャットを開始\"],\"chat.resume\":[\"再開\"],\"chat.send\":[\"送信\"],\"chat.status.processing\":[\"処理中...\"],\"common.action.cancel\":[\"キャンセル\"],\"common.error\":[\"エラー\"],\"common.loading\":[\"読み込み中...\"],\"conversation.error.raw_content\":[\"生データ:\"],\"conversation.error.report_issue\":[\"この問題を報告\"],\"conversation.error.schema\":[\"スキーマエラー\"],\"conversation.error.schema_validation\":[\"スキーマ検証エラー\"],\"conversation.error.schema_validation.description\":[\"この会話エントリの解析に失敗しました。フォーマットの変更または解析の問題が考えられます。\"],\"diff.commit\":[\"コミット\"],\"diff.commit.changes\":[\"変更をコミット\"],\"diff.commit.message\":[\"コミットメッセージ\"],\"diff.commit.push\":[\"コミット&プッシュ\"],\"diff.committing\":[\"コミット中...\"],\"diff.committing.pushing\":[\"コミット&プッシュ中...\"],\"diff.deselect.all\":[\"すべて選択解除\"],\"diff.enter.message\":[\"コミットメッセージを入力\"],\"diff.files\":[\"ファイル\"],\"diff.files.changed\":[\"ファイルが変更されました\"],\"diff.loading\":[\"差分を読み込み中...\"],\"diff.push\":[\"プッシュ\"],\"diff.pushing\":[\"プッシュ中...\"],\"diff.select.all\":[\"すべて選択\"],\"diff.select.file\":[\"少なくとも1つのファイルを選択してください\"],\"directory_picker.current\":[\"現在:\"],\"directory_picker.loading\":[\"読み込み中...\"],\"directory_picker.no_directories\":[\"ディレクトリが見つかりません\"],\"directory_picker.select\":[\"このディレクトリを選択\"],\"mcp.error.load_failed\":[\"MCPサーバーの読み込みに失敗しました: \",[\"error\"]],\"mcp.no.servers\":[\"MCPサーバーが見つかりません\"],\"mcp.title\":[\"MCPサーバー\"],\"notification.beep\":[\"ビープ音\"],\"notification.chime\":[\"チャイム\"],\"notification.description\":[\"Claude 
Code のタスクが完了した時に再生する音を選択してください\"],\"notification.none\":[\"なし\"],\"notification.ping\":[\"ピン\"],\"notification.pop\":[\"ポップ\"],\"notification.test\":[\"テスト\"],\"project.create.action.create\":[\"プロジェクトを作成\"],\"project.create.action.creating\":[\"作成中...\"],\"project.create.description\":[\"Claude Codeプロジェクトとして初期化するディレクトリを選択してください。選択したディレクトリで<0>/init0>が実行されます。\"],\"project.create.selected_directory\":[\"選択したディレクトリ:\"],\"project.create.title\":[\"新規プロジェクトを作成\"],\"project.error.back_to_projects\":[\"プロジェクト一覧に戻る\"],\"project.error.description\":[\"このプロジェクトの読み込み中にエラーが発生しました\"],\"project.error.details_title\":[\"エラー詳細\"],\"project.error.error_id\":[\"エラーID:\"],\"project.error.title\":[\"プロジェクトの読み込みに失敗しました\"],\"project.error.try_again\":[\"再試行\"],\"project.new\":[\"新規プロジェクト\"],\"project.not_found.back_to_projects\":[\"プロジェクト一覧に戻る\"],\"project.not_found.description\":[\"お探しのプロジェクトは存在しないか、削除されています\"],\"project.not_found.title\":[\"プロジェクトが見つかりません\"],\"project_list.last_modified\":[\"最終更新:\"],\"project_list.messages\":[\"メッセージ:\"],\"project_list.no_projects.description\":[\"~/.claude/projectsディレクトリにClaude Codeプロジェクトが見つかりません。Claude Codeとの会話を開始して、最初のプロジェクトを作成してください。\"],\"project_list.no_projects.title\":[\"プロジェクトが見つかりません\"],\"project_list.view_conversations\":[\"会話を表示\"],\"projects.page.description\":[\"Claude Codeの会話履歴とプロジェクトの操作を閲覧\"],\"projects.page.loading\":[\"プロジェクトを読み込み中...\"],\"projects.page.title\":[\"プロジェクト\"],\"session.conversation.abort\":[\"中止\"],\"session.conversation.in.progress\":[\"会話を進行中...\"],\"session.conversation.paused\":[\"会話を一時停止中...\"],\"session.processing\":[\"Claude Codeが処理中...\"],\"session.status.paused\":[\"一時停止\"],\"session.status.running\":[\"実行中\"],\"sessions.load.more\":[\"さらに読み込む\"],\"sessions.new\":[\"新規\"],\"sessions.title\":[\"セッション\"],\"sessions.total\":[\"合計\"],\"settings.description\":[\"表示と動作の設定\"],\"settings.input.enter_key_behavior\":[\"Enterキーの動作\"],\"settings.input.enter_key_behavior.command_enter\":[\"Command+Enterで送信\"],\"settings.input.enter_key_behavior.description\":[\"メッセージ入力でのEnterキーの動作を選択\"],\"settings.input.enter_key_behavior.enter\":[\"Enterで送信\"],\"settings.input.enter_key_behavior.shift_enter\":[\"Shift+Enterで送信(デフォルト)\"],\"settings.loading\":[\"設定を読み込み中...\"],\"settings.locale\":[\"言語\"],\"settings.locale.description\":[\"お好みの言語を選択\"],\"settings.locale.en\":[\"English\"],\"settings.locale.ja\":[\"日本語\"],\"settings.notifications\":[\"通知\"],\"settings.permission.mode\":[\"権限モード\"],\"settings.permission.mode.accept_edits\":[\"編集を承認(ファイル編集を自動承認)\"],\"settings.permission.mode.bypass_permissions\":[\"権限をバイパス(プロンプトなし)\"],\"settings.permission.mode.default\":[\"デフォルト(権限を確認)\"],\"settings.permission.mode.description\":[\"ファイル操作の権限リクエストの処理方法を制御\"],\"settings.permission.mode.plan\":[\"プランモード(計画のみ)\"],\"settings.section.notifications\":[\"通知\"],\"settings.section.session_display\":[\"セッション表示\"],\"settings.section.system_info\":[\"システム情報\"],\"settings.session.display\":[\"セッション表示\"],\"settings.session.hide_no_user_message\":[\"ユーザーメッセージのないセッションを非表示\"],\"settings.session.hide_no_user_message.description\":[\"ユーザーコマンドまたはメッセージを含むセッションのみを表示\"],\"settings.session.unify_same_title\":[\"同じタイトルのセッションを統合\"],\"settings.session.unify_same_title.description\":[\"同じタイトルの複数のセッションがある場合、最新のセッションのみを表示\"],\"settings.tab.title\":[\"表示と通知の設定\"],\"settings.theme\":[\"テーマ\"],\"settings.theme.dark\":[\"ダーク\"],\"settings.theme.description\":[\"お好みのカラーテーマを選択\"],\"settings.theme.light\":[\"ライト\"],\"settings.theme.system\":[\"システム\"],\"settings.title\":[\"設定\"],\"sidebar.back.to.projects
\":[\"プロジェクト一覧に戻る\"],\"sidebar.show.mcp.settings\":[\"MCPサーバー設定を表示\"],\"sidebar.show.session.list\":[\"セッション一覧を表示\"],\"system_info.available_features\":[\"利用可能機能\"],\"system_info.claude_code\":[\"Claude Code\"],\"system_info.description\":[\"バージョンと機能情報\"],\"system_info.executable_path\":[\"実行ファイル\"],\"system_info.feature.agent_sdk.description\":[\"Claude Code SDKではなくClaude Agent SDKを使用 (v1.0.125+)\"],\"system_info.feature.agent_sdk.title\":[\"Claude Agent SDK\"],\"system_info.feature.can_use_tool.description\":[\"動的にツールの使用許可を制御し、ツール実行前にユーザーの承認を求めることができます (v1.0.82+)\"],\"system_info.feature.can_use_tool.title\":[\"ツール使用権限制御\"],\"system_info.feature.unknown.description\":[\"機能情報は利用できません\"],\"system_info.feature.uuid_on_sdk_message.description\":[\"SDKメッセージに一意の識別子を追加して追跡を改善します (v1.0.86+)\"],\"system_info.feature.uuid_on_sdk_message.title\":[\"メッセージUUIDサポート\"],\"system_info.loading\":[\"システム情報を読み込んでいます...\"],\"system_info.title\":[\"システム情報\"],\"system_info.unknown\":[\"不明\"],\"system_info.version_label\":[\"バージョン\"],\"system_info.viewer_version\":[\"Claude Code Viewer\"],\"user.content.image\":[\"画像\"],\"user.content.image.description\":[\"ユーザーがアップロードした画像コンテンツ\"],\"user.content.unsupported_media\":[\"サポートされていないメディア\"],\"user.content.unsupported_media.description\":[\"表示がサポートされていないメディア形式です\"]}")as Messages;
\ No newline at end of file
+/*eslint-disable*/import type{Messages}from"@lingui/core";export const messages=JSON.parse("{\"Available commands\":[\"利用可能なコマンド\"],\"Available files and directories\":[\"利用可能なファイルとディレクトリ\"],\"Close sidebar\":[\"サイドバーを閉じる\"],\"Compare from\":[\"比較元\"],\"Compare to\":[\"比較先\"],\"Failed to commit\":[\"コミットに失敗しました\"],\"Failed to commit and push\":[\"コミットとプッシュに失敗しました\"],\"Failed to push\":[\"プッシュに失敗しました\"],\"Message input with completion support\":[\"補完機能付きメッセージ入力\"],\"Reload MCP servers\":[\"MCPサーバーを再読み込み\"],\"Retry Push\":[\"プッシュを再試行\"],\"Select enter key behavior\":[\"Enterキーの動作を選択\"],\"Select language\":[\"言語を選択\"],\"Select permission mode\":[\"権限モードを選択\"],\"Select theme\":[\"テーマを選択\"],\"Type your message here... (Start with / for commands, @ for files, Command+Enter to send)\":[\"ここにメッセージを入力... (/でコマンド、@でファイル、Command+Enterで送信)\"],\"Type your message here... (Start with / for commands, @ for files, Enter to send)\":[\"ここにメッセージを入力... (/でコマンド、@でファイル、Enterで送信)\"],\"Type your message here... (Start with / for commands, @ for files, Shift+Enter to send)\":[\"ここにメッセージを入力... (/でコマンド、@でファイル、Shift+Enterで送信)\"],\"Type your message... (Start with / for commands, @ for files, Command+Enter to send)\":[\"メッセージを入力... (/でコマンド、@でファイル、Command+Enterで送信)\"],\"Type your message... (Start with / for commands, @ for files, Enter to send)\":[\"メッセージを入力... (/でコマンド、@でファイル、Enterで送信)\"],\"Type your message... (Start with / for commands, @ for files, Shift+Enter to send)\":[\"メッセージを入力... (/でコマンド、@でファイル、Shift+Enterで送信)\"],\"Uncommitted changes\":[\"未コミットの変更\"],\"assistant.thinking\":[\"思考中\"],\"assistant.tool.input_parameters\":[\"入力パラメータ\"],\"assistant.tool.message_count\":[[\"count\"],\"件のメッセージ\"],\"assistant.tool.result\":[\"ツール実行結果\"],\"assistant.tool.task_id\":[\"タスクID\"],\"assistant.tool.tool_id\":[\"ツールID\"],\"assistant.tool.view_task\":[\"タスクを表示\"],\"assistant.tool.view_task_details\":[\"タスクを確認\"],\"chat.autocomplete.active\":[\"オートコンプリート有効\"],\"chat.button.start\":[\"チャット開始\"],\"chat.error.send_failed\":[\"メッセージの送信に失敗しました。もう一度お試しください。\"],\"chat.modal.title\":[\"新しいチャットを開始\"],\"chat.resume\":[\"再開\"],\"chat.send\":[\"送信\"],\"chat.status.processing\":[\"処理中...\"],\"common.action.cancel\":[\"キャンセル\"],\"common.error\":[\"エラー\"],\"common.loading\":[\"読み込み中...\"],\"conversation.error.raw_content\":[\"生データ:\"],\"conversation.error.report_issue\":[\"この問題を報告\"],\"conversation.error.schema\":[\"スキーマエラー\"],\"conversation.error.schema_validation\":[\"スキーマ検証エラー\"],\"conversation.error.schema_validation.description\":[\"この会話エントリの解析に失敗しました。フォーマットの変更または解析の問題が考えられます。\"],\"cron_builder.cron_expression\":[\"Cron式\"],\"cron_builder.custom\":[\"カスタム\"],\"cron_builder.daily\":[\"毎日\"],\"cron_builder.day_of_week\":[\"曜日\"],\"cron_builder.expression\":[\"Cron式\"],\"cron_builder.friday\":[\"金曜日\"],\"cron_builder.hour\":[\"時 (0-23)\"],\"cron_builder.hourly\":[\"毎時\"],\"cron_builder.minute\":[\"分 
(0-59)\"],\"cron_builder.monday\":[\"月曜日\"],\"cron_builder.preview\":[\"プレビュー\"],\"cron_builder.saturday\":[\"土曜日\"],\"cron_builder.schedule_type\":[\"スケジュールタイプ\"],\"cron_builder.sunday\":[\"日曜日\"],\"cron_builder.thursday\":[\"木曜日\"],\"cron_builder.tuesday\":[\"火曜日\"],\"cron_builder.wednesday\":[\"水曜日\"],\"cron_builder.weekly\":[\"毎週\"],\"diff.commit\":[\"コミット\"],\"diff.commit.changes\":[\"変更をコミット\"],\"diff.commit.message\":[\"コミットメッセージ\"],\"diff.commit.push\":[\"コミット&プッシュ\"],\"diff.committing\":[\"コミット中...\"],\"diff.committing.pushing\":[\"コミット&プッシュ中...\"],\"diff.deselect.all\":[\"すべて選択解除\"],\"diff.enter.message\":[\"コミットメッセージを入力\"],\"diff.files\":[\"ファイル\"],\"diff.files.changed\":[\"ファイルが変更されました\"],\"diff.loading\":[\"差分を読み込み中...\"],\"diff.push\":[\"プッシュ\"],\"diff.pushing\":[\"プッシュ中...\"],\"diff.select.all\":[\"すべて選択\"],\"diff.select.file\":[\"少なくとも1つのファイルを選択してください\"],\"directory_picker.current\":[\"現在:\"],\"directory_picker.loading\":[\"読み込み中...\"],\"directory_picker.no_directories\":[\"ディレクトリが見つかりません\"],\"directory_picker.select\":[\"このディレクトリを選択\"],\"mcp.error.load_failed\":[\"MCPサーバーの読み込みに失敗しました: \",[\"error\"]],\"mcp.no.servers\":[\"MCPサーバーが見つかりません\"],\"mcp.title\":[\"MCPサーバー\"],\"notification.beep\":[\"ビープ音\"],\"notification.chime\":[\"チャイム\"],\"notification.description\":[\"Claude Code のタスクが完了した時に再生する音を選択してください\"],\"notification.none\":[\"なし\"],\"notification.ping\":[\"ピン\"],\"notification.pop\":[\"ポップ\"],\"notification.test\":[\"テスト\"],\"project.create.action.create\":[\"プロジェクトを作成\"],\"project.create.action.creating\":[\"作成中...\"],\"project.create.description\":[\"Claude Codeプロジェクトとして初期化するディレクトリを選択してください。選択したディレクトリで<0>/init0>が実行されます。\"],\"project.create.selected_directory\":[\"選択したディレクトリ:\"],\"project.create.title\":[\"新規プロジェクトを作成\"],\"project.error.back_to_projects\":[\"プロジェクト一覧に戻る\"],\"project.error.description\":[\"このプロジェクトの読み込み中にエラーが発生しました\"],\"project.error.details_title\":[\"エラー詳細\"],\"project.error.error_id\":[\"エラーID:\"],\"project.error.title\":[\"プロジェクトの読み込みに失敗しました\"],\"project.error.try_again\":[\"再試行\"],\"project.new\":[\"新規プロジェクト\"],\"project.not_found.back_to_projects\":[\"プロジェクト一覧に戻る\"],\"project.not_found.description\":[\"お探しのプロジェクトは存在しないか、削除されています\"],\"project.not_found.title\":[\"プロジェクトが見つかりません\"],\"project_list.last_modified\":[\"最終更新:\"],\"project_list.messages\":[\"メッセージ:\"],\"project_list.no_projects.description\":[\"~/.claude/projectsディレクトリにClaude Codeプロジェクトが見つかりません。Claude Codeとの会話を開始して、最初のプロジェクトを作成してください。\"],\"project_list.no_projects.title\":[\"プロジェクトが見つかりません\"],\"project_list.view_conversations\":[\"会話を表示\"],\"projects.page.description\":[\"Claude Codeの会話履歴とプロジェクトの操作を閲覧\"],\"projects.page.loading\":[\"プロジェクトを読み込み中...\"],\"projects.page.title\":[\"プロジェクト\"],\"session.conversation.abort\":[\"中止\"],\"session.conversation.in.progress\":[\"会話を進行中...\"],\"session.conversation.paused\":[\"会話を一時停止中...\"],\"session.processing\":[\"Claude 
Codeが処理中...\"],\"session.status.paused\":[\"一時停止\"],\"session.status.running\":[\"実行中\"],\"sessions.load.more\":[\"さらに読み込む\"],\"sessions.new\":[\"新規\"],\"sessions.title\":[\"セッション\"],\"sessions.total\":[\"合計\"],\"settings.description\":[\"表示と動作の設定\"],\"settings.input.enter_key_behavior\":[\"Enterキーの動作\"],\"settings.input.enter_key_behavior.command_enter\":[\"Command+Enterで送信\"],\"settings.input.enter_key_behavior.description\":[\"メッセージ入力でのEnterキーの動作を選択\"],\"settings.input.enter_key_behavior.enter\":[\"Enterで送信\"],\"settings.input.enter_key_behavior.shift_enter\":[\"Shift+Enterで送信(デフォルト)\"],\"settings.loading\":[\"設定を読み込み中...\"],\"settings.locale\":[\"言語\"],\"settings.locale.description\":[\"お好みの言語を選択\"],\"settings.locale.en\":[\"English\"],\"settings.locale.ja\":[\"日本語\"],\"settings.notifications\":[\"通知\"],\"settings.permission.mode\":[\"権限モード\"],\"settings.permission.mode.accept_edits\":[\"編集を承認(ファイル編集を自動承認)\"],\"settings.permission.mode.bypass_permissions\":[\"権限をバイパス(プロンプトなし)\"],\"settings.permission.mode.default\":[\"デフォルト(権限を確認)\"],\"settings.permission.mode.description\":[\"ファイル操作の権限リクエストの処理方法を制御\"],\"settings.permission.mode.plan\":[\"プランモード(計画のみ)\"],\"settings.section.notifications\":[\"通知\"],\"settings.section.session_display\":[\"セッション表示\"],\"settings.section.system_info\":[\"システム情報\"],\"settings.session.display\":[\"セッション表示\"],\"settings.session.hide_no_user_message\":[\"ユーザーメッセージのないセッションを非表示\"],\"settings.session.hide_no_user_message.description\":[\"ユーザーコマンドまたはメッセージを含むセッションのみを表示\"],\"settings.session.unify_same_title\":[\"同じタイトルのセッションを統合\"],\"settings.session.unify_same_title.description\":[\"同じタイトルの複数のセッションがある場合、最新のセッションのみを表示\"],\"settings.tab.title\":[\"表示と通知の設定\"],\"settings.theme\":[\"テーマ\"],\"settings.theme.dark\":[\"ダーク\"],\"settings.theme.description\":[\"お好みのカラーテーマを選択\"],\"settings.theme.light\":[\"ライト\"],\"settings.theme.system\":[\"システム\"],\"settings.title\":[\"設定\"],\"sidebar.back.to.projects\":[\"プロジェクト一覧に戻る\"],\"sidebar.show.mcp.settings\":[\"MCPサーバー設定を表示\"],\"sidebar.show.session.list\":[\"セッション一覧を表示\"],\"system.info.tab.title\":[\"システム情報を表示\"],\"system_info.available_features\":[\"利用可能機能\"],\"system_info.claude_code\":[\"Claude Code\"],\"system_info.description\":[\"バージョンと機能情報\"],\"system_info.executable_path\":[\"実行ファイル\"],\"system_info.feature.agent_sdk.description\":[\"Claude Code SDKではなくClaude Agent SDKを使用 (v1.0.125+)\"],\"system_info.feature.agent_sdk.title\":[\"Claude Agent SDK\"],\"system_info.feature.can_use_tool.description\":[\"動的にツールの使用許可を制御し、ツール実行前にユーザーの承認を求めることができます (v1.0.82+)\"],\"system_info.feature.can_use_tool.title\":[\"ツール使用権限制御\"],\"system_info.feature.unknown.description\":[\"機能情報は利用できません\"],\"system_info.feature.uuid_on_sdk_message.description\":[\"SDKメッセージに一意の識別子を追加して追跡を改善します (v1.0.86+)\"],\"system_info.feature.uuid_on_sdk_message.title\":[\"メッセージUUIDサポート\"],\"system_info.loading\":[\"システム情報を読み込んでいます...\"],\"system_info.title\":[\"システム情報\"],\"system_info.unknown\":[\"不明\"],\"system_info.version_label\":[\"バージョン\"],\"system_info.viewer_version\":[\"Claude Code Viewer\"],\"user.content.image\":[\"画像\"],\"user.content.image.description\":[\"ユーザーがアップロードした画像コンテンツ\"],\"user.content.unsupported_media\":[\"サポートされていないメディア\"],\"user.content.unsupported_media.description\":[\"表示がサポートされていないメディア形式です\"]}")as Messages;
\ No newline at end of file
diff --git a/src/server/core/scheduler/domain/Scheduler.ts b/src/server/core/scheduler/domain/Scheduler.ts
index 32b3f5e..bf61186 100644
--- a/src/server/core/scheduler/domain/Scheduler.ts
+++ b/src/server/core/scheduler/domain/Scheduler.ts
@@ -18,7 +18,7 @@ import type {
SchedulerJob,
UpdateSchedulerJob,
} from "../schema";
-import { calculateFixedDelay, executeJob, shouldExecuteJob } from "./Job";
+import { calculateFixedDelay, executeJob } from "./Job";
class SchedulerJobNotFoundError extends Data.TaggedError(
"SchedulerJobNotFoundError",
@@ -55,18 +55,28 @@ const LayerImpl = Effect.gen(function* () {
);
}
- const schedule = Schedule.cron(cronResult.right);
+ const cronSchedule = Schedule.cron(cronResult.right);
- const fiber = yield* Effect.repeat(
- runJobWithConcurrencyControl(job),
- schedule,
- ).pipe(Effect.forkDaemon);
+ // Wait for the next cron time before starting the repeat loop
+ // This prevents immediate execution on job creation/update
+ const fiber = yield* Effect.gen(function* () {
+ // Get the next scheduled time
+ const nextTime = Cron.next(cronResult.right, new Date());
+ const nextDelay = Math.max(0, nextTime.getTime() - Date.now());
+
+ // Wait until the next scheduled time
+ yield* Effect.sleep(Duration.millis(nextDelay));
+
+ // Then repeat on the cron schedule
+ yield* Effect.repeat(runJobWithConcurrencyControl(job), cronSchedule);
+ }).pipe(Effect.forkDaemon);
yield* Ref.update(fibersRef, (fibers) =>
new Map(fibers).set(job.id, fiber),
);
} else if (job.schedule.type === "fixed") {
- if (!shouldExecuteJob(job, now)) {
+ // For oneTime jobs, skip scheduling if already executed
+ if (job.schedule.oneTime && job.lastRunStatus !== null) {
return;
}
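
The Scheduler.ts change above avoids the immediate first execution that `Effect.repeat` would otherwise perform by sleeping until the next cron occurrence before entering the repeat loop. A minimal standalone sketch of the same pattern, using the `Cron`, `Duration`, and `Schedule` modules the hunk relies on; `runJob` stands in for `runJobWithConcurrencyControl(job)`:

```ts
import { Cron, Duration, Effect, Either, Schedule } from "effect";

// Sketch: fork a daemon fiber that waits for the next cron occurrence,
// then repeats `runJob` on that cron schedule (no immediate first run).
const forkCronJob = (expression: string, runJob: Effect.Effect<void>) =>
  Effect.gen(function* () {
    const cronResult = Cron.parse(expression);
    if (Either.isLeft(cronResult)) {
      return yield* Effect.fail(
        new Error(`Invalid cron expression: ${expression}`),
      );
    }
    const cron = cronResult.right;

    // Delay until the next scheduled time so the job does not fire on creation/update.
    const nextDelay = Math.max(
      0,
      Cron.next(cron, new Date()).getTime() - Date.now(),
    );
    yield* Effect.sleep(Duration.millis(nextDelay));

    yield* Effect.repeat(runJob, Schedule.cron(cron));
  }).pipe(Effect.forkDaemon);
```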
diff --git a/src/server/core/scheduler/presentation/SchedulerController.ts b/src/server/core/scheduler/presentation/SchedulerController.ts
index 27d6aae..3d0f8a7 100644
--- a/src/server/core/scheduler/presentation/SchedulerController.ts
+++ b/src/server/core/scheduler/presentation/SchedulerController.ts
@@ -1,99 +1,84 @@
-import type { FileSystem, Path } from "@effect/platform";
-import type { CommandExecutor } from "@effect/platform/CommandExecutor";
-import { Context, Effect, Layer, Runtime } from "effect";
-import { Hono, type Context as HonoContext } from "hono";
+import { Context, Effect, Layer } from "effect";
+import type { ControllerResponse } from "../../../lib/effect/toEffectResponse";
import type { InferEffect } from "../../../lib/effect/types";
-import type { ClaudeCodeLifeCycleService } from "../../claude-code/services/ClaudeCodeLifeCycleService";
-import type { EnvService } from "../../platform/services/EnvService";
-import type { UserConfigService } from "../../platform/services/UserConfigService";
-import type { ProjectRepository } from "../../project/infrastructure/ProjectRepository";
import { SchedulerService } from "../domain/Scheduler";
-import { newSchedulerJobSchema, updateSchedulerJobSchema } from "../schema";
+import type { NewSchedulerJob, UpdateSchedulerJob } from "../schema";
const LayerImpl = Effect.gen(function* () {
const schedulerService = yield* SchedulerService;
- const runtime = yield* Effect.runtime<
- | FileSystem.FileSystem
- | Path.Path
- | CommandExecutor
- | EnvService
- | ProjectRepository
- | UserConfigService
- | ClaudeCodeLifeCycleService
- >();
+ const getJobs = () =>
+ Effect.gen(function* () {
+ const jobs = yield* schedulerService.getJobs();
+ return {
+ response: jobs,
+ status: 200,
+ } as const satisfies ControllerResponse;
+ });
- const app = new Hono()
- .get("/jobs", async (c: HonoContext) => {
- const result = await Runtime.runPromise(runtime)(
- schedulerService.getJobs(),
- );
- return c.json(result);
- })
- .post("/jobs", async (c: HonoContext) => {
- const body = await c.req.json();
- const parsed = newSchedulerJobSchema.safeParse(body);
+ const addJob = (options: { job: NewSchedulerJob }) =>
+ Effect.gen(function* () {
+ const { job } = options;
+ const result = yield* schedulerService.addJob(job);
+ return {
+ response: result,
+ status: 201,
+ } as const satisfies ControllerResponse;
+ });
- if (!parsed.success) {
- return c.json(
- { error: "Invalid request body", details: parsed.error },
- 400,
- );
- }
-
- const result = await Runtime.runPromise(runtime)(
- schedulerService.addJob(parsed.data),
- );
- return c.json(result, 201);
- })
- .patch("/jobs/:id", async (c: HonoContext) => {
- const id = c.req.param("id");
- const body = await c.req.json();
- const parsed = updateSchedulerJobSchema.safeParse(body);
-
- if (!parsed.success) {
- return c.json(
- { error: "Invalid request body", details: parsed.error },
- 400,
- );
- }
-
- const result = await Runtime.runPromise(runtime)(
- schedulerService
- .updateJob(id, parsed.data)
- .pipe(
- Effect.catchTag("SchedulerJobNotFoundError", () =>
- Effect.succeed(null),
- ),
+ const updateJob = (options: { id: string; job: UpdateSchedulerJob }) =>
+ Effect.gen(function* () {
+ const { id, job } = options;
+ const result = yield* schedulerService
+ .updateJob(id, job)
+ .pipe(
+ Effect.catchTag("SchedulerJobNotFoundError", () =>
+ Effect.succeed(null),
),
- );
+ );
if (result === null) {
- return c.json({ error: "Job not found" }, 404);
+ return {
+ response: { error: "Job not found" },
+ status: 404,
+ } as const satisfies ControllerResponse;
}
- return c.json(result);
- })
- .delete("/jobs/:id", async (c: HonoContext) => {
- const id = c.req.param("id");
+ return {
+ response: result,
+ status: 200,
+ } as const satisfies ControllerResponse;
+ });
- const result = await Runtime.runPromise(runtime)(
- schedulerService.deleteJob(id).pipe(
- Effect.catchTag("SchedulerJobNotFoundError", () =>
- Effect.succeed(false),
- ),
- Effect.map(() => true),
+ const deleteJob = (options: { id: string }) =>
+ Effect.gen(function* () {
+ const { id } = options;
+ const result = yield* schedulerService.deleteJob(id).pipe(
+ Effect.catchTag("SchedulerJobNotFoundError", () =>
+ Effect.succeed(false),
),
+ Effect.map(() => true),
);
if (!result) {
- return c.json({ error: "Job not found" }, 404);
+ return {
+ response: { error: "Job not found" },
+ status: 404,
+ } as const satisfies ControllerResponse;
}
- return c.json({ success: true }, 200);
+ return {
+ response: { success: true },
+ status: 200,
+ } as const satisfies ControllerResponse;
});
- return { app };
+ return {
+ getJobs,
+ addJob,
+ updateJob,
+ deleteJob,
+ };
});
export type ISchedulerController = InferEffect<typeof LayerImpl>;
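
The rewritten controller drops its embedded Hono app in favor of plain Effect-returning methods whose `{ response, status }` results are serialized by the route layer through `effectToResponse`. A minimal sketch of that shape; `ControllerResponseLike` is a hypothetical stand-in for the project's `ControllerResponse` type, whose real definition in `toEffectResponse` is not part of this patch:

```ts
import { Effect } from "effect";

// Hypothetical stand-in for the project's ControllerResponse type.
type ControllerResponseLike = {
  readonly response: unknown;
  readonly status: number;
};

// A controller method in the new style: no HTTP framework types, just an
// Effect that yields a payload plus status code for the route layer to serialize.
const getHealth = () =>
  Effect.gen(function* () {
    const checkedAt = yield* Effect.sync(() => new Date().toISOString());
    return {
      response: { ok: true, checkedAt },
      status: 200,
    } as const satisfies ControllerResponseLike;
  });
```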
diff --git a/src/server/hono/route.ts b/src/server/hono/route.ts
index 0136be4..8d07fdf 100644
--- a/src/server/hono/route.ts
+++ b/src/server/hono/route.ts
@@ -17,6 +17,7 @@ import { GitController } from "../core/git/presentation/GitController";
import { CommitRequestSchema, PushRequestSchema } from "../core/git/schema";
import { EnvService } from "../core/platform/services/EnvService";
import { UserConfigService } from "../core/platform/services/UserConfigService";
+import type { ProjectRepository } from "../core/project/infrastructure/ProjectRepository";
import { ProjectController } from "../core/project/presentation/ProjectController";
import { SchedulerController } from "../core/scheduler/presentation/SchedulerController";
import type { VirtualConversationDatabase } from "../core/session/infrastructure/VirtualConversationDatabase";
@@ -56,6 +57,9 @@ export const routes = (app: HonoAppType) =>
| FileSystem.FileSystem
| Path.Path
| CommandExecutor.CommandExecutor
+ | UserConfigService
+ | ClaudeCodeLifeCycleService
+ | ProjectRepository
>();
if ((yield* envService.getEnv("NEXT_PHASE")) !== "phase-production-build") {
@@ -446,7 +450,108 @@ export const routes = (app: HonoAppType) =>
* SchedulerController Routes
*/
- .route("/scheduler", schedulerController.app)
+ .get("/scheduler/jobs", async (c) => {
+ const response = await effectToResponse(
+ c,
+ schedulerController.getJobs().pipe(Effect.provide(runtime)),
+ );
+ return response;
+ })
+
+ .post(
+ "/scheduler/jobs",
+ zValidator(
+ "json",
+ z.object({
+ name: z.string(),
+ schedule: z.discriminatedUnion("type", [
+ z.object({
+ type: z.literal("cron"),
+ expression: z.string(),
+ }),
+ z.object({
+ type: z.literal("fixed"),
+ delayMs: z.number().int().positive(),
+ oneTime: z.boolean(),
+ }),
+ ]),
+ message: z.object({
+ content: z.string(),
+ projectId: z.string(),
+ baseSessionId: z.string().nullable(),
+ }),
+ enabled: z.boolean().default(true),
+ concurrencyPolicy: z.enum(["skip", "run"]).default("skip"),
+ }),
+ ),
+ async (c) => {
+ const response = await effectToResponse(
+ c,
+ schedulerController
+ .addJob({
+ job: c.req.valid("json"),
+ })
+ .pipe(Effect.provide(runtime)),
+ );
+ return response;
+ },
+ )
+
+ .patch(
+ "/scheduler/jobs/:id",
+ zValidator(
+ "json",
+ z.object({
+ name: z.string().optional(),
+ schedule: z
+ .discriminatedUnion("type", [
+ z.object({
+ type: z.literal("cron"),
+ expression: z.string(),
+ }),
+ z.object({
+ type: z.literal("fixed"),
+ delayMs: z.number().int().positive(),
+ oneTime: z.boolean(),
+ }),
+ ])
+ .optional(),
+ message: z
+ .object({
+ content: z.string(),
+ projectId: z.string(),
+ baseSessionId: z.string().nullable(),
+ })
+ .optional(),
+ enabled: z.boolean().optional(),
+ concurrencyPolicy: z.enum(["skip", "run"]).optional(),
+ }),
+ ),
+ async (c) => {
+ const response = await effectToResponse(
+ c,
+ schedulerController
+ .updateJob({
+ id: c.req.param("id"),
+ job: c.req.valid("json"),
+ })
+ .pipe(Effect.provide(runtime)),
+ );
+ return response;
+ },
+ )
+
+ .delete("/scheduler/jobs/:id", async (c) => {
+ const response = await effectToResponse(
+ c,
+ schedulerController
+ .deleteJob({
+ id: c.req.param("id"),
+ })
+ .pipe(Effect.provide(runtime)),
+ );
+ return response;
+ })
/**
* FileSystemController Routes
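
For reference, a hypothetical client call matching the `zValidator` schema above. The request shape is as of this patch (a later patch in the series moves `concurrencyPolicy` into the cron schedule and replaces the `fixed` type with `reserved`), and the `/api` prefix is an assumption based on the Next.js catch-all route that mounts these handlers:

```ts
// Hypothetical: create a daily cron job via the new scheduler endpoint.
const createDailyJob = async () => {
  const res = await fetch("/api/scheduler/jobs", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      name: "Daily summary",
      schedule: { type: "cron", expression: "0 9 * * *" },
      message: {
        content: "Summarize yesterday's sessions",
        projectId: "example-project-id", // illustrative project id
        baseSessionId: null,
      },
      enabled: true,
      concurrencyPolicy: "skip",
    }),
  });

  if (!res.ok) {
    throw new Error(`Failed to create job: ${res.status}`);
  }
  return res.json(); // 201 with the created SchedulerJob
};
```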
From 3b245cf18c0445abde84d6828a7c868fa2140693 Mon Sep 17 00:00:00 2001
From: d-kimsuon
Date: Sat, 25 Oct 2025 17:56:46 +0900
Subject: [PATCH 4/4] fix bugs after manual check
---
src/app/api/[[...route]]/route.ts | 2 +
.../sessionSidebar/SchedulerTab.tsx | 14 +-
.../scheduler/SchedulerJobDialog.tsx | 242 ++++++++----------
src/server/core/scheduler/config.test.ts | 19 +-
src/server/core/scheduler/config.ts | 13 +-
src/server/core/scheduler/domain/Job.test.ts | 153 ++++++-----
src/server/core/scheduler/domain/Job.ts | 44 ++--
.../core/scheduler/domain/Scheduler.test.ts | 88 ++++---
src/server/core/scheduler/domain/Scheduler.ts | 106 +++++---
src/server/core/scheduler/schema.ts | 21 +-
src/server/hono/route.ts | 60 +----
11 files changed, 383 insertions(+), 379 deletions(-)
diff --git a/src/app/api/[[...route]]/route.ts b/src/app/api/[[...route]]/route.ts
index 3d4d922..13072ff 100644
--- a/src/app/api/[[...route]]/route.ts
+++ b/src/app/api/[[...route]]/route.ts
@@ -16,6 +16,7 @@ import { GitService } from "../../../server/core/git/services/GitService";
import { ProjectRepository } from "../../../server/core/project/infrastructure/ProjectRepository";
import { ProjectController } from "../../../server/core/project/presentation/ProjectController";
import { ProjectMetaService } from "../../../server/core/project/services/ProjectMetaService";
+import { SchedulerConfigBaseDir } from "../../../server/core/scheduler/config";
import { SchedulerService } from "../../../server/core/scheduler/domain/Scheduler";
import { SchedulerController } from "../../../server/core/scheduler/presentation/SchedulerController";
import { SessionRepository } from "../../../server/core/session/infrastructure/SessionRepository";
@@ -57,6 +58,7 @@ await Effect.runPromise(
Effect.provide(ClaudeCodeService.Live),
Effect.provide(GitService.Live),
Effect.provide(SchedulerService.Live),
+ Effect.provide(SchedulerConfigBaseDir.Live),
)
.pipe(
/** Infrastructure */
diff --git a/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SchedulerTab.tsx b/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SchedulerTab.tsx
index ad64133..d66bcfd 100644
--- a/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SchedulerTab.tsx
+++ b/src/app/projects/[projectId]/sessions/[sessionId]/components/sessionSidebar/SchedulerTab.tsx
@@ -140,15 +140,11 @@ export const SchedulerTab: FC<{ projectId: string; sessionId: string }> = ({
if (job.schedule.type === "cron") {
return `Cron: ${job.schedule.expression}`;
}
- const hours = Math.floor(job.schedule.delayMs / 3600000);
- const minutes = Math.floor((job.schedule.delayMs % 3600000) / 60000);
- const timeStr =
- hours > 0
- ? `${hours}h ${minutes}m`
- : minutes > 0
- ? `${minutes}m`
- : `${job.schedule.delayMs}ms`;
- return `${job.schedule.oneTime ? "Once" : "Recurring"}: ${timeStr}`;
+ if (job.schedule.type === "reserved") {
+ const date = new Date(job.schedule.reservedExecutionTime);
+ return `Reserved: ${date.toLocaleString()}`;
+ }
+ return "Unknown schedule type";
};
const formatLastRun = (lastRunAt: string | null) => {
diff --git a/src/components/scheduler/SchedulerJobDialog.tsx b/src/components/scheduler/SchedulerJobDialog.tsx
index d044c00..03d1bd6 100644
--- a/src/components/scheduler/SchedulerJobDialog.tsx
+++ b/src/components/scheduler/SchedulerJobDialog.tsx
@@ -1,7 +1,7 @@
"use client";
import { Trans, useLingui } from "@lingui/react";
-import { type FC, useCallback, useEffect, useState } from "react";
+import { type FC, useEffect, useState } from "react";
import { InlineCompletion } from "@/app/projects/[projectId]/components/chatForm/InlineCompletion";
import { useMessageCompletion } from "@/app/projects/[projectId]/components/chatForm/useMessageCompletion";
import { Button } from "@/components/ui/button";
@@ -40,8 +40,6 @@ export interface SchedulerJobDialogProps {
isSubmitting?: boolean;
}
-type DelayUnit = "minutes" | "hours" | "days";
-
export const SchedulerJobDialog: FC<SchedulerJobDialogProps> = ({
open,
onOpenChange,
@@ -53,10 +51,13 @@ export const SchedulerJobDialog: FC = ({
const { _, i18n } = useLingui();
const [name, setName] = useState("");
- const [scheduleType, setScheduleType] = useState<"cron" | "fixed">("cron");
+ const [scheduleType, setScheduleType] = useState<"cron" | "reserved">("cron");
const [cronExpression, setCronExpression] = useState("0 9 * * *");
- const [delayValue, setDelayValue] = useState(60); // 60 minutes default
- const [delayUnit, setDelayUnit] = useState("minutes");
+ const [reservedDateTime, setReservedDateTime] = useState(() => {
+ const now = new Date();
+ now.setHours(now.getHours() + 1);
+ return now.toISOString().slice(0, 16);
+ });
const [messageContent, setMessageContent] = useState("");
const [enabled, setEnabled] = useState(true);
const [concurrencyPolicy, setConcurrencyPolicy] = useState<"skip" | "run">(
@@ -66,36 +67,6 @@ export const SchedulerJobDialog: FC = ({
// Message completion hook
const completion = useMessageCompletion();
- // Convert delay value and unit to milliseconds
- const delayToMs = useCallback((value: number, unit: DelayUnit): number => {
- switch (unit) {
- case "minutes":
- return value * 60 * 1000;
- case "hours":
- return value * 60 * 60 * 1000;
- case "days":
- return value * 24 * 60 * 60 * 1000;
- }
- }, []);
-
- // Convert milliseconds to delay value and unit
- const msToDelay = useCallback(
- (ms: number): { value: number; unit: DelayUnit } => {
- const minutes = ms / (60 * 1000);
- const hours = ms / (60 * 60 * 1000);
- const days = ms / (24 * 60 * 60 * 1000);
-
- if (days >= 1 && days === Math.floor(days)) {
- return { value: days, unit: "days" };
- }
- if (hours >= 1 && hours === Math.floor(hours)) {
- return { value: hours, unit: "hours" };
- }
- return { value: minutes, unit: "minutes" };
- },
- [],
- );
-
// Initialize form with job data when editing
useEffect(() => {
if (job) {
@@ -103,42 +74,82 @@ export const SchedulerJobDialog: FC = ({
setScheduleType(job.schedule.type);
if (job.schedule.type === "cron") {
setCronExpression(job.schedule.expression);
- } else {
- const { value, unit } = msToDelay(job.schedule.delayMs);
- setDelayValue(value);
- setDelayUnit(unit);
+ setConcurrencyPolicy(job.schedule.concurrencyPolicy);
+ } else if (job.schedule.type === "reserved") {
+ // Convert UTC time to local time for display
+ const date = new Date(job.schedule.reservedExecutionTime);
+ const year = date.getFullYear();
+ const month = String(date.getMonth() + 1).padStart(2, "0");
+ const day = String(date.getDate()).padStart(2, "0");
+ const hours = String(date.getHours()).padStart(2, "0");
+ const minutes = String(date.getMinutes()).padStart(2, "0");
+ setReservedDateTime(`${year}-${month}-${day}T${hours}:${minutes}`);
}
setMessageContent(job.message.content);
setEnabled(job.enabled);
- setConcurrencyPolicy(job.concurrencyPolicy);
} else {
// Reset form for new job
setName("");
setScheduleType("cron");
setCronExpression("0 9 * * *");
- setDelayValue(60);
- setDelayUnit("minutes");
+ const now = new Date();
+ now.setHours(now.getHours() + 1);
+ const year = now.getFullYear();
+ const month = String(now.getMonth() + 1).padStart(2, "0");
+ const day = String(now.getDate()).padStart(2, "0");
+ const hours = String(now.getHours()).padStart(2, "0");
+ const minutes = String(now.getMinutes()).padStart(2, "0");
+ setReservedDateTime(`${year}-${month}-${day}T${hours}:${minutes}`);
setMessageContent("");
setEnabled(true);
setConcurrencyPolicy("skip");
}
- }, [job, msToDelay]);
+ }, [job]);
const handleSubmit = () => {
- const delayMs = delayToMs(delayValue, delayUnit);
const newJob: NewSchedulerJob = {
name,
schedule:
scheduleType === "cron"
- ? { type: "cron", expression: cronExpression }
- : { type: "fixed", delayMs, oneTime: true },
+ ? {
+ type: "cron",
+ expression: cronExpression,
+ concurrencyPolicy,
+ }
+ : {
+ type: "reserved",
+ // datetime-local returns "YYYY-MM-DDTHH:mm" in local time
+ // We need to treat this as local time and convert to UTC
+ reservedExecutionTime: (() => {
+ // datetime-local format: "YYYY-MM-DDTHH:mm"
+ // Parse as local time and convert to ISO string (UTC)
+ const match = reservedDateTime.match(
+ /^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2})$/,
+ );
+ if (!match) {
+ throw new Error("Invalid datetime format");
+ }
+ const year = Number(match[1]);
+ const month = Number(match[2]);
+ const day = Number(match[3]);
+ const hours = Number(match[4]);
+ const minutes = Number(match[5]);
+ const localDate = new Date(
+ year,
+ month - 1,
+ day,
+ hours,
+ minutes,
+ );
+ return localDate.toISOString();
+ })(),
+ },
message: {
content: messageContent,
projectId,
baseSessionId: null,
},
enabled,
- concurrencyPolicy,
};
onSubmit(newJob);
@@ -220,7 +231,7 @@ export const SchedulerJobDialog: FC = ({
+ onValueChange={(value: "cron" | "reserved") =>
setScheduleType(value)
}
disabled={isSubmitting}
@@ -235,10 +246,10 @@ export const SchedulerJobDialog: FC = ({
message="定期実行 (Cron)"
/>
-
+
@@ -253,52 +264,23 @@ export const SchedulerJobDialog: FC = ({
/>
) : (
-
-
-
-
-
- setDelayValue(Number.parseInt(e.target.value, 10))
- }
- disabled={isSubmitting}
- className="flex-1"
- placeholder="60"
+
+
- setDelayUnit(value)}
- disabled={isSubmitting}
- >
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
setReservedDateTime(e.target.value)}
+ disabled={isSubmitting}
+ />
@@ -358,40 +340,42 @@ export const SchedulerJobDialog: FC = ({
- {/* Concurrency Policy */}
-
-
-
-
-
- setConcurrencyPolicy(value)
- }
- disabled={isSubmitting}
- >
-
-
-
-
-
-
-
-
-
-
-
-
-
+ {/* Concurrency Policy (only for cron schedules) */}
+ {scheduleType === "cron" && (
+
+
+
+
+
+ setConcurrencyPolicy(value)
+ }
+ disabled={isSubmitting}
+ >
+
+
+
+
+
+
+
+
+
+
+
+
+
+ )}
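
The dialog now reads a reserved execution time from a `datetime-local` input, whose value is a local-time `YYYY-MM-DDTHH:mm` string, while the server stores a UTC ISO string. A small sketch of the two conversions used above; the helper names are illustrative, the logic mirrors the inline code in the hunk:

```ts
// datetime-local value ("YYYY-MM-DDTHH:mm", local time) -> UTC ISO string for the API.
const localInputToUtcIso = (value: string): string => {
  const match = value.match(/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2})$/);
  if (!match) {
    throw new Error("Invalid datetime format");
  }
  const year = Number(match[1]);
  const month = Number(match[2]);
  const day = Number(match[3]);
  const hours = Number(match[4]);
  const minutes = Number(match[5]);
  // Date(...) interprets these parts as local time; toISOString() converts to UTC.
  return new Date(year, month - 1, day, hours, minutes).toISOString();
};

// UTC ISO string from the API -> local "YYYY-MM-DDTHH:mm" for pre-filling the input.
const utcIsoToLocalInput = (iso: string): string => {
  const d = new Date(iso);
  const pad = (n: number) => String(n).padStart(2, "0");
  return `${d.getFullYear()}-${pad(d.getMonth() + 1)}-${pad(d.getDate())}T${pad(d.getHours())}:${pad(d.getMinutes())}`;
};
```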
diff --git a/src/server/core/scheduler/config.test.ts b/src/server/core/scheduler/config.test.ts
index b483041..c83da96 100644
--- a/src/server/core/scheduler/config.test.ts
+++ b/src/server/core/scheduler/config.test.ts
@@ -9,17 +9,29 @@ import {
getConfigPath,
initializeConfig,
readConfig,
+ SchedulerConfigBaseDir,
writeConfig,
} from "./config";
import type { SchedulerConfig } from "./schema";
describe("scheduler config", () => {
let testDir: string;
- const testLayer = Layer.mergeAll(NodeFileSystem.layer, NodePath.layer);
+ let testLayer: Layer.Layer<
+ FileSystem.FileSystem | Path.Path | SchedulerConfigBaseDir
+ >;
beforeEach(async () => {
testDir = join(tmpdir(), `scheduler-test-${Date.now()}`);
await mkdir(testDir, { recursive: true });
+
+ // Use test directory as base for config files
+ const testConfigBaseDir = Layer.succeed(SchedulerConfigBaseDir, testDir);
+
+ testLayer = Layer.mergeAll(
+ NodeFileSystem.layer,
+ NodePath.layer,
+ testConfigBaseDir,
+ );
});
afterEach(async () => {
@@ -31,7 +43,8 @@ describe("scheduler config", () => {
getConfigPath.pipe(Effect.provide(testLayer)),
);
- expect(result).toContain(".claude-code-viewer/scheduler/config.json");
+ expect(result).toContain("/scheduler/schedules.json");
+ expect(result).toContain(testDir);
});
test("writeConfig and readConfig work correctly", async () => {
@@ -43,6 +56,7 @@ describe("scheduler config", () => {
schedule: {
type: "cron",
expression: "0 0 * * *",
+ concurrencyPolicy: "skip",
},
message: {
content: "test message",
@@ -50,7 +64,6 @@ describe("scheduler config", () => {
baseSessionId: null,
},
enabled: true,
- concurrencyPolicy: "skip",
createdAt: "2025-10-25T00:00:00Z",
lastRunAt: null,
lastRunStatus: null,
diff --git a/src/server/core/scheduler/config.ts b/src/server/core/scheduler/config.ts
index 69a3005..83e42f6 100644
--- a/src/server/core/scheduler/config.ts
+++ b/src/server/core/scheduler/config.ts
@@ -1,6 +1,6 @@
import { homedir } from "node:os";
import { FileSystem, Path } from "@effect/platform";
-import { Data, Effect } from "effect";
+import { Context, Data, Effect, Layer } from "effect";
import { type SchedulerConfig, schedulerConfigSchema } from "./schema";
class ConfigFileNotFoundError extends Data.TaggedError(
@@ -15,11 +15,18 @@ class ConfigParseError extends Data.TaggedError("ConfigParseError")<{
}> {}
const CONFIG_DIR = "scheduler";
-const CONFIG_FILE = "config.json";
+const CONFIG_FILE = "schedules.json";
+
+// Service providing the config base directory (overridable in tests)
+export class SchedulerConfigBaseDir extends Context.Tag(
+ "SchedulerConfigBaseDir",
+)<SchedulerConfigBaseDir, string>() {
+ static Live = Layer.succeed(this, `${homedir()}/.claude-code-viewer`);
+}
export const getConfigPath = Effect.gen(function* () {
const path = yield* Path.Path;
- const baseDir = path.resolve(homedir(), ".claude-code-viewer");
+ const baseDir = yield* SchedulerConfigBaseDir;
return path.join(baseDir, CONFIG_DIR, CONFIG_FILE);
});
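
A sketch of how the new `SchedulerConfigBaseDir` tag can be swapped out for tests (the `/tmp/...` path and the inline `configPath` program are illustrative; the patched `getConfigPath` does the equivalent join through `Path.Path`):

```ts
import { homedir } from "node:os";
import { Context, Effect, Layer } from "effect";

// Stand-in for the tag defined above: its service value is the base directory
// string that getConfigPath joins with "scheduler/schedules.json".
class SchedulerConfigBaseDir extends Context.Tag("SchedulerConfigBaseDir")<
  SchedulerConfigBaseDir,
  string
>() {
  static Live = Layer.succeed(this, `${homedir()}/.claude-code-viewer`);
}

// Tests substitute a temporary directory instead of the home directory.
const TestBaseDir = Layer.succeed(SchedulerConfigBaseDir, "/tmp/scheduler-test");

const configPath = Effect.gen(function* () {
  const baseDir = yield* SchedulerConfigBaseDir;
  return `${baseDir}/scheduler/schedules.json`;
});

Effect.runPromise(configPath.pipe(Effect.provide(TestBaseDir))).then(console.log);
// -> "/tmp/scheduler-test/scheduler/schedules.json"
```
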
diff --git a/src/server/core/scheduler/domain/Job.test.ts b/src/server/core/scheduler/domain/Job.test.ts
index 7bb31ce..c84d511 100644
--- a/src/server/core/scheduler/domain/Job.test.ts
+++ b/src/server/core/scheduler/domain/Job.test.ts
@@ -1,16 +1,19 @@
import { describe, expect, test } from "vitest";
import type { SchedulerJob } from "../schema";
-import { calculateFixedDelay, shouldExecuteJob } from "./Job";
+import { calculateReservedDelay, shouldExecuteJob } from "./Job";
describe("shouldExecuteJob", () => {
test("returns false when job is disabled", () => {
const job: SchedulerJob = {
id: "test-job",
name: "Test Job",
- schedule: { type: "cron", expression: "* * * * *" },
+ schedule: {
+ type: "cron",
+ expression: "* * * * *",
+ concurrencyPolicy: "skip",
+ },
message: { content: "test", projectId: "proj-1", baseSessionId: null },
enabled: false,
- concurrencyPolicy: "skip",
createdAt: "2025-10-25T00:00:00Z",
lastRunAt: null,
lastRunStatus: null,
@@ -23,10 +26,13 @@ describe("shouldExecuteJob", () => {
const job: SchedulerJob = {
id: "test-job",
name: "Test Job",
- schedule: { type: "cron", expression: "* * * * *" },
+ schedule: {
+ type: "cron",
+ expression: "* * * * *",
+ concurrencyPolicy: "skip",
+ },
message: { content: "test", projectId: "proj-1", baseSessionId: null },
enabled: true,
- concurrencyPolicy: "skip",
createdAt: "2025-10-25T00:00:00Z",
lastRunAt: null,
lastRunStatus: null,
@@ -35,14 +41,16 @@ describe("shouldExecuteJob", () => {
expect(shouldExecuteJob(job, new Date())).toBe(true);
});
- test("returns false for oneTime fixed job that has already run", () => {
+ test("returns false for reserved job that has already run", () => {
const job: SchedulerJob = {
id: "test-job",
name: "Test Job",
- schedule: { type: "fixed", delayMs: 60000, oneTime: true },
+ schedule: {
+ type: "reserved",
+ reservedExecutionTime: "2025-10-25T00:01:00Z",
+ },
message: { content: "test", projectId: "proj-1", baseSessionId: null },
enabled: true,
- concurrencyPolicy: "skip",
createdAt: "2025-10-25T00:00:00Z",
lastRunAt: "2025-10-25T00:01:00Z",
lastRunStatus: "success",
@@ -51,18 +59,19 @@ describe("shouldExecuteJob", () => {
expect(shouldExecuteJob(job, new Date())).toBe(false);
});
- test("returns false for oneTime fixed job when scheduled time has not arrived", () => {
- const createdAt = new Date("2025-10-25T00:00:00Z");
+ test("returns false for reserved job when scheduled time has not arrived", () => {
const now = new Date("2025-10-25T00:00:30Z");
const job: SchedulerJob = {
id: "test-job",
name: "Test Job",
- schedule: { type: "fixed", delayMs: 60000, oneTime: true },
+ schedule: {
+ type: "reserved",
+ reservedExecutionTime: "2025-10-25T00:01:00Z",
+ },
message: { content: "test", projectId: "proj-1", baseSessionId: null },
enabled: true,
- concurrencyPolicy: "skip",
- createdAt: createdAt.toISOString(),
+ createdAt: "2025-10-25T00:00:00Z",
lastRunAt: null,
lastRunStatus: null,
};
@@ -70,98 +79,88 @@ describe("shouldExecuteJob", () => {
expect(shouldExecuteJob(job, now)).toBe(false);
});
- test("returns true for oneTime fixed job when scheduled time has arrived", () => {
- const createdAt = new Date("2025-10-25T00:00:00Z");
+ test("returns true for reserved job when scheduled time has arrived", () => {
const now = new Date("2025-10-25T00:01:01Z");
const job: SchedulerJob = {
id: "test-job",
name: "Test Job",
- schedule: { type: "fixed", delayMs: 60000, oneTime: true },
+ schedule: {
+ type: "reserved",
+ reservedExecutionTime: "2025-10-25T00:01:00Z",
+ },
message: { content: "test", projectId: "proj-1", baseSessionId: null },
enabled: true,
- concurrencyPolicy: "skip",
- createdAt: createdAt.toISOString(),
+ createdAt: "2025-10-25T00:00:00Z",
lastRunAt: null,
lastRunStatus: null,
};
expect(shouldExecuteJob(job, now)).toBe(true);
});
-
- test("returns true for recurring fixed job", () => {
- const job: SchedulerJob = {
- id: "test-job",
- name: "Test Job",
- schedule: { type: "fixed", delayMs: 60000, oneTime: false },
- message: { content: "test", projectId: "proj-1", baseSessionId: null },
- enabled: true,
- concurrencyPolicy: "skip",
- createdAt: "2025-10-25T00:00:00Z",
- lastRunAt: null,
- lastRunStatus: null,
- };
-
- expect(shouldExecuteJob(job, new Date())).toBe(true);
- });
});
-describe("calculateFixedDelay", () => {
+describe("calculateReservedDelay", () => {
test("calculates delay correctly for future scheduled time", () => {
- const createdAt = new Date("2025-10-25T00:00:00Z");
const now = new Date("2025-10-25T00:00:30Z");
const job: SchedulerJob = {
id: "test-job",
name: "Test Job",
- schedule: { type: "fixed", delayMs: 60000, oneTime: true },
+ schedule: {
+ type: "reserved",
+ reservedExecutionTime: "2025-10-25T00:01:00Z",
+ },
message: { content: "test", projectId: "proj-1", baseSessionId: null },
enabled: true,
- concurrencyPolicy: "skip",
- createdAt: createdAt.toISOString(),
- lastRunAt: null,
- lastRunStatus: null,
- };
-
- const delay = calculateFixedDelay(job, now);
- expect(delay).toBe(30000);
- });
-
- test("returns 0 for past scheduled time", () => {
- const createdAt = new Date("2025-10-25T00:00:00Z");
- const now = new Date("2025-10-25T00:02:00Z");
-
- const job: SchedulerJob = {
- id: "test-job",
- name: "Test Job",
- schedule: { type: "fixed", delayMs: 60000, oneTime: true },
- message: { content: "test", projectId: "proj-1", baseSessionId: null },
- enabled: true,
- concurrencyPolicy: "skip",
- createdAt: createdAt.toISOString(),
- lastRunAt: null,
- lastRunStatus: null,
- };
-
- const delay = calculateFixedDelay(job, now);
- expect(delay).toBe(0);
- });
-
- test("throws error for non-fixed schedule type", () => {
- const job: SchedulerJob = {
- id: "test-job",
- name: "Test Job",
- schedule: { type: "cron", expression: "* * * * *" },
- message: { content: "test", projectId: "proj-1", baseSessionId: null },
- enabled: true,
- concurrencyPolicy: "skip",
createdAt: "2025-10-25T00:00:00Z",
lastRunAt: null,
lastRunStatus: null,
};
- expect(() => calculateFixedDelay(job, new Date())).toThrow(
- "Job schedule type must be fixed",
+ const delay = calculateReservedDelay(job, now);
+ expect(delay).toBe(30000);
+ });
+
+ test("returns 0 for past scheduled time", () => {
+ const now = new Date("2025-10-25T00:02:00Z");
+
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: {
+ type: "reserved",
+ reservedExecutionTime: "2025-10-25T00:01:00Z",
+ },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: true,
+ createdAt: "2025-10-25T00:00:00Z",
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ const delay = calculateReservedDelay(job, now);
+ expect(delay).toBe(0);
+ });
+
+ test("throws error for non-reserved schedule type", () => {
+ const job: SchedulerJob = {
+ id: "test-job",
+ name: "Test Job",
+ schedule: {
+ type: "cron",
+ expression: "* * * * *",
+ concurrencyPolicy: "skip",
+ },
+ message: { content: "test", projectId: "proj-1", baseSessionId: null },
+ enabled: true,
+ createdAt: "2025-10-25T00:00:00Z",
+ lastRunAt: null,
+ lastRunStatus: null,
+ };
+
+ expect(() => calculateReservedDelay(job, new Date())).toThrow(
+ "Job schedule type must be reserved",
);
});
});
diff --git a/src/server/core/scheduler/domain/Job.ts b/src/server/core/scheduler/domain/Job.ts
index 4537ae7..b985677 100644
--- a/src/server/core/scheduler/domain/Job.ts
+++ b/src/server/core/scheduler/domain/Job.ts
@@ -20,23 +20,15 @@ export const executeJob = (job: SchedulerJob) =>
);
}
- if (message.baseSessionId === null) {
- yield* lifeCycleService.startTask({
- baseSession: {
- cwd: project.meta.projectPath,
- projectId: message.projectId,
- sessionId: undefined,
- },
- userConfig,
- message: message.content,
- });
- } else {
- yield* lifeCycleService.continueTask({
- sessionProcessId: message.baseSessionId,
- message: message.content,
- baseSessionId: message.baseSessionId,
- });
- }
+ yield* lifeCycleService.startTask({
+ baseSession: {
+ cwd: project.meta.projectPath,
+ projectId: message.projectId,
+ sessionId: message.baseSessionId ?? undefined,
+ },
+ userConfig,
+ message: message.content,
+ });
});
export const shouldExecuteJob = (job: SchedulerJob, now: Date): boolean => {
@@ -48,26 +40,28 @@ export const shouldExecuteJob = (job: SchedulerJob, now: Date): boolean => {
return true;
}
- if (job.schedule.type === "fixed" && job.schedule.oneTime) {
+ if (job.schedule.type === "reserved") {
+    // Reserved jobs are one-time; skip if already executed
if (job.lastRunStatus !== null) {
return false;
}
- const createdAt = new Date(job.createdAt);
- const scheduledTime = new Date(createdAt.getTime() + job.schedule.delayMs);
+ const scheduledTime = new Date(job.schedule.reservedExecutionTime);
return now >= scheduledTime;
}
return true;
};
-export const calculateFixedDelay = (job: SchedulerJob, now: Date): number => {
- if (job.schedule.type !== "fixed") {
- throw new Error("Job schedule type must be fixed");
+export const calculateReservedDelay = (
+ job: SchedulerJob,
+ now: Date,
+): number => {
+ if (job.schedule.type !== "reserved") {
+ throw new Error("Job schedule type must be reserved");
}
- const createdAt = new Date(job.createdAt);
- const scheduledTime = new Date(createdAt.getTime() + job.schedule.delayMs);
+ const scheduledTime = new Date(job.schedule.reservedExecutionTime);
const delay = scheduledTime.getTime() - now.getTime();
return Math.max(0, delay);
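
The delay calculation above reduces to plain date arithmetic; a standalone sketch (the free-function signature is illustrative, while the patched `calculateReservedDelay` takes the whole `SchedulerJob`):

```ts
// Sketch of the reserved-delay arithmetic: reservedExecutionTime - now,
// clamped to zero when the scheduled instant is already in the past.
const reservedDelayMs = (reservedExecutionTime: string, now: Date): number =>
  Math.max(0, new Date(reservedExecutionTime).getTime() - now.getTime());

console.log(
  reservedDelayMs("2025-10-25T00:01:00Z", new Date("2025-10-25T00:00:30Z")),
); // 30000, matching the "future scheduled time" test above
console.log(
  reservedDelayMs("2025-10-25T00:01:00Z", new Date("2025-10-25T00:02:00Z")),
); // 0, matching the "past scheduled time" test above
```
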
diff --git a/src/server/core/scheduler/domain/Scheduler.test.ts b/src/server/core/scheduler/domain/Scheduler.test.ts
index 4059771..2c8452d 100644
--- a/src/server/core/scheduler/domain/Scheduler.test.ts
+++ b/src/server/core/scheduler/domain/Scheduler.test.ts
@@ -1,5 +1,5 @@
-import { mkdir, rm, unlink } from "node:fs/promises";
-import { homedir, tmpdir } from "node:os";
+import { mkdir, rm } from "node:fs/promises";
+import { tmpdir } from "node:os";
import { join } from "node:path";
import { NodeContext, NodeFileSystem, NodePath } from "@effect/platform-node";
import { Effect, Layer } from "effect";
@@ -9,6 +9,7 @@ import { ClaudeCodeSessionProcessService } from "../../claude-code/services/Clau
import { EnvService } from "../../platform/services/EnvService";
import { UserConfigService } from "../../platform/services/UserConfigService";
import { ProjectRepository } from "../../project/infrastructure/ProjectRepository";
+import { SchedulerConfigBaseDir } from "../config";
import type { NewSchedulerJob } from "../schema";
import { SchedulerService } from "./Scheduler";
@@ -66,37 +67,42 @@ describe("SchedulerService", () => {
getEnv: () => Effect.succeed(undefined),
} as never);
- const baseLayers = Layer.mergeAll(
- NodeFileSystem.layer,
- NodePath.layer,
- NodeContext.layer,
- mockSessionProcessService,
- mockLifeCycleService,
- mockProjectRepository,
- mockUserConfigService,
- mockEnvService,
- );
-
- const testLayer = Layer.mergeAll(SchedulerService.Live, baseLayers).pipe(
- Layer.provide(baseLayers),
- );
+  let testConfigBaseDir: Layer.Layer<SchedulerConfigBaseDir>;
+ let testLayer: Layer.Layer<
+ | import("@effect/platform").FileSystem.FileSystem
+ | import("@effect/platform").Path.Path
+ | import("@effect/platform-node").NodeContext.NodeContext
+ | ClaudeCodeSessionProcessService
+ | ClaudeCodeLifeCycleService
+ | ProjectRepository
+ | UserConfigService
+ | EnvService
+ | SchedulerConfigBaseDir
+ | SchedulerService
+ >;
beforeEach(async () => {
testDir = join(tmpdir(), `scheduler-test-${Date.now()}`);
await mkdir(testDir, { recursive: true });
- // Clean up existing config file
- const configPath = join(
- homedir(),
- ".claude-code-viewer",
- "scheduler",
- "config.json",
+ // Use test directory as base for config files
+ testConfigBaseDir = Layer.succeed(SchedulerConfigBaseDir, testDir);
+
+ const baseLayers = Layer.mergeAll(
+ NodeFileSystem.layer,
+ NodePath.layer,
+ NodeContext.layer,
+ mockSessionProcessService,
+ mockLifeCycleService,
+ mockProjectRepository,
+ mockUserConfigService,
+ mockEnvService,
+ testConfigBaseDir,
+ );
+
+ testLayer = Layer.mergeAll(SchedulerService.Live, baseLayers).pipe(
+ Layer.provideMerge(baseLayers),
);
- try {
- await unlink(configPath);
- } catch {
- // Ignore if file doesn't exist
- }
});
afterEach(async () => {
@@ -106,14 +112,17 @@ describe("SchedulerService", () => {
test("addJob creates a new job with generated id", async () => {
const newJob: NewSchedulerJob = {
name: "Test Job",
- schedule: { type: "cron", expression: "0 0 * * *" },
+ schedule: {
+ type: "cron",
+ expression: "0 0 * * *",
+ concurrencyPolicy: "skip",
+ },
message: {
content: "test message",
projectId: "project-1",
baseSessionId: null,
},
enabled: false,
- concurrencyPolicy: "skip",
};
const result = await Effect.runPromise(
@@ -134,14 +143,17 @@ describe("SchedulerService", () => {
test("getJobs returns all jobs", async () => {
const newJob: NewSchedulerJob = {
name: "Test Job",
- schedule: { type: "cron", expression: "0 0 * * *" },
+ schedule: {
+ type: "cron",
+ expression: "0 0 * * *",
+ concurrencyPolicy: "skip",
+ },
message: {
content: "test message",
projectId: "project-1",
baseSessionId: null,
},
enabled: false,
- concurrencyPolicy: "skip",
};
const result = await Effect.runPromise(
@@ -159,14 +171,17 @@ describe("SchedulerService", () => {
test("updateJob modifies an existing job", async () => {
const newJob: NewSchedulerJob = {
name: "Test Job",
- schedule: { type: "cron", expression: "0 0 * * *" },
+ schedule: {
+ type: "cron",
+ expression: "0 0 * * *",
+ concurrencyPolicy: "skip",
+ },
message: {
content: "test message",
projectId: "project-1",
baseSessionId: null,
},
enabled: false,
- concurrencyPolicy: "skip",
};
const result = await Effect.runPromise(
@@ -186,14 +201,17 @@ describe("SchedulerService", () => {
test("deleteJob removes a job", async () => {
const newJob: NewSchedulerJob = {
name: "Test Job",
- schedule: { type: "cron", expression: "0 0 * * *" },
+ schedule: {
+ type: "cron",
+ expression: "0 0 * * *",
+ concurrencyPolicy: "skip",
+ },
message: {
content: "test message",
projectId: "project-1",
baseSessionId: null,
},
enabled: false,
- concurrencyPolicy: "skip",
};
const result = await Effect.runPromise(
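
The test wiring above leans on `Layer.provideMerge` so the mocked base layers both feed `SchedulerService.Live` and stay visible to the test itself. A reduced sketch of that pattern with stand-in services (all names below are illustrative):

```ts
import { Context, Effect, Layer } from "effect";

// Stand-ins for a dependency layer and a service that requires it.
class BaseDir extends Context.Tag("BaseDir")<BaseDir, string>() {}
class ConfigReader extends Context.Tag("ConfigReader")<
  ConfigReader,
  { path: () => string }
>() {}

const ConfigReaderLive = Layer.effect(
  ConfigReader,
  Effect.gen(function* () {
    const dir = yield* BaseDir;
    return { path: () => `${dir}/scheduler/schedules.json` };
  }),
);

const baseLayers = Layer.succeed(BaseDir, "/tmp/scheduler-test");

// provideMerge satisfies ConfigReaderLive's requirement *and* re-exports
// BaseDir, so test code can still resolve it directly.
const testLayer = ConfigReaderLive.pipe(Layer.provideMerge(baseLayers));

const program = Effect.gen(function* () {
  const reader = yield* ConfigReader;
  const dir = yield* BaseDir; // still available thanks to provideMerge
  return `${reader.path()} (base: ${dir})`;
});

Effect.runPromise(program.pipe(Effect.provide(testLayer))).then(console.log);
```
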
diff --git a/src/server/core/scheduler/domain/Scheduler.ts b/src/server/core/scheduler/domain/Scheduler.ts
index bf61186..15269eb 100644
--- a/src/server/core/scheduler/domain/Scheduler.ts
+++ b/src/server/core/scheduler/domain/Scheduler.ts
@@ -1,4 +1,3 @@
-import { randomUUID } from "node:crypto";
import {
Context,
Cron,
@@ -10,6 +9,7 @@ import {
Ref,
Schedule,
} from "effect";
+import { ulid } from "ulid";
import type { InferEffect } from "../../../lib/effect/types";
import { initializeConfig, readConfig, writeConfig } from "../config";
import type {
@@ -18,7 +18,7 @@ import type {
SchedulerJob,
UpdateSchedulerJob,
} from "../schema";
-import { calculateFixedDelay, executeJob } from "./Job";
+import { calculateReservedDelay, executeJob } from "./Job";
class SchedulerJobNotFoundError extends Data.TaggedError(
"SchedulerJobNotFoundError",
@@ -74,42 +74,33 @@ const LayerImpl = Effect.gen(function* () {
yield* Ref.update(fibersRef, (fibers) =>
new Map(fibers).set(job.id, fiber),
);
- } else if (job.schedule.type === "fixed") {
- // For oneTime jobs, skip scheduling if already executed
- if (job.schedule.oneTime && job.lastRunStatus !== null) {
+ } else if (job.schedule.type === "reserved") {
+ // For reserved jobs, skip scheduling if already executed
+ if (job.lastRunStatus !== null) {
return;
}
- const delay = calculateFixedDelay(job, now);
+ const delay = calculateReservedDelay(job, now);
const delayDuration = Duration.millis(delay);
- if (job.schedule.oneTime) {
- const fiber = yield* Effect.delay(
- runJobWithConcurrencyControl(job),
- delayDuration,
- ).pipe(Effect.forkDaemon);
+ const fiber = yield* Effect.delay(
+ runJobWithConcurrencyControl(job),
+ delayDuration,
+ ).pipe(Effect.forkDaemon);
- yield* Ref.update(fibersRef, (fibers) =>
- new Map(fibers).set(job.id, fiber),
- );
- } else {
- const schedule = Schedule.spaced(delayDuration);
-
- const fiber = yield* Effect.repeat(
- runJobWithConcurrencyControl(job),
- schedule,
- ).pipe(Effect.forkDaemon);
-
- yield* Ref.update(fibersRef, (fibers) =>
- new Map(fibers).set(job.id, fiber),
- );
- }
+ yield* Ref.update(fibersRef, (fibers) =>
+ new Map(fibers).set(job.id, fiber),
+ );
}
});
const runJobWithConcurrencyControl = (job: SchedulerJob) =>
Effect.gen(function* () {
- if (job.concurrencyPolicy === "skip") {
+ // Check concurrency policy (only for cron jobs)
+ if (
+ job.schedule.type === "cron" &&
+ job.schedule.concurrencyPolicy === "skip"
+ ) {
const runningJobs = yield* Ref.get(runningJobsRef);
if (runningJobs.has(job.id)) {
return;
@@ -118,6 +109,35 @@ const LayerImpl = Effect.gen(function* () {
yield* Ref.update(runningJobsRef, (jobs) => new Set(jobs).add(job.id));
+ // For reserved jobs, delete after execution without updating status
+ if (job.schedule.type === "reserved") {
+ const result = yield* executeJob(job).pipe(
+ Effect.matchEffect({
+ onSuccess: () => Effect.void,
+ onFailure: () => Effect.void,
+ }),
+ );
+ yield* Ref.update(runningJobsRef, (jobs) => {
+ const newJobs = new Set(jobs);
+ newJobs.delete(job.id);
+ return newJobs;
+ });
+
+ // Delete reserved job after execution (skip fiber stop, just delete from config)
+ yield* deleteJobFromConfig(job.id).pipe(
+ Effect.catchAll((error) => {
+ console.error(
+ `[Scheduler] Failed to delete reserved job ${job.id}:`,
+ error,
+ );
+ return Effect.void;
+ }),
+ );
+
+ return result;
+ }
+
+ // For non-reserved jobs, update status
const result = yield* executeJob(job).pipe(
Effect.matchEffect({
onSuccess: () =>
@@ -223,7 +243,7 @@ const LayerImpl = Effect.gen(function* () {
);
const job: SchedulerJob = {
...newJob,
- id: randomUUID(),
+ id: ulid(),
createdAt: new Date().toISOString(),
lastRunAt: null,
lastRunStatus: null,
@@ -278,6 +298,29 @@ const LayerImpl = Effect.gen(function* () {
return updatedJob;
});
+ const deleteJobFromConfig = (jobId: string) =>
+ Effect.gen(function* () {
+ const config = yield* readConfig.pipe(
+ Effect.catchTags({
+ ConfigFileNotFoundError: () =>
+ initializeConfig.pipe(Effect.map(() => ({ jobs: [] }))),
+ ConfigParseError: () =>
+ initializeConfig.pipe(Effect.map(() => ({ jobs: [] }))),
+ }),
+ );
+ const job = config.jobs.find((j) => j.id === jobId);
+
+ if (job === undefined) {
+ return yield* Effect.fail(new SchedulerJobNotFoundError({ jobId }));
+ }
+
+ const updatedConfig: SchedulerConfig = {
+ jobs: config.jobs.filter((j) => j.id !== jobId),
+ };
+
+ yield* writeConfig(updatedConfig);
+ });
+
const deleteJob = (jobId: string) =>
Effect.gen(function* () {
const config = yield* readConfig.pipe(
@@ -295,12 +338,7 @@ const LayerImpl = Effect.gen(function* () {
}
yield* stopJob(jobId);
-
- const updatedConfig: SchedulerConfig = {
- jobs: config.jobs.filter((j) => j.id !== jobId),
- };
-
- yield* writeConfig(updatedConfig);
+ yield* deleteJobFromConfig(jobId);
});
return {
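
The reserved branch above boils down to "delay a one-shot effect, fork it as a daemon, remember the fiber". A reduced sketch of that pattern (names are illustrative; the real code also routes through `runJobWithConcurrencyControl` and deletes the job from config afterwards):

```ts
import { Duration, Effect, Fiber, Ref } from "effect";

const demo = Effect.gen(function* () {
  const fibersRef = yield* Ref.make(
    new Map<string, Fiber.RuntimeFiber<void>>(),
  );

  // Delay the one-shot job by the computed interval and fork it as a daemon
  // so it is not tied to the lifetime of the scheduling scope.
  const fiber = yield* Effect.delay(
    Effect.sync(() => console.log("reserved job fired")),
    Duration.millis(100),
  ).pipe(Effect.forkDaemon);

  // Remember the fiber so the job can be interrupted if it is deleted
  // before its reserved time arrives.
  yield* Ref.update(fibersRef, (fibers) => new Map(fibers).set("job-1", fiber));

  yield* Effect.sleep(Duration.millis(200));
});

Effect.runPromise(demo);
```
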
diff --git a/src/server/core/scheduler/schema.ts b/src/server/core/scheduler/schema.ts
index 7b9b5a5..11eb1d5 100644
--- a/src/server/core/scheduler/schema.ts
+++ b/src/server/core/scheduler/schema.ts
@@ -1,20 +1,23 @@
import { z } from "zod";
+// Concurrency policy (for cron jobs only)
+export const concurrencyPolicySchema = z.enum(["skip", "run"]);
+
// Schedule type discriminated union
export const cronScheduleSchema = z.object({
type: z.literal("cron"),
expression: z.string(),
+ concurrencyPolicy: concurrencyPolicySchema,
});
-export const fixedScheduleSchema = z.object({
- type: z.literal("fixed"),
- delayMs: z.number().int().positive(),
- oneTime: z.boolean(),
+export const reservedScheduleSchema = z.object({
+ type: z.literal("reserved"),
+ reservedExecutionTime: z.iso.datetime(),
});
export const scheduleSchema = z.discriminatedUnion("type", [
cronScheduleSchema,
- fixedScheduleSchema,
+ reservedScheduleSchema,
]);
// Message configuration
@@ -27,9 +30,6 @@ export const messageConfigSchema = z.object({
// Job status
export const jobStatusSchema = z.enum(["success", "failed"]);
-// Concurrency policy
-export const concurrencyPolicySchema = z.enum(["skip", "run"]);
-
// Scheduler job
export const schedulerJobSchema = z.object({
id: z.string(),
@@ -37,7 +37,6 @@ export const schedulerJobSchema = z.object({
schedule: scheduleSchema,
message: messageConfigSchema,
enabled: z.boolean(),
- concurrencyPolicy: concurrencyPolicySchema,
createdAt: z.string().datetime(),
lastRunAt: z.string().datetime().nullable(),
lastRunStatus: jobStatusSchema.nullable(),
@@ -50,7 +49,7 @@ export const schedulerConfigSchema = z.object({
// Type exports
export type CronSchedule = z.infer<typeof cronScheduleSchema>;
-export type FixedSchedule = z.infer<typeof fixedScheduleSchema>;
+export type ReservedSchedule = z.infer<typeof reservedScheduleSchema>;
export type Schedule = z.infer<typeof scheduleSchema>;
export type MessageConfig = z.infer<typeof messageConfigSchema>;
export type JobStatus = z.infer<typeof jobStatusSchema>;
@@ -68,7 +67,6 @@ export const newSchedulerJobSchema = schedulerJobSchema
})
.extend({
enabled: z.boolean().default(true),
- concurrencyPolicy: concurrencyPolicySchema.default("skip"),
});
export type NewSchedulerJob = z.infer<typeof newSchedulerJobSchema>;
@@ -79,7 +77,6 @@ export const updateSchedulerJobSchema = schedulerJobSchema.partial().pick({
schedule: true,
message: true,
enabled: true,
- concurrencyPolicy: true,
});
export type UpdateSchedulerJob = z.infer<typeof updateSchedulerJobSchema>;
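
A self-contained sketch of the reshaped union for quick experimentation (it mirrors the schemas above, but uses `z.string().datetime()`, the Zod 3-style spelling of the `z.iso.datetime()` check used in the patch):

```ts
import { z } from "zod";

// concurrencyPolicy now lives on the cron variant only; reserved schedules
// carry an absolute ISO timestamp instead of delayMs/oneTime.
const scheduleSchema = z.discriminatedUnion("type", [
  z.object({
    type: z.literal("cron"),
    expression: z.string(),
    concurrencyPolicy: z.enum(["skip", "run"]),
  }),
  z.object({
    type: z.literal("reserved"),
    reservedExecutionTime: z.string().datetime(),
  }),
]);

// Accepted: a one-shot reserved schedule.
console.log(
  scheduleSchema.safeParse({
    type: "reserved",
    reservedExecutionTime: "2025-10-25T00:01:00Z",
  }).success,
); // true

// Rejected: the old "fixed" shape no longer matches any variant.
console.log(
  scheduleSchema.safeParse({ type: "fixed", delayMs: 60000, oneTime: true })
    .success,
); // false
```
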
diff --git a/src/server/hono/route.ts b/src/server/hono/route.ts
index 8d07fdf..b7b3229 100644
--- a/src/server/hono/route.ts
+++ b/src/server/hono/route.ts
@@ -19,7 +19,12 @@ import { EnvService } from "../core/platform/services/EnvService";
import { UserConfigService } from "../core/platform/services/UserConfigService";
import type { ProjectRepository } from "../core/project/infrastructure/ProjectRepository";
import { ProjectController } from "../core/project/presentation/ProjectController";
+import type { SchedulerConfigBaseDir } from "../core/scheduler/config";
import { SchedulerController } from "../core/scheduler/presentation/SchedulerController";
+import {
+ newSchedulerJobSchema,
+ updateSchedulerJobSchema,
+} from "../core/scheduler/schema";
import type { VirtualConversationDatabase } from "../core/session/infrastructure/VirtualConversationDatabase";
import { SessionController } from "../core/session/presentation/SessionController";
import type { SessionMetaService } from "../core/session/services/SessionMetaService";
@@ -60,6 +65,7 @@ export const routes = (app: HonoAppType) =>
| UserConfigService
| ClaudeCodeLifeCycleService
| ProjectRepository
+ | SchedulerConfigBaseDir
>();
if ((yield* envService.getEnv("NEXT_PHASE")) !== "phase-production-build") {
@@ -460,30 +466,7 @@ export const routes = (app: HonoAppType) =>
.post(
"/scheduler/jobs",
- zValidator(
- "json",
- z.object({
- name: z.string(),
- schedule: z.discriminatedUnion("type", [
- z.object({
- type: z.literal("cron"),
- expression: z.string(),
- }),
- z.object({
- type: z.literal("fixed"),
- delayMs: z.number().int().positive(),
- oneTime: z.boolean(),
- }),
- ]),
- message: z.object({
- content: z.string(),
- projectId: z.string(),
- baseSessionId: z.string().nullable(),
- }),
- enabled: z.boolean().default(true),
- concurrencyPolicy: z.enum(["skip", "run"]).default("skip"),
- }),
- ),
+ zValidator("json", newSchedulerJobSchema),
async (c) => {
const response = await effectToResponse(
c,
@@ -499,34 +482,7 @@ export const routes = (app: HonoAppType) =>
.patch(
"/scheduler/jobs/:id",
- zValidator(
- "json",
- z.object({
- name: z.string().optional(),
- schedule: z
- .discriminatedUnion("type", [
- z.object({
- type: z.literal("cron"),
- expression: z.string(),
- }),
- z.object({
- type: z.literal("fixed"),
- delayMs: z.number().int().positive(),
- oneTime: z.boolean(),
- }),
- ])
- .optional(),
- message: z
- .object({
- content: z.string(),
- projectId: z.string(),
- baseSessionId: z.string().nullable(),
- })
- .optional(),
- enabled: z.boolean().optional(),
- concurrencyPolicy: z.enum(["skip", "run"]).optional(),
- }),
- ),
+ zValidator("json", updateSchedulerJobSchema),
async (c) => {
const response = await effectToResponse(
c,
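
With the route validators consolidated onto the shared schemas, a POST body only needs the fields `newSchedulerJobSchema` keeps. A sketch of such a request (the host, port, and `/api` prefix are assumptions, not taken from the patch):

```ts
// Illustrative payload matching newSchedulerJobSchema after this change:
// the schedule carries concurrencyPolicy inline, and enabled defaults to true.
const body = {
  name: "Nightly summary",
  schedule: {
    type: "cron",
    expression: "0 0 * * *",
    concurrencyPolicy: "skip",
  },
  message: {
    content: "summarize yesterday's sessions",
    projectId: "project-1",
    baseSessionId: null,
  },
};

const res = await fetch("http://localhost:3000/api/scheduler/jobs", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify(body),
});
console.log(res.status);
```
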