diff --git a/.gitignore b/.gitignore index 0f49e6d..aacdfc0 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ node_modules/ .DS_Store .vite/ coverage/ +CLAUDE.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c1aaf1..c8777ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **`graph.syncWith()`** - sync with HTTP peer or direct graph instance - **`graph.getWriterPatches(writerId)`** - public API for writer patch history +#### AUTOPILOT — Kill the Materialize Tax +- **Auto-invalidation** (`AP/INVAL/1-3`): `_stateDirty` flag tracks staleness. Local commits via `createPatch()`, `writer.commitPatch()`, and `PatchSession.commit()` eagerly apply patches to cached state — no stale reads after writes. +- **Auto-materialize** (`AP/LAZY/1-2`): `autoMaterialize: boolean` option on `WarpGraph.open()`. When enabled, query methods (`hasNode`, `getNodeProps`, `neighbors`, `getNodes`, `getEdges`, `query().run()`, `traverse.*`) auto-materialize instead of throwing. +- **Auto-checkpointing** (`AP/CKPT/1-3`): `checkpointPolicy: { every: N }` option on `WarpGraph.open()`. After `materialize()` processes N+ patches, a checkpoint is created automatically. Failures are swallowed — never breaks materialize. +- **Post-merge hook** (`AP/HOOK/1-2`): `post-merge` Git hook detects warp ref changes after `git pull` and prints a warning (or auto-materializes if `warp.autoMaterialize` git config is set). Installed via `scripts/hooks/` on `npm install`. +- **`git warp materialize`** CLI command: materializes and checkpoints all graphs (or a single graph with `--graph`). +- **`git warp install-hooks`** CLI command: installs/upgrades the post-merge hook with interactive conflict resolution. +- **ROADMAP.md** with task DAG and `scripts/roadmap.js` tracking tool. 
+ +#### Error Handling +- **`QueryError` with error codes**: State guard throws now use `QueryError` with `E_NO_STATE` (no cached state) and `E_STALE_STATE` (dirty state) instead of bare `Error`. + #### Query API (V7 Task 7) - **`graph.hasNode(nodeId)`** - Check if node exists in materialized state - **`graph.getNodeProps(nodeId)`** - Get all properties for a node as Map @@ -31,6 +43,13 @@ All query methods operate on `WarpStateV5` (materialized state), never commit DA - Index built from materialized state, not Git commit parents (TECH-SPEC-V7.md compliance) ### Changed +- **ESLint hardened** to zero-tolerance: `typescript-eslint` strict type-checked rules on `src/` and `bin/`, max-complexity 10, max-lines-per-function 50, max-depth 3 (with relaxations for algorithm-heavy modules). +- **`_ensureFreshState()`** now throws `E_STALE_STATE` when cached state is dirty and `autoMaterialize` is off (previously silently returned stale data). +- **`QueryBuilder.run()`** `where`/`select` loops parallelized with `Promise.all`. +- **`StreamingBitmapIndexBuilder.registerNode()`** returns `Promise` via `Promise.resolve()` for API compatibility. +- **`createCheckpoint()`** reuses cached state when fresh, guarded against recursive auto-checkpoint calls. +- **`execGitConfigValue`** in CLI uses `execFileSync` with argument array instead of shell string (prevents command injection). +- **`eslint.config.js`** uses `fileURLToPath`-based `__dirname` for broader Node.js compatibility. 
- **Repo ping** now uses `git rev-parse --is-inside-work-tree` for plumbing compatibility - **CLI imports** avoid eager `index.js` loading to suppress `url.parse` warnings from optional deps - **v7-guards.test.js** - Added `WarpStateIndexBuilder.js` to required V7 components @@ -39,6 +58,8 @@ All query methods operate on `WarpStateV5` (materialized state), never commit DA - **Git ref reads** guard missing refs to avoid fatal `show-ref` errors in empty repos ### Documentation +- **Complete JSDoc coverage** across 21 source files +- **ROADMAP.md** — consolidated task tracking with dependency DAG - **`docs/V7_TEST_MAPPING.md`** - Maps TECH-SPEC-V7.md Task 5 requirements to existing test files - Documents how existing tests cover WARP contracts (write, materialize, convergence, determinism) - Confirms legacy tests deleted (not skipped) @@ -47,9 +68,17 @@ All query methods operate on `WarpStateV5` (materialized state), never commit DA - Example imports clarified for external consumers ### Tests +- Added `test/unit/domain/WarpGraph.invalidation.test.js` (11 tests) — dirty flag + eager re-materialize +- Added `test/unit/domain/WarpGraph.writerInvalidation.test.js` (10 tests) — Writer API invalidation +- Added `test/unit/domain/WarpGraph.lazyMaterialize.test.js` (46 tests) — auto-materialize guard +- Added `test/unit/domain/WarpGraph.autoCheckpoint.test.js` (14 tests) — auto-checkpoint trigger +- Added `test/unit/domain/WarpGraph.autoMaterialize.test.js` (7 tests) — option validation +- Added `test/unit/domain/WarpGraph.checkpointPolicy.test.js` (9 tests) — option validation +- Added `test/unit/domain/WarpGraph.patchCount.test.js` (7 tests) — patch counter tracking +- Added `test/unit/domain/services/HookInstaller.test.js` (29 tests) — hook install/upgrade/append/replace - Added `test/unit/domain/WarpGraph.query.test.js` (21 tests) - Query API tests - Added `test/unit/domain/services/WarpStateIndexBuilder.test.js` (13 tests) - WARP state index tests -- Total test count: 
1438 +- Total test count: 1571 (67 test files) ## [6.0.0] - 2026-01-31 diff --git a/M1-M3-TASKS.md b/M1-M3-TASKS.md deleted file mode 100644 index f62d75a..0000000 --- a/M1-M3-TASKS.md +++ /dev/null @@ -1,94 +0,0 @@ -# M1–M3 Tasks Checklist - -> This checklist is the execution contract for Milestones 1–3. Keep it updated as tasks complete. - -## Global DoD -- [x] All JSON output is canonical and stable (sorted ordering) -- [x] Determinism test passes (double-run identical JSON) -- [x] No API ambiguity (match/select/labelFilter semantics exactly as specified) -- [x] Docs include 3 runnable examples: 2-hop query, shortestPath, CLI query+path - ---- - -## Milestone 1 — Fluent Query Builder (MVP) - -### M1.1 graph.query() builder -- [x] Implement fluent builder: match → where → outgoing/incoming → select → run -- [x] Enforce match(pattern: string) only; non-string throws E_QUERY_MATCH_TYPE -- [x] Define multi-hop semantics: each hop operates on current working set -- [x] where() filters nodes only; predicate receives read-only snapshot -- [x] Predicate snapshot uses plain objects/arrays (no live Maps) -- [x] Run auto-materializes (M0.1) -- [x] Determinism: node sets canonical sort by id each step -- [x] Edge snapshot ordering canonical: (label, peerId) -- [x] Tests: two-hop traversal, chaining order matters, mutation does not affect results - -### M1.2 Pattern matching (nodeId glob) -- [x] match supports glob string (case-sensitive) -- [x] Define wildcard semantics for * -- [x] Tests: user:* matches all user nodes; * matches all - -### M1.3 Result shaping (DTO) -- [x] Default QueryResult DTO: { stateHash, nodes: [{ id, props? 
}] } -- [x] select(['id','props']) only; unknown field throws E_QUERY_SELECT_FIELD -- [x] select([]) treated same as default (select not called) -- [x] Deterministic output ordering (nodes sorted by id) -- [x] Tests: select fields, unknown field error, default shape - ---- - -## Milestone 2 — Built-in Graph Traversal - -### M2.1 graph.traverse module -- [x] Implement bfs, dfs, shortestPath (unweighted), connectedComponent -- [x] dir enum: 'out' | 'in' | 'both' -- [x] labelFilter: string | string[] -- [x] labelFilter semantics: array = OR; empty array = none; undefined = all -- [x] Deterministic neighbor expansion order: (neighborId, label) -- [x] Deterministic shortestPath tie-break via canonical order -- [x] Tests: visit order, tie-break stability, labelFilter semantics - -### M2.2 Shared adjacency cache (materialized state owned) -- [x] Materialize returns/stores MaterializedGraph with adjacency + stateHash -- [x] Query/traverse consume MaterializedGraph adjacency -- [x] Cache bounded (configurable cap) -- [x] Tests: reuse adjacency for same stateHash; eviction under cap - -### M2.3 Naming cleanup: logical vs Git DAG -- [x] Rename Git DAG traversal to CommitDagTraversal* -- [x] Keep deprecated alias for 1 minor version -- [x] Update exports/imports/comments for clarity - ---- - -## Milestone 3 — CLI Tool (warp-graph) - -### M3.1 CLI skeleton -- [x] CLI entrypoint + routing (info, query, path, history, check) -- [x] Flags: --repo, --json (all commands) -- [x] Exit codes: 0 ok, 1 usage/config, 2 not found/no path, 3 internal - -### M3.2 Reuse query + traversal -- [x] CLI query calls M1.1 builder -- [x] CLI path calls M2.1 shortestPath -- [x] Human output derived from JSON - -### M3.3 Health & GC visibility -- [x] 30-min audit of existing health/GC metrics -- [x] If missing: scope to checkpoint freshness + writer heads + tombstone counts -- [x] Surface in CLI check output (JSON first) - -### M3.4 CLI history definition (MVP) -- [x] history --writer shows writer 
patch chain -- [x] optional: --node best-effort filter - ---- - -## Docs -- [x] Add 3 runnable examples (2-hop query, shortestPath, CLI query+path) - ---- - -## Extras (post-DoD) -- [x] Add CLI smoke tests (info, query, path, history, check) -- [x] Tighten CLI info output (writer counts + checkpoint/coverage when scoped) diff --git a/M4-M6-TASKS.md b/M4-M6-TASKS.md deleted file mode 100644 index efd168b..0000000 --- a/M4-M6-TASKS.md +++ /dev/null @@ -1,63 +0,0 @@ -# Milestones 4–6 Tasks Checklist - -## Milestone 4 — One-line Sync Transport - -### M4.1 — graph.serve({ port }) -- [x] Implement `graph.serve({ port, host?, path?, maxRequestBytes? })` returning `{ close(), url }`. -- [x] Built-in HTTP server; POST /sync JSON request/response. -- [x] Content-Type check (accept missing), request size limit, proper status codes. -- [x] Canonical JSON response ordering. -- [x] Tests: success, invalid JSON, size limits. -- [x] Docs + CHANGELOG updated. - -### M4.2 — graph.syncWith(remote) -- [x] API: `await graph.syncWith(remote, opts?)` where remote is URL string or WarpGraph instance. -- [x] HTTP mode: createSyncRequest → POST → applySyncResponse → materialize if needed. -- [x] Direct peer mode: `otherGraph.processSyncRequest(req)`. -- [x] Retry/backoff with jitter; configurable `{ retries, baseDelayMs, maxDelayMs, timeoutMs }`. -- [x] Status callbacks with stable events and attempt numbers. -- [x] Deterministic behavior (no ordering dependence on wall-clock). -- [x] Tests: HTTP success, direct peer, retries, invalid URL / 4xx no-retry. -- [x] Docs + CHANGELOG updated. - -### M4.3 — No-coordination regression suite -- [x] Tests enforce: no merge commits in writer refs after sync cycles. -- [x] Tests enforce: commit path does not read other writers’ heads. -- [x] Fuzz random sync/write interleavings; invariants hold. -- [x] CI coverage + CONTRIBUTING note. 
- -## Milestone 5 — Edge Properties - -### M5.1 — EdgePropKey encoding -- [ ] Encode/decode utilities with injective, reversible, deterministic rules. -- [ ] Tests: round-trip, separators, fuzz domain. - -### M5.2 — Patch ops for edge properties -- [ ] API: addEdge(props?) and/or setEdgeProperty(). -- [ ] LWW semantics; stored in prop map via EdgePropKey. -- [ ] Materialization: getEdge returns props. -- [ ] Tests: add/update, concurrent LWW, determinism. - -### M5.3 — Visibility rules -- [ ] Edge props visible iff edge visible. -- [ ] Tests: remove edge hides props; re-add behavior defined. - -### M5.4 — Schema v3 + compatibility -- [ ] Schema bump with reader compatibility for v2. -- [ ] Mixed-version sync safety. -- [ ] Tests: v2/v3 load + sync; unsupported schema errors. - -## Milestone 6 — Subscriptions / Reactivity - -### M6.1 — State diff engine -- [ ] Deterministic diff of materialized states (nodes/edges/props). -- [ ] Tests: expected diff output, determinism, perf sanity. - -### M6.2 — graph.subscribe({ handlers }) -- [ ] Subscribe/unsubscribe; optional initial replay. -- [ ] Error isolation; onError or aggregated errors. -- [ ] Tests: handler firing + unsubscribe. - -### M6.3 — graph.watch(pattern) -- [ ] Pattern-based filtering using glob semantics. -- [ ] Tests: watched node/edge changes only. 
diff --git a/README.md b/README.md index beed1c2..3a76ca9 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ const graph = await WarpGraph.open({ persistence, graphName: 'demo', writerId: 'writer-1', + autoMaterialize: true, // auto-materialize on query }); // Write data using the patch builder @@ -37,9 +38,6 @@ await (await graph.createPatch()) .addEdge('user:alice', 'user:bob', 'manages') .commit(); -// Materialize state from all writers -await graph.materialize(); - // Query the graph const result = await graph.query() .match('user:*') @@ -90,8 +88,8 @@ await (await graphB.createPatch()) // After git push/pull, materialize merges both writers const state = await graphA.materialize(); -graphA.hasNode('doc:1'); // true -graphA.hasNode('doc:2'); // true +await graphA.hasNode('doc:1'); // true +await graphA.hasNode('doc:2'); // true ``` ### HTTP Sync @@ -115,18 +113,18 @@ await graphA.syncWith(graphB); ## Querying -All query methods require a materialized state. Call `materialize()` first. +Query methods require materialized state. Either call `materialize()` first, or pass `autoMaterialize: true` to `WarpGraph.open()` to handle this automatically. 
### Simple Queries ```javascript await graph.materialize(); -graph.getNodes(); // ['user:alice', 'user:bob'] -graph.hasNode('user:alice'); // true -graph.getNodeProps('user:alice'); // Map { 'name' => 'Alice', 'role' => 'admin' } -graph.neighbors('user:alice', 'outgoing'); // [{ nodeId: 'user:bob', label: 'manages', direction: 'outgoing' }] -graph.getEdges(); // [{ from: 'user:alice', to: 'user:bob', label: 'manages' }] +await graph.getNodes(); // ['user:alice', 'user:bob'] +await graph.hasNode('user:alice'); // true +await graph.getNodeProps('user:alice'); // Map { 'name' => 'Alice', 'role' => 'admin' } +await graph.neighbors('user:alice', 'outgoing'); // [{ nodeId: 'user:bob', label: 'manages', direction: 'outgoing' }] +await graph.getEdges(); // [{ from: 'user:alice', to: 'user:bob', label: 'manages' }] ``` ### Fluent Query Builder @@ -194,6 +192,14 @@ await graph.createCheckpoint(); // GC removes tombstones when safe const metrics = graph.getGCMetrics(); const { ran, result } = graph.maybeRunGC(); + +// Or configure automatic checkpointing +const graph = await WarpGraph.open({ + persistence, + graphName: 'demo', + writerId: 'writer-1', + checkpointPolicy: { every: 500 }, // auto-checkpoint every 500 patches +}); ``` ## CLI diff --git a/ROADMAP.md b/ROADMAP.md new file mode 100644 index 0000000..d7dcdcd --- /dev/null +++ b/ROADMAP.md @@ -0,0 +1,1892 @@ +# Roadmap + +> Execution plan for `@git-stunts/empty-graph` from v7.1.0 onward. +> Current version: v7.0.0. All milestones prior to this document (M1–M4) are complete. + +## How to Read This Document + +Every item has an identifier: `{MILESTONE}/{FEATURE}/{TASK}`. +Milestones have codenames and target version numbers. +No task exceeds 6 estimated human hours; larger work is decomposed. + +**Management:** Use `node scripts/roadmap.js` (or `npm run roadmap`) to track progress. Commands: `close`, `open`, `status`, `show`. 
+ +**Estimation conventions:** +- *Human Hours* = wall-clock hours for a developer familiar with the codebase. +- *LOC* = approximate lines of production code + test code combined. +- *Blocked by* references use full identifiers. + +--- + +## Versions + +### v7.1.0 — AUTOPILOT + +**Kill the Materialize Tax** + +Eliminates manual state freshness management. Cached state stays fresh after local writes, queries auto-materialize when needed, checkpoints happen automatically. + +**Features:** +- AP/INVAL — Auto-invalidation after local writes +- AP/LAZY — Lazy rematerialization on query +- AP/CKPT — Periodic auto-checkpointing +- AP/HOOK — Post-merge git hook for staleness notification + +**User-Facing Changes:** +- `createPatch().commit()` and `writer.commitPatch()` apply patches to cached state eagerly — no manual re-materialize needed after local writes. +- New `autoMaterialize: true` option on `WarpGraph.open()` — query methods auto-materialize when state is null or dirty. +- New `checkpointPolicy: { every: N }` option — checkpoints created automatically during materialization. +- New `post-merge` git hook warns when warp refs change during `git pull`. + +### v7.2.0 — GROUNDSKEEPER + +**Self-Managing Infrastructure** + +Indexes, GC, and frontier tracking manage themselves. + +**Features:** +- GK/IDX — Index staleness tracking +- GK/GC — Auto-GC after materialization +- GK/FRONTIER — Frontier change detection + +**User-Facing Changes:** +- Bitmap index stores frontier at build time; `loadIndex()` warns or auto-rebuilds when stale. +- `materialize()` automatically runs GC when tombstone ratio exceeds threshold. +- New `graph.hasFrontierChanged()` method for cheap "has anything changed?" polling. + +### v7.3.0 — WEIGHTED + +**Edge Properties** + +Extends the data model with properties on edges. 
+ +**Features:** +- WT/EPKEY — EdgePropKey encoding +- WT/OPS — Patch ops for edge properties +- WT/VIS — Edge property visibility rules +- WT/SCHEMA — Schema v3 + compatibility + +**User-Facing Changes:** +- New `patch.setEdgeProperty(from, to, label, key, value)` API. +- `getEdges()` and query results include edge `props` field. +- Schema v3 with backward-compatible v2 reader support. +- Mixed-version sync safety between v2 and v3 writers. + +### v7.4.0 — HANDSHAKE + +**Multi-Writer Ergonomics** + +Smoother multi-writer experience with fewer footguns. + +**Features:** +- HS/WRITER — Simplified writer identity +- HS/SYNC — Sync-then-materialize +- HS/ERR — Actionable error messages +- HS/CAS — CAS failure recovery +- HS/DELGUARD — Deletion guards + +**User-Facing Changes:** +- Writer API consolidated to `graph.writer()` (stable) and `graph.writer('id')` (explicit). `createWriter()` deprecated. +- New `syncWith(remote, { materialize: true })` option. +- Error messages include codes (`E_STATE_NEVER_MATERIALIZED`, etc.) and recovery hints. +- CAS failures surface as `WRITER_CAS_CONFLICT` with retry guidance. +- New `onDeleteWithData: 'reject' | 'cascade' | 'warn'` option prevents silent data corruption on node deletion. + +### v7.5.0 — COMPASS + +**Advanced Query Language** + +Richer query capabilities without imperative code. + +**Features:** +- CP/WHERE — Property filters with object syntax +- CP/MULTIHOP — Multi-hop traversal in queries +- CP/AGG — Aggregation + +**User-Facing Changes:** +- `.where({ role: 'admin' })` object shorthand for property equality filters. +- `.outgoing(label, { depth: [1, 3] })` multi-hop traversal with depth ranges. +- `.aggregate({ count: true, sum: 'props.total' })` for count/sum/avg/min/max. + +### v7.6.0 — LIGHTHOUSE + +**Observability** + +Runtime visibility into graph health and materialization decisions. 
+ +**Features:** +- LH/STATUS — graph.status() API +- LH/TIMING — Operation timing in LoggerPort +- LH/CLI — CLI status enhancement +- LH/RECEIPTS — Tick receipts + +**User-Facing Changes:** +- New `graph.status()` returns cached state freshness, patch counts, tombstone ratio, writer count, frontier. +- Core operations (`materialize()`, `syncWith()`, etc.) emit structured timing logs. +- `git warp check` surfaces full `graph.status()` output. +- New `materialize({ receipts: true })` returns per-op decision records (applied/superseded/redundant with reasons). + +### v7.7.0 — PULSE + +**Subscriptions & Reactivity** + +React to graph changes without polling. + +**Features:** +- PL/DIFF — State diff engine +- PL/SUB — graph.subscribe() +- PL/WATCH — graph.watch(pattern) + +**User-Facing Changes:** +- New `diffStates(before, after)` function for deterministic state comparison. +- New `graph.subscribe({ onChange, onError })` with isolated error handling and optional initial replay. +- New `graph.watch('user:*', { onChange, poll: 5000 })` for pattern-filtered reactive updates with optional frontier polling. + +### v8.0.0 — HOLOGRAM + +**Provenance & Holography** + +Implements Papers III–IV: provenance payloads, slicing, wormholes, BTRs, and prefix forks. + +**Features:** +- HG/IO — In/Out declarations on patches +- HG/PROV — Provenance payloads +- HG/SLICE — Slice materialization +- HG/WORM — Wormhole compression +- HG/BTR — Boundary Transition Records +- HG/FORK — Prefix forks + +**User-Facing Changes:** +- Patches carry `reads`/`writes` arrays. New `graph.patchesFor(nodeId)` index query. +- New `ProvenancePayload` class with monoid operations and `replay()`. +- New `graph.materializeSlice(nodeId)` for partial materialization of causal cones. +- New `graph.createWormhole(from, to)` for compressing patch ranges. +- New `createBTR()`/`verifyBTR()` for tamper-evident artifact exchange. +- New `graph.fork({ from, at })` for branching at the WARP layer. 
+ +### v9.0.0 — ECHO + +**Observer Geometry (Speculative)** + +Observer-scoped views, translation costs, and temporal queries from Paper IV. + +**Features:** +- EC/VIEW — Observer-scoped views +- EC/COST — Translation cost estimation +- EC/TEMPORAL — Temporal queries + +**User-Facing Changes:** +- New `graph.observer(name, { match, expose, redact })` for projected read-only views. +- New `graph.translationCost(observerA, observerB)` for MDL cost estimation. +- New `graph.temporal.always()`/`eventually()` for CTL*-style temporal queries over history. + +--- + +## Milestone Summary + +| # | Codename | Version | Theme | Status | +|---|----------|---------|-------|--------| +| 1 | **AUTOPILOT** | v7.1.0 | Kill the Materialize Tax | In progress | +| 2 | **GROUNDSKEEPER** | v7.2.0 | Self-Managing Infrastructure | Planned | +| 3 | **WEIGHTED** | v7.3.0 | Edge Properties | Planned | +| 4 | **HANDSHAKE** | v7.4.0 | Multi-Writer Ergonomics | Planned | +| 5 | **COMPASS** | v7.5.0 | Advanced Query Language | Planned | +| 6 | **LIGHTHOUSE** | v7.6.0 | Observability | Planned | +| 7 | **PULSE** | v7.7.0 | Subscriptions & Reactivity | Planned | +| 8 | **HOLOGRAM** | v8.0.0 | Provenance & Holography | Planned | +| 9 | **ECHO** | v9.0.0 | Observer Geometry | Speculative | + +--- + +## Dependency Graph (Milestone Level) + +```text +AUTOPILOT ──→ GROUNDSKEEPER ──→ PULSE + │ ↑ + └──→ HANDSHAKE │ + │ +WEIGHTED (independent) │ + │ +COMPASS (independent) │ + │ +LIGHTHOUSE ────────────────→ HOLOGRAM ──→ ECHO +``` + +- GROUNDSKEEPER depends on AUTOPILOT (auto-materialize foundation). +- PULSE depends on GROUNDSKEEPER (frontier change detection). +- HOLOGRAM depends on LIGHTHOUSE (tick receipts as foundation). +- ECHO depends on HOLOGRAM (provenance payloads). +- WEIGHTED, COMPASS, HANDSHAKE can proceed independently. 
+ +--- + +## Task DAG + + +```text +Key: ■ CLOSED ◆ OPEN ○ BLOCKED + +AUTOPILOT (v7.1.0) ████████████████████ 100% (10/10) + ■ AP/CKPT/1 → AP/CKPT/3 + ■ AP/CKPT/2 → AP/CKPT/3, LH/STATUS/1 + ■ AP/CKPT/3 + ■ AP/HOOK/1 → AP/HOOK/2 + ■ AP/HOOK/2 + ■ AP/INVAL/1 → AP/INVAL/2, AP/LAZY/2, LH/STATUS/1 + ■ AP/INVAL/2 → AP/INVAL/3 + ■ AP/INVAL/3 + ■ AP/LAZY/1 → AP/LAZY/2 + ■ AP/LAZY/2 + +GROUNDSKEEPER (v7.2.0) ░░░░░░░░░░░░░░░░░░░░ 0% (0/4) + ◆ GK/FRONTIER/1 → PL/WATCH/2 + ◆ GK/GC/1 + ◆ GK/IDX/1 → GK/IDX/2 + ○ GK/IDX/2 + +WEIGHTED (v7.3.0) ░░░░░░░░░░░░░░░░░░░░ 0% (0/7) + ◆ WT/EPKEY/1 → WT/OPS/1, WT/SCHEMA/1 + ○ WT/OPS/1 → WT/OPS/2, WT/OPS/3 + ○ WT/OPS/2 + ○ WT/OPS/3 → WT/VIS/1 + ○ WT/SCHEMA/1 → WT/SCHEMA/2 + ○ WT/SCHEMA/2 + ○ WT/VIS/1 + +HANDSHAKE (v7.4.0) ░░░░░░░░░░░░░░░░░░░░ 0% (0/8) + ◆ HS/CAS/1 + ◆ HS/DELGUARD/1 → HS/DELGUARD/2, HS/DELGUARD/3 + ○ HS/DELGUARD/2 + ○ HS/DELGUARD/3 + ◆ HS/ERR/1 → HS/ERR/2 + ○ HS/ERR/2 + ◆ HS/SYNC/1 + ◆ HS/WRITER/1 + +COMPASS (v7.5.0) ░░░░░░░░░░░░░░░░░░░░ 0% (0/3) + ◆ CP/AGG/1 + ◆ CP/MULTIHOP/1 + ◆ CP/WHERE/1 + +LIGHTHOUSE (v7.6.0) ░░░░░░░░░░░░░░░░░░░░ 0% (0/5) + ○ LH/CLI/1 + ◆ LH/RECEIPTS/1 → LH/RECEIPTS/2 + ○ LH/RECEIPTS/2 → HG/IO/1 + ◆ LH/STATUS/1 → LH/CLI/1 + ◆ LH/TIMING/1 + +PULSE (v7.7.0) ░░░░░░░░░░░░░░░░░░░░ 0% (0/5) + ◆ PL/DIFF/1 → PL/SUB/1 + ○ PL/SUB/1 → PL/WATCH/1, PL/SUB/2 + ○ PL/SUB/2 + ○ PL/WATCH/1 → PL/WATCH/2 + ○ PL/WATCH/2 + +HOLOGRAM (v8.0.0) ░░░░░░░░░░░░░░░░░░░░ 0% (0/7) + ○ HG/BTR/1 + ◆ HG/FORK/1 + ○ HG/IO/1 → HG/IO/2, HG/SLICE/1, EC/TEMPORAL/1 + ○ HG/IO/2 → HG/SLICE/1 + ◆ HG/PROV/1 → HG/SLICE/1, HG/WORM/1, HG/BTR/1 + ○ HG/SLICE/1 + ○ HG/WORM/1 + +ECHO (v9.0.0) ░░░░░░░░░░░░░░░░░░░░ 0% (0/3) + ○ EC/COST/1 + ○ EC/TEMPORAL/1 + ◆ EC/VIEW/1 → EC/COST/1 + +Cross-Milestone Dependencies: + AP/CKPT/2 → LH/STATUS/1 (LIGHTHOUSE) + AP/INVAL/1 → LH/STATUS/1 (LIGHTHOUSE) + GK/FRONTIER/1 → PL/WATCH/2 (PULSE) + HG/IO/1 → EC/TEMPORAL/1 (ECHO) + LH/RECEIPTS/2 → HG/IO/1 (HOLOGRAM) + +``` + + +--- + +## Milestone 1 — AUTOPILOT (v7.1.0) + +**Kill 
the Materialize Tax** + +The single biggest DX problem. Developers manually orchestrate state freshness across three independent axes (cached state, checkpoints, indexes) with no staleness detection. Fix this and the library becomes dramatically easier to use. + +### Feature: AP/INVAL — Auto-invalidation After Local Writes + +**Rationale:** `createPatch().commit()` and `writer.commitPatch()` can produce stale query results with no warning. The patch is already parsed and in memory at commit time — applying it to cached state is O(ops-in-patch), negligible compared to the Git I/O that just happened. + +#### AP/INVAL/1 — Add dirty flag to WarpGraph state tracking + +- **Status:** `CLOSED` +- **User Story:** As a developer, I want the graph to know when cached state is stale so I'm never silently reading outdated data. +- **Requirements:** + - Add `_stateDirty` boolean flag to `WarpGraph` instance. + - Flag starts `false` after `materialize()` completes. + - Flag is set `true` whenever a patch is committed by any path (`PatchBuilderV2.commit()`, `Writer.commitPatch()`). + - `_cachedState` getter logs a warning when state is dirty (if logger available). +- **Acceptance Criteria:** + - After `materialize()`, `_stateDirty === false`. + - After `createPatch().commit()`, `_stateDirty === true`. + - After `writer.commitPatch()`, `_stateDirty === true`. +- **Scope:** Dirty flag tracking only. No auto-remediation (that's AP/INVAL/2). +- **Out of Scope:** Remote writes, sync-triggered staleness. +- **Estimated Hours:** 2 +- **Estimated LOC:** ~40 prod + ~80 test +- **Blocked by:** None +- **Blocking:** AP/INVAL/2, AP/LAZY/2 +- **Definition of Done:** Flag correctly tracks staleness across all commit paths. Tests pass. +- **Test Plan:** + - Golden path: materialize → commit → assert dirty. + - Known failures: commit without prior materialize (flag should still set). + - Edge cases: multiple sequential commits, commit after failed commit. 
+ +#### AP/INVAL/2 — Eager incremental re-materialize on commit + +- **Status:** `CLOSED` +- **User Story:** As a developer, I want my cached state to stay fresh after local writes without calling `materialize()` again. +- **Requirements:** + - After `PatchBuilderV2.commit()` succeeds, apply the just-committed patch to `_cachedState` in-place via `JoinReducer.join()`. + - Clear `_stateDirty` flag after successful application. + - If `_cachedState` is null (never materialized), skip — don't attempt incremental update. + - Pass the `onCommitSuccess` callback from WarpGraph to carry the patch object. +- **Acceptance Criteria:** + - `commit()` followed by `hasNode()` returns fresh result without explicit `materialize()`. + - `_stateDirty === false` after successful eager re-materialize. + - When `_cachedState` is null, commit still succeeds without error. +- **Scope:** Local writes only. Apply single patch to existing cached state. +- **Out of Scope:** Full re-materialize from scratch, remote/sync writes. +- **Estimated Hours:** 4 +- **Estimated LOC:** ~80 prod + ~150 test +- **Blocked by:** AP/INVAL/1 +- **Blocking:** AP/INVAL/3 +- **Definition of Done:** Queries after commit return fresh data. No regression in existing tests. Benchmark shows negligible overhead vs. current commit path. +- **Test Plan:** + - Golden path: materialize → addNode → commit → hasNode returns true. + - Golden path: materialize → setProperty → commit → getNodeProps returns new value. + - Known failures: commit without prior materialize skips update gracefully. + - Fuzz/stress: 100 sequential commits, state matches full re-materialize. + - Edge cases: remove node then query, property overwrite, edge add/remove. + +#### AP/INVAL/3 — Wire Writer.commitPatch() to trigger invalidation + +- **Status:** `CLOSED` +- **User Story:** As a developer using the Writer API, I want the same freshness guarantees as the low-level patch API. 
+- **Requirements:** + - `Writer.commitPatch()` and `PatchSession.commit()` trigger the same eager re-materialize as `PatchBuilderV2.commit()`. + - The patch object is available to the callback after commit. +- **Acceptance Criteria:** + - `writer.commitPatch(fn)` followed by `hasNode()` returns fresh result. +- **Scope:** Wire existing callback through Writer/PatchSession. +- **Out of Scope:** New Writer API methods. +- **Estimated Hours:** 2 +- **Estimated LOC:** ~30 prod + ~60 test +- **Blocked by:** AP/INVAL/2 +- **Blocking:** None +- **Definition of Done:** Writer API commits produce the same freshness as direct patch API. +- **Test Plan:** + - Golden path: writer.commitPatch → query returns fresh data. + - Edge cases: writer commit failure does not corrupt state. + +--- + +### Feature: AP/LAZY — Lazy Rematerialization on Query + +**Rationale:** If the state is dirty (or null), query methods should auto-materialize before returning results. This eliminates the "call materialize() first" error that every new user hits. + +#### AP/LAZY/1 — Add autoMaterialize option to WarpGraph.open() + +- **Status:** `CLOSED` +- **User Story:** As a developer, I want to opt in to automatic materialization so I never see stale-state errors. +- **Requirements:** + - Add `autoMaterialize: boolean` option to `WarpGraph.open()` (default `false`). + - Store as instance property. + - Validate option type. +- **Acceptance Criteria:** + - `WarpGraph.open({ ..., autoMaterialize: true })` succeeds. + - `WarpGraph.open({ ..., autoMaterialize: 'yes' })` throws validation error. +- **Scope:** Option parsing and storage only. +- **Out of Scope:** Actual auto-materialize behavior (that's AP/LAZY/2). +- **Estimated Hours:** 1 +- **Estimated LOC:** ~15 prod + ~30 test +- **Blocked by:** None +- **Blocking:** AP/LAZY/2 +- **Definition of Done:** Option accepted, validated, stored. +- **Test Plan:** + - Golden path: open with `autoMaterialize: true` stores flag. 
+ - Known failures: invalid type rejected. + +#### AP/LAZY/2 — Guard query methods with auto-materialize + +- **Status:** `CLOSED` +- **User Story:** As a developer with autoMaterialize enabled, I want query methods to just work without manual state management. +- **Requirements:** + - When `autoMaterialize === true` and `_cachedState` is null or `_stateDirty === true`, call `materialize()` before returning results. + - Affected methods: `hasNode()`, `getNodeProps()`, `neighbors()`, `getNodes()`, `getEdges()`, `query().run()`, all `traverse.*` methods. + - When `autoMaterialize === false`, preserve current behavior (throw or return stale). + - Guard must be async-safe (callers already await these methods). +- **Acceptance Criteria:** + - With autoMaterialize on: open graph → addNode → commit → hasNode returns true (no explicit materialize). + - With autoMaterialize on: open graph → query().run() works on first call (no prior materialize). + - With autoMaterialize off: current behavior unchanged. +- **Scope:** Add guard to all query entry points. +- **Out of Scope:** Incremental/partial materialization strategy. +- **Estimated Hours:** 4 +- **Estimated LOC:** ~60 prod + ~200 test +- **Blocked by:** AP/LAZY/1, AP/INVAL/1 +- **Blocking:** None +- **Definition of Done:** All query methods auto-materialize when enabled. Existing tests unaffected. +- **Test Plan:** + - Golden path: fresh open → query with autoMaterialize → results returned. + - Golden path: dirty state → query → auto-rematerializes → fresh results. + - Known failures: autoMaterialize off → null state → appropriate error. + - Edge cases: concurrent auto-materialize calls coalesce (no double work). + - Stress: 50 rapid queries, only one materialize() call triggered. + +--- + +### Feature: AP/CKPT — Periodic Auto-Checkpointing + +**Rationale:** Users who never think about checkpoints accumulate unbounded patch chains, making materialization increasingly expensive. 
A checkpoint policy eliminates this silently. + +#### AP/CKPT/1 — Add checkpointPolicy option to WarpGraph.open() + +- **Status:** `CLOSED` +- **User Story:** As a developer, I want to configure automatic checkpointing so materialization stays fast without manual intervention. +- **Requirements:** + - Add `checkpointPolicy: { every: number }` option to `WarpGraph.open()`. + - `every` = number of patches since last checkpoint before auto-checkpoint triggers. + - Validate: `every` must be positive integer. + - Store as instance property. +- **Acceptance Criteria:** + - `WarpGraph.open({ ..., checkpointPolicy: { every: 500 } })` succeeds. + - Invalid values rejected with clear error. +- **Scope:** Option parsing and storage. +- **Out of Scope:** Actual checkpoint triggering (that's AP/CKPT/2–3). +- **Estimated Hours:** 1 +- **Estimated LOC:** ~20 prod + ~40 test +- **Blocked by:** None +- **Blocking:** AP/CKPT/3 +- **Definition of Done:** Option accepted, validated, stored. +- **Test Plan:** + - Golden path: open with valid policy. + - Known failures: `every: 0`, `every: -1`, `every: 'foo'`. + +#### AP/CKPT/2 — Track patch count since last checkpoint + +- **Status:** `CLOSED` +- **User Story:** As the system, I need to know how many patches have been applied since the last checkpoint to decide when to auto-checkpoint. +- **Requirements:** + - During `materialize()`, count patches loaded since the last checkpoint (or total if no checkpoint). + - Store count as `_patchesSinceCheckpoint` on WarpGraph instance. + - Increment on each local commit. +- **Acceptance Criteria:** + - After materialize with checkpoint + 10 patches, count = 10. + - After materialize with no checkpoint + 50 patches, count = 50. + - After local commit, count increments by 1. +- **Scope:** Counting only. No triggering. +- **Out of Scope:** Remote patch counting. 
+- **Estimated Hours:** 2 +- **Estimated LOC:** ~30 prod + ~80 test +- **Blocked by:** None +- **Blocking:** AP/CKPT/3 +- **Definition of Done:** Count accurately tracks patches since checkpoint. +- **Test Plan:** + - Golden path: checkpoint → 10 patches → materialize → count = 10. + - Edge cases: no checkpoint exists, count starts from zero. + +#### AP/CKPT/3 — Wire auto-checkpoint into materialize() path + +- **Status:** `CLOSED` +- **User Story:** As a developer, I want checkpoints created automatically when my patch count exceeds the threshold. +- **Requirements:** + - At the end of `materialize()`, if `checkpointPolicy` is set and `_patchesSinceCheckpoint >= policy.every`, call `createCheckpoint()`. + - Reset `_patchesSinceCheckpoint` after successful checkpoint. + - Log checkpoint creation via LoggerPort. + - Do not block the materialize return on checkpoint failure — log warning and continue. +- **Acceptance Criteria:** + - With `checkpointPolicy: { every: 5 }`, after 5 patches, `materialize()` creates a checkpoint. + - After auto-checkpoint, next materialize is incremental from new checkpoint. + - Checkpoint failure does not break materialize. +- **Scope:** Wire trigger into materialize path. +- **Out of Scope:** Checkpoint policy based on time or size. +- **Estimated Hours:** 3 +- **Estimated LOC:** ~40 prod + ~120 test +- **Blocked by:** AP/CKPT/1, AP/CKPT/2 +- **Blocking:** None +- **Definition of Done:** Auto-checkpointing works end-to-end. Incremental materialize benefits confirmed. +- **Test Plan:** + - Golden path: 5 patches → materialize → checkpoint created → next materialize is incremental. + - Known failures: checkpoint creation fails → materialize still succeeds. + - Edge cases: exactly at threshold, just below threshold, policy not set. + +--- + +### Feature: AP/HOOK — Post-Merge Git Hook for Staleness Notification + +**Rationale:** When a `git pull` or merge brings in new warp refs, the local cached state is silently stale. 
A lightweight hook provides immediate feedback. + +#### AP/HOOK/1 — Write post-merge hook script + +- **Status:** `CLOSED` +- **User Story:** As a developer, I want to be notified when a git pull changes warp refs so I know to rematerialize. +- **Requirements:** + - Shell script that runs after `git merge` / `git pull`. + - Compares warp refs before and after merge (using `git diff` on refs/warp/). + - If any refs changed, prints: `[warp] Writer refs changed during merge. Call materialize() to see updates.` + - Exit 0 always (informational, never blocks merge). +- **Acceptance Criteria:** + - After `git pull` that changes warp refs, warning printed. + - After `git pull` with no warp ref changes, no output. + - Hook never causes merge to fail. +- **Scope:** post-merge hook only. +- **Out of Scope:** post-rebase, post-checkout, automatic rematerialization. +- **Estimated Hours:** 2 +- **Estimated LOC:** ~40 hook script + ~60 test +- **Blocked by:** None +- **Blocking:** AP/HOOK/2 +- **Definition of Done:** Hook detects warp ref changes and prints warning. +- **Test Plan:** + - Golden path: simulate merge with ref changes → warning printed. + - Edge cases: no warp refs at all, refs directory doesn't exist. + +#### AP/HOOK/2 — Integrate hook into install scripts + +- **Status:** `CLOSED` +- **User Story:** As a developer, I want the post-merge hook installed automatically alongside existing hooks. +- **Requirements:** + - Add `post-merge` to the hooks installed by `scripts/setup-hooks.js`. + - Document in README or GUIDE.md. +- **Acceptance Criteria:** + - After `npm install`, post-merge hook is active. + - Existing pre-commit hook unaffected. +- **Scope:** Installation wiring. +- **Out of Scope:** Uninstall script. +- **Estimated Hours:** 1 +- **Estimated LOC:** ~20 prod + ~20 test +- **Blocked by:** AP/HOOK/1 +- **Blocking:** None +- **Definition of Done:** Hook installed automatically on npm install. 
+- **Test Plan:** + - Golden path: fresh npm install → hook exists in .git/hooks/. + - Edge cases: .git/hooks doesn't exist (created). + +--- + +## Milestone 2 — GROUNDSKEEPER (v7.2.0) + +**Self-Managing Infrastructure** + +Once the materialize tax is gone, the next friction layer is infrastructure that requires manual babysitting: indexes, GC, and frontier tracking. + +### Feature: GK/IDX — Index Staleness Tracking + +**Rationale:** Users can't tell if their bitmap index is stale. Today it's a mystery. Storing the frontier at build time makes staleness detection a cheap O(writers) ref comparison. + +#### GK/IDX/1 — Store frontier snapshot in index metadata at build time + +- **Status:** `OPEN` +- **User Story:** As the system, I need to record the frontier when an index was built so I can later detect staleness. +- **Requirements:** + - At `BitmapIndexBuilder.serialize()` time, accept and store current frontier (writer ID → tip SHA map). + - Write as `frontier.json` blob in the index tree alongside existing shard files. + - Format: `{ "version": 1, "frontier": { "alice": "abc...", "bob": "def..." } }`. +- **Acceptance Criteria:** + - Built index contains `frontier.json` with correct writer tips. + - Existing index loading code ignores `frontier.json` if not present (backward compat). +- **Scope:** Write frontier metadata at build time. +- **Out of Scope:** Reading/comparing frontier (that's GK/IDX/2). +- **Estimated Hours:** 3 +- **Estimated LOC:** ~50 prod + ~80 test +- **Blocked by:** None +- **Blocking:** GK/IDX/2 +- **Definition of Done:** Index tree contains frontier metadata. Backward compatible with existing indexes. +- **Test Plan:** + - Golden path: build index → frontier.json present with correct data. + - Edge cases: empty frontier (no writers), single writer. + +#### GK/IDX/2 — Detect and report index staleness on load + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want to know if my index is stale so I can decide whether to rebuild. 
+- **Requirements:** + - On `loadIndex()`, read `frontier.json` from index tree. + - Compare stored frontier against current writer refs. + - If diverged, log warning via LoggerPort: `[warp] Index is stale. N writers have advanced since last build.` + - Add `autoRebuild: boolean` option (default `false`). When true, trigger rebuild on staleness. +- **Acceptance Criteria:** + - Stale index → warning logged. + - Fresh index → no warning. + - `autoRebuild: true` → index rebuilt automatically. +- **Scope:** Staleness detection and optional auto-rebuild. +- **Out of Scope:** Incremental index update (always full rebuild). +- **Estimated Hours:** 4 +- **Estimated LOC:** ~60 prod + ~120 test +- **Blocked by:** GK/IDX/1 +- **Blocking:** None +- **Definition of Done:** Stale indexes detected and reported. Auto-rebuild works when enabled. +- **Test Plan:** + - Golden path: build index → advance writer → load → warning. + - Golden path: build index → load immediately → no warning. + - Known failures: index has no frontier.json (legacy) → no warning, no crash. + - Edge cases: new writer added since index build, writer removed. + +--- + +### Feature: GK/GC — Auto-GC After Materialization + +**Rationale:** `maybeRunGC()` exists but is never called unless the user remembers. Wire it into the materialize path so tombstones are cleaned up automatically. + +#### GK/GC/1 — Wire GC check into post-materialize path + +- **Status:** `OPEN` +- **User Story:** As a developer, I want tombstones cleaned up automatically so I don't have to think about GC. +- **Requirements:** + - After `materialize()` completes (and after optional auto-checkpoint from AP/CKPT/3), check `getGCMetrics()` against configured `gcPolicy`. + - If `shouldRunGC()` returns true, execute GC. + - Flow: `materialize() → apply patches → maybe checkpoint → maybe GC`. + - Log GC execution and results via LoggerPort. + - GC failure does not break materialize — log warning and continue. 
+- **Acceptance Criteria:** + - After materialize with 40% tombstone ratio (threshold 30%), GC runs automatically. + - After materialize with 10% tombstone ratio, GC does not run. + - GC failure logged but materialize still returns valid state. +- **Scope:** Wire existing GC into materialize path. +- **Out of Scope:** New GC algorithms, concurrent GC. +- **Estimated Hours:** 3 +- **Estimated LOC:** ~30 prod + ~100 test +- **Blocked by:** None (uses existing GC infrastructure) +- **Blocking:** None +- **Definition of Done:** GC runs automatically when thresholds exceeded after materialize. +- **Test Plan:** + - Golden path: create graph with many tombstones → materialize → GC runs. + - Known failures: GC throws → materialize still succeeds. + - Edge cases: exactly at threshold, gcPolicy not configured. + +--- + +### Feature: GK/FRONTIER — Frontier Change Detection + +**Rationale:** A cheap way to check "has anything changed?" without full materialization enables efficient polling and is the foundation for the PULSE milestone's watch API. + +#### GK/FRONTIER/1 — Implement hasFrontierChanged() method + +- **Status:** `OPEN` +- **User Story:** As a developer, I want to cheaply check if the graph has new data without materializing. +- **Requirements:** + - `graph.hasFrontierChanged()` compares `_cachedState.observedFrontier` against current writer refs. + - Returns `boolean`. + - O(writers) cost — one ref read per writer. + - If `_cachedState` is null, returns `true` (unknown = assume changed). +- **Acceptance Criteria:** + - No changes since materialize → returns `false`. + - New patch committed by another writer → returns `true`. + - No prior materialize → returns `true`. +- **Scope:** Read-only comparison method. +- **Out of Scope:** File-system watching, push notifications. 
+- **Estimated Hours:** 3 +- **Estimated LOC:** ~40 prod + ~100 test +- **Blocked by:** None +- **Blocking:** PL/WATCH/2 +- **Definition of Done:** Method returns correct result for all frontier states. +- **Test Plan:** + - Golden path: materialize → no changes → false. Materialize → external commit → true. + - Edge cases: new writer appears, writer ref deleted, empty graph. + - Stress: call 1000 times in sequence — consistent results, no leaks. + +--- + +## Milestone 3 — WEIGHTED (v7.3.0) + +**Edge Properties** + +Extends the data model to support properties on edges, enabling weighted graphs, typed relationships, and richer domain models. + +### Feature: WT/EPKEY — EdgePropKey Encoding + +**Rationale:** Edge properties need a deterministic, injective key encoding that avoids collisions with node property keys and is reversible for deserialization. + +#### WT/EPKEY/1 — Design and implement encode/decode utilities + +- **Status:** `OPEN` +- **User Story:** As the system, I need a canonical encoding for edge property keys that is injective, reversible, and collision-free with node property keys. +- **Requirements:** + - `encodeEdgePropKey(from, to, label, propKey)` → deterministic string. + - `decodeEdgePropKey(encoded)` → `{ from, to, label, propKey }`. + - Encoding must be injective (no two distinct tuples produce the same key). + - Must not collide with existing `encodePropKey(nodeId, key)` format (`"nodeId\0key"`). + - Use a distinct separator or prefix to namespace edge props. +- **Acceptance Criteria:** + - Round-trip: encode → decode === original for all valid inputs. + - No collision with node property keys. + - Deterministic (same input always produces same output). +- **Scope:** Pure encode/decode functions. +- **Out of Scope:** Integration with JoinReducer or patches. 
+- **Estimated Hours:** 3
+- **Estimated LOC:** ~60 prod + ~100 test
+- **Blocked by:** None
+- **Blocking:** WT/OPS/1, WT/SCHEMA/1
+- **Definition of Done:** Encode/decode passes round-trip, injectivity, and collision-freedom tests.
+- **Test Plan:**
+ - Golden path: encode → decode round-trip for typical edge + property.
+ - Fuzz: 10,000 random (from, to, label, propKey) tuples — all round-trip correctly.
+ - Edge cases: empty strings, strings containing separator characters, unicode.
+
+---
+
+### Feature: WT/OPS — Patch Ops for Edge Properties
+
+**Rationale:** Edge properties need CRDT semantics (LWW) consistent with node properties, stored in the existing prop map via EdgePropKey.
+
+#### WT/OPS/1 — Extend PatchBuilderV2 with edge property ops
+
+- **Status:** `BLOCKED`
+- **User Story:** As a developer, I want to set properties on edges using the patch builder API.
+- **Requirements:**
+ - Add `setEdgeProperty(from, to, label, key, value)` to `PatchBuilderV2`.
+ - Generates a `PropSet` op with `edgePropKey`-encoded key.
+ - Validates that edge exists in current state (or was added in current patch).
+- **Acceptance Criteria:**
+ - `patch.setEdgeProperty('a', 'b', 'likes', 'weight', 0.8)` succeeds.
+ - Property stored under edge prop key namespace.
+- **Scope:** Patch builder API extension.
+- **Out of Scope:** Query-time retrieval (that's WT/OPS/3).
+- **Estimated Hours:** 3
+- **Estimated LOC:** ~50 prod + ~80 test
+- **Blocked by:** WT/EPKEY/1
+- **Blocking:** WT/OPS/2, WT/OPS/3
+- **Definition of Done:** Edge properties can be set via patch builder.
+- **Test Plan:**
+ - Golden path: add edge → set edge property → commit succeeds.
+ - Known failures: set property on non-existent edge → error.
+ - Edge cases: set property on edge added in same patch.
+
+#### WT/OPS/2 — LWW semantics for edge properties in JoinReducer
+
+- **Status:** `BLOCKED`
+- **User Story:** As the system, concurrent edge property writes must resolve deterministically via LWW.
+- **Requirements:**
+ - Edge property `PropSet` ops processed identically to node property `PropSet` ops in JoinReducer.
+ - LWW ordering: lamport → writerId → patchSha → opIndex.
+ - No special-case logic — the existing LWW path handles edge props via key namespace.
+- **Acceptance Criteria:**
+ - Two writers set same edge property concurrently → higher EventId wins.
+ - Result matches deterministic re-materialization.
+- **Scope:** Verify and test existing LWW path with edge prop keys.
+- **Out of Scope:** New CRDT semantics for edge props.
+- **Estimated Hours:** 2
+- **Estimated LOC:** ~20 prod + ~120 test
+- **Blocked by:** WT/OPS/1
+- **Blocking:** None
+- **Definition of Done:** Concurrent edge property writes resolve deterministically.
+- **Test Plan:**
+ - Golden path: two writers set same edge prop → materialize → LWW winner correct.
+ - Fuzz: random interleaving of edge prop sets → deterministic result.
+ - Edge cases: same lamport, different writers (writerId tiebreak).
+
+#### WT/OPS/3 — Surface edge properties in materialization and queries
+
+- **Status:** `BLOCKED`
+- **User Story:** As a developer, I want to read edge properties after materialization.
+- **Requirements:**
+ - `getEdges()` returns edge objects with `props` field.
+ - `query().outgoing()` / `.incoming()` results include edge props.
+ - Edge props in query node snapshots (`edgesOut`, `edgesIn`) include props.
+ - Canonical JSON ordering for edge props.
+- **Acceptance Criteria:**
+ - `getEdges()` returns `[{ from, to, label, props: { weight: 0.8 } }]`.
+ - Query results include edge props when `select(['props'])` used.
+- **Scope:** Read-side surface of edge properties.
+- **Out of Scope:** Edge property filtering/indexing.
+- **Estimated Hours:** 4
+- **Estimated LOC:** ~80 prod + ~150 test
+- **Blocked by:** WT/OPS/1
+- **Blocking:** WT/VIS/1
+- **Definition of Done:** Edge properties visible in all read paths.
+- **Test Plan:** + - Golden path: set edge prop → materialize → getEdges returns props. + - Golden path: query with outgoing → edge snapshot includes props. + - Edge cases: edge with no props (empty object), edge with many props. + +--- + +### Feature: WT/VIS — Edge Property Visibility Rules + +**Rationale:** Edge properties must be invisible when their parent edge is tombstoned, and behavior on edge re-add must be well-defined. + +#### WT/VIS/1 — Gate edge property visibility on edge aliveness + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I expect edge properties to disappear when the edge is removed. +- **Requirements:** + - `getEdges()` and query results omit props for edges not in `edgeAlive` OR-Set. + - Property data remains in `prop` map (for potential re-add), but is not surfaced. + - Re-adding a previously removed edge does NOT restore old properties (clean slate). +- **Acceptance Criteria:** + - Remove edge → getEdges excludes it → props invisible. + - Re-add same edge → props are empty (not restored from before removal). +- **Scope:** Visibility gating logic. +- **Out of Scope:** Property cleanup/GC. +- **Estimated Hours:** 3 +- **Estimated LOC:** ~40 prod + ~100 test +- **Blocked by:** WT/OPS/3 +- **Blocking:** None +- **Definition of Done:** Edge props correctly gated by edge aliveness. +- **Test Plan:** + - Golden path: add edge with props → remove edge → props invisible → re-add → props empty. + - Edge cases: concurrent add and remove with props. + +--- + +### Feature: WT/SCHEMA — Schema v3 + Compatibility + +**Rationale:** Edge properties require a schema version bump. Existing v2 data must remain readable. + +#### WT/SCHEMA/1 — Define schema v3 format + +- **Status:** `BLOCKED` +- **User Story:** As the system, I need a new schema version that supports edge properties while remaining backward compatible. +- **Requirements:** + - Bump patch schema to `3`. 
+ - Schema v3 patches may contain edge property `PropSet` ops with edge prop keys.
+ - Schema v2 patches remain valid (no edge prop ops).
+ - `WarpMessageCodec` handles both v2 and v3 decoding.
+- **Acceptance Criteria:**
+ - v3 patches encode/decode correctly.
+ - v2 patches still load without error.
+- **Scope:** Schema definition and codec update.
+- **Out of Scope:** Migration tooling.
+- **Estimated Hours:** 3
+- **Estimated LOC:** ~60 prod + ~80 test
+- **Blocked by:** WT/EPKEY/1
+- **Blocking:** WT/SCHEMA/2
+- **Definition of Done:** v3 patches round-trip through the codec; v2 patches load unchanged.
+- **Test Plan:**
+ - Golden path: v3 patch with edge prop ops → encode → decode round-trip.
+ - Golden path: existing v2 patch loads without error.
+ - Edge cases: v3 patch containing only node ops.
+
+#### WT/SCHEMA/2 — Mixed-version sync safety
+
+- **Status:** `BLOCKED`
+- **User Story:** As a developer, I want to sync between v2 and v3 writers without data loss.
+- **Requirements:**
+ - v3 writer syncing with v2 writer: v2 patches applied normally (no edge props).
+ - v2 writer syncing with v3 writer: v3 patches with edge prop ops decoded but edge props silently dropped (v2 has no edge prop support).
+ - Alternatively: v2 writer encountering v3 patch throws clear error with upgrade guidance.
+ - Choose one strategy and document the decision.
+- **Acceptance Criteria:**
+ - Mixed-version sync either degrades gracefully or fails clearly.
+ - No silent data corruption.
+- **Scope:** Sync compatibility behavior.
+- **Out of Scope:** Online migration, schema negotiation protocol.
+- **Estimated Hours:** 4
+- **Estimated LOC:** ~60 prod + ~150 test
+- **Blocked by:** WT/SCHEMA/1
+- **Blocking:** None
+- **Definition of Done:** Mixed-version sync tested and behavior documented.
+- **Test Plan:**
+ - Golden path: v2 writer ↔ v3 writer sync in both directions.
+ - Known failures: unsupported schema error path.
+ - Edge cases: v3 patch with only node ops (should work with v2).
+
+---
+
+## Milestone 4 — HANDSHAKE (v7.4.0)
+
+**Multi-Writer Ergonomics**
+
+The multi-writer story works but has sharp edges around writer identity, sync workflows, and error diagnostics.
+ +### Feature: HS/WRITER — Simplify Writer Identity + +**Rationale:** Three ways to get a writer (`writer()`, `writer('id')`, `createWriter()`) with different persistence semantics is confusing. Consolidate to two. + +#### HS/WRITER/1 — Consolidate to two-form writer() API + +- **Status:** `OPEN` +- **User Story:** As a developer, I want one obvious way to get a writer so I don't have to understand three different methods. +- **Requirements:** + - `graph.writer()` — stable identity, resolved from git config or generated and persisted on first use. + - `graph.writer('explicit-id')` — explicit identity, no side effects. + - `createWriter()` deprecated with console warning pointing to `writer()`. + - `{ persist: 'config' }` option moves to the no-arg `writer()` form. +- **Acceptance Criteria:** + - `graph.writer()` returns same ID across calls. + - `graph.writer('alice')` returns writer with ID 'alice'. + - `graph.createWriter()` logs deprecation warning. +- **Scope:** API consolidation with deprecation. +- **Out of Scope:** Removing `createWriter()` (keep for 1 minor version). +- **Estimated Hours:** 4 +- **Estimated LOC:** ~60 prod + ~120 test +- **Blocked by:** None +- **Blocking:** None +- **Definition of Done:** Two-form API works. Deprecation warning emitted. Existing tests updated. +- **Test Plan:** + - Golden path: `writer()` persists identity across calls. + - Golden path: `writer('id')` uses explicit identity. + - Known failures: `createWriter()` still works but warns. + - Edge cases: git config not writable (fallback to in-memory). + +--- + +### Feature: HS/SYNC — Sync-then-Materialize + +**Rationale:** The most common sync footgun is syncing onto stale state, then forgetting to rematerialize. Make it a single operation. + +#### HS/SYNC/1 — Add materialize option to syncWith() + +- **Status:** `OPEN` +- **User Story:** As a developer, I want to sync and rematerialize in one call so I can't forget. 
+- **Requirements:** + - Add `{ materialize: boolean }` option to `syncWith()` (default `false`). + - When `true`, call `materialize()` after applying sync response. + - Return value includes materialization result when enabled. +- **Acceptance Criteria:** + - `syncWith(remote, { materialize: true })` returns fresh state. + - `syncWith(remote)` preserves current behavior. +- **Scope:** Option wiring. +- **Out of Scope:** Automatic materialization without opt-in. +- **Estimated Hours:** 2 +- **Estimated LOC:** ~20 prod + ~60 test +- **Blocked by:** None +- **Blocking:** None +- **Definition of Done:** Sync + materialize works as single call. +- **Test Plan:** + - Golden path: sync with materialize → query returns remote data. + - Edge cases: sync applies 0 patches → materialize still runs. + +--- + +### Feature: HS/ERR — Actionable Error Messages + +**Rationale:** "No cached state. Call materialize() first" doesn't distinguish between three different situations, each with a different fix. + +#### HS/ERR/1 — Audit and classify existing error messages + +- **Status:** `OPEN` +- **User Story:** As a developer, I want error messages that tell me exactly what went wrong and how to fix it. +- **Requirements:** + - Audit all error throws in WarpGraph.js and services. + - Classify each by cause: never-materialized, stale-after-write, stale-after-sync, configuration, validation. + - Document findings in a checklist for HS/ERR/2. +- **Acceptance Criteria:** + - Complete audit report with each error classified. +- **Scope:** Audit and classification only. +- **Out of Scope:** Changing error messages (that's HS/ERR/2). +- **Estimated Hours:** 2 +- **Estimated LOC:** ~0 prod (audit only, produces notes for HS/ERR/2) +- **Blocked by:** None +- **Blocking:** HS/ERR/2 +- **Definition of Done:** Audit complete with classification of all error paths. +- **Test Plan:** N/A (research task). 
+ +#### HS/ERR/2 — Add error codes and recovery hints + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want error codes I can match on and human-readable recovery instructions. +- **Requirements:** + - Each domain error gets a unique code (e.g., `E_STATE_NEVER_MATERIALIZED`, `E_STATE_STALE_WRITE`, `E_STATE_STALE_SYNC`). + - Error `.message` includes recovery hint (e.g., "Call materialize() to load initial state"). + - Error `.code` property for programmatic matching. + - Backward compatible: existing error types unchanged, codes added. +- **Acceptance Criteria:** + - Each error path produces distinct code and actionable message. + - `catch (e) { if (e.code === 'E_STATE_STALE_WRITE') ... }` works. +- **Scope:** Error codes and messages for state-related errors. +- **Out of Scope:** Error codes for all domain errors (scope to state errors first). +- **Estimated Hours:** 4 +- **Estimated LOC:** ~80 prod + ~120 test +- **Blocked by:** HS/ERR/1 +- **Blocking:** None +- **Definition of Done:** State errors distinguishable by code with recovery hints. Tests verify each error path. +- **Test Plan:** + - Golden path: trigger each error condition → verify code and message. + - Edge cases: error in async context preserves code. + +--- + +### Feature: HS/CAS — CAS Failure Recovery + +**Rationale:** When `commitPatch()` fails because another process updated the writer ref, the error is a generic Git ref-update failure. It should explain what happened and suggest retry. + +#### HS/CAS/1 — Detect and surface CAS failures with guidance + +- **Status:** `OPEN` +- **User Story:** As a developer, I want to know when my commit failed due to a concurrent write, not a Git error. +- **Requirements:** + - Detect when `PatchBuilderV2.commit()` fails due to ref compare-and-swap mismatch. + - Throw `WriterError` with code `WRITER_CAS_CONFLICT`. + - Message: "Commit failed: writer ref was updated by another process. Re-materialize and retry." 
+ - Include `expectedSha` and `actualSha` in error properties for diagnostics.
+- **Acceptance Criteria:**
+ - Concurrent commit by another process → `WRITER_CAS_CONFLICT` error.
+ - Error includes both expected and actual SHAs.
+- **Scope:** Error detection and reporting.
+- **Out of Scope:** Automatic retry logic.
+- **Estimated Hours:** 3
+- **Estimated LOC:** ~40 prod + ~80 test
+- **Blocked by:** None
+- **Blocking:** None
+- **Definition of Done:** CAS failures produce descriptive error with recovery guidance.
+- **Test Plan:**
+ - Golden path: simulate concurrent ref update → verify error code and properties.
+ - Edge cases: ref deleted between begin and commit.
+
+---
+
+### Feature: HS/DELGUARD — Deletion Guards
+
+**Rationale:** `NodeRemove` doesn't check for attached data. Nodes get tombstoned, properties become orphaned, edges become dangling references. This is silent data corruption.
+
+#### HS/DELGUARD/1 — Add onDeleteWithData policy option
+
+- **Status:** `OPEN`
+- **User Story:** As a developer, I want to configure what happens when I delete a node that has properties or edges.
+- **Requirements:**
+ - Add `onDeleteWithData: 'reject' | 'cascade' | 'warn'` option to `WarpGraph.open()`.
+ - Default: `'warn'` (preserves current implicit behavior, made explicit).
+ - Validate option value.
+- **Acceptance Criteria:**
+ - Option accepted and stored.
+ - Invalid values rejected.
+- **Scope:** Option parsing only.
+- **Out of Scope:** Enforcement logic (that's HS/DELGUARD/2–3).
+- **Estimated Hours:** 1
+- **Estimated LOC:** ~15 prod + ~30 test
+- **Blocked by:** None
+- **Blocking:** HS/DELGUARD/2, HS/DELGUARD/3
+- **Definition of Done:** Option accepted, validated, stored.
+- **Test Plan:**
+ - Golden path: open with each valid mode stores the value.
+ - Known failures: invalid mode string rejected.
+
+#### HS/DELGUARD/2 — Implement reject and warn modes
+
+- **Status:** `BLOCKED`
+- **User Story:** As a developer, I want node deletion to fail or warn when the node has attached data.
+- **Requirements:**
+ - In `commitPatch()`, before finalizing: inspect `NodeRemove` ops.
+ - For each removed node, check current state for properties and connected edges. + - `'reject'`: throw error listing the attached data. + - `'warn'`: log warning via LoggerPort, proceed with deletion. +- **Acceptance Criteria:** + - Reject mode: delete node with props → error thrown, commit aborted. + - Reject mode: delete node with edges → error thrown, commit aborted. + - Warn mode: delete node with props → warning logged, commit succeeds. + - Delete node with no data → succeeds in all modes. +- **Scope:** Reject and warn validation. +- **Out of Scope:** Cascade mode (that's HS/DELGUARD/3). +- **Estimated Hours:** 4 +- **Estimated LOC:** ~80 prod + ~150 test +- **Blocked by:** HS/DELGUARD/1 +- **Blocking:** None +- **Definition of Done:** Reject and warn modes work correctly for all cases. +- **Test Plan:** + - Golden path: reject mode blocks deletion of node with props. + - Golden path: reject mode blocks deletion of node with edges. + - Golden path: warn mode logs and proceeds. + - Edge cases: node with both props and edges, node with only outgoing edges, node with only incoming edges. + +#### HS/DELGUARD/3 — Implement cascade mode + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want the option to automatically clean up edges and properties when I delete a node. +- **Requirements:** + - `'cascade'`: auto-generate `EdgeRemove` ops for all connected edges and clear properties before `NodeRemove`. + - Generated ops use current state's observed dots. + - Generated ops appear in the committed patch (auditable). +- **Acceptance Criteria:** + - Cascade mode: delete node with 3 edges → patch contains 3 EdgeRemove + NodeRemove. + - After cascade delete, no orphaned properties or dangling edges. + - Materialized state is clean. +- **Scope:** Cascade deletion logic. +- **Out of Scope:** Recursive cascade (deleting connected nodes). 
+- **Estimated Hours:** 5 +- **Estimated LOC:** ~100 prod + ~180 test +- **Blocked by:** HS/DELGUARD/1 +- **Blocking:** None +- **Definition of Done:** Cascade mode produces clean state. Generated ops visible in patch. +- **Test Plan:** + - Golden path: cascade delete node with edges and props → clean state. + - Fuzz: random graph → cascade delete random node → materialize → no orphans. + - Edge cases: node with self-loop, node with both in and out edges to same peer. + +--- + +## Milestone 5 — COMPASS (v7.5.0) + +**Advanced Query Language** + +The fluent query builder is functional but limited. As graphs get larger, users need filtering, aggregation, and multi-hop traversal without writing imperative loops. + +### Feature: CP/WHERE — Property Filters with Object Syntax + +**Rationale:** `.where()` currently only accepts predicate functions. Object shorthand covers the 80% case (equality checks) more concisely. + +#### CP/WHERE/1 — Implement object shorthand in where() + +- **Status:** `OPEN` +- **User Story:** As a developer, I want to filter nodes by property equality without writing a function. +- **Requirements:** + - `where({ role: 'admin' })` filters nodes where `props.role === 'admin'`. + - `where({ role: 'admin', active: true })` requires all properties to match (AND). + - Object form and function form can be mixed via chaining: `.where({ role: 'admin' }).where(n => n.props.age > 18)`. + - Detect argument type (object vs function) and dispatch accordingly. +- **Acceptance Criteria:** + - Object filter matches nodes with exact property values. + - Multiple properties = AND semantics. + - Chained where() calls = AND semantics. + - Non-existent property in filter → node excluded. +- **Scope:** Equality filters via object syntax. +- **Out of Scope:** Comparison operators ($gt, $lt, etc.), nested property paths. 
+- **Estimated Hours:** 3 +- **Estimated LOC:** ~40 prod + ~120 test +- **Blocked by:** None +- **Blocking:** None +- **Definition of Done:** Object where filters work alongside existing predicate filters. +- **Test Plan:** + - Golden path: `where({ role: 'admin' })` returns only admin nodes. + - Golden path: chained object + function filters. + - Edge cases: empty object (matches all), property value is null, property value is array. + +--- + +### Feature: CP/MULTIHOP — Multi-Hop Traversal in Queries + +**Rationale:** Multi-hop today requires chaining multiple `.outgoing()` calls or dropping to the imperative `traverse` API. A depth range parameter makes common patterns one-liners. + +#### CP/MULTIHOP/1 — Add depth option to outgoing/incoming + +- **Status:** `OPEN` +- **User Story:** As a developer, I want to traverse 1–3 hops in a single query call. +- **Requirements:** + - `outgoing(label, { depth: [1, 3] })` traverses 1 to 3 hops. + - `outgoing(label, { depth: 2 })` shorthand for exactly 2 hops. + - Default depth: `[1, 1]` (current behavior). + - Result includes all nodes reachable within depth range (union). + - Cycle detection: visited nodes not re-expanded. + - Deterministic output ordering. +- **Acceptance Criteria:** + - `depth: [1, 3]` returns nodes at hop 1, 2, and 3. + - `depth: 2` returns nodes at exactly hop 2. + - Cycles don't cause infinite loops. +- **Scope:** Depth range for query traversal. +- **Out of Scope:** Returning path information, weighted traversal. +- **Estimated Hours:** 5 +- **Estimated LOC:** ~80 prod + ~200 test +- **Blocked by:** None +- **Blocking:** None +- **Definition of Done:** Multi-hop traversal works with depth ranges. Deterministic and cycle-safe. +- **Test Plan:** + - Golden path: linear chain A→B→C→D, depth [1,3] from A returns B,C,D. + - Golden path: depth 2 from A returns C only. + - Fuzz: random graph, depth [1,5] → all results reachable within 5 hops. 
+ - Edge cases: cycle in graph, disconnected components, depth [0,0] (start node only). + +--- + +### Feature: CP/AGG — Aggregation + +**Rationale:** Count/sum/avg over matched nodes without materializing the full result set. + +#### CP/AGG/1 — Implement aggregate() method + +- **Status:** `OPEN` +- **User Story:** As a developer, I want to compute counts and sums over query results without fetching all nodes. +- **Requirements:** + - `query().match('order:*').aggregate({ count: true, sum: 'props.total' }).run()`. + - Returns `{ stateHash, count: number, sum: number }` (no `nodes` array). + - Supported aggregations: `count`, `sum`, `avg`, `min`, `max`. + - Property path must be dot-notation string. + - Non-numeric values for sum/avg/min/max are skipped with optional warning. + - `aggregate()` is terminal — calling `.select()` or `.outgoing()` after it throws. +- **Acceptance Criteria:** + - Count returns correct node count. + - Sum computes correctly over numeric property. + - Non-numeric values silently skipped. + - aggregate + select throws. +- **Scope:** Basic aggregation functions. +- **Out of Scope:** Group-by, having, nested aggregations. +- **Estimated Hours:** 5 +- **Estimated LOC:** ~100 prod + ~180 test +- **Blocked by:** None +- **Blocking:** None +- **Definition of Done:** Aggregations produce correct results. Non-numeric handling defined. +- **Test Plan:** + - Golden path: 10 order nodes with `total` prop → count=10, sum=correct. + - Golden path: avg, min, max on numeric props. + - Known failures: aggregate() + select() → error. + - Edge cases: all non-numeric values (sum=0? or NaN?), empty match set, single node. + +--- + +## Milestone 6 — LIGHTHOUSE (v7.6.0) + +**Observability** + +The library is opaque at runtime. Users can't see what's happening without adding their own instrumentation. + +### Feature: LH/STATUS — graph.status() API + +**Rationale:** A single method that reports everything about the graph's operational state. 
Today this information is scattered across multiple internal properties. + +#### LH/STATUS/1 — Implement graph.status() method + +- **Status:** `OPEN` +- **User Story:** As a developer, I want a single call that tells me everything about my graph's health. +- **Requirements:** + - Returns: + ```javascript + { + cachedState: 'fresh' | 'stale' | 'none', + patchesSinceCheckpoint: number, + tombstoneRatio: number, + writers: number, + frontier: { [writerId]: string }, + } + ``` + - `cachedState`: 'none' if never materialized, 'stale' if dirty flag set or frontier changed, 'fresh' otherwise. + - Patch count from AP/CKPT/2 tracking (or computed from refs). + - Tombstone ratio from existing `getGCMetrics()`. + - Writers discovered from refs. + - O(writers) cost, no materialization triggered. +- **Acceptance Criteria:** + - Returns correct values for each field. + - Does not trigger materialization. +- **Scope:** Status reporting method. +- **Out of Scope:** Historical status, status streaming. +- **Estimated Hours:** 4 +- **Estimated LOC:** ~60 prod + ~120 test +- **Blocked by:** AP/INVAL/1 (dirty flag), AP/CKPT/2 (patch count) +- **Blocking:** LH/CLI/1 +- **Definition of Done:** Status returns accurate operational health. Tested for all state combinations. +- **Test Plan:** + - Golden path: fresh graph → status shows 'none' → materialize → 'fresh' → commit → 'stale'. + - Edge cases: no writers, no checkpoint, no cached state. + +--- + +### Feature: LH/TIMING — Operation Timing in LoggerPort + +**Rationale:** The LoggerPort infrastructure exists but isn't wired into most operations. + +#### LH/TIMING/1 — Add structured timing to core operations + +- **Status:** `OPEN` +- **User Story:** As a developer, I want to see how long each operation takes so I can identify bottlenecks. +- **Requirements:** + - Wrap `materialize()`, `syncWith()`, `createCheckpoint()`, `rebuildIndex()`, `runGC()` with timing. + - Use existing `ClockPort` for timestamps. 
+ - Log via `LoggerPort` at info level: `[warp] materialize completed in 142ms (23 patches)`. + - Include operation-specific metrics (patch count, sync applied count, etc.). +- **Acceptance Criteria:** + - Each operation logs timing on completion. + - Timing uses injected ClockPort (testable). +- **Scope:** Timing for 5 core operations. +- **Out of Scope:** Per-patch timing, flame graphs, tracing spans. +- **Estimated Hours:** 3 +- **Estimated LOC:** ~50 prod + ~80 test +- **Blocked by:** None +- **Blocking:** None +- **Definition of Done:** Core operations emit timing logs. Verified with test logger. +- **Test Plan:** + - Golden path: materialize with test logger → timing message captured. + - Edge cases: operation fails → timing still logged with error context. + +--- + +### Feature: LH/CLI — CLI Status Enhancement + +**Rationale:** `git warp check` exists but should surface the full `graph.status()` output. + +#### LH/CLI/1 — Wire graph.status() into CLI check output + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want `git warp check` to show the same info as `graph.status()`. +- **Requirements:** + - `git warp check` outputs all fields from `graph.status()`. + - JSON mode: raw status object. + - Human mode: formatted table with color-coded staleness. +- **Acceptance Criteria:** + - `git warp check --json` returns full status object. + - Human output shows staleness, patch count, tombstone ratio, writer count. +- **Scope:** CLI wiring. +- **Out of Scope:** New CLI commands. +- **Estimated Hours:** 2 +- **Estimated LOC:** ~30 prod + ~40 test (BATS) +- **Blocked by:** LH/STATUS/1 +- **Blocking:** None +- **Definition of Done:** CLI check outputs full status. BATS tests verify JSON and human output. +- **Test Plan:** + - Golden path: `git warp check --json` → valid JSON with all fields. + - Edge cases: empty graph, no checkpoint. 
+ +--- + +### Feature: LH/RECEIPTS — Tick Receipts + +**Rationale:** During `materialize()`, the system applies patches and discards all decision information. When two writers set the same property concurrently, LWW picks a winner and the losing write vanishes. For multi-writer production use, "why does this node have this value?" needs an answer. Receipts are also the foundation for provenance in HOLOGRAM. + +#### LH/RECEIPTS/1 — Define receipt data structure + +- **Status:** `OPEN` +- **User Story:** As the system, I need a well-defined structure for recording materialization decisions. +- **Requirements:** + - Define `TickReceipt` type: + ```javascript + { + patchSha: string, + writer: string, + lamport: number, + ops: Array<{ + op: string, // 'NodeAdd' | 'PropSet' | etc. + target: string, // node or edge key + result: 'applied' | 'superseded' | 'redundant', + reason?: string, // e.g., "LWW: writer bob at lamport 43 wins" + }> + } + ``` + - Immutable after creation. + - Canonical JSON serialization. +- **Acceptance Criteria:** + - Type is importable and constructible. + - Serializes to deterministic JSON. +- **Scope:** Type definition only. +- **Out of Scope:** Emission logic (that's LH/RECEIPTS/2). +- **Estimated Hours:** 2 +- **Estimated LOC:** ~40 prod + ~30 test +- **Blocked by:** None +- **Blocking:** LH/RECEIPTS/2 + +#### LH/RECEIPTS/2 — Emit receipts during patch application + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want to see exactly what happened during materialization — which ops were applied and which were superseded. +- **Requirements:** + - Add `{ receipts: true }` option to `materialize()`. + - When enabled, `JoinReducer.join()` records a `TickReceipt` for each patch. + - For `PropSet`: record whether LWW applied or superseded (and who won). + - For OR-Set add: record whether dot was new or re-add. + - For OR-Set remove: record whether remove was effective. + - Return receipts in materialize result: `{ state, receipts }`. 
+ - When `{ receipts: false }` (default), no overhead — receipts array not allocated. +- **Acceptance Criteria:** + - `materialize({ receipts: true })` returns receipts array. + - Each receipt corresponds to one patch with per-op decisions. + - LWW conflicts show winner info in reason. + - Default call has no receipt overhead. +- **Scope:** Receipt emission in JoinReducer. +- **Out of Scope:** Receipt persistence, receipt querying. +- **Estimated Hours:** 6 +- **Estimated LOC:** ~120 prod + ~200 test +- **Blocked by:** LH/RECEIPTS/1 +- **Blocking:** HG/IO/1 (foundation for provenance) +- **Definition of Done:** Receipts accurately describe all materialization decisions. Zero overhead when disabled. +- **Test Plan:** + - Golden path: two writers set same prop → receipt shows winner and loser. + - Golden path: add + concurrent remove → receipt shows OR-Set decision. + - Known failures: receipts disabled → no receipt in result. + - Fuzz: random 50-patch materialization → receipt count matches patch count. + - Edge cases: empty patch (no ops), single-writer (no conflicts). + +--- + +## Milestone 7 — PULSE (v7.7.0) + +**Subscriptions & Reactivity** + +Enable developers to react to graph changes without polling. + +### Feature: PL/DIFF — State Diff Engine + +**Rationale:** To notify subscribers of changes, we need a deterministic diff between two materialized states. + +#### PL/DIFF/1 — Implement deterministic state diff + +- **Status:** `OPEN` +- **User Story:** As the system, I need to compute what changed between two materialized states. +- **Requirements:** + - `diffStates(before, after)` returns: + ```javascript + { + nodes: { added: string[], removed: string[] }, + edges: { added: EdgeKey[], removed: EdgeKey[] }, + props: { set: Array<{key, oldValue, newValue}>, removed: Array<{key, oldValue}> } + } + ``` + - Deterministic output ordering (sorted keys/IDs). + - Handles null `before` (initial state = everything added). 
+ - O(N) where N = state size (single-pass comparison). +- **Acceptance Criteria:** + - Adding a node shows up in `nodes.added`. + - Removing a node shows up in `nodes.removed`. + - Property change shows old and new value. + - Output is deterministic across runs. +- **Scope:** Pure function, no integration with materialization. +- **Out of Scope:** Edge property diffs (deferred until WT lands). +- **Estimated Hours:** 4 +- **Estimated LOC:** ~80 prod + ~150 test +- **Blocked by:** None +- **Blocking:** PL/SUB/1 +- **Definition of Done:** Diff correctly captures all state changes. Deterministic. +- **Test Plan:** + - Golden path: add/remove nodes/edges/props → diff reflects changes. + - Edge cases: identical states (empty diff), null before, empty both. + - Fuzz: random state pair → diff applied to before produces after. + +--- + +### Feature: PL/SUB — graph.subscribe() + +**Rationale:** Polling-based change detection is wasteful. Subscription allows reactive updates. + +#### PL/SUB/1 — Implement subscribe/unsubscribe + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want to register handlers that fire when the graph changes. +- **Requirements:** + - `graph.subscribe({ onChange(diff), onError?(err) })` returns `{ unsubscribe() }`. + - After `materialize()`, if state changed, compute diff and call `onChange` for all subscribers. + - Error in one handler doesn't block others — caught and forwarded to `onError` if provided. + - Multiple subscribers supported. +- **Acceptance Criteria:** + - Subscribe → commit → materialize → onChange called with diff. + - Unsubscribe → no more calls. + - Handler error isolated. +- **Scope:** Post-materialize subscription. +- **Out of Scope:** Real-time push (fs.watch integration is PL/WATCH). +- **Estimated Hours:** 4 +- **Estimated LOC:** ~80 prod + ~150 test +- **Blocked by:** PL/DIFF/1 +- **Blocking:** PL/WATCH/1 +- **Definition of Done:** Subscribe, unsubscribe, error isolation all work. 
+- **Test Plan:** + - Golden path: subscribe → commit → materialize → handler called. + - Golden path: unsubscribe → no more calls. + - Known failures: handler throws → other handlers still called. + - Edge cases: subscribe during materialize, unsubscribe in handler. + +#### PL/SUB/2 — Optional initial replay + +- **Status:** `BLOCKED` +- **User Story:** As a developer subscribing to an existing graph, I want an initial snapshot so I don't miss current state. +- **Requirements:** + - `subscribe({ onChange, replay: true })` immediately fires `onChange` with diff from empty state to current. + - Only fires if `_cachedState` is available. + - If `_cachedState` is null, replay deferred until first materialize. +- **Acceptance Criteria:** + - With replay: subscribe after materialize → immediate onChange with full state as additions. + - Without replay: no immediate call. +- **Scope:** Initial replay option. +- **Out of Scope:** Historical replay from specific point. +- **Estimated Hours:** 2 +- **Estimated LOC:** ~30 prod + ~60 test +- **Blocked by:** PL/SUB/1 +- **Blocking:** None +- **Definition of Done:** Replay fires correct initial diff. +- **Test Plan:** + - Golden path: materialize → subscribe with replay → handler called immediately. + - Edge cases: no cached state → replay deferred. + +--- + +### Feature: PL/WATCH — graph.watch(pattern) + +**Rationale:** Not all subscribers care about all changes. Pattern-based filtering reduces noise. + +#### PL/WATCH/1 — Implement pattern-based filtering + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want to watch only specific nodes for changes. +- **Requirements:** + - `graph.watch('user:*', { onChange(diff) })` — only fires for changes matching the glob pattern. + - Pattern applied to node IDs in diff. + - Returns `{ unsubscribe() }`. + - Reuses subscription infrastructure from PL/SUB. +- **Acceptance Criteria:** + - `watch('user:*')` fires for user node changes, not for order node changes. 
+ - Pattern supports same glob syntax as `query().match()`. +- **Scope:** Client-side filtering of diffs. +- **Out of Scope:** Server-side filtering, edge-pattern matching. +- **Estimated Hours:** 3 +- **Estimated LOC:** ~50 prod + ~100 test +- **Blocked by:** PL/SUB/1 +- **Blocking:** None +- **Definition of Done:** Watch filters diffs by pattern. Only matching changes trigger handler. +- **Test Plan:** + - Golden path: watch('user:*') → user change fires, order change doesn't. + - Edge cases: pattern matches no nodes, pattern is '*' (all), empty diff after filtering. + +#### PL/WATCH/2 — Integrate with frontier change detection + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want `watch()` to optionally poll for remote changes using frontier detection. +- **Requirements:** + - Add `{ poll: number }` option to `watch()` — interval in ms. + - When set, periodically call `hasFrontierChanged()` and auto-materialize if changed. + - Uses `setInterval` internally; cleaned up on `unsubscribe()`. + - Minimum poll interval: 1000ms. +- **Acceptance Criteria:** + - `watch('user:*', { poll: 5000 })` checks every 5s. + - Remote change detected → auto-materialize → handler fires. + - Unsubscribe stops polling. +- **Scope:** Polling-based remote change detection. +- **Out of Scope:** fs.watch on refs directory (future optimization). +- **Estimated Hours:** 3 +- **Estimated LOC:** ~50 prod + ~100 test +- **Blocked by:** GK/FRONTIER/1, PL/WATCH/1 +- **Blocking:** None +- **Definition of Done:** Polling watch detects remote changes and notifies subscribers. +- **Test Plan:** + - Golden path: watch with poll → external commit → handler fires within poll interval. + - Edge cases: unsubscribe stops timer, minimum interval enforced. + +--- + +## Milestone 8 — HOLOGRAM (v8.0.0) + +**Provenance & Holography** + +Implements the theory from Papers III–IV. 
The mathematical foundations for provenance payloads, slicing, and wormholes are fully developed in the papers but not yet implemented. + +### Feature: HG/IO — In/Out Declarations on Patches + +**Rationale:** The gate that unlocks the rest of HOLOGRAM. Declared read/write sets on each patch make provenance queries possible without full replay. + +#### HG/IO/1 — Extend PatchV2 with reads/writes fields + +- **Status:** `BLOCKED` +- **User Story:** As the system, I need each patch to declare which nodes it reads and writes for provenance tracking. +- **Requirements:** + - Add optional `reads: string[]` and `writes: string[]` fields to PatchV2. + - Auto-populate during `commitPatch()` by inspecting ops: + - `NodeAdd(X)` → writes X. + - `NodeRemove(X)` → reads X. + - `EdgeAdd(A→B)` → reads A, reads B, writes edge key. + - `EdgeRemove(A→B)` → reads edge key. + - `PropSet(X, key)` → reads X, writes X. + - Store as part of patch blob (CBOR-encoded). + - Backward compatible: missing fields treated as unknown (full replay required). +- **Acceptance Criteria:** + - Committed patch includes accurate reads/writes arrays. + - Legacy patches without fields load correctly. +- **Scope:** Patch metadata extension. +- **Out of Scope:** Index building (that's HG/IO/2). +- **Estimated Hours:** 4 +- **Estimated LOC:** ~60 prod + ~120 test +- **Blocked by:** LH/RECEIPTS/2 (receipt infrastructure informs I/O classification) +- **Blocking:** HG/IO/2, HG/SLICE/1 +- **Definition of Done:** Patches carry accurate I/O declarations. Backward compatible. +- **Test Plan:** + - Golden path: addNode + setProperty + addEdge → reads/writes correctly populated. + - Edge cases: empty patch (ops list empty but valid), legacy patch loads. + +#### HG/IO/2 — Build nodeId-to-patchSha index + +- **Status:** `BLOCKED` +- **User Story:** As the system, I need to quickly answer "which patches affected node X?" without replaying all patches. +- **Requirements:** + - Build index `Map<nodeId, Set<patchSha>>` from I/O declarations. 
+ - Updated incrementally during materialization. + - Persisted as part of checkpoint (optional: separate tree blob). + - Query: `graph.patchesFor(nodeId)` returns `string[]` of contributing patch SHAs. +- **Acceptance Criteria:** + - Index accurately maps nodes to contributing patches. + - Incremental update after new patches. +- **Scope:** Index construction and query. +- **Out of Scope:** Causal cone computation (that's HG/SLICE/1). +- **Estimated Hours:** 5 +- **Estimated LOC:** ~100 prod + ~150 test +- **Blocked by:** HG/IO/1 +- **Blocking:** HG/SLICE/1 +- **Definition of Done:** Index built, persisted, queryable. Accurate for all test cases. +- **Test Plan:** + - Golden path: 3 patches affecting node X → patchesFor('X') returns 3 SHAs. + - Edge cases: node with no patches (added then removed?), patches from multiple writers. + - Stress: 1000 patches → index build time reasonable. + +--- + +### Feature: HG/PROV — Provenance Payloads + +**Rationale:** Implements the boundary encoding `(U_0, P)` from Paper III as a first-class type with monoid operations. + +#### HG/PROV/1 — Implement ProvenancePayload class + +- **Status:** `OPEN` +- **User Story:** As a developer, I want to package a sequence of patches as a transferable provenance payload. +- **Requirements:** + - `ProvenancePayload` class with: + - `constructor(patches)` — ordered patch sequence. + - `concat(other)` — monoid composition (concatenation). + - `static identity()` — empty payload. + - `get length` — patch count. + - `replay(initialState)` — deterministic materialization. + - Immutable after construction. + - Monoid laws: `identity.concat(p) === p`, `p.concat(identity) === p`, `(a.concat(b)).concat(c) === a.concat(b.concat(c))`. +- **Acceptance Criteria:** + - Monoid laws hold. + - `replay()` produces same state as full materialization of same patches. +- **Scope:** Payload type with monoid operations and replay. +- **Out of Scope:** Serialization format, BTR packaging. 
+- **Estimated Hours:** 4 +- **Estimated LOC:** ~60 prod + ~120 test +- **Blocked by:** None +- **Blocking:** HG/SLICE/1, HG/WORM/1, HG/BTR/1 +- **Definition of Done:** Monoid laws verified. Replay matches full materialization. +- **Test Plan:** + - Golden path: construct → concat → replay → correct state. + - Fuzz: random payload pairs → associativity holds. + - Edge cases: identity, single-patch payload, very long payload. + +--- + +### Feature: HG/SLICE — Slice Materialization + +**Rationale:** Given a target node, materialize only the patches that contribute to that node's current state. This is the "partial materialization by slicing" theorem from Paper III. + +#### HG/SLICE/1 — Compute backward causal cone and partial materialize + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want to materialize only the data relevant to a specific node instead of the full graph. +- **Requirements:** + - `graph.materializeSlice(nodeId)` returns `{ state, receipts? }` containing only the causal cone for that node. + - Uses I/O index (HG/IO/2) to compute backward cone D(v). + - Walks index: target node → contributing patches → their read dependencies → recursively. + - Constructs `ProvenancePayload` from cone patches (topologically sorted). + - Replays payload against empty state. +- **Acceptance Criteria:** + - Slice produces correct property values for target node. + - Slice is smaller than full materialization (measured in patch count). + - Result matches extracting target node from full materialization. +- **Scope:** Backward cone + partial replay. +- **Out of Scope:** Forward cone, incremental slice updates. +- **Estimated Hours:** 6 +- **Estimated LOC:** ~120 prod + ~200 test +- **Blocked by:** HG/IO/2, HG/PROV/1 +- **Blocking:** None +- **Definition of Done:** Slice materialization matches full materialization for target node. Fewer patches processed. 
+- **Test Plan:** + - Golden path: 100-patch graph, slice for 1 node → processes < 100 patches → correct result. + - Edge cases: node depends on all patches (degenerate case), node with no dependencies. + - Fuzz: random graph → slice → compare with full materialize extraction. + +--- + +### Feature: HG/WORM — Wormhole Compression + +**Rationale:** Compress multi-tick segments into single edges carrying sub-payloads. Useful for checkpointing long histories while preserving provenance. + +#### HG/WORM/1 — Implement wormhole compression + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want to compress a range of patches into a single wormhole that preserves provenance. +- **Requirements:** + - `graph.createWormhole(fromPatchSha, toPatchSha)` → `WormholeEdge`. + - Wormhole contains sub-payload (ProvenancePayload of compressed segment). + - Payload monoid: concatenating two consecutive wormhole payloads yields the combined wormhole's payload. + - Materialization that encounters a wormhole replays its sub-payload. +- **Acceptance Criteria:** + - Wormhole + remaining patches produces same state as all patches. + - Two consecutive wormholes compose correctly. +- **Scope:** Wormhole creation and replay. +- **Out of Scope:** Automatic wormhole creation policy, storage optimization. +- **Estimated Hours:** 6 +- **Estimated LOC:** ~100 prod + ~180 test +- **Blocked by:** HG/PROV/1 +- **Blocking:** None +- **Definition of Done:** Wormhole compression preserves materialization correctness. Composition works. +- **Test Plan:** + - Golden path: 100 patches → compress first 50 into wormhole → materialize → same result. + - Golden path: two consecutive wormholes → compose → same result. + - Edge cases: wormhole over single patch, wormhole over empty range. + +--- + +### Feature: HG/BTR — Boundary Transition Records + +**Rationale:** Tamper-evident packaging for auditable exchange of graph segments between parties who don't share full history. 
+ +#### HG/BTR/1 — Implement BTR packaging format + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want to package a graph segment as a verifiable artifact for exchange. +- **Requirements:** + - BTR binds `(h_in, h_out, U_0, P, t, kappa)`: + - `h_in`: hash of input state. + - `h_out`: hash of output state. + - `U_0`: initial state snapshot. + - `P`: provenance payload. + - `t`: timestamp. + - `kappa`: authentication tag (HMAC or signature). + - `createBTR(initialState, payload, key)` → `BTR`. + - `verifyBTR(btr, key)` → `{ valid: boolean, reason?: string }`. + - Serializable to CBOR. +- **Acceptance Criteria:** + - Created BTR verifies correctly. + - Tampered BTR fails verification. + - Replay of BTR produces `h_out`. +- **Scope:** BTR creation and verification. +- **Out of Scope:** Key management, certificate chains. +- **Estimated Hours:** 5 +- **Estimated LOC:** ~100 prod + ~150 test +- **Blocked by:** HG/PROV/1 +- **Blocking:** None +- **Definition of Done:** BTR creation, verification, and tamper detection work. +- **Test Plan:** + - Golden path: create BTR → verify → valid. + - Known failures: tamper payload → verify → invalid. + - Edge cases: empty payload, large payload. + +--- + +### Feature: HG/FORK — Prefix Forks + +**Rationale:** Git-style branching at the WARP layer enables what-if analysis, safe experimentation, and undo. + +#### HG/FORK/1 — Implement graph.fork() + +- **Status:** `OPEN` +- **User Story:** As a developer, I want to fork a graph at a specific point to experiment without affecting the original. +- **Requirements:** + - `graph.fork({ from: writerId, at: patchSha })` → new `WarpGraph` instance. + - Fork shares history up to `patchSha` (Git content addressing = automatic dedup). + - Fork gets a new writer ID. + - Original graph unaffected by fork operations. +- **Acceptance Criteria:** + - Fork materializes same state as original at fork point. + - Writes to fork don't appear in original. 
+ - Writes to original after fork don't appear in fork. +- **Scope:** Fork creation with shared prefix. +- **Out of Scope:** Fork merging (merge back), fork comparison. +- **Estimated Hours:** 6 +- **Estimated LOC:** ~100 prod + ~200 test +- **Blocked by:** None +- **Blocking:** None +- **Definition of Done:** Fork creates independent graph sharing history. Mutual isolation verified. +- **Test Plan:** + - Golden path: fork → write to fork → original unchanged. + - Golden path: fork → write to original → fork unchanged. + - Edge cases: fork at tip (empty divergence), fork at genesis. + - Stress: fork, write 100 patches to each → both materialize correctly. + +--- + +## Milestone 9 — ECHO (v9.0.0) + +**Observer Geometry (Speculative)** + +Paper IV defines observers as resource-bounded functors and introduces rulial distance. This is the most theoretical milestone and the furthest from implementation, but has concrete engineering applications. + +### Feature: EC/VIEW — Observer-Scoped Views + +**Rationale:** Different users legitimately see different projections of the same graph. Access control and data minimization are natural observer applications. + +#### EC/VIEW/1 — Define observer configuration + +- **Status:** `OPEN` +- **User Story:** As a developer, I want to define named observers that project the graph into scoped views. +- **Requirements:** + - `graph.observer(name, config)` where config specifies: + - `match: string` — glob pattern for visible nodes. + - `expose: string[]` — property paths to include. + - `redact: string[]` — property paths to exclude (takes precedence over expose). + - Observer returns a read-only view object with same query/traverse API as WarpGraph. + - View is computed from current materialized state. +- **Acceptance Criteria:** + - Observer view shows only matching nodes with allowed properties. + - Redacted properties not accessible. + - View supports `query()`, `traverse.*`, `hasNode()`. 
+- **Scope:** Observer definition and view projection. +- **Out of Scope:** Observer persistence, observer composition. +- **Estimated Hours:** 6 +- **Estimated LOC:** ~120 prod + ~200 test +- **Blocked by:** None +- **Blocking:** EC/COST/1 +- **Definition of Done:** Observer views correctly project and redact. Read-only query API works. +- **Test Plan:** + - Golden path: observer matching 'user:*' → only user nodes visible. + - Golden path: redact 'props.ssn' → property not in view. + - Edge cases: expose and redact overlap (redact wins), empty match, observer on empty graph. + +--- + +### Feature: EC/COST — Translation Cost Estimation + +**Rationale:** Given two observer definitions, estimate the cost of translating between their views. Useful for system design. + +#### EC/COST/1 — Implement MDL cost estimation + +- **Status:** `BLOCKED` +- **User Story:** As a system designer, I want to estimate how much information is lost when translating between two observer views. +- **Requirements:** + - `graph.translationCost(observerA, observerB)` returns `{ cost: number, breakdown: {...} }`. + - Cost based on MDL (Minimum Description Length) of the translation function. + - Factors: nodes visible in A but not B, properties exposed in A but redacted in B, structural differences. + - Normalized to [0, 1] range (0 = identical views, 1 = completely disjoint). +- **Acceptance Criteria:** + - Identical observers → cost 0. + - Completely disjoint observers → cost 1. + - Superset observer → cost > 0 (information loss in one direction). +- **Scope:** Cost estimation based on observer configs. +- **Out of Scope:** Optimal translation synthesis, distortion metrics. +- **Estimated Hours:** 5 +- **Estimated LOC:** ~80 prod + ~120 test +- **Blocked by:** EC/VIEW/1 +- **Blocking:** None +- **Definition of Done:** Cost estimation produces meaningful values for test cases. +- **Test Plan:** + - Golden path: identical observers → 0, disjoint → 1, subset → intermediate. 
+ - Edge cases: one observer sees nothing, both see everything. + +--- + +### Feature: EC/TEMPORAL — Temporal Queries + +**Rationale:** CTL*-style temporal logic over materialized history enables "was this always true?" and "does this eventually hold?" queries. + +#### EC/TEMPORAL/1 — Implement always/eventually operators + +- **Status:** `BLOCKED` +- **User Story:** As a developer, I want to query temporal properties of graph history. +- **Requirements:** + - `graph.temporal.always(nodeId, predicate, { since: tick })` — true if predicate held at every tick since `since`. + - `graph.temporal.eventually(nodeId, predicate, { since: tick })` — true if predicate held at some tick since `since`. + - Requires full history access (replay from `since` to current). + - Predicate receives node snapshot at each tick. +- **Acceptance Criteria:** + - `always('X', n => n.props.status === 'active', { since: 0 })` returns true if always active. + - `eventually('X', n => n.props.status === 'merged')` returns true if ever merged. +- **Scope:** Two temporal operators (always, eventually) over single-node history. +- **Out of Scope:** Path quantifiers (forAll/exists paths), branching-time semantics. +- **Estimated Hours:** 6 +- **Estimated LOC:** ~100 prod + ~180 test +- **Blocked by:** HG/IO/1 (needs patch-level history access) +- **Blocking:** None +- **Definition of Done:** Temporal operators produce correct results against known histories. +- **Test Plan:** + - Golden path: node status changes active → inactive → always('active') returns false. + - Golden path: node eventually becomes 'merged' → eventually('merged') returns true. + - Edge cases: node didn't exist at `since` tick, predicate never true, single-tick history. + +--- + +## Non-Goals + +Things this project should not try to become: + +- **A general-purpose database.** No SQL, no ACID transactions, no connection pooling. +- **A real-time system.** Git's I/O model is fundamentally batch-oriented. 
No WebSocket push, no sub-millisecond latency. +- **A distributed consensus system.** CRDTs give eventual consistency without coordination. If you need strong consistency or leader election, use a different tool. +- **A physics engine.** Paper V (emergent dynamics, Schrodinger-type evolution) is fascinating mathematics but not an implementation target. + +--- + +## Totals + +| Milestone | Features | Tasks | Est. Hours | Est. LOC | +|-----------|----------|-------|------------|----------| +| AUTOPILOT | 4 | 10 | 22 | ~1,075 | +| GROUNDSKEEPER | 3 | 4 | 13 | ~680 | +| WEIGHTED | 4 | 7 | 22 | ~1,110 | +| HANDSHAKE | 5 | 8 | 25 | ~1,035 | +| COMPASS | 3 | 3 | 13 | ~720 | +| LIGHTHOUSE | 4 | 5 | 17 | ~890 | +| PULSE | 3 | 5 | 16 | ~820 | +| HOLOGRAM | 6 | 7 | 36 | ~1,780 | +| ECHO | 3 | 3 | 17 | ~820 | +| **Total** | **35** | **52** | **181** | **~8,930** | diff --git a/bin/warp-graph.js b/bin/warp-graph.js index ed97e6d..6127225 100755 --- a/bin/warp-graph.js +++ b/bin/warp-graph.js @@ -1,7 +1,10 @@ #!/usr/bin/env node +import fs from 'node:fs'; import path from 'node:path'; import process from 'node:process'; +import readline from 'node:readline'; +import { execFileSync } from 'node:child_process'; import GitPlumbing, { ShellRunnerFactory } from '@git-stunts/plumbing'; import WarpGraph from '../src/domain/WarpGraph.js'; import GitGraphAdapter from '../src/infrastructure/adapters/GitGraphAdapter.js'; @@ -14,6 +17,7 @@ import { buildWritersPrefix, parseWriterIdFromRef, } from '../src/domain/utils/RefLayout.js'; +import { HookInstaller, classifyExistingHook } from '../src/domain/services/HookInstaller.js'; const EXIT_CODES = { OK: 0, @@ -26,11 +30,13 @@ const HELP_TEXT = `warp-graph [options] (or: git warp [options]) Commands: - info Summarize graphs in the repo - query Run a logical graph query - path Find a logical path between two nodes - history Show writer history - check Report graph health/GC status + info Summarize graphs in the repo + query Run a logical graph 
query + path Find a logical path between two nodes + history Show writer history + check Report graph health/GC status + materialize Materialize and checkpoint all graphs + install-hooks Install post-merge git hook Options: --repo Path to git repo (default: cwd) @@ -39,6 +45,9 @@ Options: --writer Writer id (default: cli) -h, --help Show this help +Install-hooks options: + --force Replace existing hook (backs up original) + Query options: --match Match node ids (default: *) --outgoing [label] Traverse outgoing edge (repeatable) @@ -57,7 +66,17 @@ History options: --node Filter patches touching node id `; +/** + * Structured CLI error with exit code and error code. + */ class CliError extends Error { + /** + * @param {string} message - Human-readable error message + * @param {Object} [options] + * @param {string} [options.code='E_CLI'] - Machine-readable error code + * @param {number} [options.exitCode=3] - Process exit code + * @param {Error} [options.cause] - Underlying cause + */ constructor(message, { code = 'E_CLI', exitCode = EXIT_CODES.INTERNAL, cause } = {}) { super(message); this.code = code; @@ -589,9 +608,26 @@ function renderCheck(payload) { lines.push(`Tombstone Ratio: ${payload.gc.tombstoneRatio}`); } + if (payload.hook) { + lines.push(formatHookStatusLine(payload.hook)); + } + return `${lines.join('\n')}\n`; } +function formatHookStatusLine(hook) { + if (!hook.installed && hook.foreign) { + return "Hook: foreign hook present — run 'git warp install-hooks'"; + } + if (!hook.installed) { + return "Hook: not installed — run 'git warp install-hooks'"; + } + if (hook.current) { + return `Hook: installed (v${hook.version}) — up to date`; + } + return `Hook: installed (v${hook.version}) — upgrade available, run 'git warp install-hooks'`; +} + function renderHistory(payload) { const lines = [ `Graph: ${payload.graph}`, @@ -645,6 +681,16 @@ function emit(payload, { json, command }) { return; } + if (command === 'materialize') { + 
process.stdout.write(renderMaterialize(payload)); + return; + } + + if (command === 'install-hooks') { + process.stdout.write(renderInstallHooks(payload)); + return; + } + if (payload?.error) { process.stderr.write(renderError(payload)); return; @@ -653,6 +699,13 @@ function emit(payload, { json, command }) { process.stdout.write(`${stableStringify(payload)}\n`); } +/** + * Handles the `info` command: summarizes graphs in the repository. + * @param {Object} params + * @param {Object} params.options - Parsed CLI options + * @returns {Promise<{repo: string, graphs: Object[]}>} Info payload + * @throws {CliError} If the specified graph is not found + */ async function handleInfo({ options }) { const { persistence } = await createPersistence(options.repo); const graphNames = await listGraphNames(persistence); @@ -683,6 +736,14 @@ async function handleInfo({ options }) { }; } +/** + * Handles the `query` command: runs a logical graph query. + * @param {Object} params + * @param {Object} params.options - Parsed CLI options + * @param {string[]} params.args - Remaining positional arguments (query spec) + * @returns {Promise<{payload: Object, exitCode: number}>} Query result payload + * @throws {CliError} On invalid query options or query execution errors + */ async function handleQuery({ options, args }) { const querySpec = parseQueryArgs(args); const { graph, graphName } = await openGraph(options); @@ -753,6 +814,14 @@ function mapQueryError(error) { throw error; } +/** + * Handles the `path` command: finds a shortest path between two nodes. 
+ * @param {Object} params + * @param {Object} params.options - Parsed CLI options + * @param {string[]} params.args - Remaining positional arguments (path spec) + * @returns {Promise<{payload: Object, exitCode: number}>} Path result payload + * @throws {CliError} If --from/--to are missing or a node is not found + */ async function handlePath({ options, args }) { const pathOptions = parsePathArgs(args); const { graph, graphName } = await openGraph(options); @@ -785,6 +854,12 @@ async function handlePath({ options, args }) { } } +/** + * Handles the `check` command: reports graph health, GC, and hook status. + * @param {Object} params + * @param {Object} params.options - Parsed CLI options + * @returns {Promise<{payload: Object, exitCode: number}>} Health check payload + */ async function handleCheck({ options }) { const { graph, graphName, persistence } = await openGraph(options); const health = await getHealth(persistence); @@ -792,6 +867,7 @@ async function handleCheck({ options }) { const writerHeads = await collectWriterHeads(graph); const checkpoint = await loadCheckpointInfo(persistence, graphName); const coverage = await loadCoverageInfo(persistence, graphName, writerHeads); + const hook = getHookStatusForCheck(options.repo); return { payload: buildCheckPayload({ @@ -802,6 +878,7 @@ async function handleCheck({ options }) { writerHeads, coverage, gcMetrics, + hook, }), exitCode: EXIT_CODES.OK, }; @@ -810,7 +887,7 @@ async function handleCheck({ options }) { async function getHealth(persistence) { const clock = new PerformanceClockAdapter(); const healthService = new HealthCheckService({ persistence, clock }); - return healthService.getHealth(); + return await healthService.getHealth(); } async function getGcMetrics(graph) { @@ -891,6 +968,7 @@ function buildCheckPayload({ writerHeads, coverage, gcMetrics, + hook, }) { return { repo, @@ -903,9 +981,18 @@ function buildCheckPayload({ }, coverage, gc: gcMetrics, + hook: hook || null, }; } +/** + * Handles the 
`history` command: shows patch history for a writer. + * @param {Object} params + * @param {Object} params.options - Parsed CLI options + * @param {string[]} params.args - Remaining positional arguments (history options) + * @returns {Promise<{payload: Object, exitCode: number}>} History payload + * @throws {CliError} If no patches are found for the writer + */ async function handleHistory({ options, args }) { const historyOptions = parseHistoryArgs(args); const { graph, graphName } = await openGraph(options); @@ -934,14 +1021,274 @@ async function handleHistory({ options, args }) { return { payload, exitCode: EXIT_CODES.OK }; } +async function materializeOneGraph({ persistence, graphName, writerId }) { + const graph = await WarpGraph.open({ persistence, graphName, writerId }); + await graph.materialize(); + const nodes = await graph.getNodes(); + const edges = await graph.getEdges(); + const checkpoint = await graph.createCheckpoint(); + return { graph: graphName, nodes: nodes.length, edges: edges.length, checkpoint }; +} + +/** + * Handles the `materialize` command: materializes and checkpoints all graphs. + * @param {Object} params + * @param {Object} params.options - Parsed CLI options + * @returns {Promise<{payload: Object, exitCode: number}>} Materialize result payload + * @throws {CliError} If the specified graph is not found + */ +async function handleMaterialize({ options }) { + const { persistence } = await createPersistence(options.repo); + const graphNames = await listGraphNames(persistence); + + if (graphNames.length === 0) { + return { + payload: { graphs: [] }, + exitCode: EXIT_CODES.OK, + }; + } + + const targets = options.graph + ? 
[options.graph] + : graphNames; + + if (options.graph && !graphNames.includes(options.graph)) { + throw notFoundError(`Graph not found: ${options.graph}`); + } + + const results = []; + for (const name of targets) { + try { + const result = await materializeOneGraph({ + persistence, + graphName: name, + writerId: options.writer, + }); + results.push(result); + } catch (error) { + results.push({ + graph: name, + error: error instanceof Error ? error.message : String(error), + }); + } + } + + const allFailed = results.every((r) => r.error); + return { + payload: { graphs: results }, + exitCode: allFailed ? EXIT_CODES.INTERNAL : EXIT_CODES.OK, + }; +} + +function renderMaterialize(payload) { + if (payload.graphs.length === 0) { + return 'No graphs found in repo.\n'; + } + + const lines = []; + for (const entry of payload.graphs) { + if (entry.error) { + lines.push(`${entry.graph}: error — ${entry.error}`); + } else { + lines.push(`${entry.graph}: ${entry.nodes} nodes, ${entry.edges} edges, checkpoint ${entry.checkpoint}`); + } + } + return `${lines.join('\n')}\n`; +} + +function renderInstallHooks(payload) { + if (payload.action === 'up-to-date') { + return `Hook: already up to date (v${payload.version}) at ${payload.hookPath}\n`; + } + if (payload.action === 'skipped') { + return 'Hook: installation skipped\n'; + } + const lines = [`Hook: ${payload.action} (v${payload.version})`, `Path: ${payload.hookPath}`]; + if (payload.backupPath) { + lines.push(`Backup: ${payload.backupPath}`); + } + return `${lines.join('\n')}\n`; +} + +function createHookInstaller() { + return new HookInstaller({ + fs, + execGitConfig: execGitConfigValue, + }); +} + +function execGitConfigValue(repoPath, key) { + try { + if (key === '--git-dir') { + return execFileSync('git', ['-C', repoPath, 'rev-parse', '--git-dir'], { + encoding: 'utf8', + }).trim(); + } + return execFileSync('git', ['-C', repoPath, 'config', key], { + encoding: 'utf8', + }).trim(); + } catch { + return null; + } +} + 
+function isInteractive() { + return Boolean(process.stderr.isTTY); +} + +function promptUser(question) { + const rl = readline.createInterface({ + input: process.stdin, + output: process.stderr, + }); + return new Promise((resolve) => { + rl.question(question, (answer) => { + rl.close(); + resolve(answer.trim()); + }); + }); +} + +function parseInstallHooksArgs(args) { + const options = { force: false }; + for (const arg of args) { + if (arg === '--force') { + options.force = true; + } else if (arg.startsWith('-')) { + throw usageError(`Unknown install-hooks option: ${arg}`); + } + } + return options; +} + +async function resolveStrategy(classification, hookOptions) { + if (hookOptions.force) { + return 'replace'; + } + + if (classification.kind === 'none') { + return 'install'; + } + + if (classification.kind === 'ours') { + return await promptForOursStrategy(classification); + } + + return await promptForForeignStrategy(); +} + +async function promptForOursStrategy(classification) { + const installer = createHookInstaller(); + if (classification.version === installer._version) { + return 'up-to-date'; + } + + if (!isInteractive()) { + throw usageError('Existing hook found. Use --force or run interactively.'); + } + + const answer = await promptUser( + `Upgrade hook from v${classification.version} to v${installer._version}? [Y/n] `, + ); + if (answer === '' || answer.toLowerCase() === 'y') { + return 'upgrade'; + } + return 'skip'; +} + +async function promptForForeignStrategy() { + if (!isInteractive()) { + throw usageError('Existing hook found. 
Use --force or run interactively.'); + } + + process.stderr.write('Existing post-merge hook found.\n'); + process.stderr.write(' 1) Append (keep existing hook, add warp section)\n'); + process.stderr.write(' 2) Replace (back up existing, install fresh)\n'); + process.stderr.write(' 3) Skip\n'); + const answer = await promptUser('Choose [1-3]: '); + + if (answer === '1') { + return 'append'; + } + if (answer === '2') { + return 'replace'; + } + return 'skip'; +} + +/** + * Handles the `install-hooks` command: installs or upgrades the post-merge git hook. + * @param {Object} params + * @param {Object} params.options - Parsed CLI options + * @param {string[]} params.args - Remaining positional arguments (install-hooks options) + * @returns {Promise<{payload: Object, exitCode: number}>} Install result payload + * @throws {CliError} If an existing hook is found and the session is not interactive + */ +async function handleInstallHooks({ options, args }) { + const hookOptions = parseInstallHooksArgs(args); + const installer = createHookInstaller(); + const status = installer.getHookStatus(options.repo); + const content = readHookContent(status.hookPath); + const classification = classifyExistingHook(content); + const strategy = await resolveStrategy(classification, hookOptions); + + if (strategy === 'up-to-date') { + return { + payload: { + action: 'up-to-date', + hookPath: status.hookPath, + version: installer._version, + }, + exitCode: EXIT_CODES.OK, + }; + } + + if (strategy === 'skip') { + return { + payload: { action: 'skipped' }, + exitCode: EXIT_CODES.OK, + }; + } + + const result = installer.install(options.repo, { strategy }); + return { + payload: result, + exitCode: EXIT_CODES.OK, + }; +} + +function readHookContent(hookPath) { + try { + return fs.readFileSync(hookPath, 'utf8'); + } catch { + return null; + } +} + +function getHookStatusForCheck(repoPath) { + try { + const installer = createHookInstaller(); + return installer.getHookStatus(repoPath); + } catch 
{ + return null; + } +} + const COMMANDS = new Map([ ['info', handleInfo], ['query', handleQuery], ['path', handlePath], ['history', handleHistory], ['check', handleCheck], + ['materialize', handleMaterialize], + ['install-hooks', handleInstallHooks], ]); +/** + * CLI entry point. Parses arguments, dispatches to the appropriate command handler, + * and emits the result to stdout (JSON or human-readable). + * @returns {Promise} + */ async function main() { const { options, positionals } = parseArgs(process.argv.slice(2)); diff --git a/docs/GUIDE.md b/docs/GUIDE.md index 1766e85..6b4feb5 100644 --- a/docs/GUIDE.md +++ b/docs/GUIDE.md @@ -118,6 +118,60 @@ const state = await graph.materialize(); // Edge 'temp->other' is not visible ``` +## Auto-Materialize and Auto-Checkpoint + +### Auto-Materialize + +By default, query methods throw if no materialized state exists. With `autoMaterialize: true`, query methods automatically materialize before returning results: + +```javascript +const graph = await WarpGraph.open({ + persistence, + graphName: 'my-graph', + writerId: 'local', + autoMaterialize: true, +}); + +// No explicit materialize() needed — queries auto-materialize +const nodes = await graph.getNodes(); +const exists = await graph.hasNode('user:alice'); +const result = await graph.query().match('user:*').run(); +``` + +When `autoMaterialize` is off (the default), querying dirty state throws `QueryError` with code `E_STALE_STATE`, and querying without any cached state throws `QueryError` with code `E_NO_STATE`. + +### Auto-Checkpoint + +Configure automatic checkpointing to keep materialization fast: + +```javascript +const graph = await WarpGraph.open({ + persistence, + graphName: 'my-graph', + writerId: 'local', + checkpointPolicy: { every: 500 }, +}); +``` + +After `materialize()` processes 500+ patches, a checkpoint is created automatically. The counter resets after each checkpoint. Checkpoint failures are swallowed — they never break materialization. 
+ +### Eager Re-Materialize + +After a local commit, the patch is applied eagerly to cached state. This means queries immediately reflect local writes without calling `materialize()` again: + +```javascript +await graph.materialize(); + +await (await graph.createPatch()) + .addNode('user:carol') + .commit(); + +// No re-materialize needed — eager apply already updated state +await graph.hasNode('user:carol'); // true +``` + +This works for all write paths: `createPatch().commit()`, `writer.commitPatch()`, and `PatchSession.commit()`. + ### Frontiers and Checkpoints A **frontier** tracks the last-seen patch from each writer: @@ -213,6 +267,21 @@ const state = await graph.materializeAt(checkpointSha); // Only processes patches since checkpoint, not entire history ``` +#### Automatic Checkpointing + +```javascript +// Auto-checkpoint: no manual intervention needed +const graph = await WarpGraph.open({ + persistence, + graphName: 'todos', + writerId: 'local', + checkpointPolicy: { every: 500 }, +}); + +// After 500+ patches, materialize() creates a checkpoint automatically +await graph.materialize(); +``` + ### Discovering Writers ```javascript @@ -233,6 +302,94 @@ await graph.syncCoverage(); // All writer tips are now parents of this commit ``` +## Git Hooks + +### Post-Merge Hook + +WarpGraph ships a `post-merge` Git hook that runs after every `git merge` or `git pull` and checks whether any warp writer refs (`refs/empty-graph/`) changed during the merge. + +If warp refs changed, the hook prints an informational message: + +``` +[warp] Writer refs changed during merge. Call materialize() to see updates. +``` + +The hook **never blocks a merge** — it always exits 0. + +### Auto-Materialize + +Enable automatic materialization and checkpointing after pulls: + +```bash +git config warp.autoMaterialize true +``` + +When enabled, the post-merge hook will automatically run `git warp materialize` whenever warp refs change during a merge. 
This materializes all graphs and creates checkpoints so the local state is always up to date. + +When disabled or unset (the default), the hook prints the informational warning shown above. + +### `git warp materialize` + +Materialize and checkpoint graphs explicitly: + +```bash +git warp materialize # All graphs in the repo +git warp materialize --graph my-graph # Single graph +git warp materialize --json # JSON output +``` + +For each graph, the command materializes state, counts nodes and edges, and creates a checkpoint. Output: + +``` +my-graph: 42 nodes, 18 edges, checkpoint abc123... +``` + +### Installing the Hook + +Use the `install-hooks` CLI command: + +```bash +git warp install-hooks +# or: warp-graph install-hooks --repo /path/to/repo +``` + +If a `post-merge` hook already exists, the command offers three options interactively: + +1. **Append** — keeps your existing hook and adds the warp section (delimited, upgradeable) +2. **Replace** — backs up the existing hook to `post-merge.backup` and installs fresh +3. **Skip** — do nothing + +If the warp hook is already installed, running the command again either reports "up to date" or offers to upgrade to the current version. + +### Non-Interactive / CI Usage + +In non-interactive environments (no TTY), use `--force` to replace any existing hook: + +```bash +git warp install-hooks --force +``` + +The `--force` flag always backs up an existing hook before replacing it. 
+ +Both `--json` and `--force` flags are supported: + +```bash +git warp install-hooks --json --force +``` + +### Checking Hook Status + +The `check` command reports hook status: + +```bash +git warp check +``` + +Example output lines: +- `Hook: installed (v7.1.0) — up to date` +- `Hook: installed (v7.0.0) — upgrade available, run 'git warp install-hooks'` +- `Hook: not installed — run 'git warp install-hooks'` + ## Troubleshooting ### "My changes aren't appearing" @@ -249,9 +406,10 @@ await graph.syncCoverage(); ### "Materialization is slow" -1. Create checkpoints periodically -2. Use `materializeAt(checkpointSha)` for incremental recovery -3. Consider reducing patch frequency (batch operations) +1. Enable auto-checkpointing: `checkpointPolicy: { every: 500 }` on `WarpGraph.open()` +2. Create checkpoints manually with `graph.createCheckpoint()` if not using auto-checkpointing +3. Use `materializeAt(checkpointSha)` for incremental recovery +4. Consider reducing patch frequency (batch operations) ### "Node should be deleted but still appears" @@ -267,7 +425,7 @@ Solution: Ensure tombstones have higher lamport than adds. ## Performance Tips 1. **Batch operations** - Group related changes into single patches -2. **Checkpoint regularly** - Every 500-1000 patches +2. **Checkpoint regularly** - Use `checkpointPolicy: { every: 500 }` for automatic checkpointing, or call `createCheckpoint()` manually 3. **Use incremental materialization** - `materializeAt()` vs `materialize()` 4. **Limit concurrent writers** - More writers = more merge overhead diff --git a/docs/ideas/IDEA-003-DX.md b/docs/ideas/IDEA-003-DX.md new file mode 100644 index 0000000..01a97c0 --- /dev/null +++ b/docs/ideas/IDEA-003-DX.md @@ -0,0 +1,496 @@ +⏺ Explore(Explore DX pain points) + ⎿  Prompt: + I need to understand the developer experience pain points in the @git-stunts/empty-graph codebase at /Users/james/git/git-stunts/empty-graph. Specifically, I want to find: + + 1. 
What happens after git pull / git fetch? Does the user need to manually re-materialize? Is there any hook or automation? Read through WarpGraph.js to understand if materialization is cached and when it becomes stale. + 2. What does the checkpoint workflow look like? Read CheckpointService.js to understand how checkpoints are created and loaded. Is there auto-checkpointing? Does the user have to manually call createCheckpoint()? + 3. What does the sync workflow look like end-to-end? Read SyncProtocol.js to understand what syncWith() does. Does it auto-materialize after sync? Does it invalidate caches? + 4. What does the index workflow look like? Read BitmapIndexBuilder.js and IndexRebuildService.js - when do indexes need rebuilding? Is there any staleness detection? + 5. What git hooks already exist? Look at scripts/hooks/ and scripts/setup-hooks.js to see what's already there. Are they dev-only (lint/test) or do they do anything graph-related? + 6. What does the GC workflow look like? Read GCPolicy.js and GCMetrics.js - when does GC run? Is it automatic or manual? + 7. What does frontier tracking look like? Read Frontier.js - how does the system know when new patches from other writers have arrived? + 8. How does WarpGraph.materialize() work internally? Does it diff against the frontier to know what's changed? Does it use checkpoints automatically? Read the materialize method carefully. + 9. Look at the examples (setup.js, explore.js, multi-writer.js) for patterns that seem tedious or boilerplate-heavy. + 10. What errors can the user hit from stale state? Look at error types and when QueryError, SyncError etc are thrown. + + Focus on understanding the friction points: manual steps that could be automated, state that gets stale silently, operations that should chain but don't, etc. Do NOT read markdown files. + ⎿  ❯ I need to understand the developer experience pain points in the @git-stunts/empty-graph codebase at /Users/james/git/git-stunts/empty-graph. 
Specifically, I want to find: + + 1. **What happens after `git pull` / `git fetch`?** Does the user need to manually re-materialize? Is there any hook or automation? Read through WarpGraph.js to understand if materialization is cached and when it becomes stale. + + 2. **What does the checkpoint workflow look like?** Read CheckpointService.js to understand how checkpoints are created and loaded. Is there auto-checkpointing? Does the user have to manually call createCheckpoint()? + + 3. **What does the sync workflow look like end-to-end?** Read SyncProtocol.js to understand what syncWith() does. Does it auto-materialize after sync? Does it invalidate caches? + + 4. **What does the index workflow look like?** Read BitmapIndexBuilder.js and IndexRebuildService.js - when do indexes need rebuilding? Is there any staleness detection? + + 5. **What git hooks already exist?** Look at scripts/hooks/ and scripts/setup-hooks.js to see what's already there. Are they dev-only (lint/test) or do they do anything graph-related? + + 6. **What does the GC workflow look like?** Read GCPolicy.js and GCMetrics.js - when does GC run? Is it automatic or manual? + + 7. **What does frontier tracking look like?** Read Frontier.js - how does the system know when new patches from other writers have arrived? + + 8. **How does WarpGraph.materialize() work internally?** Does it diff against the frontier to know what's changed? Does it use checkpoints automatically? Read the materialize method carefully. + + 9. **Look at the examples** (setup.js, explore.js, multi-writer.js) for patterns that seem tedious or boilerplate-heavy. + + 10. **What errors can the user hit from stale state?** Look at error types and when QueryError, SyncError etc are thrown. + + Focus on understanding the friction points: manual steps that could be automated, state that gets stale silently, operations that should chain but don't, etc. Do NOT read markdown files. 
+ ⎿  Bash(find /Users/james/git/git-stunts/empty-graph -type f -name "*.js" | grep -E "(WarpGraph|Checkpoint|Sync|Bitmap|Index|Frontier|GC|hooks|error|example)" | head -30) + Waiting… + ⎿   + + ⎿  Bash(find /Users/james/git/git-stunts/empty-graph/src -type f -name "*.js" 2>/dev/null | sort) + Waiting… + ⎿   + + ⎿  Bash(find /Users/james/git/git-stunts/empty-graph -type f \( -name "setup.js" -o -name "explore.js" -o -name "multi-writer.js" -o -name "*.hook.*" \) 2>/dev/null) + Waiting… + ⎿   + + ⎿  Bash(find /Users/james/git/git-stunts/empty-graph/scripts -type f 2>/dev/null) + Waiting… + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/src/domain/WarpGraph.js) + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/src/domain/services/CheckpointService.js) + + ⎿   + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/src/domain/services/SyncProtocol.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/src/domain/services/BitmapIndexBuilder.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/src/domain/services/IndexRebuildService.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/src/domain/services/Frontier.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/src/domain/services/GCPolicy.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/src/domain/services/GCMetrics.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/scripts/setup-hooks.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/scripts/hooks/pre-commit) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/scripts/hooks/pre-push) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/examples/setup.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/examples/explore.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/examples/multi-writer.js) + + ⎿   + + ⎿  Bash(ls -la /Users/james/git/git-stunts/empty-graph/src/domain/errors/) + Waiting… + ⎿   + + ⎿  
Read(/Users/james/git/git-stunts/empty-graph/src/domain/errors/QueryError.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/src/domain/errors/SyncError.js) + + ⎿   + + ⎿  Read(/Users/james/git/git-stunts/empty-graph/src/domain/warp/Writer.js) + + ⎿   + + ⎿  Response: + Perfect! Now let me compile my analysis. I have a comprehensive understanding of the codebase. Let me create a detailed report. + + Developer Experience Pain Points Analysis: @git-stunts/empty-graph + + Based on my thorough review of the codebase, I've identified 10 significant developer experience (DX) pain points: + + --- + 1. Post-git pull / git fetch Materialization is Manual & Stale by Default + + Pain Point: After pulling remote changes, users must manually call materialize() to see new patches. There's no automatic hook or detection of stale state. + + Evidence from code: + - WarpGraph.materialize() (line 416) is the only way to load patches + - The _cachedState starts as null (line 107) + - No post-fetch hook triggers materialization + - Scripts pre-commit and pre-push are dev-only (ESLint, tests) - no graph operations + + The workflow is clunky: + git pull origin → new patches available in refs + user calls materialize() → finally sees data + + What's missing: + - Automatic cache invalidation on ref changes + - A post-fetch hook to materialize + - Stale state warnings (e.g., "Your materialized state is 5 patches behind") + - Auto-rematerialization option in syncWith() + + --- + 2. Checkpoint Workflow is Entirely Manual - No Auto-Checkpointing + + Pain Point: Users must explicitly call graph.createCheckpoint() at strategic times. Missing checkpoints = slow incremental materialization. 
+ + Evidence from code: + - CheckpointService.js line 54: create() is explicitly called, never auto-triggered + - WarpGraph.createCheckpoint() line 632 requires manual invocation + - Checkpoints are discovered via _loadLatestCheckpoint() (line 767) but only during materialization, not via any periodic mechanism + - DEFAULT_GC_POLICY (line 39 in GCPolicy) has compactOnCheckpoint: true, but no auto-triggering + + Current manual pattern: + // User must remember to do this: + await graph.createCheckpoint(); + + // Forgetting it means: + // - Next materialize() rescans ALL writers + // - O(all patches) instead of O(patches since checkpoint) + + What's missing: + - maybeCreateCheckpoint() triggered after N patches + - Per-graph configuration for checkpoint frequency + - Warning when checkpoint is stale (>100 patches behind) + - Automatic creation on syncWith() completion (for convergence) + + --- + 3. Sync Workflow is Incomplete - Doesn't Auto-Materialize + + Pain Point: syncWith() applies patches to cached state but does NOT ensure state is materialized first, leading to silent data loss. + + Evidence from code: + - WarpGraph.syncWith() line 1286: Only materializes if !this._cachedState + - applySyncResponse() (SyncProtocol.js line 304) mutates the state in place without validating it's current + - Example sequence that breaks: + await graph.materialize(); // Alice's state + // (time passes, new patches arrive) + await graph.syncWith(bob); // Applies patches to stale Alice state! + + The sync protocol itself is clean (frontier comparison in computeSyncDelta line 142), but the API doesn't guarantee freshness. + + What's missing: + - syncWith() should materialize before syncing (or at least warn) + - Option to force re-materialize in syncWith({ forceFresh: true }) + - applySyncResponse() should validate frontier compatibility + + --- + 4. Index Staleness is Silent - No Rebuild Trigger + + Pain Point: Bitmap indexes don't self-invalidate when the graph changes. 
Users have no way to know when an index is stale. + + Evidence from code: + - BitmapIndexBuilder.js - pure in-memory builder, no staleness tracking + - IndexRebuildService.js line 86: rebuild() is explicitly called, never automatic + - load() method (line 281) validates shards with strict: true for integrity, but doesn't check if the graph has new commits since the index was built + - No OID reference stored anywhere to auto-detect staleness + + Current pattern requires manual decision: + // When is the index stale? Developer must guess: + const reader = await indexService.load(savedIndexOid); // Is this still valid? + + What's missing: + - Index OID + git commit date tracking + - Auto-detection: "This index is 1000 patches old, rebuild?" + - Automatic rebuild trigger on checkpoint or sync completion + - Staleness metrics exposed to user + + --- + 5. GC Workflow is Manual, Policies are Implicit + + Pain Point: maybeRunGC() and runGC() require explicit calls. Default policy (30% tombstone ratio) may not fit all workloads. + + Evidence from code: + - GCPolicy.js line 39: DEFAULT_GC_POLICY hardcoded with 0.3 tombstone ratio + - WarpGraph.maybeRunGC() (line 955) only checks if called, doesn't auto-trigger + - No per-graph GC scheduling + - User must call getGCMetrics() to know when to GC + + Silent inefficiency pattern: + await graph.materialize(); + // Queries get slower as tombstones accumulate + // User has no idea until performance tanks + await graph.maybeRunGC(); // Hope this runs + + What's missing: + - Periodic GC on a background timer + - Per-query performance degradation warnings + - Automatic trigger after N patches (like minPatchesSinceCompaction) + - Metrics dashboard to see tombstone growth + + --- + 6. Frontier Tracking is Passive - No Change Notifications + + Pain Point: Frontier.js is just a data structure (Map). There's no way to detect when the frontier has changed. 
+ + Evidence from code: + - Frontier.js - simple CRUD: createFrontier(), updateFrontier(), getFrontierEntry() + - WarpGraph.getFrontier() (line 1041) returns the current frontier but doesn't track deltas + - No frontier change callbacks or events + - SyncProtocol.computeSyncDelta() (line 142) requires explicit comparison + + Users must manually poll: + const oldFrontier = await graph.getFrontier(); + // ... time passes ... + const newFrontier = await graph.getFrontier(); + if (oldFrontier !== newFrontier) { // Manual comparison! + // react to change + } + + What's missing: + - Frontier observer pattern (onChange callback) + - Automatic sync trigger when frontier changes + - watchFrontier() API that notifies on writer tip changes + - Cheap frontier diff operations + + --- + 7. Writer Identity is Confusing - Too Many Ways to Get WriterId + + Pain Point: Three ways to create a writer, all with different persistence semantics: + - graph.writer() - resolves from git config or generates + - graph.writer('alice') - explicit ID, no persistence + - graph.createWriter({ persist: 'config' }) - generates fresh, optionally persists + + Evidence from code: + - WarpGraph.writer() (line 1489) - uses resolveWriterId() for stable ID + - WarpGraph.createWriter() (line 1531) - generates fresh generateWriterId() + - Writer.beginPatch() (Writer.js line 114) captures head via ref but doesn't validate CAS semantics properly + + Confusion points: + - New users don't know which to use + - writer() silently saves to git config (hidden side effect) + - No validation that writerId is unique per machine + - CAS semantics in PatchBuilderV2 can still fail silently if two writers fork + + What's missing: + - Clear semantic distinction between "stable" and "ephemeral" writers + - Validation that multi-writer chains don't diverge + - Writer ID generation with machine hostname/process ID + - Error message when CAS commit fails + + --- + 8. 
Materialization Doesn't Use Checkpoints Automatically Unless You Remember + + Pain Point: materialize() checks for a checkpoint (line 418) and takes the incremental fast path only if one exists. Users who've never called createCheckpoint() pay full O(N) cost every time. + + Evidence from code: + // From WarpGraph.materialize() line 416-456 + async materialize() { + const checkpoint = await this._loadLatestCheckpoint(); // OK + if (checkpoint?.schema === 2) { + // Fast path: incremental + const patches = await this._loadPatchesSince(checkpoint); + const state = reduceV5(patches, checkpoint.state); + ... + } + // ELSE: Slow path, recompute everything + const writerIds = await this.discoverWriters(); + const allPatches = []; + for (const writerId of writerIds) { + const writerPatches = await this._loadWriterPatches(writerId); + allPatches.push(...writerPatches); + } + ... + } + + The trap: + - No checkpoint exists initially + - User materializes 1000 times, each costs O(all writers * all patches) + - User finally creates checkpoint, next materialize is 100x faster + - But they've already paid for 1000 slow materializations + + What's missing: + - Automatic checkpoint creation after N materializations + - Warning: "Consider creating a checkpoint, you've materialized 100x" + - Periodic checkpoint policy (e.g., "every 10000 patches") + + --- + 9. State Staleness is Silent - No Cache Invalidation + + Pain Point: _cachedState and _materializedGraph are set once in _setMaterializedState() (line 372) and never invalidated until explicit materialize() call. + + Evidence from code: + - Query methods like hasNode() (line 1582), neighbors() (line 1653) throw if !this._cachedState + - But there's NO detection that the cache is stale + - If user does: + await graph.materialize(); + await graph.createPatch().addNode('x').commit(); + console.log(graph.hasNode('x')); // FALSE - cache is stale! 
+ + Silent data inconsistencies: + - User edits graph, queries return stale results + - No warning that cache is invalid + - _patchesSinceGC counter (line 116) tracks patches for GC but doesn't invalidate cache + + What's missing: + - _invalidateCache() method called after createPatch().commit() + - Automatic re-materialize after local writes + - Warning: "Your cached state is 1 patch behind, call materialize()" + - Cache lifetime tracking + + --- + 10. Error Messages Are Vague - Hard to Diagnose State Problems + + Pain Points in Error Handling: + + A. Backfill/divergence detection is strict but messages are cryptic: + // From WarpGraph._validatePatchAgainstCheckpoint() line 908-934 + async _validatePatchAgainstCheckpoint(writerId, incomingSha, checkpoint) { + const relation = await this._relationToCheckpointHead(ckHead, incomingSha); + if (relation === 'same' || relation === 'behind') { + throw new Error( + `Backfill rejected for writer ${writerId}: ` + + `incoming patch is ${relation} checkpoint frontier` + ); // VAGUE: What should user do? + } + } + + B. No distinction between "I haven't materialized yet" vs "state is corrupt": + // hasNode() throws same error for two different situations: + if (!this._cachedState) { + throw new Error('No cached state. Call materialize() first.'); // Could mean 3 things + } + + C. QueryError, SyncError, TraversalError exist but are under-documented: + - No examples of when each is thrown + - QueryError (QueryError.js) doesn't explain recovery strategy + - SyncError (SyncError.js) has codes like E_SYNC_REMOTE but users don't know what to do + + D. 
GC errors silently accumulate: + - No error thrown if GC fails (line 998 in executeGC) + - Tombstones keep growing, queries get slower + - User has no diagnostic data + + What's missing: + - Actionable error messages with recovery steps + - Diagnostic methods: graph.getStateHealth(), graph.getCacheStatus() + - Error codes with documented meanings and recovery actions + - Tracing/observability for state mutations + + --- + Summary Table: Automation Gaps + ┌────────────────────────┬────────────────┬──────────────────────┬────────────────┐ + │ Feature │ Current │ Ideal │ Friction Level │ + ├────────────────────────┼────────────────┼──────────────────────┼────────────────┤ + │ Post-fetch materialize │ Manual │ Auto │ High │ + ├────────────────────────┼────────────────┼──────────────────────┼────────────────┤ + │ Checkpointing │ Manual │ Periodic auto │ Medium │ + ├────────────────────────┼────────────────┼──────────────────────┼────────────────┤ + │ Index rebuild │ Manual │ On staleness │ High │ + ├────────────────────────┼────────────────┼──────────────────────┼────────────────┤ + │ GC trigger │ Manual │ Auto on metrics │ Medium │ + ├────────────────────────┼────────────────┼──────────────────────┼────────────────┤ + │ Cache invalidation │ Manual │ Auto on local writes │ High │ + ├────────────────────────┼────────────────┼──────────────────────┼────────────────┤ + │ Sync freshness │ Manual │ Auto-materialize │ High │ + ├────────────────────────┼────────────────┼──────────────────────┼────────────────┤ + │ Frontier watches │ Manual polling │ Observer pattern │ Low-Medium │ + ├────────────────────────┼────────────────┼──────────────────────┼────────────────┤ + │ Error diagnostics │ Generic │ Actionable │ High │ + └────────────────────────┴────────────────┴──────────────────────┴────────────────┘ + --- + Key Insight: The "Materialize Tax" + + The biggest DX problem is developers must manually orchestrate materialization. The system has three sources of staleness: + + 1. 
Cached state - not invalidated after local writes or git pull + 2. Checkpoints - users forget to create them, causing O(N) materializations + 3. Indexes - rebuilt manually, unclear when stale + + A developer's workflow becomes: + + write patch → must call materialize() to see it + time passes → must call materialize() to see remote changes + performance degrades → must call runGC() + queries slow → must rebuild index + 50% of code is state management boilerplate + + Fix: Implement a WarpGraph.withAutoMaterialize() mode that: + - Invalidates cache after local writes + - Re-materializes before queries if stale + - Creates checkpoints periodically + - Triggers index rebuilds on background timer + ⎿  Done (22 tool uses · 100.8k tokens · 1m 7s) + +∴ Thinking… + + This is a thorough analysis. Let me synthesize this into a clear answer for the user, focusing on whether git hooks specifically would help, and what the actual DX pain points are. + +⏺ The exploration turned up a clear picture. Here are the pain points, roughly ordered by how much friction they cause: + + The Core Problem: The "Materialize Tax" + + Almost every useful operation requires the developer to manually orchestrate state freshness. The system has three independent sources of staleness (cached state, checkpoints, indexes) and none of them self-invalidate. A typical + session looks like: + + await graph.materialize(); // manual + await graph.createPatch().addNode('x').commit(); + graph.hasNode('x'); // FALSE — cache is stale, no warning + await graph.materialize(); // must call again + // ... time passes, git pull brings new patches ... + graph.getNodes(); // returns stale data, no warning + await graph.materialize(); // must call AGAIN + + This is the single biggest friction source. About half of application code ends up being state management boilerplate. + + The Specific Pain Points + + 1. Local writes don't invalidate cached state. 
After createPatch().commit(), _cachedState is stale but queries silently return old data. No warning, no error, just wrong answers. + + 2. Remote changes arrive silently. After git pull or git fetch, new writer patches exist in refs but nothing triggers rematerialization. The user has to know to call materialize(). + + 3. Checkpoints are manual-only. Users who never call createCheckpoint() pay full O(all-patches) cost on every materialize(). There's no auto-checkpointing after N patches or periodic policy. + + 4. Index staleness is invisible. Bitmap indexes have no staleness tracking. After the graph grows by 10,000 patches, the index is silently outdated. No detection, no warning. + + 5. GC never runs automatically. maybeRunGC() only fires if explicitly called. Tombstones accumulate, queries degrade, and the user has no signal until performance tanks. + + 6. syncWith() doesn't guarantee freshness. It materializes only if _cachedState is null, meaning it can apply sync deltas to an already-stale local state. + + Would Git Hooks Help? + + Yes, but only for pain point #2 (remote changes arriving silently). A post-merge or post-checkout hook could trigger rematerialization or at least flag that the graph is stale. Something like: + + # .git/hooks/post-merge + # Snapshot warp ref tips and compare against the last-seen snapshot. + # (Refs are not worktree paths, so `git diff -- refs/warp/` cannot detect them.) + current_refs=$(git for-each-ref --format='%(refname) %(objectname)' refs/warp/) + cached_refs=$(cat .git/warp-refs-snapshot 2>/dev/null) + if [ "$current_refs" != "$cached_refs" ]; then + echo "[warp] Graph refs changed — run materialize() to see updates" + printf '%s' "$current_refs" > .git/warp-refs-snapshot + fi + + But git hooks are the wrong layer for most of these problems. The real fixes belong inside WarpGraph itself: + + What would actually help: + + 1. Auto-invalidation after local writes. createPatch().commit() should mark _cachedState as dirty. Queries on dirty state either auto-rematerialize or throw a clear error. + 2. Lazy rematerialization. A mode where hasNode(), query(), neighbors() etc. check if state is dirty and rematerialize on-demand before returning results. + 3. 
Periodic auto-checkpointing. A policy like { checkpointEvery: 500 } that creates a checkpoint after N patches are materialized, so future materializations are incremental. + 4. Index staleness tracking. Store the frontier at index-build time. On next use, compare against current frontier. If diverged, warn or auto-rebuild. + 5. Background GC. Trigger maybeRunGC() automatically after materialization when tombstone ratio exceeds the policy threshold, instead of requiring an explicit call. + + The git hook approach is worth doing for the post-merge case (it's cheap and immediately helpful), but the bulk of the DX improvement comes from making WarpGraph manage its own state lifecycle instead of pushing that onto the + developer. diff --git a/docs/ideas/ROADMAP.md b/docs/ideas/ROADMAP.md new file mode 100644 index 0000000..95ee281 --- /dev/null +++ b/docs/ideas/ROADMAP.md @@ -0,0 +1,372 @@ +# Roadmap + +Where this project could go, roughly sequenced by leverage. + +--- + +## Phase 1: Kill the Materialize Tax + +The single biggest DX problem. Developers manually orchestrate state freshness across three independent axes (cached state, checkpoints, indexes) with no staleness detection on any of them. Fix this and the library becomes dramatically easier to use. + +### 1.1 Auto-invalidation after local writes + +`createPatch().commit()` and `writer.commitPatch()` should mark `_cachedState` as dirty. Today a user can write a patch and immediately get stale query results with no warning. Two options: + +- **Eager:** Re-materialize incrementally after each commit (apply the patch to cached state in-place, since we already have it in memory). +- **Lazy:** Mark state dirty, throw or auto-rematerialize on next query. + +Eager is better. The patch is already parsed and in memory at commit time. Applying it to the cached OR-Set/LWW state is O(ops-in-patch), which is negligible compared to the Git I/O that just happened. 
+ +### 1.2 Lazy rematerialization on query + +If the state is dirty (or null), query methods (`hasNode`, `neighbors`, `query().run()`, `traverse.shortestPath`) should auto-materialize before returning results. Gate this behind an opt-in mode initially: + +```javascript +const graph = await WarpGraph.open({ + persistence, graphName, writerId, + autoMaterialize: true, // new option +}); +``` + +### 1.3 Periodic auto-checkpointing + +Add a checkpoint policy to `WarpGraph.open()`: + +```javascript +const graph = await WarpGraph.open({ + persistence, graphName, writerId, + checkpointPolicy: { every: 500 }, // create checkpoint every 500 patches +}); +``` + +`materialize()` checks patch count since last checkpoint and auto-creates one when the threshold is exceeded. Users who never think about checkpoints still get incremental materialization. + +### 1.4 Post-merge git hook for staleness notification + +A lightweight `post-merge` hook that detects when warp refs changed during a pull/merge and prints a warning. Not a full solution, but cheap and immediately useful: + +``` +[warp] Writer refs changed during merge. Call materialize() to see updates. +``` + +Ship this as an installable hook via `npm run install:hooks` alongside the existing lint/test hooks. + +--- + +## Phase 2: Self-Managing Infrastructure + +Once the materialize tax is gone, the next friction layer is infrastructure that requires manual babysitting: indexes, GC, and frontier tracking. + +### 2.1 Index staleness tracking + +Store the frontier (writer ID -> tip SHA map) at index-build time as metadata in the index tree. On next `loadIndex()`, compare stored frontier against current refs. If diverged, log a warning or auto-rebuild. This makes "is my index stale?" a cheap O(writers) ref comparison instead of a mystery. + +### 2.2 Auto-GC after materialization + +After `materialize()` completes, check `getGCMetrics()` against the configured policy. 
If tombstone ratio exceeds the threshold, run GC automatically. Today `maybeRunGC()` exists but is never called unless the user remembers. Wire it into the materialize path: + +``` +materialize() → apply patches → maybe checkpoint → maybe GC +``` + +### 2.3 Frontier change detection + +Add a `hasFrontierChanged()` method that compares the last-known frontier against current refs without full materialization. This enables cheap polling: + +```javascript +if (await graph.hasFrontierChanged()) { + await graph.materialize(); +} +``` + +Later, this could back a `graph.watch()` API using fs.watch on the refs directory. + +--- + +## Phase 3: Better Multi-Writer Ergonomics + +The multi-writer story works but has sharp edges around writer identity, sync workflows, and error diagnostics. + +### 3.1 Simplify writer identity + +Today there are three ways to get a writer (`writer()`, `writer('id')`, `createWriter()`), each with different persistence semantics. Consolidate to two: + +- `graph.writer()` — stable identity, resolved from git config or generated and persisted on first use. +- `graph.writer('explicit-id')` — explicit identity, no side effects. + +Drop `createWriter()` as a separate method. The `{ persist: 'config' }` option can move to the first form. + +### 3.2 Sync-then-materialize as a single operation + +`syncWith()` should optionally materialize after applying the sync delta: + +```javascript +await graph.syncWith(remote, { materialize: true }); +``` + +This eliminates the most common sync footgun (syncing onto stale state, then forgetting to rematerialize). + +### 3.3 Actionable error messages + +Errors like "No cached state. Call materialize() first" should distinguish between "never materialized", "stale after local write", and "stale after remote sync". Each has a different recovery action. Add error codes and recovery hints to all state-related errors. 
+ +### 3.4 CAS failure recovery + +When a `commitPatch()` fails due to compare-and-swap (another process updated the writer ref), the error should explain what happened and suggest retry. Today this surfaces as a generic Git ref-update failure. + +### 3.5 Deletion guards (no-delete-under-descent) + +`NodeRemove` doesn't check whether the target node has properties or edges. The node gets tombstoned, its properties become orphaned in `state.prop`, and its edges become dangling references that `getEdges()` silently filters out. Nobody gets an error. Data just quietly disappears. + +Add a validation pass in `commitPatch()` that checks whether a `NodeRemove` targets a node with properties or connected edges. Configurable policy: + +```javascript +const graph = await WarpGraph.open({ + persistence, graphName, writerId, + onDeleteWithData: 'reject', // 'reject' | 'cascade' | 'warn' +}); +``` + +- **reject:** Throw an error if the node has properties or edges. +- **cascade:** Auto-generate `EdgeRemove` ops for connected edges and clear properties. +- **warn:** Log a warning but proceed (current implicit behavior, made explicit). + +This prevents a class of subtle data corruption bugs and establishes the referential integrity invariant that future features (recursive attachments, provenance tracking) will depend on. + +--- + +## Phase 4: Query Language + +The fluent query builder is functional but limited. As graphs get larger, users need filtering, aggregation, and multi-hop traversal without writing imperative loops. + +### 4.1 Property filters in query builder + +```javascript +graph.query() + .match('user:*') + .where({ role: 'admin' }) // property equality + .where(n => n.props.age > 18) // predicate function + .outgoing('manages') + .run(); +``` + +The `.where()` method exists but only supports predicate functions. Add shorthand object syntax for common equality/comparison filters. 
+ +### 4.2 Multi-hop traversal in queries + +```javascript +graph.query() + .match('user:alice') + .outgoing('manages', { depth: [1, 3] }) // 1-3 hops + .select(['id', 'props.name']) + .run(); +``` + +Today multi-hop requires chaining multiple `.outgoing()` calls or dropping to the imperative `traverse` API. + +### 4.3 Aggregation + +```javascript +graph.query() + .match('order:*') + .aggregate({ count: true, sum: 'props.total' }) + .run(); +``` + +Basic count/sum/avg over matched nodes without materializing the full result set. + +--- + +## Phase 5: Observability + +The library is opaque at runtime. Users can't see what's happening without adding their own instrumentation. + +### 5.1 State health diagnostics + +A single method that reports everything a user needs to know about their graph's operational state: + +```javascript +const status = await graph.status(); +// { +// cachedState: 'stale', // 'fresh' | 'stale' | 'none' +// patchesSinceCheckpoint: 847, +// patchesSinceIndex: 1203, +// tombstoneRatio: 0.12, +// writers: 3, +// frontier: { alice: 'abc...', bob: 'def...', carol: 'ghi...' }, +// } +``` + +### 5.2 Operation timing in LoggerPort + +The `ClockPort` and `LoggerPort` infrastructure exists but isn't wired into most operations. Add structured timing logs for `materialize()`, `syncWith()`, `createCheckpoint()`, `rebuildIndex()`, and `runGC()` so users can see where time is spent. + +### 5.3 CLI `status` command + +```bash +git warp status +``` + +Prints the same information as `graph.status()` from the command line. Useful for scripting and debugging. + +### 5.4 Tick receipts + +During `materialize()`, the system applies patches and discards all decision information. When two writers set the same property concurrently, LWW picks a winner, and the losing write vanishes without a trace. For multi-writer production use, "why does this node have this value?" 
is a question that comes up constantly, and today there's no answer short of manually inspecting CRDT internals. + +Add structured receipts emitted during patch application: + +```javascript +const { state, receipts } = await graph.materialize({ receipts: true }); +// receipts: [ +// { +// patchSha: 'abc123', +// writer: 'alice', +// lamport: 42, +// ops: [ +// { op: 'PropSet', node: 'user:bob', key: 'status', result: 'applied' }, +// { op: 'PropSet', node: 'user:carol', key: 'status', result: 'superseded', +// reason: 'LWW: writer bob at lamport 43 wins' }, +// ], +// }, +// ... +// ] +``` + +For each operation in each patch, the receipt records whether it was applied or superseded, and why. For LWW conflicts, record which EventId won. For OR-Set add/remove, record whether the add was a new dot or a re-add, and whether the remove was effective. + +Receipts are opt-in (gated behind `{ receipts: true }`) to avoid overhead in the common case. They serve three roles: + +- **Debugging:** Concrete answers to "why does this node have this value?" +- **Audit:** Structured evidence of every state transition for regulated environments. +- **Foundation for provenance (Phase 6):** Receipts are the raw trace data that provenance payloads and observer views are built on. + +--- + +## Phase 6: Provenance and Holography + +This is where the codebase meets Papers III-IV. The mathematical foundations for provenance payloads, slicing, and wormholes are fully developed in the papers but not yet implemented. + +### 6.1 In/Out declarations on patches + +The gate that unlocks the rest of Phase 6. Add declared read/write sets to each patch so that provenance queries don't require full replay. 
+ +Extend `PatchV2` with optional `reads` and `writes` fields: + +```javascript +{ + schema: 2, + writer: 'alice', + lamport: 42, + context: VersionVector, + ops: [...], + reads: ['user:alice', 'user:bob'], // nodes read by this patch + writes: ['user:alice'], // nodes written by this patch +} +``` + +Auto-populate during `commitPatch()` by inspecting ops: `PropSet` on node X reads and writes X; `EdgeAdd(A→B)` reads A and B, writes the edge; `NodeAdd(X)` writes X; `NodeRemove(X)` reads X. Store as part of the patch blob. + +Build a lightweight index mapping `nodeId → [patchSha]` for contributing patches. This makes "which patches affected node X?" an index lookup instead of a full replay. + +**What this unlocks:** +- Derivation graph `D(v)` — backward traversal of the I/O index. +- Slice materialization (6.3) — compute causal cones without full replay. +- Smarter GC — identify which patches are still causally relevant to live nodes. +- Conflict diagnostics — surface overlapping I/O between concurrent writers. + +### 6.2 Provenance payloads + +Implement the boundary encoding `(U_0, P)` from Paper III as a first-class type. Each patch already contains the information needed; the remaining work is packaging a sequence of patches as a transferable provenance payload with monoid operations. + +```javascript +class ProvenancePayload { + constructor(patches) { this.patches = patches; } + concat(other) { return new ProvenancePayload([...this.patches, ...other.patches]); } + static identity() { return new ProvenancePayload([]); } + get length() { return this.patches.length; } +} +``` + +The payload monoid (concat, identity) is the algebraic structure that wormhole composition (6.4) and prefix forks (6.6) depend on. + +### 6.3 Slice materialization + +Given a target node ID, compute the backward causal cone (`D(v)`) and materialize only the patches that contribute to that node's current state. This is the "partial materialization by slicing" theorem from Paper III. 
Depends on 6.1 (In/Out declarations) for computing causal cones without full replay. + +Useful for large graphs where full materialization is expensive but the user only needs one subgraph. + +### 6.4 Wormhole compression + +Compress a multi-tick (multi-patch) segment into a single edge carrying the sub-payload. Useful for checkpointing long histories: instead of keeping all intermediate patches, collapse them into a wormhole that preserves provenance but reduces storage. + +The payload monoid (6.2) makes wormhole composition well-behaved: concatenating two consecutive wormhole payloads yields the payload for the combined wormhole. + +### 6.5 Boundary Transition Records (BTRs) + +Implement the tamper-evident packaging format from Paper III. A BTR binds `(h_in, h_out, U_0, P, t, kappa)` into a single verifiable artifact. This enables auditable exchange of graph segments between parties who don't share the full history. + +### 6.6 Prefix forks + +Expose Git's branching topology at the WARP layer. A fork creates a new writer chain whose first commit has the fork point as its parent. Under content-addressed storage, the shared prefix is automatically deduplicated — Git already handles this. + +```javascript +const branch = await graph.fork({ from: 'alice', at: patchSha }); +// branch shares alice's history up to patchSha +// diverges from there with its own patches +``` + +**What this unlocks:** +- **What-if analysis:** Fork, apply a speculative change, compare outcomes, discard or keep. +- **Safe experimentation:** Try a bulk import on a fork before committing to the main line. +- **Undo:** Fork before a destructive operation. If it goes wrong, the original is untouched. + +Depends on 6.2 (payload monoid for reasoning about shared prefixes) and composes well with 6.4 (compress the shared prefix into a wormhole, then fork). + +--- + +## Phase 7: Observer Geometry (Speculative) + +Paper IV defines observers as resource-bounded functors and introduces rulial distance. 
This is the most theoretical part of the series and the furthest from implementation, but there are concrete engineering applications. + +### 7.1 Observer-scoped views + +Define named observers that project the full graph into scoped views: + +```javascript +const adminView = graph.observer('admin', { + match: 'user:*', + expose: ['id', 'props.name', 'props.role'], + redact: ['props.email', 'props.ssn'], +}); +``` + +This connects to the Paper IV idea that different observers legitimately see different projections of the same underlying state. + +### 7.2 Translation cost estimation + +Given two observer definitions, estimate the MDL cost of translating between their views. This is useful for system design: if a compliance observer is far from a diagnostic observer, emitting only compliance traces makes future diagnosis expensive. + +### 7.3 Temporal queries + +Implement the CTL*-style temporal logic from Paper IV over materialized history: + +```javascript +// "Was this node always in state 'active' since tick 5?" +graph.temporal.always('node:x', { prop: 'status', equals: 'active' }, { since: 5 }); + +// "Does every branch eventually reach a merged state?" +graph.temporal.forAllPaths('eventually', { predicate: isMerged }); +``` + +This is speculative and depends on having full history access, but the mathematical foundations are in place. + +--- + +## Non-Goals + +Things this project should probably not try to become: + +- **A general-purpose database.** No SQL, no ACID transactions, no connection pooling. If you need those, use PostgreSQL. +- **A real-time system.** Git's I/O model is fundamentally batch-oriented. No WebSocket push, no sub-millisecond latency. +- **A distributed consensus system.** CRDTs give eventual consistency without coordination. If you need strong consistency or leader election, use a different tool. 
+- **A physics engine.** Paper V (emergent dynamics, Schrodinger-type evolution) is fascinating mathematics but not an implementation target for this library. diff --git a/eslint.config.js b/eslint.config.js index 803a01e..696a3fa 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -1,11 +1,38 @@ import js from "@eslint/js"; +import tseslint from "typescript-eslint"; +import { fileURLToPath } from "node:url"; +import { dirname } from "node:path"; -export default [ +const __dirname = dirname(fileURLToPath(import.meta.url)); + +// ── Base: recommended + strict-type-checked for src/ and bin/ ──────────────── +// Every rule is "error". Zero warnings. Zero tolerance. + +export default tseslint.config( + // ── Global ignores ───────────────────────────────────────────────────────── + { + ignores: [ + "node_modules/**", + "coverage/**", + "examples/html/assets/**", + "scripts/**", + ], + }, + + // ── All JS files: recommended baseline ───────────────────────────────────── js.configs.recommended, + + // ── Source + CLI: typed linting (the nuclear option) ──────────────────────── { + files: ["src/**/*.js", "bin/**/*.js"], languageOptions: { ecmaVersion: 2022, sourceType: "module", + parser: tseslint.parser, + parserOptions: { + projectService: true, + tsconfigRootDir: __dirname, + }, globals: { process: "readonly", Buffer: "readonly", @@ -18,45 +45,217 @@ export default [ TextDecoder: "readonly", TextEncoder: "readonly", fetch: "readonly", - // Node.js 20+ globals AbortController: "readonly", AbortSignal: "readonly", performance: "readonly", - global: "readonly" - } + global: "readonly", + }, + }, + plugins: { + "@typescript-eslint": tseslint.plugin, }, rules: { + // ── THE RULES THAT WOULD HAVE CAUGHT THE BUG ──────────────────────── + "@typescript-eslint/no-floating-promises": "error", + "@typescript-eslint/no-misused-promises": "error", + "@typescript-eslint/await-thenable": "error", + "@typescript-eslint/require-await": "error", + + // Turn off base rule that 
conflicts with TS version + "require-await": "off", + + // ── More typed rules ───────────────────────────────────────────────── + "@typescript-eslint/no-unnecessary-type-assertion": "error", + "@typescript-eslint/no-redundant-type-constituents": "error", + "@typescript-eslint/restrict-plus-operands": "error", + "@typescript-eslint/no-base-to-string": "error", + "@typescript-eslint/no-duplicate-type-constituents": "error", + "@typescript-eslint/unbound-method": "error", + "@typescript-eslint/no-for-in-array": "error", + "@typescript-eslint/no-unnecessary-boolean-literal-compare": "error", + "@typescript-eslint/return-await": ["error", "always"], + + // ── Complexity & structure ─────────────────────────────────────────── "complexity": ["error", 10], "max-depth": ["error", 3], "max-lines-per-function": ["error", 50], "max-params": ["error", 3], "max-nested-callbacks": ["error", 3], - "no-unused-vars": ["error", { "argsIgnorePattern": "^_" }], - "no-console": "error", - "eqeqeq": ["error", "always"], - "curly": ["error", "all"], - "no-eval": "error", - "no-implied-eval": "error", - "no-new-wrappers": "error", - "no-caller": "error", - "no-undef-init": "error", + + // ── Variables ──────────────────────────────────────────────────────── + "no-unused-vars": "off", + "@typescript-eslint/no-unused-vars": ["error", { "argsIgnorePattern": "^_" }], "no-var": "error", "prefer-const": "error", - "prefer-template": "error", - "yoda": ["error", "never"], + "no-shadow": "off", + "@typescript-eslint/no-shadow": "error", + "no-use-before-define": "off", + "@typescript-eslint/no-use-before-define": ["error", { "functions": false }], + "no-undef-init": "error", + "one-var": ["error", "never"], + + // ── Equality & types ───────────────────────────────────────────────── + "eqeqeq": ["error", "always"], + "no-implicit-coercion": ["error", { "allow": ["!!"] }], + "no-new-wrappers": "error", + + // ── Control flow ───────────────────────────────────────────────────── + "curly": 
["error", "all"], "consistent-return": "error", - "no-shadow": "error", - "no-use-before-define": ["error", { "functions": false }], + "default-case": "error", + "default-case-last": "error", "no-lonely-if": "error", + "no-else-return": ["error", { "allowElseIf": false }], "no-unneeded-ternary": "error", - "one-var": ["error", "never"] - } + "no-useless-return": "error", + "yoda": ["error", "never"], + + // ── Functions ──────────────────────────────────────────────────────── + "no-caller": "error", + "no-extra-bind": "error", + "no-loop-func": "error", + "no-param-reassign": "error", + "prefer-arrow-callback": "error", + "prefer-rest-params": "error", + "prefer-spread": "error", + "arrow-body-style": ["error", "as-needed"], + + // ── Objects & arrays ───────────────────────────────────────────────── + "no-array-constructor": "error", + "no-object-constructor": "error", + "object-shorthand": ["error", "always"], + "prefer-object-spread": "error", + "prefer-destructuring": ["error", { + "VariableDeclarator": { "array": false, "object": true }, + "AssignmentExpression": { "array": false, "object": false }, + }], + "no-useless-computed-key": "error", + "no-useless-rename": "error", + "dot-notation": "error", + "grouped-accessor-pairs": ["error", "getBeforeSet"], + "accessor-pairs": "error", + + // ── Strings & templates ────────────────────────────────────────────── + "prefer-template": "error", + "no-useless-concat": "error", + "no-multi-str": "error", + + // ── Classes ────────────────────────────────────────────────────────── + "no-constructor-return": "error", + "no-useless-constructor": "error", + + // ── Modules ────────────────────────────────────────────────────────── + "no-duplicate-imports": "error", + + // ── Security & dangerous patterns ──────────────────────────────────── + "no-eval": "error", + "no-implied-eval": "error", + "no-new-func": "error", + "no-script-url": "error", + "no-proto": "error", + "no-extend-native": "error", + "no-iterator": "error", 
+ "no-labels": "error", + "no-extra-label": "error", + "no-with": "error", + "no-void": ["error", { "allowAsStatement": true }], + "no-console": "error", + + // ── Correctness ────────────────────────────────────────────────────── + "no-self-compare": "error", + "no-template-curly-in-string": "error", + "no-unreachable-loop": "error", + "no-promise-executor-return": "error", + "no-constant-binary-expression": "error", + "no-new": "error", + "no-return-assign": ["error", "always"], + "no-sequences": "error", + "no-throw-literal": "error", + "no-multi-assign": "error", + "no-useless-call": "error", + "symbol-description": "error", + "prefer-numeric-literals": "error", + "radix": "error", + "no-loss-of-precision": "error", + }, + }, + + // ── Relaxed complexity for algorithm-heavy modules ───────────────────────── + { + files: [ + "src/domain/services/TraversalService.js", + "src/domain/services/IndexRebuildService.js", + ], + rules: { + "complexity": ["error", 35], + "max-lines-per-function": ["error", 200], + "max-depth": ["error", 6], + "max-params": ["error", 6], + }, + }, + { + files: [ + "src/domain/WarpGraph.js", + "src/domain/services/CommitDagTraversalService.js", + "src/domain/services/CheckpointService.js", + "src/domain/services/QueryBuilder.js", + "src/domain/services/WarpMessageCodec.js", + "src/domain/services/SyncProtocol.js", + "src/domain/services/LogicalTraversal.js", + "src/domain/services/StateSerializerV5.js", + "src/domain/services/PatchBuilderV2.js", + "src/domain/utils/EventId.js", + "src/domain/types/WarpTypesV2.js", + "bin/warp-graph.js", + ], + rules: { + "complexity": ["error", 35], + "max-lines-per-function": ["error", 200], + "max-depth": ["error", 6], + "max-params": ["error", 6], + }, + }, + + // ── Port contracts: async is the interface, not the implementation ────────── + { + files: ["src/ports/**/*.js"], + rules: { + "@typescript-eslint/require-await": "off", + }, }, - // Relaxed rules for test files + + // ── JoinReducer: the 
algorithm from hell ─────────────────────────────────── + { + files: ["src/domain/services/JoinReducer.js"], + rules: { + "complexity": ["error", 35], + "max-lines-per-function": ["error", 200], + "max-depth": ["error", 6], + "max-params": ["error", 6], + "no-param-reassign": "off", + }, + }, + + // ── Test files: keep strict but relax structure rules ────────────────────── { files: ["test/**/*.js", "test/**/*.test.js"], languageOptions: { + ecmaVersion: 2022, + sourceType: "module", globals: { + process: "readonly", + Buffer: "readonly", + console: "readonly", + setTimeout: "readonly", + clearTimeout: "readonly", + AbortController: "readonly", + AbortSignal: "readonly", + fetch: "readonly", + URL: "readonly", + TextDecoder: "readonly", + TextEncoder: "readonly", + performance: "readonly", + global: "readonly", describe: "readonly", it: "readonly", expect: "readonly", @@ -65,25 +264,49 @@ export default [ beforeAll: "readonly", afterAll: "readonly", vi: "readonly", - bench: "readonly" - } + bench: "readonly", + }, }, rules: { + // Structure: tests can be long and nested "max-lines-per-function": "off", "max-nested-callbacks": "off", "max-params": "off", "complexity": "off", "max-depth": "off", + + // Tests need console, shadow, unused setup vars "no-console": "off", "no-shadow": "off", "no-unused-vars": "off", + + // Tests use flexible patterns "prefer-template": "off", - "curly": "off" - } + "curly": "off", + "no-new": "off", + "prefer-destructuring": "off", + "no-param-reassign": "off", + "no-throw-literal": "off", + "prefer-arrow-callback": "off", + "arrow-body-style": "off", + "no-empty-function": "off", + }, }, - // Relaxed rules for benchmarks + + // ── Benchmarks ───────────────────────────────────────────────────────────── { - files: ["benchmarks/**/*.js"], + files: ["benchmarks/**/*.js", "test/benchmark/**/*.js"], + languageOptions: { + globals: { + process: "readonly", + Buffer: "readonly", + console: "readonly", + setTimeout: "readonly", + clearTimeout: 
"readonly", + performance: "readonly", + global: "readonly", + }, + }, rules: { "no-unused-vars": "off", "no-console": "off", @@ -91,19 +314,34 @@ export default [ "max-depth": "off", "max-params": "off", "complexity": "off", - "prefer-template": "off" - } + "prefer-template": "off", + "max-lines-per-function": "off", + }, }, - // Relaxed rules for example scripts (CLI demos) + + // ── Example scripts ──────────────────────────────────────────────────────── { files: ["examples/**/*.js"], + languageOptions: { + globals: { + process: "readonly", + Buffer: "readonly", + console: "readonly", + setTimeout: "readonly", + clearTimeout: "readonly", + performance: "readonly", + global: "readonly", + }, + }, rules: { "no-console": "off", "max-lines-per-function": "off", - "complexity": "off" - } + "complexity": "off", + "no-unused-vars": "off", + }, }, - // Browser globals for HTML example assets + + // ── Browser globals for HTML example assets ──────────────────────────────── { files: ["examples/html/assets/**/*.js"], languageOptions: { @@ -112,41 +350,8 @@ export default [ document: "readonly", localStorage: "readonly", CustomEvent: "readonly", - Viz: "readonly" - } - } - }, - // Relaxed rules for specific algorithm files (graph algorithms have inherently high complexity) - { - files: ["src/domain/services/TraversalService.js", "src/domain/services/IndexRebuildService.js"], - rules: { - "complexity": ["error", 35], - "max-statements": ["error", 100], - "max-lines-per-function": ["error", 200], - "max-depth": ["error", 6], - "max-params": ["error", 6] - } + Viz: "readonly", + }, + }, }, - // Relaxed rules for core algorithm-heavy modules - { - files: [ - "src/domain/WarpGraph.js", - "src/domain/services/CommitDagTraversalService.js", - "src/domain/services/CheckpointService.js", - "src/domain/services/QueryBuilder.js", - "src/domain/services/WarpMessageCodec.js", - "src/domain/services/SyncProtocol.js", - "src/domain/services/LogicalTraversal.js", - 
"src/domain/services/StateSerializerV5.js", - "src/domain/services/PatchBuilderV2.js", - "src/domain/utils/EventId.js", - "src/domain/types/WarpTypesV2.js" - ], - rules: { - "complexity": ["error", 35], - "max-lines-per-function": ["error", 200], - "max-depth": ["error", 6], - "max-params": ["error", 6] - } - } -]; +); diff --git a/package-lock.json b/package-lock.json index da1be2d..857e715 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,6 +14,7 @@ "@git-stunts/plumbing": "^2.8.0", "@git-stunts/trailer-codec": "^2.1.1", "cbor-x": "^1.6.0", + "patch-package": "^8.0.0", "roaring": "^2.7.0", "zod": "^3.24.1" }, @@ -24,10 +25,13 @@ "devDependencies": { "@eslint/js": "^9.17.0", "@git-stunts/docker-guard": "^0.1.0", + "@typescript-eslint/eslint-plugin": "^8.54.0", + "@typescript-eslint/parser": "^8.54.0", "eslint": "^9.17.0", "fast-check": "^4.5.3", - "patch-package": "^8.0.0", "prettier": "^3.4.2", + "typescript": "^5.9.3", + "typescript-eslint": "^8.54.0", "vitest": "^2.1.8" }, "engines": { @@ -1207,6 +1211,262 @@ "dev": true, "license": "MIT" }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.54.0.tgz", + "integrity": "sha512-hAAP5io/7csFStuOmR782YmTthKBJ9ND3WVL60hcOjvtGFb+HJxH4O5huAcmcZ9v9G8P+JETiZ/G1B8MALnWZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.12.2", + "@typescript-eslint/scope-manager": "8.54.0", + "@typescript-eslint/type-utils": "8.54.0", + "@typescript-eslint/utils": "8.54.0", + "@typescript-eslint/visitor-keys": "8.54.0", + "ignore": "^7.0.5", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.54.0", + "eslint": "^8.57.0 || ^9.0.0", + 
"typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.54.0.tgz", + "integrity": "sha512-BtE0k6cjwjLZoZixN0t5AKP0kSzlGu7FctRXYuPAm//aaiZhmfq1JwdYpYr1brzEspYyFeF+8XF5j2VK6oalrA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/scope-manager": "8.54.0", + "@typescript-eslint/types": "8.54.0", + "@typescript-eslint/typescript-estree": "8.54.0", + "@typescript-eslint/visitor-keys": "8.54.0", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.54.0.tgz", + "integrity": "sha512-YPf+rvJ1s7MyiWM4uTRhE4DvBXrEV+d8oC3P9Y2eT7S+HBS0clybdMIPnhiATi9vZOYDc7OQ1L/i6ga6NFYK/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.54.0", + "@typescript-eslint/types": "^8.54.0", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.54.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.54.0.tgz", + "integrity": "sha512-27rYVQku26j/PbHYcVfRPonmOlVI6gihHtXFbTdB5sb6qA0wdAQAbyXFVarQ5t4HRojIz64IV90YtsjQSSGlQg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.54.0", + "@typescript-eslint/visitor-keys": "8.54.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.54.0.tgz", + "integrity": "sha512-dRgOyT2hPk/JwxNMZDsIXDgyl9axdJI3ogZ2XWhBPsnZUv+hPesa5iuhdYt2gzwA9t8RE5ytOJ6xB0moV0Ujvw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.54.0.tgz", + "integrity": "sha512-hiLguxJWHjjwL6xMBwD903ciAwd7DmK30Y9Axs/etOkftC3ZNN9K44IuRD/EB08amu+Zw6W37x9RecLkOo3pMA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.54.0", + "@typescript-eslint/typescript-estree": "8.54.0", + "@typescript-eslint/utils": "8.54.0", + "debug": "^4.4.3", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.54.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/types/-/types-8.54.0.tgz", + "integrity": "sha512-PDUI9R1BVjqu7AUDsRBbKMtwmjWcn4J3le+5LpcFgWULN3LvHC5rkc9gCVxbrsrGmO1jfPybN5s6h4Jy+OnkAA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.54.0.tgz", + "integrity": "sha512-BUwcskRaPvTk6fzVWgDPdUndLjB87KYDrN5EYGetnktoeAvPtO4ONHlAZDnj5VFnUANg0Sjm7j4usBlnoVMHwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.54.0", + "@typescript-eslint/tsconfig-utils": "8.54.0", + "@typescript-eslint/types": "8.54.0", + "@typescript-eslint/visitor-keys": "8.54.0", + "debug": "^4.4.3", + "minimatch": "^9.0.5", + "semver": "^7.7.3", + "tinyglobby": "^0.2.15", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + 
"dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.54.0.tgz", + "integrity": "sha512-9Cnda8GS57AQakvRyG0PTejJNlA2xhvyNtEVIMlDWOOeEyBkYWhGPnfrIAnqxLMTSTo6q8g12XVjjev5l1NvMA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.9.1", + "@typescript-eslint/scope-manager": "8.54.0", + "@typescript-eslint/types": "8.54.0", + "@typescript-eslint/typescript-estree": "8.54.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.54.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.54.0.tgz", + "integrity": "sha512-VFlhGSl4opC0bprJiItPQ1RfUhGDIBokcPwaFH4yiBCaNPeld/9VeXbiPO1cLyorQi1G1vL+ecBk1x8o1axORA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.54.0", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@vitest/expect": { "version": "2.1.9", "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.9.tgz", @@ -1324,7 +1584,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz", "integrity": "sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==", - "dev": true, "license": "BSD-2-Clause" }, 
"node_modules/abbrev": { @@ -1389,7 +1648,6 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, "license": "MIT", "dependencies": { "color-convert": "^2.0.1" @@ -1440,7 +1698,6 @@ "version": "3.0.3", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "dev": true, "license": "MIT", "dependencies": { "fill-range": "^7.1.1" @@ -1486,7 +1743,6 @@ "version": "1.0.8", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", - "dev": true, "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.0", @@ -1505,7 +1761,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", - "dev": true, "license": "MIT", "dependencies": { "es-errors": "^1.3.0", @@ -1519,7 +1774,6 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", - "dev": true, "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.2", @@ -1594,7 +1848,6 @@ "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", @@ -1630,7 +1883,6 @@ "version": "3.9.0", "resolved": 
"https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", - "dev": true, "funding": [ { "type": "github", @@ -1646,7 +1898,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, "license": "MIT", "dependencies": { "color-name": "~1.1.4" @@ -1659,7 +1910,6 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true, "license": "MIT" }, "node_modules/concat-map": { @@ -1682,7 +1932,6 @@ "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", - "dev": true, "license": "MIT", "dependencies": { "path-key": "^3.1.0", @@ -1731,7 +1980,6 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", - "dev": true, "license": "MIT", "dependencies": { "es-define-property": "^1.0.0", @@ -1758,7 +2006,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "dev": true, "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.1", @@ -1800,7 +2047,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", "integrity": 
"sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -1810,7 +2056,6 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -1827,7 +2072,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "dev": true, "license": "MIT", "dependencies": { "es-errors": "^1.3.0" @@ -2117,8 +2361,8 @@ "version": "6.5.0", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "devOptional": true, "license": "MIT", - "optional": true, "engines": { "node": ">=12.0.0" }, @@ -2148,7 +2392,6 @@ "version": "7.1.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "dev": true, "license": "MIT", "dependencies": { "to-regex-range": "^5.0.1" @@ -2178,7 +2421,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/find-yarn-workspace-root/-/find-yarn-workspace-root-2.0.0.tgz", "integrity": "sha512-1IMnbjt4KzsQfnhnzNd8wUEgXZ44IzZaZmnLYx7D5FZlaHt2gW20Cri8Q+E/t5tIj4+epTBub+2Zxu/vNILzqQ==", - "dev": true, "license": "Apache-2.0", "dependencies": { "micromatch": "^4.0.2" @@ -2209,7 +2451,6 @@ "version": "10.1.0", "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, "license": "MIT", 
"dependencies": { "graceful-fs": "^4.2.0", @@ -2252,7 +2493,6 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "dev": true, "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" @@ -2262,7 +2502,6 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "dev": true, "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.2", @@ -2287,7 +2526,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "dev": true, "license": "MIT", "dependencies": { "dunder-proto": "^1.0.1", @@ -2361,7 +2599,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -2374,14 +2611,12 @@ "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "devOptional": true, "license": "ISC" }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, "license": "MIT", "engines": { "node": ">=8" @@ -2391,7 +2626,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", 
"integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", - "dev": true, "license": "MIT", "dependencies": { "es-define-property": "^1.0.0" @@ -2404,7 +2638,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -2417,7 +2650,6 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "dev": true, "license": "MIT", "dependencies": { "function-bind": "^1.1.2" @@ -2524,7 +2756,6 @@ "version": "2.2.1", "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "dev": true, "license": "MIT", "bin": { "is-docker": "cli.js" @@ -2563,7 +2794,6 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, "license": "MIT", "engines": { "node": ">=0.12.0" @@ -2573,7 +2803,6 @@ "version": "2.2.0", "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "dev": true, "license": "MIT", "dependencies": { "is-docker": "^2.0.0" @@ -2586,14 +2815,12 @@ "version": "2.0.5", "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", - "dev": true, "license": "MIT" }, "node_modules/isexe": { "version": "2.0.0", "resolved": 
"https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true, "license": "ISC" }, "node_modules/js-yaml": { @@ -2627,7 +2854,6 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.3.0.tgz", "integrity": "sha512-qtYiSSFlwot9XHtF9bD9c7rwKjr+RecWT//ZnPvSmEjpV5mmPOCN4j8UjY5hbjNkOwZ/jQv3J6R1/pL7RwgMsg==", - "dev": true, "license": "MIT", "dependencies": { "call-bind": "^1.0.8", @@ -2654,7 +2880,6 @@ "version": "6.2.0", "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, "license": "MIT", "dependencies": { "universalify": "^2.0.0" @@ -2667,7 +2892,6 @@ "version": "0.0.1", "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.1.tgz", "integrity": "sha512-2/Ki0GcmuqSrgFyelQq9M05y7PS0mEwuIzrf3f1fPqkVDVRvZrPZtVSMHxdgo8Aq0sxAOb/cr2aqqA3LeWHVPg==", - "dev": true, "license": "Public Domain", "funding": { "url": "https://github.com/sponsors/ljharb" @@ -2687,7 +2911,6 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", - "dev": true, "license": "MIT", "dependencies": { "graceful-fs": "^4.1.11" @@ -2784,7 +3007,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -2794,7 +3016,6 @@ "version": "4.0.8", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", "integrity": 
"sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "dev": true, "license": "MIT", "dependencies": { "braces": "^3.0.3", @@ -2808,7 +3029,6 @@ "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, "license": "MIT", "engines": { "node": ">=8.6" @@ -2834,7 +3054,6 @@ "version": "1.2.8", "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "dev": true, "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" @@ -3164,7 +3383,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -3174,7 +3392,6 @@ "version": "7.4.2", "resolved": "https://registry.npmjs.org/open/-/open-7.4.2.tgz", "integrity": "sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==", - "dev": true, "license": "MIT", "dependencies": { "is-docker": "^2.0.0", @@ -3267,7 +3484,6 @@ "version": "8.0.1", "resolved": "https://registry.npmjs.org/patch-package/-/patch-package-8.0.1.tgz", "integrity": "sha512-VsKRIA8f5uqHQ7NGhwIna6Bx6D9s/1iXlA1hthBVBEbkq+t4kXD0HHt+rJhf/Z+Ci0F/HCB2hvn0qLdLG+Qxlw==", - "dev": true, "license": "MIT", "dependencies": { "@yarnpkg/lockfile": "^1.1.0", @@ -3307,7 +3523,6 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, "license": "MIT", "engines": { "node": ">=8" @@ -3358,8 +3573,8 @@ "version": 
"4.0.3", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "devOptional": true, "license": "MIT", - "optional": true, "engines": { "node": ">=12" }, @@ -3585,7 +3800,6 @@ "version": "1.2.2", "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", - "dev": true, "license": "MIT", "dependencies": { "define-data-property": "^1.1.4", @@ -3603,7 +3817,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, "license": "MIT", "dependencies": { "shebang-regex": "^3.0.0" @@ -3616,7 +3829,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, "license": "MIT", "engines": { "node": ">=8" @@ -3633,7 +3845,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", - "dev": true, "license": "MIT", "engines": { "node": ">=6" @@ -3734,7 +3945,6 @@ "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, "license": "MIT", "dependencies": { "has-flag": "^4.0.0" @@ -3777,8 +3987,8 @@ "version": "0.2.15", "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", "integrity": 
"sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "devOptional": true, "license": "MIT", - "optional": true, "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.3" @@ -3824,7 +4034,6 @@ "version": "0.2.5", "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.5.tgz", "integrity": "sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow==", - "dev": true, "license": "MIT", "engines": { "node": ">=14.14" @@ -3834,7 +4043,6 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, "license": "MIT", "dependencies": { "is-number": "^7.0.0" @@ -3849,6 +4057,19 @@ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", "license": "MIT" }, + "node_modules/ts-api-utils": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.4.0.tgz", + "integrity": "sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -3862,6 +4083,44 @@ "node": ">= 0.8.0" } }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/typescript-eslint": { + "version": "8.54.0", + "resolved": 
"https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.54.0.tgz", + "integrity": "sha512-CKsJ+g53QpsNPqbzUsfKVgd3Lny4yKZ1pP4qN3jdMOg/sisIDLGyDMezycquXLE5JsEU0wp3dGNdzig0/fmSVQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.54.0", + "@typescript-eslint/parser": "8.54.0", + "@typescript-eslint/typescript-estree": "8.54.0", + "@typescript-eslint/utils": "8.54.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, "node_modules/unique-filename": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-5.0.0.tgz", @@ -3892,7 +4151,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, "license": "MIT", "engines": { "node": ">= 10.0.0" @@ -4077,7 +4335,6 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, "license": "ISC", "dependencies": { "isexe": "^2.0.0" @@ -4129,7 +4386,6 @@ "version": "2.8.2", "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz", "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", - "dev": true, "license": "ISC", "bin": { "yaml": "bin.mjs" diff --git a/package.json b/package.json index 3b269a4..630c086 100644 --- a/package.json +++ b/package.json @@ -44,6 +44,7 @@ "demo:lagrangian": "cd examples && docker compose exec demo node /app/examples/lagrangian-path.js", "demo:bench-streaming": "cd examples && docker compose up -d && 
docker compose exec demo node /app/examples/streaming-benchmark.js", "demo:bench-traversal": "cd examples && docker compose up -d && docker compose exec demo node /app/examples/traversal-benchmark.js", + "roadmap": "node scripts/roadmap.js", "setup:hooks": "node scripts/setup-hooks.js", "postinstall": "patch-package && node scripts/setup-hooks.js", "install:git-warp": "bash scripts/install-git-warp.sh", @@ -63,9 +64,13 @@ "devDependencies": { "@eslint/js": "^9.17.0", "@git-stunts/docker-guard": "^0.1.0", + "@typescript-eslint/eslint-plugin": "^8.54.0", + "@typescript-eslint/parser": "^8.54.0", "eslint": "^9.17.0", "fast-check": "^4.5.3", "prettier": "^3.4.2", + "typescript": "^5.9.3", + "typescript-eslint": "^8.54.0", "vitest": "^2.1.8" }, "files": [ diff --git a/scripts/hooks/post-merge b/scripts/hooks/post-merge new file mode 100755 index 0000000..c289027 --- /dev/null +++ b/scripts/hooks/post-merge @@ -0,0 +1,52 @@ +#!/bin/sh +# +# Post-merge hook: notify when warp refs changed during merge/pull. +# Always exits 0 — never blocks a merge. + +GIT_DIR=$(git rev-parse --git-dir 2>/dev/null) || exit 0 +SNAPSHOT="${GIT_DIR}/warp-ref-snapshot" + +# Capture current warp refs (sorted for stable comparison) +CURRENT=$(git for-each-ref --format='%(refname) %(objectname)' --sort=refname refs/empty-graph/ 2>/dev/null) || true + +if [ -z "$CURRENT" ]; then + # No warp refs exist — clean up any stale snapshot and exit + rm -f "$SNAPSHOT" + exit 0 +fi + +CHANGED=0 + +if [ -f "$SNAPSHOT" ]; then + PREVIOUS=$(cat "$SNAPSHOT") + if [ "$CURRENT" != "$PREVIOUS" ]; then + CHANGED=1 + fi +else + # First encounter — refs exist but no snapshot yet + CHANGED=1 +fi + +# Save current state for next comparison +printf '%s\n' "$CURRENT" > "$SNAPSHOT" + +if [ "$CHANGED" -eq 0 ]; then + exit 0 +fi + +AUTO_MAT=$(git config --bool warp.autoMaterialize 2>/dev/null) || true + +if [ "$AUTO_MAT" = "true" ]; then + echo "[warp] Refs changed — auto-materializing..." 
+ if command -v git-warp >/dev/null 2>&1; then + git-warp materialize || echo "[warp] Warning: auto-materialize failed." + elif command -v warp-graph >/dev/null 2>&1; then + warp-graph materialize || echo "[warp] Warning: auto-materialize failed." + else + echo "[warp] Warning: neither git-warp nor warp-graph found in PATH." + fi +else + echo "[warp] Writer refs changed during merge. Call materialize() to see updates." +fi + +exit 0 diff --git a/scripts/roadmap.js b/scripts/roadmap.js new file mode 100755 index 0000000..71d0fab --- /dev/null +++ b/scripts/roadmap.js @@ -0,0 +1,646 @@ +#!/usr/bin/env node + +/** + * scripts/roadmap.js — Roadmap management tool + * + * Usage: + * node scripts/roadmap.js close Close a task, propagate unblocks + * node scripts/roadmap.js open List OPEN tasks + * node scripts/roadmap.js status Show stats per milestone + * node scripts/roadmap.js show Draw colored ASCII DAG + * node scripts/roadmap.js init Bootstrap Status fields + DAG + */ + +import { readFileSync, writeFileSync } from 'node:fs'; +import { resolve, dirname } from 'node:path'; +import { fileURLToPath } from 'node:url'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const ROADMAP_PATH = resolve(__dirname, '..', 'ROADMAP.md'); + +const DAG_START = ''; +const DAG_END = ''; + +const MILESTONES = [ + { code: 'AP', name: 'AUTOPILOT', version: 'v7.1.0' }, + { code: 'GK', name: 'GROUNDSKEEPER', version: 'v7.2.0' }, + { code: 'WT', name: 'WEIGHTED', version: 'v7.3.0' }, + { code: 'HS', name: 'HANDSHAKE', version: 'v7.4.0' }, + { code: 'CP', name: 'COMPASS', version: 'v7.5.0' }, + { code: 'LH', name: 'LIGHTHOUSE', version: 'v7.6.0' }, + { code: 'PL', name: 'PULSE', version: 'v7.7.0' }, + { code: 'HG', name: 'HOLOGRAM', version: 'v8.0.0' }, + { code: 'EC', name: 'ECHO', version: 'v9.0.0' }, +]; + +// ── Helpers ────────────────────────────────────────────────────────────────── + +function escapeRegex(s) { + return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); +} + +/** 
Extract task IDs (XX/YYY/N pattern) from a string, ignoring parenthetical notes. */ +function extractTaskIds(str) { + if (!str || str.trim() === 'None') return []; + return [...str.matchAll(/[A-Z]{2}\/[A-Z]+\/\d+/g)].map(m => m[0]); +} + +function getMilestone(taskId) { + const prefix = taskId.split('/')[0]; + return MILESTONES.find(m => m.code === prefix); +} + +function pad(s, n) { + return s.length >= n ? s : s + ' '.repeat(n - s.length); +} + +// ── Parsing ────────────────────────────────────────────────────────────────── + +function parseTasks(content) { + const tasks = new Map(); + const regex = /^####\s+([\w/]+)\s+—\s+(.+)$/gm; + const headers = []; + let m; + + while ((m = regex.exec(content)) !== null) { + headers.push({ id: m[1], title: m[2], pos: m.index }); + } + + for (let i = 0; i < headers.length; i++) { + const { id, title, pos } = headers[i]; + const endPos = i + 1 < headers.length ? headers[i + 1].pos : content.length; + const section = content.substring(pos, endPos); + + const statusMatch = section.match(/\*\*Status:\*\*\s*`(\w+)`/); + const blockedByMatch = section.match(/\*\*Blocked [Bb]y:\*\*\s*(.+)/); + const blockingMatch = section.match(/\*\*Blocking:\*\*\s*(.+)/); + const hoursMatch = section.match(/\*\*Estimated Hours:\*\*\s*(\d+)/); + const ms = getMilestone(id); + + tasks.set(id, { + id, + title, + status: statusMatch ? statusMatch[1] : null, + blockedBy: blockedByMatch ? extractTaskIds(blockedByMatch[1]) : [], + blocking: blockingMatch ? extractTaskIds(blockingMatch[1]) : [], + hours: hoursMatch ? parseInt(hoursMatch[1]) : 0, + milestone: ms ? ms.name : id.split('/')[0], + milestoneCode: id.split('/')[0], + }); + } + + normalizeGraph(tasks); + return tasks; +} + +/** + * Normalize the graph: ensure blocking/blockedBy are symmetric. + * Some cross-milestone edges are only recorded on one side in the ROADMAP. 
+ */ +function normalizeGraph(tasks) { + for (const [id, task] of tasks) { + // For each task I block, make sure that child's blockedBy includes me + for (const childId of task.blocking) { + const child = tasks.get(childId); + if (child && !child.blockedBy.includes(id)) { + child.blockedBy.push(id); + } + } + // For each task that blocks me, make sure that parent's blocking includes me + for (const parentId of task.blockedBy) { + const parent = tasks.get(parentId); + if (parent && !parent.blocking.includes(id)) { + parent.blocking.push(id); + } + } + } +} + +// ── File mutation ──────────────────────────────────────────────────────────── + +/** + * Set (or insert) a task's Status field in the ROADMAP content string. + * Searches by task ID pattern so it works even after prior edits shift positions. + */ +function setTaskStatus(content, taskId, newStatus) { + const escapedId = escapeRegex(taskId); + + // Find the task header + const headerRegex = new RegExp(`####\\s+${escapedId}\\s+—\\s+.+`); + const headerMatch = headerRegex.exec(content); + if (!headerMatch) { + console.error(`Task ${taskId} not found in ROADMAP.md`); + process.exit(1); + } + + const headerStart = headerMatch.index; + + // Find end of this task's section (next #### or ### or ## or EOF) + const rest = content.substring(headerStart + 1); + const nextHeader = rest.search(/\n#{2,4}\s/); + const sectionEnd = nextHeader > -1 ? 
headerStart + 1 + nextHeader : content.length; + const section = content.substring(headerStart, sectionEnd); + + // Check if Status line already exists + const statusLineRegex = /- \*\*Status:\*\* `\w+`/; + const statusMatch = statusLineRegex.exec(section); + + if (statusMatch) { + // Replace existing + const absPos = headerStart + statusMatch.index; + const absEnd = absPos + statusMatch[0].length; + return ( + content.substring(0, absPos) + + `- **Status:** \`${newStatus}\`` + + content.substring(absEnd) + ); + } + + // Insert new Status line before the first bullet in the section + const bulletRegex = /\n- \*\*/; + const bulletMatch = bulletRegex.exec(section); + if (bulletMatch) { + const absPos = headerStart + bulletMatch.index + 1; // +1 to skip the \n + return ( + content.substring(0, absPos) + + `- **Status:** \`${newStatus}\`\n` + + content.substring(absPos) + ); + } + + // Fallback: insert after header line + const eol = content.indexOf('\n', headerStart); + const insertPos = eol > -1 ? eol + 1 : content.length; + return ( + content.substring(0, insertPos) + + `\n- **Status:** \`${newStatus}\`\n` + + content.substring(insertPos) + ); +} + +// ── DAG generation ─────────────────────────────────────────────────────────── + +function statusIcon(status) { + switch (status) { + case 'CLOSED': + return '■'; + case 'OPEN': + return '◆'; + case 'BLOCKED': + return '○'; + default: + return '?'; + } +} + +function progressBar(done, total, width = 20) { + const filled = total > 0 ? 
Math.round((done / total) * width) : 0; + return '█'.repeat(filled) + '░'.repeat(width - filled); +} + +function generateDagMarkdown(tasks) { + const lines = []; + lines.push('```'); + lines.push('Key: ■ CLOSED ◆ OPEN ○ BLOCKED'); + lines.push(''); + + for (const ms of MILESTONES) { + const msTasks = [...tasks.values()] + .filter(t => t.milestoneCode === ms.code) + .sort((a, b) => a.id.localeCompare(b.id)); + if (msTasks.length === 0) continue; + + const done = msTasks.filter(t => t.status === 'CLOSED').length; + const total = msTasks.length; + const pct = total > 0 ? Math.round((done / total) * 100) : 0; + const bar = progressBar(done, total); + + lines.push(`${pad(ms.name, 16)} (${ms.version}) ${bar} ${String(pct).padStart(3)}% (${done}/${total})`); + + for (const task of msTasks) { + const icon = statusIcon(task.status); + const outEdges = + task.blocking.length > 0 ? ` → ${task.blocking.join(', ')}` : ''; + lines.push(` ${icon} ${pad(task.id, 18)}${outEdges}`); + } + lines.push(''); + } + + // Cross-milestone dependencies + const crossDeps = []; + for (const task of tasks.values()) { + for (const childId of task.blocking) { + const child = tasks.get(childId); + if (child && child.milestoneCode !== task.milestoneCode) { + crossDeps.push({ + from: task.id, + to: childId, + toMs: child.milestone, + }); + } + } + } + + if (crossDeps.length > 0) { + lines.push('Cross-Milestone Dependencies:'); + for (const dep of crossDeps.sort((a, b) => a.from.localeCompare(b.from))) { + lines.push(` ${pad(dep.from, 18)} → ${dep.to} (${dep.toMs})`); + } + lines.push(''); + } + + lines.push('```'); + return lines.join('\n'); +} + +function updateDag(content, tasks) { + const dag = generateDagMarkdown(tasks); + const startIdx = content.indexOf(DAG_START); + const endIdx = content.indexOf(DAG_END); + + if (startIdx === -1 || endIdx === -1) { + console.error('DAG sentinel markers not found in ROADMAP.md'); + console.error(`Expected ${DAG_START} and ${DAG_END}`); + console.error('Run: 
node scripts/roadmap.js init'); + process.exit(1); + } + + return ( + content.substring(0, startIdx + DAG_START.length) + + '\n' + + dag + + '\n' + + content.substring(endIdx) + ); +} + +// ── Commands ───────────────────────────────────────────────────────────────── + +function cmdClose(taskId) { + let content = readFileSync(ROADMAP_PATH, 'utf8'); + const tasks = parseTasks(content); + + const task = tasks.get(taskId); + if (!task) { + console.error(`Unknown task: ${taskId}`); + const similar = [...tasks.keys()].filter(k => + k.toLowerCase().includes(taskId.toLowerCase()), + ); + if (similar.length > 0) { + console.error(`Did you mean: ${similar.join(', ')}?`); + } + process.exit(1); + } + + if (task.status === 'CLOSED') { + console.log(`${taskId} is already CLOSED.`); + return; + } + + if (task.status === 'BLOCKED') { + const openBlockers = task.blockedBy.filter(bid => { + const b = tasks.get(bid); + return !b || b.status !== 'CLOSED'; + }); + console.error(`Cannot close ${taskId} — it is BLOCKED by:`); + for (const bid of openBlockers) { + const b = tasks.get(bid); + console.error(` ○ ${bid} (${b ? 
b.status : 'unknown'})`); + } + process.exit(1); + } + + // Close the task + content = setTaskStatus(content, taskId, 'CLOSED'); + task.status = 'CLOSED'; + console.log(` ■ ${taskId} → CLOSED`); + + // Propagate: check each child + const unblocked = []; + for (const childId of task.blocking) { + const child = tasks.get(childId); + if (!child || child.status === 'CLOSED') continue; + + const allBlockersClosed = child.blockedBy.every(bid => { + if (bid === taskId) return true; // just closed + const b = tasks.get(bid); + return b && b.status === 'CLOSED'; + }); + + if (allBlockersClosed && child.status === 'BLOCKED') { + content = setTaskStatus(content, childId, 'OPEN'); + child.status = 'OPEN'; + unblocked.push(childId); + console.log(` ◆ ${childId} → OPEN (unblocked)`); + } + } + + // Regenerate DAG + // Re-parse to get fresh task data after status changes + const freshTasks = parseTasks(content); + content = updateDag(content, freshTasks); + + writeFileSync(ROADMAP_PATH, content, 'utf8'); + + if (unblocked.length === 0) { + console.log('\n No new tasks unblocked.'); + } else { + console.log(`\n ${unblocked.length} task(s) now ready to work on.`); + } +} + +function cmdOpen() { + const content = readFileSync(ROADMAP_PATH, 'utf8'); + const tasks = parseTasks(content); + + const openTasks = [...tasks.values()] + .filter(t => t.status === 'OPEN') + .sort((a, b) => a.id.localeCompare(b.id)); + + if (openTasks.length === 0) { + console.log('No OPEN tasks.'); + return; + } + + console.log(`\n ◆ OPEN tasks (${openTasks.length}):\n`); + + let currentMs = ''; + for (const task of openTasks) { + if (task.milestone !== currentMs) { + currentMs = task.milestone; + const ms = getMilestone(task.id); + console.log(` ${currentMs} (${ms?.version || '?'}):`); + } + console.log( + ` ◆ ${pad(task.id, 18)} ${task.title} (${task.hours}h)`, + ); + } + console.log(''); +} + +function cmdStatus() { + const content = readFileSync(ROADMAP_PATH, 'utf8'); + const tasks = parseTasks(content); + 
+ const totalTasks = tasks.size; + const totalClosed = [...tasks.values()].filter( + t => t.status === 'CLOSED', + ).length; + const totalOpen = [...tasks.values()].filter( + t => t.status === 'OPEN', + ).length; + const totalBlocked = [...tasks.values()].filter( + t => t.status === 'BLOCKED', + ).length; + const totalHours = [...tasks.values()].reduce((s, t) => s + t.hours, 0); + const closedHours = [...tasks.values()] + .filter(t => t.status === 'CLOSED') + .reduce((s, t) => s + t.hours, 0); + + const W = 72; + const line = '═'.repeat(W); + const thin = '─'.repeat(W); + + console.log(`\n ╔${line}╗`); + console.log(` ║${pad(' ROADMAP STATUS', W)}║`); + console.log(` ╠${line}╣`); + + for (const ms of MILESTONES) { + const msTasks = [...tasks.values()].filter( + t => t.milestoneCode === ms.code, + ); + if (msTasks.length === 0) continue; + + const done = msTasks.filter(t => t.status === 'CLOSED').length; + const open = msTasks.filter(t => t.status === 'OPEN').length; + const blocked = msTasks.filter(t => t.status === 'BLOCKED').length; + const total = msTasks.length; + const pct = total > 0 ? Math.round((done / total) * 100) : 0; + const hrs = msTasks.reduce((s, t) => s + t.hours, 0); + const doneHrs = msTasks + .filter(t => t.status === 'CLOSED') + .reduce((s, t) => s + t.hours, 0); + const bar = progressBar(done, total); + + const label = `${ms.name} (${ms.version})`; + const stats = `${bar} ${String(pct).padStart(3)}% ${done}/${total} (${doneHrs}/${hrs}h)`; + const row = ` ${pad(label, 28)}${stats}`; + console.log(` ║${pad(row, W)}║`); + } + + console.log(` ╠${line}╣`); + + const totalPct = + totalTasks > 0 ? 
Math.round((totalClosed / totalTasks) * 100) : 0; + const totalBar = progressBar(totalClosed, totalTasks); + const totalRow = ` ${pad('TOTAL', 28)}${totalBar} ${String(totalPct).padStart(3)}% ${totalClosed}/${totalTasks} (${closedHours}/${totalHours}h)`; + console.log(` ║${pad(totalRow, W)}║`); + console.log(` ╚${line}╝`); + console.log( + `\n ■ ${totalClosed} closed ◆ ${totalOpen} open ○ ${totalBlocked} blocked\n`, + ); +} + +function cmdShow() { + const content = readFileSync(ROADMAP_PATH, 'utf8'); + const tasks = parseTasks(content); + + // ANSI escape codes + const R = '\x1b[0m'; // reset + const GREEN = '\x1b[32m'; + const YELLOW = '\x1b[33m'; + const DIM = '\x1b[2m'; + const BOLD = '\x1b[1m'; + const CYAN = '\x1b[36m'; + + function colorIcon(status) { + switch (status) { + case 'CLOSED': + return `${GREEN}■${R}`; + case 'OPEN': + return `${YELLOW}${BOLD}◆${R}`; + case 'BLOCKED': + return `${DIM}○${R}`; + default: + return '?'; + } + } + + function colorId(id, status) { + switch (status) { + case 'CLOSED': + return `${GREEN}${id}${R}`; + case 'OPEN': + return `${YELLOW}${BOLD}${id}${R}`; + case 'BLOCKED': + return `${DIM}${id}${R}`; + default: + return id; + } + } + + console.log(''); + + for (const ms of MILESTONES) { + const msTasks = [...tasks.values()] + .filter(t => t.milestoneCode === ms.code) + .sort((a, b) => a.id.localeCompare(b.id)); + if (msTasks.length === 0) continue; + + const done = msTasks.filter(t => t.status === 'CLOSED').length; + const total = msTasks.length; + const pct = total > 0 ? Math.round((done / total) * 100) : 0; + + const filled = total > 0 ? 
Math.round((done / total) * 20) : 0; + const bar = `${GREEN}${'█'.repeat(filled)}${R}${DIM}${'░'.repeat(20 - filled)}${R}`; + + console.log( + ` ${CYAN}${BOLD}${pad(ms.name, 16)}${R} ${DIM}(${ms.version})${R} ${bar} ${pct}% (${done}/${total})`, + ); + console.log(''); + + for (const task of msTasks) { + const icon = colorIcon(task.status); + const id = colorId(pad(task.id, 18), task.status); + const arrows = + task.blocking.length > 0 + ? `${DIM} → ${task.blocking.join(', ')}${R}` + : ''; + console.log(` ${icon} ${id}${arrows}`); + } + console.log(''); + } + + // Cross-milestone dependencies + const crossDeps = []; + for (const task of tasks.values()) { + for (const childId of task.blocking) { + const child = tasks.get(childId); + if (child && child.milestoneCode !== task.milestoneCode) { + crossDeps.push({ + from: task.id, + to: childId, + toMs: child.milestone, + }); + } + } + } + + if (crossDeps.length > 0) { + console.log(` ${CYAN}${BOLD}Cross-Milestone Dependencies${R}`); + console.log(''); + for (const dep of crossDeps.sort((a, b) => + a.from.localeCompare(b.from), + )) { + console.log( + ` ${pad(dep.from, 18)} ${DIM}→${R} ${dep.to} ${DIM}(${dep.toMs})${R}`, + ); + } + console.log(''); + } +} + +function cmdInit() { + let content = readFileSync(ROADMAP_PATH, 'utf8'); + + // Ensure DAG sentinels exist + if (!content.includes(DAG_START)) { + console.error( + `Missing DAG sentinel: ${DAG_START}\nAdd it to ROADMAP.md first.`, + ); + process.exit(1); + } + + let tasks = parseTasks(content); + let modified = 0; + + // Add Status field to tasks that don't have one + for (const [id, task] of tasks) { + if (task.status !== null) continue; + + const hasOpenBlocker = task.blockedBy.some(bid => { + const b = tasks.get(bid); + return !b || b.status !== 'CLOSED'; + }); + const status = + task.blockedBy.length === 0 || !hasOpenBlocker ? 
'OPEN' : 'BLOCKED'; + + content = setTaskStatus(content, id, status); + task.status = status; + modified++; + } + + // Re-parse after all status insertions (positions shifted) + tasks = parseTasks(content); + + // Generate DAG + content = updateDag(content, tasks); + + writeFileSync(ROADMAP_PATH, content, 'utf8'); + console.log(` Initialized ${modified} task status fields.`); + console.log(` Total tasks: ${tasks.size}\n`); + + const open = [...tasks.values()].filter(t => t.status === 'OPEN').length; + const blocked = [...tasks.values()].filter( + t => t.status === 'BLOCKED', + ).length; + const closed = [...tasks.values()].filter(t => t.status === 'CLOSED').length; + console.log( + ` ■ ${closed} closed ◆ ${open} open ○ ${blocked} blocked\n`, + ); +} + +function cmdHelp() { + console.log(` + Usage: node scripts/roadmap.js [args] + + Commands: + close Mark a task as CLOSED and propagate status changes + open List all OPEN tasks (ready to work on) + status Show completion stats per milestone + show Draw colored ASCII task DAG to terminal + init Initialize status fields and DAG (run once after setup) + help Show this help message + + Examples: + node scripts/roadmap.js close AP/INVAL/1 + node scripts/roadmap.js open + node scripts/roadmap.js status + node scripts/roadmap.js show +`); +} + +// ── Main ───────────────────────────────────────────────────────────────────── + +const [, , command, ...args] = process.argv; + +switch (command) { + case 'close': + if (!args[0]) { + console.error('Usage: roadmap close '); + process.exit(1); + } + cmdClose(args[0]); + break; + case 'open': + cmdOpen(); + break; + case 'status': + cmdStatus(); + break; + case 'show': + cmdShow(); + break; + case 'init': + cmdInit(); + break; + case 'help': + case '--help': + case '-h': + cmdHelp(); + break; + default: + if (command) console.error(`Unknown command: ${command}\n`); + cmdHelp(); + process.exit(command ? 
1 : 0); +} diff --git a/src/domain/WarpGraph.js b/src/domain/WarpGraph.js index 027b13f..48ec55d 100644 --- a/src/domain/WarpGraph.js +++ b/src/domain/WarpGraph.js @@ -10,7 +10,7 @@ import { validateGraphName, validateWriterId, buildWriterRef, buildCoverageRef, buildCheckpointRef, buildWritersPrefix, parseWriterIdFromRef } from './utils/RefLayout.js'; import { PatchBuilderV2 } from './services/PatchBuilderV2.js'; -import { reduceV5, createEmptyStateV5, joinStates, decodeEdgeKey, decodePropKey } from './services/JoinReducer.js'; +import { reduceV5, createEmptyStateV5, joinStates, join as joinPatch, decodeEdgeKey, decodePropKey } from './services/JoinReducer.js'; import { orsetContains, orsetElements } from './crdt/ORSet.js'; import { decode } from '../infrastructure/codecs/CborCodec.js'; import { decodePatchMessage, detectMessageKind, encodeAnchorMessage } from './services/WarpMessageCodec.js'; @@ -34,6 +34,7 @@ import QueryBuilder from './services/QueryBuilder.js'; import LogicalTraversal from './services/LogicalTraversal.js'; import LRUCache from './utils/LRUCache.js'; import SyncError from './errors/SyncError.js'; +import QueryError from './errors/QueryError.js'; import { checkAborted } from './utils/cancellation.js'; import OperationAbortedError from './errors/OperationAbortedError.js'; @@ -89,8 +90,10 @@ export default class WarpGraph { * @param {string} options.writerId - This writer's ID * @param {Object} [options.gcPolicy] - GC policy configuration (overrides defaults) * @param {number} [options.adjacencyCacheSize] - Max materialized adjacency cache entries + * @param {{every: number}} [options.checkpointPolicy] - Auto-checkpoint policy; creates a checkpoint every N patches + * @param {boolean} [options.autoMaterialize=false] - If true, query methods auto-materialize instead of throwing */ - constructor({ persistence, graphName, writerId, gcPolicy = {}, adjacencyCacheSize = DEFAULT_ADJACENCY_CACHE_SIZE }) { + constructor({ persistence, graphName, writerId, 
gcPolicy = {}, adjacencyCacheSize = DEFAULT_ADJACENCY_CACHE_SIZE, checkpointPolicy, autoMaterialize = false }) { /** @type {import('../ports/GraphPersistencePort.js').default} */ this._persistence = persistence; @@ -106,6 +109,9 @@ export default class WarpGraph { /** @type {import('./services/JoinReducer.js').WarpStateV5|null} */ this._cachedState = null; + /** @type {boolean} */ + this._stateDirty = false; + /** @type {Object} */ this._gcPolicy = { ...DEFAULT_GC_POLICY, ...gcPolicy }; @@ -115,6 +121,18 @@ export default class WarpGraph { /** @type {number} */ this._patchesSinceGC = 0; + /** @type {number} */ + this._patchesSinceCheckpoint = 0; + + /** @type {{every: number}|null} */ + this._checkpointPolicy = checkpointPolicy || null; + + /** @type {boolean} */ + this._checkpointing = false; + + /** @type {boolean} */ + this._autoMaterialize = autoMaterialize; + /** @type {LogicalTraversal} */ this.traverse = new LogicalTraversal(this); @@ -133,8 +151,11 @@ export default class WarpGraph { * @param {string} options.graphName - Graph namespace * @param {string} options.writerId - This writer's ID * @param {Object} [options.gcPolicy] - GC policy configuration (overrides defaults) + * @param {number} [options.adjacencyCacheSize] - Max materialized adjacency cache entries + * @param {{every: number}} [options.checkpointPolicy] - Auto-checkpoint policy; creates a checkpoint every N patches + * @param {boolean} [options.autoMaterialize] - If true, query methods auto-materialize instead of throwing * @returns {Promise} The opened graph instance - * @throws {Error} If graphName or writerId is invalid + * @throws {Error} If graphName, writerId, or checkpointPolicy is invalid * * @example * const graph = await WarpGraph.open({ @@ -143,7 +164,7 @@ export default class WarpGraph { * writerId: 'node-1' * }); */ - static async open({ persistence, graphName, writerId, gcPolicy = {}, adjacencyCacheSize }) { + static async open({ persistence, graphName, writerId, gcPolicy = {}, 
adjacencyCacheSize, checkpointPolicy, autoMaterialize }) { // Validate inputs validateGraphName(graphName); validateWriterId(writerId); @@ -152,7 +173,22 @@ export default class WarpGraph { throw new Error('persistence is required'); } - const graph = new WarpGraph({ persistence, graphName, writerId, gcPolicy, adjacencyCacheSize }); + // Validate checkpointPolicy + if (checkpointPolicy !== undefined && checkpointPolicy !== null) { + if (typeof checkpointPolicy !== 'object' || checkpointPolicy === null) { + throw new Error('checkpointPolicy must be an object with { every: number }'); + } + if (!Number.isInteger(checkpointPolicy.every) || checkpointPolicy.every <= 0) { + throw new Error('checkpointPolicy.every must be a positive integer'); + } + } + + // Validate autoMaterialize + if (autoMaterialize !== undefined && typeof autoMaterialize !== 'boolean') { + throw new Error('autoMaterialize must be a boolean'); + } + + const graph = new WarpGraph({ persistence, graphName, writerId, gcPolicy, adjacencyCacheSize, checkpointPolicy, autoMaterialize }); // Validate migration boundary await graph._validateMigrationBoundary(); @@ -187,6 +223,11 @@ export default class WarpGraph { /** * Creates a new PatchBuilder for building and committing patches. * + * On successful commit, the internal `onCommitSuccess` callback receives + * `{ patch, sha }` where `patch` is the committed patch object and `sha` + * is the Git commit SHA. This updates the version vector and applies the + * patch to cached state for eager re-materialization. 
+ * * @returns {Promise} A fluent patch builder * * @example @@ -206,8 +247,17 @@ export default class WarpGraph { versionVector: this._versionVector, getCurrentState: () => this._cachedState, expectedParentSha: parentSha, - onCommitSuccess: () => { + onCommitSuccess: ({ patch, sha } = {}) => { vvIncrement(this._versionVector, this._writerId); + this._patchesSinceCheckpoint++; + // Eager re-materialize: apply the just-committed patch to cached state + // Only when the cache is clean — applying a patch to stale state would be incorrect + if (this._cachedState && !this._stateDirty && patch && sha) { + joinPatch(this._cachedState, patch, sha); + this._setMaterializedState(this._cachedState); + } else { + this._stateDirty = true; + } }, }); } @@ -220,7 +270,7 @@ export default class WarpGraph { * @returns {Promise>} Array of patches */ async getWriterPatches(writerId, stopAtSha = null) { - return this._loadWriterPatches(writerId, stopAtSha); + return await this._loadWriterPatches(writerId, stopAtSha); } /** @@ -285,7 +335,7 @@ export default class WarpGraph { while (currentSha && currentSha !== stopAtSha) { // Get commit info and message const nodeInfo = await this._persistence.getNodeInfo(currentSha); - const message = nodeInfo.message; + const {message} = nodeInfo; // Check if this is a patch commit const kind = detectMessageKind(message); @@ -371,6 +421,7 @@ export default class WarpGraph { */ _setMaterializedState(state) { this._cachedState = state; + this._stateDirty = false; this._versionVector = vvClone(state.observedFrontier); const stateHash = computeStateHashV5(state); @@ -417,41 +468,54 @@ export default class WarpGraph { // Check for checkpoint const checkpoint = await this._loadLatestCheckpoint(); + let state; + let patchCount = 0; + // If checkpoint exists, use incremental materialization if (checkpoint?.schema === 2) { const patches = await this._loadPatchesSince(checkpoint); - const state = reduceV5(patches, checkpoint.state); - 
this._setMaterializedState(state); - return state; - } + state = reduceV5(patches, checkpoint.state); + patchCount = patches.length; + } else { + // 1. Discover all writers + const writerIds = await this.discoverWriters(); - // 1. Discover all writers - const writerIds = await this.discoverWriters(); + // 2. If no writers, return empty state + if (writerIds.length === 0) { + state = createEmptyStateV5(); + } else { + // 3. For each writer, collect all patches + const allPatches = []; + for (const writerId of writerIds) { + const writerPatches = await this._loadWriterPatches(writerId); + allPatches.push(...writerPatches); + } - // 2. If no writers, return empty state - if (writerIds.length === 0) { - const emptyState = createEmptyStateV5(); - this._setMaterializedState(emptyState); - return emptyState; + // 4. If no patches, return empty state + if (allPatches.length === 0) { + state = createEmptyStateV5(); + } else { + // 5. Reduce all patches to state + state = reduceV5(allPatches); + patchCount = allPatches.length; + } + } } - // 3. For each writer, collect all patches - const allPatches = []; - for (const writerId of writerIds) { - const writerPatches = await this._loadWriterPatches(writerId); - allPatches.push(...writerPatches); - } + this._setMaterializedState(state); + this._patchesSinceCheckpoint = patchCount; - // 4. If no patches, return empty state - if (allPatches.length === 0) { - const emptyState = createEmptyStateV5(); - this._setMaterializedState(emptyState); - return emptyState; + // Auto-checkpoint if policy is set and threshold exceeded. + // Guard prevents recursion: createCheckpoint() calls materialize() internally. + if (this._checkpointPolicy && !this._checkpointing && patchCount >= this._checkpointPolicy.every) { + try { + await this.createCheckpoint(); + this._patchesSinceCheckpoint = 0; + } catch { + // Checkpoint failure does not break materialize — continue silently + } } - // 5. 
Reduce all patches to state - const state = reduceV5(allPatches); - this._setMaterializedState(state); return state; } @@ -474,7 +538,7 @@ export default class WarpGraph { * frontierMerged: boolean * } * }} The merged state and a receipt describing the merge - * @throws {Error} If no cached state exists + * @throws {QueryError} If no cached state exists (code: `E_NO_STATE`) * * @example * const graph = await WarpGraph.open({ persistence, graphName, writerId }); @@ -489,7 +553,9 @@ export default class WarpGraph { */ join(otherState) { if (!this._cachedState) { - throw new Error('No cached state. Call materialize() first.'); + throw new QueryError('No cached state. Call materialize() first.', { + code: 'E_NO_STATE', + }); } if (!otherState || !otherState.nodeAlive || !otherState.edgeAlive) { @@ -586,7 +652,7 @@ export default class WarpGraph { while (currentSha && currentSha !== fromSha) { const nodeInfo = await this._persistence.getNodeInfo(currentSha); - const message = nodeInfo.message; + const {message} = nodeInfo; const kind = detectMessageKind(message); if (kind !== 'patch') { @@ -646,8 +712,17 @@ export default class WarpGraph { } } - // 3. Materialize current state - const state = await this.materialize(); + // 3. Materialize current state (reuse cached if fresh, guard against recursion) + const prevCheckpointing = this._checkpointing; + this._checkpointing = true; + let state; + try { + state = (this._cachedState && !this._stateDirty) + ? this._cachedState + : await this.materialize(); + } finally { + this._checkpointing = prevCheckpointing; + } // 4. Call CheckpointService.create() const checkpointSha = await createCheckpointCommit({ @@ -740,6 +815,7 @@ export default class WarpGraph { * Graphs cannot be opened if there is schema:1 history without * a migration checkpoint. This ensures data consistency during migration. 
* + * @returns {Promise} * @throws {Error} If v1 history exists without migration checkpoint * @private */ @@ -818,8 +894,8 @@ export default class WarpGraph { /** * Loads patches since a checkpoint for incremental materialization. * - * @param {Object} checkpoint - The checkpoint to start from - * @returns {Promise>} Patches since checkpoint + * @param {{state: Object, frontier: Map, stateHash: string, schema: number}} checkpoint - The checkpoint to start from + * @returns {Promise>} Patches since checkpoint * @private */ async _loadPatchesSince(checkpoint) { @@ -901,8 +977,10 @@ export default class WarpGraph { * * @param {string} writerId - The writer ID for this patch * @param {string} incomingSha - The incoming patch commit SHA - * @param {Object} checkpoint - The checkpoint to validate against - * @throws {Error} if patch is backfill or diverged + * @param {{state: Object, frontier: Map, stateHash: string, schema: number}} checkpoint - The checkpoint to validate against + * @returns {Promise} + * @throws {Error} If patch is behind/same as checkpoint frontier (backfill rejected) + * @throws {Error} If patch does not extend checkpoint head (writer fork detected) * @private */ async _validatePatchAgainstCheckpoint(writerId, incomingSha, checkpoint) { @@ -978,7 +1056,7 @@ export default class WarpGraph { * **Requires a cached state.** * * @returns {{nodesCompacted: number, edgesCompacted: number, tombstonesRemoved: number, durationMs: number}} - * @throws {Error} If no cached state exists + * @throws {QueryError} If no cached state exists (code: `E_NO_STATE`) * * @example * await graph.materialize(); @@ -987,7 +1065,9 @@ export default class WarpGraph { */ runGC() { if (!this._cachedState) { - throw new Error('No cached state. Call materialize() first.'); + throw new QueryError('No cached state. 
Call materialize() first.', { + code: 'E_NO_STATE', + }); } // Compute appliedVV from current state @@ -1081,7 +1161,7 @@ export default class WarpGraph { */ async processSyncRequest(request) { const localFrontier = await this.getFrontier(); - return processSyncRequest( + return await processSyncRequest( request, localFrontier, this._persistence, @@ -1097,7 +1177,7 @@ export default class WarpGraph { * * @param {{type: 'sync-response', frontier: Map, patches: Map}} response - The sync response * @returns {{state: Object, frontier: Map, applied: number}} Result with updated state - * @throws {Error} If no cached state exists + * @throws {QueryError} If no cached state exists (code: `E_NO_STATE`) * * @example * await graph.materialize(); // Cache state first @@ -1106,7 +1186,9 @@ export default class WarpGraph { */ applySyncResponse(response) { if (!this._cachedState) { - throw new Error('No cached state. Call materialize() first.'); + throw new QueryError('No cached state. Call materialize() first.', { + code: 'E_NO_STATE', + }); } const currentFrontier = this._cachedState.observedFrontier; @@ -1145,6 +1227,10 @@ export default class WarpGraph { * @param {AbortSignal} [options.signal] - Optional abort signal to cancel sync * @param {(event: {type: string, attempt: number, durationMs?: number, status?: number, error?: Error}) => void} [options.onStatus] * @returns {Promise<{applied: number, attempts: number}>} + * @throws {SyncError} If remote URL is invalid (code: `E_SYNC_REMOTE_URL`) + * @throws {SyncError} If remote returns error or invalid response (code: `E_SYNC_REMOTE`, `E_SYNC_PROTOCOL`) + * @throws {SyncError} If request times out (code: `E_SYNC_TIMEOUT`) + * @throws {OperationAbortedError} If abort signal fires */ async syncWith(remote, options = {}) { const { @@ -1169,7 +1255,7 @@ export default class WarpGraph { } catch { throw new SyncError('Invalid remote URL', { code: 'E_SYNC_REMOTE_URL', - context: { remote: String(remote) }, + context: { remote }, }); 
} @@ -1351,6 +1437,7 @@ export default class WarpGraph { * @param {string} [options.path='/sync'] - Path to handle sync requests * @param {number} [options.maxRequestBytes=4194304] - Max request size in bytes * @returns {Promise<{close: () => Promise, url: string}>} Server handle + * @throws {Error} If port is not a number */ async serve({ port, host = '127.0.0.1', path = '/sync', maxRequestBytes = DEFAULT_SYNC_SERVER_MAX_BYTES } = {}) { if (typeof port !== 'number') { @@ -1360,7 +1447,7 @@ export default class WarpGraph { const normalizedPath = path.startsWith('/') ? path : `/${path}`; const { createServer } = await import('node:http'); - const server = createServer(async (req, res) => { + const server = createServer((req, res) => { const contentType = (req.headers['content-type'] || '').toLowerCase(); if (contentType && !contentType.startsWith('application/json')) { res.writeHead(400, { 'content-type': 'application/json' }); @@ -1488,8 +1575,8 @@ export default class WarpGraph { */ async writer(writerId) { // Build config adapters for resolveWriterId - const configGet = async (key) => this._persistence.configGet(key); - const configSet = async (key, value) => this._persistence.configSet(key, value); + const configGet = async (key) => await this._persistence.configGet(key); + const configSet = async (key, value) => await this._persistence.configSet(key, value); // Resolve the writer ID const resolvedWriterId = await resolveWriterId({ @@ -1505,6 +1592,16 @@ export default class WarpGraph { writerId: resolvedWriterId, versionVector: this._versionVector, getCurrentState: () => this._cachedState, + onCommitSuccess: ({ patch, sha } = {}) => { + vvIncrement(this._versionVector, resolvedWriterId); + this._patchesSinceCheckpoint++; + if (this._cachedState && !this._stateDirty && patch && sha) { + joinPatch(this._cachedState, patch, sha); + this._setMaterializedState(this._cachedState); + } else { + this._stateDirty = true; + } + }, }); } @@ -1548,9 +1645,49 @@ export 
default class WarpGraph { writerId: freshWriterId, versionVector: this._versionVector, getCurrentState: () => this._cachedState, + onCommitSuccess: ({ patch, sha } = {}) => { + vvIncrement(this._versionVector, freshWriterId); + this._patchesSinceCheckpoint++; + if (this._cachedState && !this._stateDirty && patch && sha) { + joinPatch(this._cachedState, patch, sha); + this._setMaterializedState(this._cachedState); + } else { + this._stateDirty = true; + } + }, }); } + // ============================================================================ + // Auto-Materialize Guard + // ============================================================================ + + /** + * Ensures cached state is fresh. When autoMaterialize is enabled, + * materializes if state is null or dirty. Otherwise throws. + * + * @returns {Promise} + * @throws {QueryError} If no cached state and autoMaterialize is off (code: `E_NO_STATE`) + * @throws {QueryError} If cached state is dirty and autoMaterialize is off (code: `E_STALE_STATE`) + * @private + */ + async _ensureFreshState() { + if (this._autoMaterialize && (!this._cachedState || this._stateDirty)) { + await this.materialize(); + return; + } + if (!this._cachedState) { + throw new QueryError('No cached state. Call materialize() first.', { + code: 'E_NO_STATE', + }); + } + if (this._stateDirty) { + throw new QueryError('Cached state is dirty. Call materialize() to refresh.', { + code: 'E_STALE_STATE', + }); + } + } + // ============================================================================ // Query API (Task 7) - Queries on Materialized WARP State // ============================================================================ @@ -1570,19 +1707,18 @@ export default class WarpGraph { * **Requires a cached state.** Call materialize() first if not already cached. 
* * @param {string} nodeId - The node ID to check - * @returns {boolean} True if the node exists in the materialized state - * @throws {Error} If no cached state exists + * @returns {Promise} True if the node exists in the materialized state + * @throws {QueryError} If no cached state exists (code: `E_NO_STATE`) + * @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`) * * @example * await graph.materialize(); - * if (graph.hasNode('user:alice')) { + * if (await graph.hasNode('user:alice')) { * console.log('Alice exists in the graph'); * } */ - hasNode(nodeId) { - if (!this._cachedState) { - throw new Error('No cached state. Call materialize() first.'); - } + async hasNode(nodeId) { + await this._ensureFreshState(); return orsetContains(this._cachedState.nodeAlive, nodeId); } @@ -1595,20 +1731,19 @@ export default class WarpGraph { * **Requires a cached state.** Call materialize() first if not already cached. * * @param {string} nodeId - The node ID to get properties for - * @returns {Map|null} Map of property key → value, or null if node doesn't exist - * @throws {Error} If no cached state exists + * @returns {Promise|null>} Map of property key → value, or null if node doesn't exist + * @throws {QueryError} If no cached state exists (code: `E_NO_STATE`) + * @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`) * * @example * await graph.materialize(); - * const props = graph.getNodeProps('user:alice'); + * const props = await graph.getNodeProps('user:alice'); * if (props) { * console.log('Name:', props.get('name')); * } */ - getNodeProps(nodeId) { - if (!this._cachedState) { - throw new Error('No cached state. 
Call materialize() first.'); - } + async getNodeProps(nodeId) { + await this._ensureFreshState(); // Check if node exists if (!orsetContains(this._cachedState.nodeAlive, nodeId)) { @@ -1640,20 +1775,19 @@ export default class WarpGraph { * @param {string} nodeId - The node ID to get neighbors for * @param {'outgoing' | 'incoming' | 'both'} [direction='both'] - Edge direction to follow * @param {string} [edgeLabel] - Optional edge label filter - * @returns {Array<{nodeId: string, label: string, direction: 'outgoing' | 'incoming'}>} Array of neighbor info - * @throws {Error} If no cached state exists + * @returns {Promise>} Array of neighbor info + * @throws {QueryError} If no cached state exists (code: `E_NO_STATE`) + * @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`) * * @example * await graph.materialize(); * // Get all outgoing neighbors - * const outgoing = graph.neighbors('user:alice', 'outgoing'); + * const outgoing = await graph.neighbors('user:alice', 'outgoing'); * // Get neighbors connected by 'follows' edges - * const follows = graph.neighbors('user:alice', 'outgoing', 'follows'); + * const follows = await graph.neighbors('user:alice', 'outgoing', 'follows'); */ - neighbors(nodeId, direction = 'both', edgeLabel = undefined) { - if (!this._cachedState) { - throw new Error('No cached state. Call materialize() first.'); - } + async neighbors(nodeId, direction = 'both', edgeLabel = undefined) { + await this._ensureFreshState(); const neighbors = []; @@ -1690,19 +1824,18 @@ export default class WarpGraph { * * **Requires a cached state.** Call materialize() first if not already cached. 
* - * @returns {string[]} Array of node IDs - * @throws {Error} If no cached state exists + * @returns {Promise} Array of node IDs + * @throws {QueryError} If no cached state exists (code: `E_NO_STATE`) + * @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`) * * @example * await graph.materialize(); - * for (const nodeId of graph.getNodes()) { + * for (const nodeId of await graph.getNodes()) { * console.log(nodeId); * } */ - getNodes() { - if (!this._cachedState) { - throw new Error('No cached state. Call materialize() first.'); - } + async getNodes() { + await this._ensureFreshState(); return [...orsetElements(this._cachedState.nodeAlive)]; } @@ -1711,19 +1844,18 @@ export default class WarpGraph { * * **Requires a cached state.** Call materialize() first if not already cached. * - * @returns {Array<{from: string, to: string, label: string}>} Array of edge info - * @throws {Error} If no cached state exists + * @returns {Promise>} Array of edge info + * @throws {QueryError} If no cached state exists (code: `E_NO_STATE`) + * @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`) * * @example * await graph.materialize(); - * for (const edge of graph.getEdges()) { + * for (const edge of await graph.getEdges()) { * console.log(`${edge.from} --${edge.label}--> ${edge.to}`); * } */ - getEdges() { - if (!this._cachedState) { - throw new Error('No cached state. Call materialize() first.'); - } + async getEdges() { + await this._ensureFreshState(); const edges = []; for (const edgeKey of orsetElements(this._cachedState.edgeAlive)) { diff --git a/src/domain/crdt/ORSet.js b/src/domain/crdt/ORSet.js index 8511e3d..939907a 100644 --- a/src/domain/crdt/ORSet.js +++ b/src/domain/crdt/ORSet.js @@ -199,7 +199,7 @@ export function orsetCompact(set, includedVV) { * Tombstones are sorted. 
* * @param {ORSet} set - * @returns {Object} + * @returns {{entries: Array<[*, string[]]>, tombstones: string[]}} */ export function orsetSerialize(set) { // Serialize entries: convert Map to array of [element, sortedDots] @@ -236,7 +236,7 @@ export function orsetSerialize(set) { /** * Deserializes a plain object back to an ORSet. * - * @param {Object} obj + * @param {{entries?: Array<[*, string[]]>, tombstones?: string[]}} obj * @returns {ORSet} */ export function orsetDeserialize(obj) { diff --git a/src/domain/crdt/VersionVector.js b/src/domain/crdt/VersionVector.js index 9010453..855af0b 100644 --- a/src/domain/crdt/VersionVector.js +++ b/src/domain/crdt/VersionVector.js @@ -109,6 +109,7 @@ export function vvSerialize(vv) { * * @param {Object} obj * @returns {VersionVector} + * @throws {Error} If any counter value is not a non-negative integer */ export function vvDeserialize(obj) { const vv = new Map(); diff --git a/src/domain/services/BitmapIndexBuilder.js b/src/domain/services/BitmapIndexBuilder.js index c7a6e83..f6a6cc1 100644 --- a/src/domain/services/BitmapIndexBuilder.js +++ b/src/domain/services/BitmapIndexBuilder.js @@ -104,6 +104,7 @@ export default class BitmapIndexBuilder { * * @param {string} srcSha - Source node SHA (parent) * @param {string} tgtSha - Target node SHA (child) + * @returns {void} */ addEdge(srcSha, tgtSha) { const srcId = this._getOrCreateId(srcSha); @@ -135,7 +136,7 @@ export default class BitmapIndexBuilder { const wrapShard = (data) => ({ version: SHARD_VERSION, checksum: computeChecksum(data), - data: data, + data, }); // Serialize ID mappings (sharded by prefix) diff --git a/src/domain/services/BitmapIndexReader.js b/src/domain/services/BitmapIndexReader.js index 0eced2e..ef88a77 100644 --- a/src/domain/services/BitmapIndexReader.js +++ b/src/domain/services/BitmapIndexReader.js @@ -124,6 +124,7 @@ export default class BitmapIndexReader { * - `shards_rev_XX.json` - Reverse edge bitmaps (child→parents) * * @param {Record} 
shardOids - Map of shard path to blob OID + * @returns {void} * @example * // Typical shardOids structure from IndexRebuildService.load() * reader.setup({ @@ -162,7 +163,7 @@ export default class BitmapIndexReader { * @returns {Promise} Array of parent SHAs */ async getParents(sha) { - return this._getEdges(sha, 'rev'); + return await this._getEdges(sha, 'rev'); } /** @@ -171,7 +172,7 @@ export default class BitmapIndexReader { * @returns {Promise} Array of child SHAs */ async getChildren(sha) { - return this._getEdges(sha, 'fwd'); + return await this._getEdges(sha, 'fwd'); } /** diff --git a/src/domain/services/CheckpointSerializerV5.js b/src/domain/services/CheckpointSerializerV5.js index d3306c6..cbefed7 100644 --- a/src/domain/services/CheckpointSerializerV5.js +++ b/src/domain/services/CheckpointSerializerV5.js @@ -48,8 +48,8 @@ export function serializeFullStateV5(state) { } // Sort by key for determinism propArray.sort((a, b) => { - const keyA = String(a[0]); - const keyB = String(b[0]); + const keyA = /** @type {string} */ (a[0]); + const keyB = /** @type {string} */ (b[0]); return keyA < keyB ? -1 : keyA > keyB ? 
1 : 0; }); diff --git a/src/domain/services/CheckpointService.js b/src/domain/services/CheckpointService.js index 9adc0e1..b9a0edb 100644 --- a/src/domain/services/CheckpointService.js +++ b/src/domain/services/CheckpointService.js @@ -52,7 +52,7 @@ import { encodeEdgeKey, encodePropKey, cloneStateV5, reduceV5 } from './JoinRedu * @returns {Promise} The checkpoint commit SHA */ export async function create({ persistence, graphName, state, frontier, parents = [], compact = true }) { - return createV5({ persistence, graphName, state, frontier, parents, compact }); + return await createV5({ persistence, graphName, state, frontier, parents, compact }); } /** diff --git a/src/domain/services/CommitDagTraversalService.js b/src/domain/services/CommitDagTraversalService.js index 3c090c3..0a47f72 100644 --- a/src/domain/services/CommitDagTraversalService.js +++ b/src/domain/services/CommitDagTraversalService.js @@ -80,9 +80,9 @@ export default class CommitDagTraversalService { */ async _getNeighbors(sha, direction) { if (direction === 'forward') { - return this._indexReader.getChildren(sha); + return await this._indexReader.getChildren(sha); } - return this._indexReader.getParents(sha); + return await this._indexReader.getParents(sha); } /** @@ -815,6 +815,11 @@ export default class CommitDagTraversalService { /** * Reconstructs path from weighted search previous pointers. + * + * @param {Map} previous - Maps each node to its predecessor + * @param {string} from - Start node + * @param {string} to - End node + * @returns {string[]} Path from start to end * @private */ _reconstructWeightedPath(previous, from, to) { @@ -823,6 +828,11 @@ export default class CommitDagTraversalService { /** * Reconstructs path from parent map. 
+ * + * @param {Map} parentMap - Maps each node to its predecessor + * @param {string} from - Start node + * @param {string} to - End node + * @returns {string[]} Path from start to end * @private */ _reconstructPath(parentMap, from, to) { @@ -831,6 +841,13 @@ export default class CommitDagTraversalService { /** * Reconstructs path from bidirectional search. + * + * @param {Map} fwdParent - Forward search predecessor map + * @param {Map} bwdParent - Backward search successor map + * @param {string} from - Start node + * @param {string} to - End node + * @param {string} meeting - Meeting point node + * @returns {string[]} Complete path from start to end * @private */ _reconstructBidirectionalPath(fwdParent, bwdParent, from, to, meeting) { diff --git a/src/domain/services/Frontier.js b/src/domain/services/Frontier.js index 41d94b6..96958a6 100644 --- a/src/domain/services/Frontier.js +++ b/src/domain/services/Frontier.js @@ -15,9 +15,12 @@ export function createFrontier() { /** * Updates the frontier with a new patch. - * @param {Frontier} frontier - Mutated in place - * @param {string} writerId - * @param {string} patchSha + * Mutates the frontier in place. 
+ * + * @param {Frontier} frontier - The frontier to update + * @param {string} writerId - Writer ID + * @param {string} patchSha - Latest patch SHA for this writer + * @returns {void} */ export function updateFrontier(frontier, writerId, patchSha) { frontier.set(writerId, patchSha); diff --git a/src/domain/services/GCPolicy.js b/src/domain/services/GCPolicy.js index f92806b..eb853c7 100644 --- a/src/domain/services/GCPolicy.js +++ b/src/domain/services/GCPolicy.js @@ -36,13 +36,14 @@ import { collectGCMetrics } from './GCMetrics.js'; * @property {number} timeSinceCompaction - Time (ms) since last GC */ -export const DEFAULT_GC_POLICY = { +/** @type {Readonly} */ +export const DEFAULT_GC_POLICY = Object.freeze({ tombstoneRatioThreshold: 0.3, // 30% tombstones triggers GC entryCountThreshold: 50000, // 50K entries triggers GC minPatchesSinceCompaction: 1000, // Min patches between GCs maxTimeSinceCompaction: 86400000, // 24 hours max between GCs compactOnCheckpoint: true, // Auto-compact on checkpoint -}; +}); /** * Determines if GC should run based on metrics and policy. diff --git a/src/domain/services/HealthCheckService.js b/src/domain/services/HealthCheckService.js index a8f5197..384698e 100644 --- a/src/domain/services/HealthCheckService.js +++ b/src/domain/services/HealthCheckService.js @@ -70,7 +70,9 @@ export default class HealthCheckService { /** * Sets the index reader for index health checks. * Call this when an index is loaded. + * * @param {import('./BitmapIndexReader.js').default|null} reader - The index reader, or null to clear + * @returns {void} */ setIndexReader(reader) { this._indexReader = reader; diff --git a/src/domain/services/HookInstaller.js b/src/domain/services/HookInstaller.js new file mode 100644 index 0000000..7add5af --- /dev/null +++ b/src/domain/services/HookInstaller.js @@ -0,0 +1,286 @@ +/** + * HookInstaller — Installs and manages the post-merge Git hook. + * + * Follows hexagonal architecture: all I/O is injected via constructor. 
+ * The service executes a strategy decided by the caller (CLI handler). + * + * @module domain/services/HookInstaller + */ + +import { createRequire } from 'node:module'; +import { join, resolve, dirname } from 'node:path'; +import { fileURLToPath } from 'node:url'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const DEFAULT_TEMPLATE_DIR = resolve(__dirname, '..', '..', 'hooks'); + +const DELIMITER_START_PREFIX = '# --- @git-stunts/empty-graph post-merge hook'; +const DELIMITER_END = '# --- end @git-stunts/empty-graph ---'; +const VERSION_MARKER_PREFIX = '# warp-hook-version:'; +const VERSION_PLACEHOLDER = '__WARP_HOOK_VERSION__'; + +/** + * Classifies an existing hook file's content. + * + * Determines whether the hook is absent, ours (with version), or foreign (third-party). + * + * @param {string|null} content - File content or null if missing + * @returns {{ kind: 'none'|'ours'|'foreign', version?: string, appended?: boolean }} + */ +export function classifyExistingHook(content) { + if (!content || content.trim() === '') { + return { kind: 'none' }; + } + + const versionMatch = extractVersion(content); + if (versionMatch) { + const appended = content.includes(DELIMITER_START_PREFIX); + return { kind: 'ours', version: versionMatch, appended }; + } + + return { kind: 'foreign' }; +} + +function extractVersion(content) { + for (const line of content.split('\n')) { + const trimmed = line.trim(); + if (trimmed.startsWith(VERSION_MARKER_PREFIX)) { + const version = trimmed.slice(VERSION_MARKER_PREFIX.length).trim(); + if (version && version !== VERSION_PLACEHOLDER) { + return version; + } + } + } + return null; +} + +export class HookInstaller { + /** + * Creates a new HookInstaller. 
+ * + * @param {Object} deps - Injected dependencies + * @param {Object} deps.fs - Filesystem adapter with methods: readFileSync, writeFileSync, mkdirSync, existsSync, chmodSync, copyFileSync + * @param {(repoPath: string, key: string) => string|null} deps.execGitConfig - Function to read git config values + * @param {string} [deps.version] - Package version (default: read from package.json) + * @param {string} [deps.templateDir] - Directory containing hook templates + */ + constructor({ fs, execGitConfig, version, templateDir } = {}) { + this._fs = fs; + this._execGitConfig = execGitConfig; + this._templateDir = templateDir || DEFAULT_TEMPLATE_DIR; + this._version = version || readPackageVersion(); + } + + /** + * Get the current hook status for a repo. + * + * @param {string} repoPath - Path to git repo + * @returns {{ installed: boolean, version?: string, current: boolean, hookPath: string }} + */ + getHookStatus(repoPath) { + const hookPath = this._resolveHookPath(repoPath); + const content = this._readFile(hookPath); + const classification = classifyExistingHook(content); + + if (classification.kind === 'none') { + return { installed: false, hookPath }; + } + + if (classification.kind === 'foreign') { + return { installed: false, foreign: true, hookPath }; + } + + const current = classification.version === this._version; + return { + installed: true, + version: classification.version, + current, + hookPath, + }; + } + + /** + * Installs the post-merge hook. 
+ * + * @param {string} repoPath - Path to git repo + * @param {Object} opts - Install options + * @param {'install'|'upgrade'|'append'|'replace'} opts.strategy - Installation strategy + * @returns {{ action: string, hookPath: string, version: string, backupPath?: string }} + * @throws {Error} If the strategy is unknown + */ + install(repoPath, { strategy }) { + const hooksDir = this._resolveHooksDir(repoPath); + const hookPath = join(hooksDir, 'post-merge'); + const template = this._loadTemplate(); + const stamped = this._stampVersion(template); + + this._ensureDir(hooksDir); + + if (strategy === 'install') { + return this._freshInstall(hookPath, stamped); + } + if (strategy === 'upgrade') { + return this._upgradeInstall(hookPath, stamped); + } + if (strategy === 'append') { + return this._appendInstall(hookPath, stamped); + } + if (strategy === 'replace') { + return this._replaceInstall(hookPath, stamped); + } + + throw new Error(`Unknown install strategy: ${strategy}`); + } + + _freshInstall(hookPath, content) { + this._fs.writeFileSync(hookPath, content, { mode: 0o755 }); + this._fs.chmodSync(hookPath, 0o755); + return { + action: 'installed', + hookPath, + version: this._version, + }; + } + + _upgradeInstall(hookPath, stamped) { + const existing = this._readFile(hookPath); + const classification = classifyExistingHook(existing); + + if (classification.appended) { + const updated = replaceDelimitedSection(existing, stamped); + // If delimiters were corrupted, replaceDelimitedSection returns unchanged content — fall back to overwrite + if (updated === existing) { + this._fs.writeFileSync(hookPath, stamped, { mode: 0o755 }); + } else { + this._fs.writeFileSync(hookPath, updated, { mode: 0o755 }); + } + } else { + this._fs.writeFileSync(hookPath, stamped, { mode: 0o755 }); + } + this._fs.chmodSync(hookPath, 0o755); + + return { + action: 'upgraded', + hookPath, + version: this._version, + }; + } + + _appendInstall(hookPath, stamped) { + const existing = 
this._readFile(hookPath) || ''; + const body = stripShebang(stamped); + const appended = buildAppendedContent(existing, body); + this._fs.writeFileSync(hookPath, appended, { mode: 0o755 }); + this._fs.chmodSync(hookPath, 0o755); + return { + action: 'appended', + hookPath, + version: this._version, + }; + } + + _replaceInstall(hookPath, stamped) { + const existing = this._readFile(hookPath); + let backupPath; + if (existing) { + backupPath = `${hookPath}.backup`; + this._fs.writeFileSync(backupPath, existing); + this._fs.chmodSync(backupPath, 0o755); + } + + this._fs.writeFileSync(hookPath, stamped, { mode: 0o755 }); + this._fs.chmodSync(hookPath, 0o755); + return { + action: 'replaced', + hookPath, + version: this._version, + backupPath, + }; + } + + _loadTemplate() { + const templatePath = join(this._templateDir, 'post-merge.sh'); + return this._fs.readFileSync(templatePath, 'utf8'); + } + + _stampVersion(template) { + return template.replaceAll(VERSION_PLACEHOLDER, this._version); + } + + _resolveHooksDir(repoPath) { + const customPath = this._execGitConfig(repoPath, 'core.hooksPath'); + if (customPath) { + return resolveHooksPath(customPath, repoPath); + } + + const gitDir = this._execGitConfig(repoPath, '--git-dir'); + if (gitDir) { + return join(resolve(repoPath, gitDir), 'hooks'); + } + + return join(repoPath, '.git', 'hooks'); + } + + _resolveHookPath(repoPath) { + return join(this._resolveHooksDir(repoPath), 'post-merge'); + } + + _readFile(filePath) { + try { + return this._fs.readFileSync(filePath, 'utf8'); + } catch { + return null; + } + } + + _ensureDir(dirPath) { + if (!this._fs.existsSync(dirPath)) { + this._fs.mkdirSync(dirPath, { recursive: true }); + } + } +} + +function resolveHooksPath(customPath, repoPath) { + if (customPath.startsWith('/')) { + return customPath; + } + return resolve(repoPath, customPath); +} + +function readPackageVersion() { + const require = createRequire(import.meta.url); + const pkg = require('../../../package.json'); + 
return pkg.version; +} + +function stripShebang(content) { + const lines = content.split('\n'); + if (lines[0] && lines[0].startsWith('#!')) { + return lines.slice(1).join('\n'); + } + return content; +} + +function buildAppendedContent(existing, body) { + const trimmed = existing.trimEnd(); + return `${trimmed}\n\n${body}`; +} + +function replaceDelimitedSection(existing, stamped) { + const body = stripShebang(stamped); + const startIdx = existing.indexOf(DELIMITER_START_PREFIX); + const endIdx = existing.indexOf(DELIMITER_END); + + if (startIdx === -1 || endIdx === -1) { + return existing; + } + + const endOfEnd = endIdx + DELIMITER_END.length; + const before = existing.slice(0, startIdx).trimEnd(); + const after = existing.slice(endOfEnd).trimStart(); + const parts = [before, '', body]; + if (after) { + parts.push(after); + } + return parts.join('\n'); +} diff --git a/src/domain/services/IndexRebuildService.js b/src/domain/services/IndexRebuildService.js index ea35541..745d981 100644 --- a/src/domain/services/IndexRebuildService.js +++ b/src/domain/services/IndexRebuildService.js @@ -159,7 +159,7 @@ export default class IndexRebuildService { } } - return this._persistIndex(builder); + return await this._persistIndex(builder); } /** @@ -203,7 +203,7 @@ export default class IndexRebuildService { } } - return builder.finalize({ signal }); + return await builder.finalize({ signal }); } /** @@ -223,7 +223,7 @@ export default class IndexRebuildService { const oid = await this.storage.writeBlob(buffer); flatEntries.push(`100644 blob ${oid}\t${path}`); } - return this.storage.writeTree(flatEntries); + return await this.storage.writeTree(flatEntries); } /** diff --git a/src/domain/services/JoinReducer.js b/src/domain/services/JoinReducer.js index 864f4c3..e790c9a 100644 --- a/src/domain/services/JoinReducer.js +++ b/src/domain/services/JoinReducer.js @@ -105,6 +105,9 @@ export function applyOpV2(state, op, eventId) { state.prop.set(key, lwwMax(current, lwwSet(eventId, 
op.value))); break; } + default: + // Unknown op types are silently ignored (forward-compat) + break; } } diff --git a/src/domain/services/LogicalTraversal.js b/src/domain/services/LogicalTraversal.js index 6655fac..5ff26de 100644 --- a/src/domain/services/LogicalTraversal.js +++ b/src/domain/services/LogicalTraversal.js @@ -71,7 +71,9 @@ function getNeighbors({ nodeId, direction, adjacency, labelSet }) { export default class LogicalTraversal { /** - * @param {import('../WarpGraph.js').default} graph + * Creates a new LogicalTraversal. + * + * @param {import('../WarpGraph.js').default} graph - The WarpGraph instance to traverse */ constructor(graph) { this._graph = graph; @@ -80,7 +82,7 @@ export default class LogicalTraversal { async _prepare(start, { dir, labelFilter, maxDepth }) { const materialized = await this._graph._materializeGraph(); - if (!this._graph.hasNode(start)) { + if (!(await this._graph.hasNode(start))) { throw new TraversalError(`Start node not found: ${start}`, { code: 'NODE_NOT_FOUND', context: { start }, @@ -98,9 +100,13 @@ export default class LogicalTraversal { /** * Breadth-first traversal. * - * @param {string} start - * @param {{maxDepth?: number, dir?: 'out'|'in'|'both', labelFilter?: string | string[]}} [options] + * @param {string} start - Starting node ID + * @param {Object} [options] - Traversal options + * @param {number} [options.maxDepth] - Maximum depth to traverse + * @param {'out'|'in'|'both'} [options.dir] - Edge direction to follow + * @param {string|string[]} [options.labelFilter] - Edge label(s) to include * @returns {Promise} Node IDs in visit order + * @throws {TraversalError} If the start node is not found or direction is invalid */ async bfs(start, options = {}) { const { dir, labelSet, adjacency, depthLimit } = await this._prepare(start, options); @@ -144,9 +150,13 @@ export default class LogicalTraversal { /** * Depth-first traversal (pre-order). 
* - * @param {string} start - * @param {{maxDepth?: number, dir?: 'out'|'in'|'both', labelFilter?: string | string[]}} [options] + * @param {string} start - Starting node ID + * @param {Object} [options] - Traversal options + * @param {number} [options.maxDepth] - Maximum depth to traverse + * @param {'out'|'in'|'both'} [options.dir] - Edge direction to follow + * @param {string|string[]} [options.labelFilter] - Edge label(s) to include * @returns {Promise} Node IDs in visit order + * @throws {TraversalError} If the start node is not found or direction is invalid */ async dfs(start, options = {}) { const { dir, labelSet, adjacency, depthLimit } = await this._prepare(start, options); @@ -191,10 +201,14 @@ export default class LogicalTraversal { /** * Shortest path (unweighted) using BFS. * - * @param {string} from - * @param {string} to - * @param {{maxDepth?: number, dir?: 'out'|'in'|'both', labelFilter?: string | string[]}} [options] + * @param {string} from - Source node ID + * @param {string} to - Target node ID + * @param {Object} [options] - Traversal options + * @param {number} [options.maxDepth] - Maximum search depth + * @param {'out'|'in'|'both'} [options.dir] - Edge direction to follow + * @param {string|string[]} [options.labelFilter] - Edge label(s) to include * @returns {Promise<{found: boolean, path: string[], length: number}>} + * @throws {TraversalError} If the start node is not found or direction is invalid */ async shortestPath(from, to, options = {}) { const { dir, labelSet, adjacency, depthLimit } = await this._prepare(from, options); @@ -250,11 +264,13 @@ export default class LogicalTraversal { /** * Connected component (undirected by default). 
* - * @param {string} start - * @param {{labelFilter?: string | string[]}} [options] + * @param {string} start - Starting node ID + * @param {Object} [options] - Traversal options + * @param {string|string[]} [options.labelFilter] - Edge label(s) to include * @returns {Promise} Node IDs in visit order + * @throws {TraversalError} If the start node is not found */ async connectedComponent(start, options = {}) { - return this.bfs(start, { ...options, dir: 'both' }); + return await this.bfs(start, { ...options, dir: 'both' }); } } diff --git a/src/domain/services/PatchBuilderV2.js b/src/domain/services/PatchBuilderV2.js index 06856d1..13287e8 100644 --- a/src/domain/services/PatchBuilderV2.js +++ b/src/domain/services/PatchBuilderV2.js @@ -268,9 +268,9 @@ export class PatchBuilderV2 { // 9. Update writer ref to point to new commit await this._persistence.updateRef(writerRef, newCommitSha); - // 10. Notify success callback (updates graph's version vector) + // 10. Notify success callback (updates graph's version vector + eager re-materialize) if (this._onCommitSuccess) { - this._onCommitSuccess(); + this._onCommitSuccess({ patch, sha: newCommitSha }); } // 11. Return the new commit SHA diff --git a/src/domain/services/QueryBuilder.js b/src/domain/services/QueryBuilder.js index 7239918..1f4f767 100644 --- a/src/domain/services/QueryBuilder.js +++ b/src/domain/services/QueryBuilder.js @@ -157,11 +157,18 @@ function applyHop({ direction, label, workingSet, adjacency }) { } /** - * Fluent query builder. + * Fluent query builder for materialized WARP state. + * + * Supports pattern matching, predicate filtering, multi-hop traversal + * over outgoing/incoming edges, and field selection. + * + * @throws {QueryError} On invalid match patterns, where predicates, label types, or select fields */ export default class QueryBuilder { /** - * @param {import('../WarpGraph.js').default} graph + * Creates a new QueryBuilder. 
+ * + * @param {import('../WarpGraph.js').default} graph - The WarpGraph instance to query */ constructor(graph) { this._graph = graph; @@ -235,13 +242,15 @@ export default class QueryBuilder { } /** - * Runs the query and returns a result. - * @returns {Promise<{stateHash: string, nodes: string[]}>} + * Runs the query and returns matching nodes with their state hash. + * + * @returns {Promise<{stateHash: string, nodes: Array<{id?: string, props?: Record}>}>} + * @throws {QueryError} If an unknown select field is specified */ async run() { const materialized = await this._graph._materializeGraph(); const { adjacency, stateHash } = materialized; - const allNodes = sortIds(this._graph.getNodes()); + const allNodes = sortIds(await this._graph.getNodes()); const pattern = this._pattern ?? DEFAULT_PATTERN; @@ -250,21 +259,20 @@ export default class QueryBuilder { for (const op of this._operations) { if (op.type === 'where') { - const filtered = []; - for (const nodeId of workingSet) { - const propsMap = this._graph.getNodeProps(nodeId) || new Map(); - const edgesOut = adjacency.outgoing.get(nodeId) || []; - const edgesIn = adjacency.incoming.get(nodeId) || []; - const snapshot = createNodeSnapshot({ - id: nodeId, - propsMap, - edgesOut, - edgesIn, - }); - if (op.fn(snapshot)) { - filtered.push(nodeId); - } - } + const snapshots = await Promise.all( + workingSet.map(async (nodeId) => { + const propsMap = (await this._graph.getNodeProps(nodeId)) || new Map(); + const edgesOut = adjacency.outgoing.get(nodeId) || []; + const edgesIn = adjacency.incoming.get(nodeId) || []; + return { + nodeId, + snapshot: createNodeSnapshot({ id: nodeId, propsMap, edgesOut, edgesIn }), + }; + }) + ); + const filtered = snapshots + .filter(({ snapshot }) => op.fn(snapshot)) + .map(({ nodeId }) => nodeId); workingSet = sortIds(filtered); continue; } @@ -296,20 +304,22 @@ export default class QueryBuilder { const includeId = !selectFields || selectFields.includes('id'); const includeProps = 
!selectFields || selectFields.includes('props'); - const nodes = workingSet.map((nodeId) => { - const entry = {}; - if (includeId) { - entry.id = nodeId; - } - if (includeProps) { - const propsMap = this._graph.getNodeProps(nodeId) || new Map(); - const props = buildPropsSnapshot(propsMap); - if (selectFields || Object.keys(props).length > 0) { - entry.props = props; + const nodes = await Promise.all( + workingSet.map(async (nodeId) => { + const entry = {}; + if (includeId) { + entry.id = nodeId; } - } - return entry; - }); + if (includeProps) { + const propsMap = (await this._graph.getNodeProps(nodeId)) || new Map(); + const props = buildPropsSnapshot(propsMap); + if (selectFields || Object.keys(props).length > 0) { + entry.props = props; + } + } + return entry; + }) + ); return { stateHash, nodes }; } diff --git a/src/domain/services/StreamingBitmapIndexBuilder.js b/src/domain/services/StreamingBitmapIndexBuilder.js index b4eb0b3..36ac743 100644 --- a/src/domain/services/StreamingBitmapIndexBuilder.js +++ b/src/domain/services/StreamingBitmapIndexBuilder.js @@ -138,8 +138,8 @@ export default class StreamingBitmapIndexBuilder { * @param {string} sha - The node's SHA * @returns {Promise} The assigned numeric ID */ - async registerNode(sha) { - return this._getOrCreateId(sha); + registerNode(sha) { + return Promise.resolve(this._getOrCreateId(sha)); } /** @@ -283,7 +283,7 @@ export default class StreamingBitmapIndexBuilder { * @private */ async _writeMetaShards(idShards) { - return Promise.all( + return await Promise.all( Object.entries(idShards).map(async ([prefix, map]) => { const path = `meta_${prefix}.json`; const envelope = { @@ -307,7 +307,7 @@ export default class StreamingBitmapIndexBuilder { * @private */ async _processBitmapShards({ signal } = {}) { - return Promise.all( + return await Promise.all( Array.from(this.flushedChunks.entries()).map(async ([path, oids]) => { checkAborted(signal, 'processBitmapShards'); const finalOid = oids.length === 1 ? 
oids[0] : await this._mergeChunks(oids, { signal }); diff --git a/src/domain/services/WarpStateIndexBuilder.js b/src/domain/services/WarpStateIndexBuilder.js index 11ae0de..e2a33cd 100644 --- a/src/domain/services/WarpStateIndexBuilder.js +++ b/src/domain/services/WarpStateIndexBuilder.js @@ -45,6 +45,7 @@ export default class WarpStateIndexBuilder { * * @param {import('./JoinReducer.js').WarpStateV5} state - The materialized state * @returns {{builder: BitmapIndexBuilder, stats: {nodes: number, edges: number}}} The populated builder and stats + * @throws {Error} If state is null or missing nodeAlive/edgeAlive fields * * @example * const state = await graph.materialize(); diff --git a/src/domain/types/WarpTypes.js b/src/domain/types/WarpTypes.js index 326438c..f98ff9c 100644 --- a/src/domain/types/WarpTypes.js +++ b/src/domain/types/WarpTypes.js @@ -86,6 +86,56 @@ export function createBlobValue(oid) { return { type: 'blob', oid }; } +// ============================================================================ +// Operations +// ============================================================================ + +/** + * Node add operation + * @typedef {Object} OpNodeAdd + * @property {'NodeAdd'} type - Operation type discriminator + * @property {NodeId} node - Node ID to add + */ + +/** + * Node tombstone operation + * @typedef {Object} OpNodeTombstone + * @property {'NodeTombstone'} type - Operation type discriminator + * @property {NodeId} node - Node ID to tombstone + */ + +/** + * Edge add operation + * @typedef {Object} OpEdgeAdd + * @property {'EdgeAdd'} type - Operation type discriminator + * @property {NodeId} from - Source node ID + * @property {NodeId} to - Target node ID + * @property {string} label - Edge label/type + */ + +/** + * Edge tombstone operation + * @typedef {Object} OpEdgeTombstone + * @property {'EdgeTombstone'} type - Operation type discriminator + * @property {NodeId} from - Source node ID + * @property {NodeId} to - Target node ID + * 
@property {string} label - Edge label/type + */ + +/** + * Property set operation + * @typedef {Object} OpPropSet + * @property {'PropSet'} type - Operation type discriminator + * @property {NodeId} node - Node ID to set property on + * @property {string} key - Property key + * @property {ValueRef} value - Property value reference + */ + +/** + * Any graph operation + * @typedef {OpNodeAdd | OpNodeTombstone | OpEdgeAdd | OpEdgeTombstone | OpPropSet} Op + */ + // ============================================================================ // Factory Functions - Operations // ============================================================================ diff --git a/src/domain/utils/MinHeap.js b/src/domain/utils/MinHeap.js index d3fc271..12f8796 100644 --- a/src/domain/utils/MinHeap.js +++ b/src/domain/utils/MinHeap.js @@ -5,6 +5,9 @@ * @class MinHeap */ class MinHeap { + /** + * Creates an empty MinHeap. + */ constructor() { /** @type {Array<{item: *, priority: number}>} */ this.heap = []; @@ -67,14 +70,15 @@ class MinHeap { * Restore heap property by bubbling up from index. * * @private - * @param {number} index - Starting index + * @param {number} pos - Starting index */ - _bubbleUp(index) { - while (index > 0) { - const parentIndex = Math.floor((index - 1) / 2); - if (this.heap[parentIndex].priority <= this.heap[index].priority) { break; } - [this.heap[parentIndex], this.heap[index]] = [this.heap[index], this.heap[parentIndex]]; - index = parentIndex; + _bubbleUp(pos) { + let current = pos; + while (current > 0) { + const parentIndex = Math.floor((current - 1) / 2); + if (this.heap[parentIndex].priority <= this.heap[current].priority) { break; } + [this.heap[parentIndex], this.heap[current]] = [this.heap[current], this.heap[parentIndex]]; + current = parentIndex; } } @@ -82,14 +86,15 @@ class MinHeap { * Restore heap property by bubbling down from index. 
* * @private - * @param {number} index - Starting index + * @param {number} pos - Starting index */ - _bubbleDown(index) { - const length = this.heap.length; + _bubbleDown(pos) { + const {length} = this.heap; + let current = pos; while (true) { - const leftChild = 2 * index + 1; - const rightChild = 2 * index + 2; - let smallest = index; + const leftChild = 2 * current + 1; + const rightChild = 2 * current + 2; + let smallest = current; if (leftChild < length && this.heap[leftChild].priority < this.heap[smallest].priority) { smallest = leftChild; @@ -97,10 +102,10 @@ class MinHeap { if (rightChild < length && this.heap[rightChild].priority < this.heap[smallest].priority) { smallest = rightChild; } - if (smallest === index) { break; } + if (smallest === current) { break; } - [this.heap[index], this.heap[smallest]] = [this.heap[smallest], this.heap[index]]; - index = smallest; + [this.heap[current], this.heap[smallest]] = [this.heap[smallest], this.heap[current]]; + current = smallest; } } } diff --git a/src/domain/utils/WriterId.js b/src/domain/utils/WriterId.js index 3de7534..ffa202d 100644 --- a/src/domain/utils/WriterId.js +++ b/src/domain/utils/WriterId.js @@ -47,6 +47,7 @@ const CANONICAL_RE = /^w_[0-9a-hjkmnp-tv-z]{26}$/; * - Total length: 28 chars * * @param {string} id - The writer ID to validate + * @returns {void} * @throws {WriterIdError} If the ID is not canonical * * @example diff --git a/src/domain/warp/PatchSession.js b/src/domain/warp/PatchSession.js index d210373..a131cac 100644 --- a/src/domain/warp/PatchSession.js +++ b/src/domain/warp/PatchSession.js @@ -60,9 +60,7 @@ export class PatchSession { * * @param {string} nodeId - The node ID to add * @returns {this} This session for chaining - * - * @example - * patch.addNode('user:alice'); + * @throws {Error} If this session has already been committed */ addNode(nodeId) { this._ensureNotCommitted(); @@ -77,9 +75,7 @@ export class PatchSession { * * @param {string} nodeId - The node ID to remove * 
@returns {this} This session for chaining - * - * @example - * patch.removeNode('user:alice'); + * @throws {Error} If this session has already been committed */ removeNode(nodeId) { this._ensureNotCommitted(); @@ -94,9 +90,7 @@ export class PatchSession { * @param {string} to - Target node ID * @param {string} label - Edge label/type * @returns {this} This session for chaining - * - * @example - * patch.addEdge('user:alice', 'user:bob', 'follows'); + * @throws {Error} If this session has already been committed */ addEdge(from, to, label) { this._ensureNotCommitted(); @@ -113,9 +107,7 @@ export class PatchSession { * @param {string} to - Target node ID * @param {string} label - Edge label/type * @returns {this} This session for chaining - * - * @example - * patch.removeEdge('user:alice', 'user:bob', 'follows'); + * @throws {Error} If this session has already been committed */ removeEdge(from, to, label) { this._ensureNotCommitted(); @@ -130,10 +122,7 @@ export class PatchSession { * @param {string} key - Property key * @param {*} value - Property value (must be JSON-serializable) * @returns {this} This session for chaining - * - * @example - * patch.setProperty('user:alice', 'name', 'Alice'); - * patch.setProperty('user:alice', 'age', 30); + * @throws {Error} If this session has already been committed */ setProperty(nodeId, key, value) { this._ensureNotCommitted(); diff --git a/src/domain/warp/Writer.js b/src/domain/warp/Writer.js index fa155fe..023c470 100644 --- a/src/domain/warp/Writer.js +++ b/src/domain/warp/Writer.js @@ -22,6 +22,13 @@ import { vvClone } from '../crdt/VersionVector.js'; /** * Error class for Writer operations. 
+ * + * @class WriterError + * @extends Error + * + * @property {string} name - The error name ('WriterError') + * @property {string} code - Error code for programmatic handling + * @property {Error} [cause] - Original error that caused this error */ export class WriterError extends Error { /** @@ -39,6 +46,8 @@ export class WriterError extends Error { /** * Writer class for creating and committing patches to a WARP graph. + * + * @class Writer */ export class Writer { /** @@ -50,8 +59,9 @@ export class Writer { * @param {string} options.writerId - This writer's ID * @param {import('../crdt/VersionVector.js').VersionVector} options.versionVector - Current version vector * @param {Function} options.getCurrentState - Function to get current materialized state + * @param {Function} [options.onCommitSuccess] - Callback invoked after successful commit with { patch, sha } */ - constructor({ persistence, graphName, writerId, versionVector, getCurrentState }) { + constructor({ persistence, graphName, writerId, versionVector, getCurrentState, onCommitSuccess }) { validateWriterId(writerId); /** @type {import('../../ports/GraphPersistencePort.js').default} */ @@ -68,6 +78,9 @@ export class Writer { /** @type {Function} */ this._getCurrentState = getCurrentState; + + /** @type {Function|undefined} */ + this._onCommitSuccess = onCommitSuccess; } /** @@ -93,7 +106,7 @@ export class Writer { */ async head() { const writerRef = buildWriterRef(this._graphName, this._writerId); - return this._persistence.readRef(writerRef); + return await this._persistence.readRef(writerRef); } /** @@ -140,6 +153,7 @@ export class Writer { versionVector: vvClone(this._versionVector), getCurrentState: this._getCurrentState, expectedParentSha: expectedOldHead, + onCommitSuccess: this._onCommitSuccess, }); // Return PatchSession wrapping the builder @@ -157,6 +171,9 @@ export class Writer { * * @param {(p: PatchSession) => void | Promise} build - Function to build the patch * @returns {Promise} The 
commit SHA of the new patch + * @throws {WriterError} EMPTY_PATCH if no operations were added + * @throws {WriterError} WRITER_REF_ADVANCED if CAS fails (ref moved since beginPatch) + * @throws {WriterError} PERSIST_WRITE_FAILED if git operations fail * * @example * const sha = await writer.commitPatch(p => { @@ -167,6 +184,6 @@ export class Writer { async commitPatch(build) { const patch = await this.beginPatch(); await build(patch); - return patch.commit(); + return await patch.commit(); } } diff --git a/src/hooks/post-merge.sh b/src/hooks/post-merge.sh new file mode 100755 index 0000000..74ed339 --- /dev/null +++ b/src/hooks/post-merge.sh @@ -0,0 +1,60 @@ +#!/bin/sh +# --- @git-stunts/empty-graph post-merge hook __WARP_HOOK_VERSION__ --- +# warp-hook-version: __WARP_HOOK_VERSION__ +# +# Post-merge hook: notify (or auto-materialize) when warp refs changed. +# +# Compares refs/empty-graph/ before and after merge by maintaining +# a snapshot file at .git/warp-ref-snapshot. If any warp writer refs +# changed and warp.autoMaterialize is true, runs `git warp materialize`. +# Otherwise prints an informational message advising re-materialization. +# Always exits 0 — never blocks a merge. 
+ +GIT_DIR=$(git rev-parse --git-dir 2>/dev/null) || exit 0 +SNAPSHOT="${GIT_DIR}/warp-ref-snapshot" + +# Capture current warp refs (sorted for stable comparison) +CURRENT=$(git for-each-ref --format='%(refname) %(objectname)' --sort=refname refs/empty-graph/ 2>/dev/null) || true + +if [ -z "$CURRENT" ]; then + # No warp refs exist — clean up any stale snapshot and exit + rm -f "$SNAPSHOT" + exit 0 +fi + +CHANGED=0 + +if [ -f "$SNAPSHOT" ]; then + PREVIOUS=$(cat "$SNAPSHOT") + if [ "$CURRENT" != "$PREVIOUS" ]; then + CHANGED=1 + fi +else + # First encounter — refs exist but no snapshot yet + CHANGED=1 +fi + +# Save current state for next comparison +printf '%s\n' "$CURRENT" > "$SNAPSHOT" + +if [ "$CHANGED" -eq 0 ]; then + exit 0 +fi + +AUTO_MAT=$(git config --bool warp.autoMaterialize 2>/dev/null) || true + +if [ "$AUTO_MAT" = "true" ]; then + echo "[warp] Refs changed — auto-materializing..." + if command -v git-warp >/dev/null 2>&1; then + git-warp materialize || echo "[warp] Warning: auto-materialize failed." + elif command -v warp-graph >/dev/null 2>&1; then + warp-graph materialize || echo "[warp] Warning: auto-materialize failed." + else + echo "[warp] Warning: neither git-warp nor warp-graph found in PATH." + fi +else + echo "[warp] Writer refs changed during merge. Call materialize() to see updates." +fi + +exit 0 +# --- end @git-stunts/empty-graph --- diff --git a/src/infrastructure/adapters/GitGraphAdapter.js b/src/infrastructure/adapters/GitGraphAdapter.js index feac056..eab8e12 100644 --- a/src/infrastructure/adapters/GitGraphAdapter.js +++ b/src/infrastructure/adapters/GitGraphAdapter.js @@ -35,12 +35,20 @@ const DEFAULT_RETRY_OPTIONS = { shouldRetry: isTransientError, }; +/** + * Checks whether a Git ref exists without resolving it. 
+ * @param {function(Object): Promise} execute - The git command executor function + * @param {string} ref - The ref to check (e.g., 'refs/warp/events/writers/alice') + * @returns {Promise} True if the ref exists, false otherwise + * @throws {Error} If the git command fails for reasons other than a missing ref + */ async function refExists(execute, ref) { try { await execute({ args: ['show-ref', '--verify', '--quiet', ref] }); return true; } catch (err) { - if (err?.details?.code === 1) { + const exitCode = err?.details?.code ?? err?.exitCode ?? err?.code; + if (exitCode === 1) { return false; } throw err; @@ -49,6 +57,11 @@ async function refExists(execute, ref) { /** * Implementation of GraphPersistencePort using GitPlumbing. + * + * Translates graph persistence operations into Git plumbing commands + * (commit-tree, hash-object, update-ref, etc.). All write operations + * use retry logic with exponential backoff to handle transient Git + * lock contention in concurrent multi-writer scenarios. */ export default class GitGraphAdapter extends GraphPersistencePort { /** @@ -69,13 +82,27 @@ export default class GitGraphAdapter extends GraphPersistencePort { * @private */ async _executeWithRetry(options) { - return retry(() => this.plumbing.execute(options), this._retryOptions); + return await retry(() => this.plumbing.execute(options), this._retryOptions); } + /** + * The well-known SHA for Git's empty tree object. + * @type {string} + * @readonly + */ get emptyTree() { return this.plumbing.emptyTree; } + /** + * Creates a commit pointing to the empty tree. 
+ * @param {Object} options + * @param {string} options.message - The commit message (typically CBOR-encoded patch data) + * @param {string[]} [options.parents=[]] - Parent commit SHAs + * @param {boolean} [options.sign=false] - Whether to GPG-sign the commit + * @returns {Promise} The SHA of the created commit + * @throws {Error} If any parent OID is invalid + */ async commitNode({ message, parents = [], sign = false }) { for (const p of parents) { this._validateOid(p); @@ -111,6 +138,12 @@ export default class GitGraphAdapter extends GraphPersistencePort { return oid.trim(); } + /** + * Retrieves the raw commit message for a given SHA. + * @param {string} sha - The commit SHA to read + * @returns {Promise} The raw commit message content + * @throws {Error} If the SHA is invalid + */ async showNode(sha) { this._validateOid(sha); return await this._executeWithRetry({ args: ['show', '-s', '--format=%B', sha] }); @@ -120,7 +153,8 @@ export default class GitGraphAdapter extends GraphPersistencePort { * Gets full commit metadata for a node. * @param {string} sha - The commit SHA to retrieve * @returns {Promise<{sha: string, message: string, author: string, date: string, parents: string[]}>} - * Full commit metadata + * Full commit metadata including SHA, message, author, date, and parent SHAs + * @throws {Error} If the SHA is invalid or the commit format is malformed */ async getNodeInfo(sha) { this._validateOid(sha); @@ -149,6 +183,15 @@ export default class GitGraphAdapter extends GraphPersistencePort { }; } + /** + * Returns raw git log output for a ref. 
+ * @param {Object} options + * @param {string} options.ref - The Git ref to log from + * @param {number} [options.limit=50] - Maximum number of commits to return + * @param {string} [options.format] - Custom format string for git log + * @returns {Promise} The raw log output + * @throws {Error} If the ref is invalid or the limit is out of range + */ async logNodes({ ref, limit = 50, format }) { this._validateRef(ref); this._validateLimit(limit); @@ -169,7 +212,8 @@ export default class GitGraphAdapter extends GraphPersistencePort { * @param {string} options.ref - The ref to log from * @param {number} [options.limit=1000000] - Maximum number of commits to return * @param {string} [options.format] - Custom format string for git log - * @returns {Promise} A stream of git log output (NUL-terminated records) + * @returns {Promise} A readable stream of git log output (NUL-terminated records) + * @throws {Error} If the ref is invalid or the limit is out of range */ async logNodesStream({ ref, limit = 1000000, format }) { this._validateRef(ref); @@ -215,6 +259,11 @@ export default class GitGraphAdapter extends GraphPersistencePort { } } + /** + * Writes content as a Git blob and returns its OID. + * @param {Buffer|string} content - The blob content to write + * @returns {Promise} The Git OID of the created blob + */ async writeBlob(content) { const oid = await this._executeWithRetry({ args: ['hash-object', '-w', '--stdin'], @@ -223,6 +272,11 @@ export default class GitGraphAdapter extends GraphPersistencePort { return oid.trim(); } + /** + * Creates a Git tree from mktree-formatted entries. 
+ * @param {string[]} entries - Lines in git mktree format (e.g., "100644 blob \t") + * @returns {Promise} The Git OID of the created tree + */ async writeTree(entries) { const oid = await this._executeWithRetry({ args: ['mktree'], @@ -231,6 +285,12 @@ export default class GitGraphAdapter extends GraphPersistencePort { return oid.trim(); } + /** + * Reads a tree and returns a map of path to content. + * Processes blobs sequentially to avoid spawning too many concurrent reads. + * @param {string} treeOid - The tree OID to read + * @returns {Promise>} Map of file path to blob content + */ async readTree(treeOid) { const oids = await this.readTreeOids(treeOid); const files = {}; @@ -241,6 +301,13 @@ export default class GitGraphAdapter extends GraphPersistencePort { return files; } + /** + * Reads a tree and returns a map of path to blob OID. + * Useful for lazy-loading shards without reading all blob contents. + * @param {string} treeOid - The tree OID to read + * @returns {Promise>} Map of file path to blob OID + * @throws {Error} If the tree OID is invalid + */ async readTreeOids(treeOid) { this._validateOid(treeOid); const output = await this._executeWithRetry({ @@ -267,6 +334,12 @@ export default class GitGraphAdapter extends GraphPersistencePort { return oids; } + /** + * Reads the content of a Git blob. + * @param {string} oid - The blob OID to read + * @returns {Promise} The blob content + * @throws {Error} If the OID is invalid + */ async readBlob(oid) { this._validateOid(oid); const stream = await this.plumbing.executeStream({ @@ -277,9 +350,10 @@ export default class GitGraphAdapter extends GraphPersistencePort { /** * Updates a ref to point to an OID. 
- * @param {string} ref - The ref name (e.g., 'refs/empty-graph/index') + * @param {string} ref - The ref name (e.g., 'refs/warp/events/writers/alice') * @param {string} oid - The OID to point to * @returns {Promise} + * @throws {Error} If the ref or OID is invalid */ async updateRef(ref, oid) { this._validateRef(ref); @@ -292,7 +366,8 @@ export default class GitGraphAdapter extends GraphPersistencePort { /** * Reads the OID a ref points to. * @param {string} ref - The ref name - * @returns {Promise} The OID or null if ref doesn't exist + * @returns {Promise} The OID, or null if the ref does not exist + * @throws {Error} If the ref format is invalid */ async readRef(ref) { this._validateRef(ref); @@ -300,16 +375,25 @@ export default class GitGraphAdapter extends GraphPersistencePort { if (!exists) { return null; } - const oid = await this._executeWithRetry({ - args: ['rev-parse', ref] - }); - return oid.trim(); + try { + const oid = await this._executeWithRetry({ + args: ['rev-parse', ref] + }); + return oid.trim(); + } catch (err) { + const exitCode = err?.details?.code ?? err?.exitCode ?? err?.code; + if (exitCode === 1) { + return null; + } + throw err; + } } /** * Deletes a ref. * @param {string} ref - The ref name to delete * @returns {Promise} + * @throws {Error} If the ref format is invalid */ async deleteRef(ref) { this._validateRef(ref); @@ -363,21 +447,27 @@ export default class GitGraphAdapter extends GraphPersistencePort { * Uses `git cat-file -e` for efficient existence checking without loading content. * @param {string} sha - The commit SHA to check * @returns {Promise} True if the node exists, false otherwise + * @throws {Error} If the SHA format is invalid */ async nodeExists(sha) { this._validateOid(sha); try { await this._executeWithRetry({ args: ['cat-file', '-e', sha] }); return true; - } catch { - return false; + } catch (err) { + const exitCode = err?.details?.code ?? err?.exitCode ?? 
err?.code; + if (exitCode === 1) { + return false; + } + throw err; } } /** * Lists refs matching a prefix. - * @param {string} prefix - The ref prefix to match (e.g., 'refs/empty-graph/events/writers/') + * @param {string} prefix - The ref prefix to match (e.g., 'refs/warp/events/writers/') * @returns {Promise} Array of matching ref paths + * @throws {Error} If the prefix is invalid */ async listRefs(prefix) { this._validateRef(prefix); @@ -410,6 +500,7 @@ export default class GitGraphAdapter extends GraphPersistencePort { * Uses `git rev-list --count` for O(1) memory efficiency. * @param {string} ref - Git ref to count from (e.g., 'HEAD', 'main', SHA) * @returns {Promise} The count of reachable nodes + * @throws {Error} If the ref is invalid */ async countNodes(ref) { this._validateRef(ref); @@ -426,6 +517,7 @@ export default class GitGraphAdapter extends GraphPersistencePort { * @param {string} potentialAncestor - The commit that might be an ancestor * @param {string} descendant - The commit that might be a descendant * @returns {Promise} True if potentialAncestor is an ancestor of descendant + * @throws {Error} If either OID is invalid */ async isAncestor(potentialAncestor, descendant) { this._validateOid(potentialAncestor); @@ -444,6 +536,7 @@ export default class GitGraphAdapter extends GraphPersistencePort { * Reads a git config value. 
* @param {string} key - The config key to read (e.g., 'warp.writerId.events') * @returns {Promise} The config value or null if not set + * @throws {Error} If the key format is invalid */ async configGet(key) { this._validateConfigKey(key); @@ -469,6 +562,7 @@ export default class GitGraphAdapter extends GraphPersistencePort { * @param {string} key - The config key to set (e.g., 'warp.writerId.events') * @param {string} value - The value to set * @returns {Promise} + * @throws {Error} If the key format is invalid or value is not a string */ async configSet(key, value) { this._validateConfigKey(key); diff --git a/src/ports/GraphPersistencePort.js b/src/ports/GraphPersistencePort.js index a5c1130..70c58cd 100644 --- a/src/ports/GraphPersistencePort.js +++ b/src/ports/GraphPersistencePort.js @@ -1,21 +1,33 @@ /** - * Port for graph persistence operations. + * Abstract port for graph persistence operations. + * + * Defines the contract for reading and writing graph data to a Git-backed + * storage layer. Concrete adapters (e.g., GitGraphAdapter) implement this + * interface to provide actual Git operations. + * + * All methods throw by default and must be overridden by implementations. + * + * @abstract */ export default class GraphPersistencePort { /** + * Creates a commit pointing to the empty tree. * @param {Object} options - * @param {string} options.message - * @param {string[]} [options.parents] - * @param {boolean} [options.sign] - * @returns {Promise} The SHA of the new node. 
+ * @param {string} options.message - The commit message (typically CBOR-encoded patch data) + * @param {string[]} [options.parents=[]] - Parent commit SHAs for the commit graph + * @param {boolean} [options.sign=false] - Whether to GPG-sign the commit + * @returns {Promise} The SHA of the created commit + * @throws {Error} If not implemented by a concrete adapter */ async commitNode(_options) { throw new Error('Not implemented'); } /** - * @param {string} sha - * @returns {Promise} The raw message content. + * Retrieves the raw commit message for a given SHA. + * @param {string} sha - The commit SHA to read + * @returns {Promise} The raw commit message content + * @throws {Error} If not implemented by a concrete adapter */ async showNode(_sha) { throw new Error('Not implemented'); @@ -25,77 +37,95 @@ export default class GraphPersistencePort { * Gets full commit metadata for a node. * @param {string} sha - The commit SHA to retrieve * @returns {Promise<{sha: string, message: string, author: string, date: string, parents: string[]}>} - * Full commit metadata + * Full commit metadata including SHA, message, author, date, and parent SHAs + * @throws {Error} If not implemented by a concrete adapter */ async getNodeInfo(_sha) { throw new Error('Not implemented'); } /** + * Streams git log output for a ref. * @param {Object} options - * @param {string} options.ref - * @param {number} [options.limit] - * @returns {Promise} A stream of log output. + * @param {string} options.ref - The Git ref to log from + * @param {number} [options.limit=1000000] - Maximum number of commits to return + * @param {string} [options.format] - Custom format string for git log + * @returns {Promise} A readable stream of log output + * @throws {Error} If not implemented by a concrete adapter */ async logNodesStream(_options) { throw new Error('Not implemented'); } /** + * Returns raw git log output for a ref. 
 * @param {Object} options - * @param {string} options.ref - * @param {number} [options.limit] - * @returns {Promise<string>} The raw log output. + * @param {string} options.ref - The Git ref to log from + * @param {number} [options.limit=50] - Maximum number of commits to return + * @param {string} [options.format] - Custom format string for git log + * @returns {Promise<string>} The raw log output + * @throws {Error} If not implemented by a concrete adapter */ async logNodes(_options) { throw new Error('Not implemented'); } /** - * @returns {string} + * The well-known SHA for Git's empty tree object. + * All WARP graph commits point to this tree so that no files appear in the working directory. + * @type {string} + * @readonly */ get emptyTree() { throw new Error('Not implemented'); } /** - * @param {Buffer|string} content - * @returns {Promise<string>} The Git OID. + * Writes content as a Git blob and returns its OID. + * @param {Buffer|string} content - The blob content to write + * @returns {Promise<string>} The Git OID of the created blob + * @throws {Error} If not implemented by a concrete adapter */ async writeBlob(_content) { throw new Error('Not implemented'); } /** - * @param {string[]} entries - Lines for git mktree. - * @returns {Promise<string>} The Git OID of the created tree. + * Creates a Git tree from mktree-formatted entries. + * @param {string[]} entries - Lines in git mktree format (e.g., "100644 blob <oid>\t<path>") + * @returns {Promise<string>} The Git OID of the created tree + * @throws {Error} If not implemented by a concrete adapter */ async writeTree(_entries) { throw new Error('Not implemented'); } /** - * Reads a tree and returns a map of path -> content. - * @param {string} treeOid - * @returns {Promise<Map<string, Buffer>>} + * Reads a tree and returns a map of path to content.
 + * @param {string} treeOid - The tree OID to read + * @returns {Promise<Map<string, Buffer>>} Map of file path to blob content + * @throws {Error} If not implemented by a concrete adapter */ async readTree(_treeOid) { throw new Error('Not implemented'); } /** - * Reads a tree and returns a map of path -> blob OID. + * Reads a tree and returns a map of path to blob OID. * Useful for lazy-loading shards without reading all blob contents. - * @param {string} treeOid - * @returns {Promise<Map<string, string>>} + * @param {string} treeOid - The tree OID to read + * @returns {Promise<Map<string, string>>} Map of file path to blob OID + * @throws {Error} If not implemented by a concrete adapter */ async readTreeOids(_treeOid) { throw new Error('Not implemented'); } /** - * @param {string} oid - * @returns {Promise<Buffer>} + * Reads the content of a Git blob. + * @param {string} oid - The blob OID to read + * @returns {Promise<Buffer>} The blob content + * @throws {Error} If not implemented by a concrete adapter */ async readBlob(_oid) { throw new Error('Not implemented'); @@ -103,9 +133,10 @@ export default class GraphPersistencePort { /** * Updates a ref to point to an OID. - * @param {string} ref - The ref name + * @param {string} ref - The ref name (e.g., 'refs/warp/events/writers/alice') * @param {string} oid - The OID to point to * @returns {Promise<void>} + * @throws {Error} If not implemented by a concrete adapter */ async updateRef(_ref, _oid) { throw new Error('Not implemented'); } @@ -114,7 +145,8 @@ export default class GraphPersistencePort { /** * Reads the OID a ref points to. * @param {string} ref - The ref name - * @returns {Promise<string|null>} The OID or null if ref doesn't exist + * @returns {Promise<string|null>} The OID, or null if the ref does not exist + * @throws {Error} If not implemented by a concrete adapter */ async readRef(_ref) { throw new Error('Not implemented'); @@ -124,6 +156,7 @@ export default class GraphPersistencePort { * Deletes a ref.
 * @param {string} ref - The ref name to delete * @returns {Promise<void>} + * @throws {Error} If not implemented by a concrete adapter */ async deleteRef(_ref) { throw new Error('Not implemented'); } @@ -132,6 +165,7 @@ export default class GraphPersistencePort { /** * Pings the repository to verify accessibility. * @returns {Promise<{ok: boolean, latencyMs: number}>} Health check result with latency + * @throws {Error} If not implemented by a concrete adapter */ async ping() { throw new Error('Not implemented'); } @@ -139,9 +173,9 @@ export default class GraphPersistencePort { /** * Counts nodes reachable from a ref without loading them into memory. - * Uses git rev-list --count for efficiency. - * @param {string} ref - Git ref to count from + * @param {string} ref - Git ref to count from (e.g., 'HEAD', 'main', or a SHA) * @returns {Promise<number>} The count of reachable nodes + * @throws {Error} If not implemented by a concrete adapter */ async countNodes(_ref) { throw new Error('Not implemented'); } @@ -150,7 +184,8 @@ export default class GraphPersistencePort { /** * Reads a git config value.
 * @param {string} key - The config key to read (e.g., 'warp.writerId.events') - * @returns {Promise<string|null>} The config value or null if not set + * @returns {Promise<string|null>} The config value, or null if not set + * @throws {Error} If not implemented by a concrete adapter */ async configGet(_key) { throw new Error('Not implemented'); } @@ -161,6 +196,7 @@ export default class GraphPersistencePort { * @param {string} key - The config key to set (e.g., 'warp.writerId.events') * @param {string} value - The value to set * @returns {Promise<void>} + * @throws {Error} If not implemented by a concrete adapter */ async configSet(_key, _value) { throw new Error('Not implemented'); } } diff --git a/test/unit/domain/WarpGraph.autoCheckpoint.test.js b/test/unit/domain/WarpGraph.autoCheckpoint.test.js new file mode 100644 index 0000000..e68e8a7 --- /dev/null +++ b/test/unit/domain/WarpGraph.autoCheckpoint.test.js @@ -0,0 +1,479 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import WarpGraph from '../../../src/domain/WarpGraph.js'; +import { encode } from '../../../src/infrastructure/codecs/CborCodec.js'; +import { encodePatchMessage } from '../../../src/domain/services/WarpMessageCodec.js'; +import { createEmptyStateV5 } from '../../../src/domain/services/JoinReducer.js'; + +function createMockPersistence() { + return { + readRef: vi.fn(), + showNode: vi.fn(), + writeBlob: vi.fn(), + writeTree: vi.fn(), + readBlob: vi.fn(), + readTreeOids: vi.fn(), + commitNode: vi.fn(), + commitNodeWithTree: vi.fn(), + updateRef: vi.fn(), + listRefs: vi.fn().mockResolvedValue([]), + getNodeInfo: vi.fn(), + ping: vi.fn().mockResolvedValue({ ok: true, latencyMs: 1 }), + configGet: vi.fn().mockResolvedValue(null), + configSet: vi.fn().mockResolvedValue(undefined), + }; +} + +/** + * Creates a minimal schema:2 patch object.
+ */ +function createPatch(writer, lamport, nodeId) { + return { + schema: 2, + writer, + lamport, + context: { [writer]: lamport }, + ops: [{ type: 'NodeAdd', node: nodeId, dot: { writer, counter: lamport } }], + }; +} + +/** + * A fake 40-char hex SHA for use in tests. + */ +function fakeSha(label) { + const hex = Buffer.from(String(label)).toString('hex'); + return hex.padEnd(40, 'a').slice(0, 40); +} + +/** + * Builds a chain of N patch commits for a given writer. Sets up + * persistence mocks so that _loadWriterPatches walks the chain. + * + * Returns the tip SHA so it can be wired to readRef. + */ +function buildPatchChain(persistence, writer, count) { + const shas = []; + for (let i = 1; i <= count; i++) { + shas.push(fakeSha(`${writer}${i}`)); + } + + // getNodeInfo returns commit info (message + parents) + persistence.getNodeInfo.mockImplementation((querySha) => { + for (let j = 0; j < count; j++) { + if (querySha === shas[j]) { + const l = j + 1; + const po = fakeSha(`blob-${writer}-${l}`); + const m = encodePatchMessage({ + graph: 'test', + writer, + lamport: l, + patchOid: po, + schema: 2, + }); + const par = j < count - 1 ? [shas[j + 1]] : []; + return Promise.resolve({ message: m, parents: par }); + } + } + return Promise.resolve({ message: '', parents: [] }); + }); + + // readBlob returns CBOR for the patch + persistence.readBlob.mockImplementation((oid) => { + for (let j = 0; j < count; j++) { + const l = j + 1; + const po = fakeSha(`blob-${writer}-${l}`); + if (oid === po) { + const p = createPatch(writer, l, `n:${writer}:${l}`); + return Promise.resolve(encode(p)); + } + } + return Promise.resolve(Buffer.alloc(0)); + }); + + // tip is the newest commit (index 0) + return shas[0]; +} + +/** + * Helper: wire persistence mocks so materialize() discovers the given + * writer and walks its chain. No checkpoint is present. 
+ */ +function wirePersistenceForWriter(persistence, writer, tipSha) { + persistence.readRef.mockImplementation((ref) => { + if (ref === 'refs/empty-graph/test/checkpoints/head') { + return Promise.resolve(null); + } + if (ref === `refs/empty-graph/test/writers/${writer}`) { + return Promise.resolve(tipSha); + } + return Promise.resolve(null); + }); + persistence.listRefs.mockResolvedValue([ + `refs/empty-graph/test/writers/${writer}`, + ]); +} + +describe('AP/CKPT/3: auto-checkpoint in materialize() path', () => { + let persistence; + + beforeEach(() => { + persistence = createMockPersistence(); + }); + + // -------------------------------------------------------------------------- + // 1. Trigger at threshold + // -------------------------------------------------------------------------- + it('calls createCheckpoint when patchCount >= policy.every', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 3 }, + }); + + const tipSha = buildPatchChain(persistence, 'w1', 3); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + const spy = vi + .spyOn(graph, 'createCheckpoint') + .mockResolvedValue(fakeSha('ckpt')); + + await graph.materialize(); + + expect(spy).toHaveBeenCalledOnce(); + }); + + // -------------------------------------------------------------------------- + // 2. 
Does NOT trigger below threshold + // -------------------------------------------------------------------------- + it('does NOT call createCheckpoint when patchCount < policy.every', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 5 }, + }); + + const tipSha = buildPatchChain(persistence, 'w1', 3); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + const spy = vi + .spyOn(graph, 'createCheckpoint') + .mockResolvedValue(fakeSha('ckpt')); + + await graph.materialize(); + + expect(spy).not.toHaveBeenCalled(); + }); + + // -------------------------------------------------------------------------- + // 3. Counter resets to 0 after successful checkpoint + // -------------------------------------------------------------------------- + it('resets _patchesSinceCheckpoint to 0 after auto-checkpoint', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 3 }, + }); + + const tipSha = buildPatchChain(persistence, 'w1', 5); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + vi.spyOn(graph, 'createCheckpoint').mockResolvedValue(fakeSha('ckpt')); + + await graph.materialize(); + + expect(graph._patchesSinceCheckpoint).toBe(0); + }); + + // -------------------------------------------------------------------------- + // 4. 
Checkpoint failure does not break materialize + // -------------------------------------------------------------------------- + it('materialize resolves even when createCheckpoint rejects', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 3 }, + }); + + const tipSha = buildPatchChain(persistence, 'w1', 4); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + vi.spyOn(graph, 'createCheckpoint').mockRejectedValue( + new Error('disk full') + ); + + const state = await graph.materialize(); + + // materialize returns a valid state despite checkpoint failure + expect(state).toBeDefined(); + expect(state.nodeAlive).toBeDefined(); + }); + + it('state is correct even when auto-checkpoint throws', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 2 }, + }); + + const tipSha = buildPatchChain(persistence, 'w1', 3); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + vi.spyOn(graph, 'createCheckpoint').mockRejectedValue( + new Error('transient failure') + ); + + const state = await graph.materialize(); + + // All 3 nodes should be alive in the materialized state + const nodeIds = [...state.nodeAlive.entries.keys()]; + expect(nodeIds).toHaveLength(3); + expect(nodeIds).toContain('n:w1:1'); + expect(nodeIds).toContain('n:w1:2'); + expect(nodeIds).toContain('n:w1:3'); + }); + + // -------------------------------------------------------------------------- + // 5. 
Counter is NOT reset when checkpoint fails + // -------------------------------------------------------------------------- + it('_patchesSinceCheckpoint retains patchCount when checkpoint fails', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 3 }, + }); + + const tipSha = buildPatchChain(persistence, 'w1', 4); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + vi.spyOn(graph, 'createCheckpoint').mockRejectedValue( + new Error('fail') + ); + + await graph.materialize(); + + // The counter should remain at the patchCount since checkpoint failed + expect(graph._patchesSinceCheckpoint).toBe(4); + }); + + // -------------------------------------------------------------------------- + // 6. No policy → no checkpoint + // -------------------------------------------------------------------------- + it('never calls createCheckpoint when no checkpointPolicy is set', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + // no checkpointPolicy + }); + + const tipSha = buildPatchChain(persistence, 'w1', 10); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + const spy = vi + .spyOn(graph, 'createCheckpoint') + .mockResolvedValue(fakeSha('ckpt')); + + await graph.materialize(); + + expect(spy).not.toHaveBeenCalled(); + }); + + // -------------------------------------------------------------------------- + // 7. 
Exact threshold triggers + // -------------------------------------------------------------------------- + it('triggers at exactly the threshold (every: 5, patches: 5)', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 5 }, + }); + + const tipSha = buildPatchChain(persistence, 'w1', 5); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + const spy = vi + .spyOn(graph, 'createCheckpoint') + .mockResolvedValue(fakeSha('ckpt')); + + await graph.materialize(); + + expect(spy).toHaveBeenCalledOnce(); + }); + + // -------------------------------------------------------------------------- + // 8. Above threshold also triggers + // -------------------------------------------------------------------------- + it('triggers when patchCount exceeds the threshold', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 3 }, + }); + + const tipSha = buildPatchChain(persistence, 'w1', 7); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + const spy = vi + .spyOn(graph, 'createCheckpoint') + .mockResolvedValue(fakeSha('ckpt')); + + await graph.materialize(); + + expect(spy).toHaveBeenCalledOnce(); + }); + + // -------------------------------------------------------------------------- + // 9. 
Zero patches → no checkpoint even with policy + // -------------------------------------------------------------------------- + it('does not trigger checkpoint when no patches exist', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 1 }, + }); + + // No writers → 0 patches + persistence.readRef.mockResolvedValue(null); + + const spy = vi + .spyOn(graph, 'createCheckpoint') + .mockResolvedValue(fakeSha('ckpt')); + + await graph.materialize(); + + expect(spy).not.toHaveBeenCalled(); + expect(graph._patchesSinceCheckpoint).toBe(0); + }); + + // -------------------------------------------------------------------------- + // 10. every: 1 triggers on a single patch + // -------------------------------------------------------------------------- + it('every: 1 triggers auto-checkpoint on a single patch', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 1 }, + }); + + const tipSha = buildPatchChain(persistence, 'w1', 1); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + const spy = vi + .spyOn(graph, 'createCheckpoint') + .mockResolvedValue(fakeSha('ckpt')); + + await graph.materialize(); + + expect(spy).toHaveBeenCalledOnce(); + expect(graph._patchesSinceCheckpoint).toBe(0); + }); + + // -------------------------------------------------------------------------- + // 11. 
Incremental (checkpoint-based) materialize also triggers + // -------------------------------------------------------------------------- + it('triggers auto-checkpoint after incremental materialize from checkpoint', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 3 }, + }); + + const checkpointState = createEmptyStateV5(); + + // Build 4 fake patch objects for _loadPatchesSince + const patches = []; + for (let i = 1; i <= 4; i++) { + patches.push({ + patch: createPatch('w1', i, `n:w1:${i}`), + sha: fakeSha(i), + }); + } + + vi.spyOn(graph, '_loadLatestCheckpoint').mockResolvedValue({ + schema: 2, + state: checkpointState, + frontier: {}, + }); + vi.spyOn(graph, '_loadPatchesSince').mockResolvedValue(patches); + + const spy = vi + .spyOn(graph, 'createCheckpoint') + .mockResolvedValue(fakeSha('ckpt')); + + await graph.materialize(); + + expect(spy).toHaveBeenCalledOnce(); + expect(graph._patchesSinceCheckpoint).toBe(0); + }); + + it('does NOT trigger after incremental materialize below threshold', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 10 }, + }); + + const checkpointState = createEmptyStateV5(); + + const patches = []; + for (let i = 1; i <= 3; i++) { + patches.push({ + patch: createPatch('w1', i, `n:w1:${i}`), + sha: fakeSha(i), + }); + } + + vi.spyOn(graph, '_loadLatestCheckpoint').mockResolvedValue({ + schema: 2, + state: checkpointState, + frontier: {}, + }); + vi.spyOn(graph, '_loadPatchesSince').mockResolvedValue(patches); + + const spy = vi + .spyOn(graph, 'createCheckpoint') + .mockResolvedValue(fakeSha('ckpt')); + + await graph.materialize(); + + expect(spy).not.toHaveBeenCalled(); + expect(graph._patchesSinceCheckpoint).toBe(3); + }); + + // -------------------------------------------------------------------------- + // 12. 
Return value of materialize is the state, not the checkpoint + // -------------------------------------------------------------------------- + it('materialize returns the state, not the checkpoint SHA', async () => { + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + checkpointPolicy: { every: 2 }, + }); + + const tipSha = buildPatchChain(persistence, 'w1', 3); + wirePersistenceForWriter(persistence, 'w1', tipSha); + + vi.spyOn(graph, 'createCheckpoint').mockResolvedValue(fakeSha('ckpt')); + + const state = await graph.materialize(); + + // Should return a WarpStateV5, not a SHA string + expect(typeof state).toBe('object'); + expect(state.nodeAlive).toBeDefined(); + expect(state.edgeAlive).toBeDefined(); + expect(state.prop).toBeDefined(); + expect(state.observedFrontier).toBeDefined(); + }); +}); diff --git a/test/unit/domain/WarpGraph.autoMaterialize.test.js b/test/unit/domain/WarpGraph.autoMaterialize.test.js new file mode 100644 index 0000000..70000af --- /dev/null +++ b/test/unit/domain/WarpGraph.autoMaterialize.test.js @@ -0,0 +1,99 @@ +import { describe, it, expect, vi } from 'vitest'; +import WarpGraph from '../../../src/domain/WarpGraph.js'; + +function createMockPersistence() { + return { + readRef: vi.fn(), + showNode: vi.fn(), + writeBlob: vi.fn(), + writeTree: vi.fn(), + readBlob: vi.fn(), + readTreeOids: vi.fn(), + commitNode: vi.fn(), + commitNodeWithTree: vi.fn(), + updateRef: vi.fn(), + listRefs: vi.fn().mockResolvedValue([]), + getNodeInfo: vi.fn(), + ping: vi.fn().mockResolvedValue({ ok: true, latencyMs: 1 }), + configGet: vi.fn().mockResolvedValue(null), + configSet: vi.fn().mockResolvedValue(undefined), + }; +} + +describe('WarpGraph autoMaterialize option (AP/LAZY/1)', () => { + it('stores flag when opened with autoMaterialize: true', async () => { + const graph = await WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: true, + }); 
+ + expect(graph._autoMaterialize).toBe(true); + }); + + it('stores flag when opened with autoMaterialize: false', async () => { + const graph = await WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: false, + }); + + expect(graph._autoMaterialize).toBe(false); + }); + + it('defaults to false when autoMaterialize is not provided', async () => { + const graph = await WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + }); + + expect(graph._autoMaterialize).toBe(false); + }); + + it('defaults to false when autoMaterialize is explicitly undefined', async () => { + const graph = await WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: undefined, + }); + + expect(graph._autoMaterialize).toBe(false); + }); + + it('rejects autoMaterialize: "yes" (string)', async () => { + await expect( + WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: 'yes', + }), + ).rejects.toThrow('autoMaterialize must be a boolean'); + }); + + it('rejects autoMaterialize: 1 (number)', async () => { + await expect( + WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: 1, + }), + ).rejects.toThrow('autoMaterialize must be a boolean'); + }); + + it('rejects autoMaterialize: null', async () => { + await expect( + WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: null, + }), + ).rejects.toThrow('autoMaterialize must be a boolean'); + }); +}); diff --git a/test/unit/domain/WarpGraph.checkpointPolicy.test.js b/test/unit/domain/WarpGraph.checkpointPolicy.test.js new file mode 100644 index 0000000..0bd091e --- /dev/null +++ b/test/unit/domain/WarpGraph.checkpointPolicy.test.js @@ -0,0 +1,121 @@ +import { 
describe, it, expect, vi } from 'vitest'; +import WarpGraph from '../../../src/domain/WarpGraph.js'; + +function createMockPersistence() { + return { + readRef: vi.fn(), + showNode: vi.fn(), + writeBlob: vi.fn(), + writeTree: vi.fn(), + readBlob: vi.fn(), + readTreeOids: vi.fn(), + commitNode: vi.fn(), + commitNodeWithTree: vi.fn(), + updateRef: vi.fn(), + listRefs: vi.fn().mockResolvedValue([]), + getNodeInfo: vi.fn(), + ping: vi.fn().mockResolvedValue({ ok: true, latencyMs: 1 }), + configGet: vi.fn().mockResolvedValue(null), + configSet: vi.fn().mockResolvedValue(undefined), + }; +} + +describe('WarpGraph checkpointPolicy (AP/CKPT/1)', () => { + it('stores checkpointPolicy when opened with { every: 500 }', async () => { + const graph = await WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + checkpointPolicy: { every: 500 }, + }); + + expect(graph._checkpointPolicy).toEqual({ every: 500 }); + }); + + it('accepts minimum valid value { every: 1 }', async () => { + const graph = await WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + checkpointPolicy: { every: 1 }, + }); + + expect(graph._checkpointPolicy).toEqual({ every: 1 }); + }); + + it('defaults _checkpointPolicy to null when not provided', async () => { + const graph = await WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + }); + + expect(graph._checkpointPolicy).toBeNull(); + }); + + it('rejects every: 0', async () => { + await expect( + WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + checkpointPolicy: { every: 0 }, + }) + ).rejects.toThrow('checkpointPolicy.every must be a positive integer'); + }); + + it('rejects every: -1', async () => { + await expect( + WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + checkpointPolicy: { every: -1 }, + 
}) + ).rejects.toThrow('checkpointPolicy.every must be a positive integer'); + }); + + it('rejects every: "foo" (non-integer string)', async () => { + await expect( + WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + checkpointPolicy: { every: 'foo' }, + }) + ).rejects.toThrow('checkpointPolicy.every must be a positive integer'); + }); + + it('rejects every: 1.5 (non-integer float)', async () => { + await expect( + WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + checkpointPolicy: { every: 1.5 }, + }) + ).rejects.toThrow('checkpointPolicy.every must be a positive integer'); + }); + + it('rejects checkpointPolicy that is not an object', async () => { + await expect( + WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + checkpointPolicy: 'auto', + }) + ).rejects.toThrow('checkpointPolicy must be an object with { every: number }'); + }); + + it('treats checkpointPolicy: null as no policy', async () => { + const graph = await WarpGraph.open({ + persistence: createMockPersistence(), + graphName: 'test', + writerId: 'writer-1', + checkpointPolicy: null, + }); + + expect(graph._checkpointPolicy).toBeNull(); + }); +}); diff --git a/test/unit/domain/WarpGraph.invalidation.test.js b/test/unit/domain/WarpGraph.invalidation.test.js new file mode 100644 index 0000000..1e81147 --- /dev/null +++ b/test/unit/domain/WarpGraph.invalidation.test.js @@ -0,0 +1,202 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import WarpGraph from '../../../src/domain/WarpGraph.js'; +import { encodePatchMessage } from '../../../src/domain/services/WarpMessageCodec.js'; + +/** + * AP/INVAL/1 + AP/INVAL/2 — Dirty flag tracking and eager re-materialize. 
+ * + * The _stateDirty flag tracks whether the materialized state is stale: + * - Starts false after construction + * - Set to false by _setMaterializedState() (called from materialize()) + * - After commit with _cachedState: patch applied eagerly, stays false + * - After commit without _cachedState: set to true (can't eagerly apply) + */ + +const FAKE_BLOB_OID = 'a'.repeat(40); +const FAKE_TREE_OID = 'b'.repeat(40); +const FAKE_COMMIT_SHA = 'c'.repeat(40); +const FAKE_COMMIT_SHA_2 = 'd'.repeat(40); + +function createMockPersistence() { + return { + readRef: vi.fn(), + showNode: vi.fn(), + writeBlob: vi.fn(), + writeTree: vi.fn(), + readBlob: vi.fn(), + readTreeOids: vi.fn(), + commitNode: vi.fn(), + commitNodeWithTree: vi.fn(), + updateRef: vi.fn(), + listRefs: vi.fn().mockResolvedValue([]), + getNodeInfo: vi.fn(), + ping: vi.fn().mockResolvedValue({ ok: true, latencyMs: 1 }), + configGet: vi.fn().mockResolvedValue(null), + configSet: vi.fn().mockResolvedValue(undefined), + }; +} + +/** + * Configure the mock persistence so that a single createPatch().addNode().commit() + * succeeds for a first-time writer (no existing ref). + */ +function mockFirstCommit(persistence) { + persistence.readRef.mockResolvedValue(null); + persistence.writeBlob.mockResolvedValue(FAKE_BLOB_OID); + persistence.writeTree.mockResolvedValue(FAKE_TREE_OID); + persistence.commitNodeWithTree.mockResolvedValue(FAKE_COMMIT_SHA); + persistence.updateRef.mockResolvedValue(undefined); +} + +/** + * After the first commit, the writer ref points to FAKE_COMMIT_SHA. + * Configure mocks so a second commit succeeds. 
+ */ +function mockSecondCommit(persistence) { + const patchMessage = encodePatchMessage({ + graph: 'test', + writer: 'writer-1', + lamport: 1, + patchOid: FAKE_BLOB_OID, + schema: 2, + }); + + persistence.readRef.mockResolvedValue(FAKE_COMMIT_SHA); + persistence.showNode.mockResolvedValue(patchMessage); + persistence.writeBlob.mockResolvedValue(FAKE_BLOB_OID); + persistence.writeTree.mockResolvedValue(FAKE_TREE_OID); + persistence.commitNodeWithTree.mockResolvedValue(FAKE_COMMIT_SHA_2); + persistence.updateRef.mockResolvedValue(undefined); +} + +describe('WarpGraph dirty flag + eager re-materialize (AP/INVAL/1 + AP/INVAL/2)', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + }); + }); + + // ── AP/INVAL/1: Basic dirty flag ────────────────────────────────── + + it('_stateDirty is false after construction', () => { + expect(graph._stateDirty).toBe(false); + }); + + it('_stateDirty is false after materialize()', async () => { + await graph.materialize(); + expect(graph._stateDirty).toBe(false); + }); + + // ── AP/INVAL/2: Eager re-materialize on commit ──────────────────── + + it('_stateDirty stays false after commit when _cachedState exists (eager re-materialize)', async () => { + await graph.materialize(); + expect(graph._stateDirty).toBe(false); + + mockFirstCommit(persistence); + await (await graph.createPatch()).addNode('test:node').commit(); + + // Eager re-materialize applied the patch, so state is fresh + expect(graph._stateDirty).toBe(false); + }); + + it('hasNode returns true after commit without explicit re-materialize', async () => { + await graph.materialize(); + + mockFirstCommit(persistence); + await (await graph.createPatch()).addNode('test:node').commit(); + + // Query reflects the commit immediately + expect(await graph.hasNode('test:node')).toBe(true); + }); + + it('getNodeProps returns updated 
properties after commit', async () => { + await graph.materialize(); + + mockFirstCommit(persistence); + await (await graph.createPatch()) + .addNode('test:node') + .setProperty('test:node', 'name', 'Alice') + .commit(); + + const props = await graph.getNodeProps('test:node'); + expect(props).not.toBeNull(); + expect(props.get('name')).toBe('Alice'); + }); + + it('multiple sequential commits with _cachedState keep state fresh', async () => { + await graph.materialize(); + + mockFirstCommit(persistence); + await (await graph.createPatch()).addNode('test:a').commit(); + expect(graph._stateDirty).toBe(false); + expect(await graph.hasNode('test:a')).toBe(true); + + mockSecondCommit(persistence); + await (await graph.createPatch()).addNode('test:b').commit(); + expect(graph._stateDirty).toBe(false); + expect(await graph.hasNode('test:b')).toBe(true); + }); + + // ── AP/INVAL/1: Dirty flag when _cachedState is null ────────────── + + it('_stateDirty is true after commit without prior materialize', async () => { + mockFirstCommit(persistence); + await (await graph.createPatch()).addNode('test:node').commit(); + + // No _cachedState, so can't eagerly apply — dirty + expect(graph._stateDirty).toBe(true); + }); + + it('multiple commits without materialize keep _stateDirty true', async () => { + mockFirstCommit(persistence); + await (await graph.createPatch()).addNode('test:a').commit(); + expect(graph._stateDirty).toBe(true); + + mockSecondCommit(persistence); + await (await graph.createPatch()).addNode('test:b').commit(); + expect(graph._stateDirty).toBe(true); + }); + + // ── Edge cases: failed commits ───────────────────────────────────── + + it('_stateDirty remains false if commit fails (writeBlob rejects)', async () => { + persistence.readRef.mockResolvedValue(null); + persistence.writeBlob.mockRejectedValue(new Error('disk full')); + + const patch = (await graph.createPatch()).addNode('test:node'); + await expect(patch.commit()).rejects.toThrow('disk full'); + + 
expect(graph._stateDirty).toBe(false); + }); + + it('_stateDirty remains false if updateRef fails', async () => { + persistence.readRef.mockResolvedValue(null); + persistence.writeBlob.mockResolvedValue(FAKE_BLOB_OID); + persistence.writeTree.mockResolvedValue(FAKE_TREE_OID); + persistence.commitNodeWithTree.mockResolvedValue(FAKE_COMMIT_SHA); + persistence.updateRef.mockRejectedValue(new Error('ref lock failed')); + + const patch = (await graph.createPatch()).addNode('test:node'); + await expect(patch.commit()).rejects.toThrow('ref lock failed'); + + expect(graph._stateDirty).toBe(false); + }); + + it('_stateDirty remains false if race detection rejects', async () => { + persistence.readRef + .mockResolvedValueOnce(null) + .mockResolvedValueOnce(FAKE_COMMIT_SHA); + + const patch = (await graph.createPatch()).addNode('test:node'); + await expect(patch.commit()).rejects.toThrow('Concurrent commit detected'); + + expect(graph._stateDirty).toBe(false); + }); +}); diff --git a/test/unit/domain/WarpGraph.lazyMaterialize.test.js b/test/unit/domain/WarpGraph.lazyMaterialize.test.js new file mode 100644 index 0000000..0902727 --- /dev/null +++ b/test/unit/domain/WarpGraph.lazyMaterialize.test.js @@ -0,0 +1,670 @@ +/** + * AP/LAZY/2 — Guard query methods with auto-materialize. + * + * When autoMaterialize === true and _cachedState is null or _stateDirty === true, + * query methods should call materialize() before returning results. + * When autoMaterialize === false, preserve current behavior (throw if no cached state). + * + * Tests cover: + * 1. Fresh open + query with autoMaterialize -> results returned + * 2. Dirty state + query -> auto-rematerializes -> fresh results + * 3. autoMaterialize off + null state -> throws + * 4. autoMaterialize off + materialize -> current behavior unchanged + * 5. All query methods respect autoMaterialize + * 6. query().run() works with autoMaterialize + * 7. Concurrent auto-materialize calls (stretch goal) + * 8. 
traverse methods work with autoMaterialize + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import WarpGraph from '../../../src/domain/WarpGraph.js'; +import QueryError from '../../../src/domain/errors/QueryError.js'; +import { encodePatchMessage } from '../../../src/domain/services/WarpMessageCodec.js'; +import { createEmptyStateV5, encodeEdgeKey, encodePropKey } from '../../../src/domain/services/JoinReducer.js'; +import { orsetAdd } from '../../../src/domain/crdt/ORSet.js'; +import { createDot } from '../../../src/domain/crdt/Dot.js'; + +const FAKE_BLOB_OID = 'a'.repeat(40); +const FAKE_TREE_OID = 'b'.repeat(40); +const FAKE_COMMIT_SHA = 'c'.repeat(40); + +function createMockPersistence() { + return { + readRef: vi.fn(), + showNode: vi.fn(), + writeBlob: vi.fn(), + writeTree: vi.fn(), + readBlob: vi.fn(), + readTreeOids: vi.fn(), + commitNode: vi.fn(), + commitNodeWithTree: vi.fn(), + updateRef: vi.fn(), + listRefs: vi.fn().mockResolvedValue([]), + getNodeInfo: vi.fn(), + ping: vi.fn().mockResolvedValue({ ok: true, latencyMs: 1 }), + configGet: vi.fn().mockResolvedValue(null), + configSet: vi.fn().mockResolvedValue(undefined), + }; +} + +/** + * Configure mock persistence so a first-time writer commit succeeds. + */ +function mockFirstCommit(persistence) { + persistence.readRef.mockResolvedValue(null); + persistence.writeBlob.mockResolvedValue(FAKE_BLOB_OID); + persistence.writeTree.mockResolvedValue(FAKE_TREE_OID); + persistence.commitNodeWithTree.mockResolvedValue(FAKE_COMMIT_SHA); + persistence.updateRef.mockResolvedValue(undefined); +} + +// ──────────────────────────────────────────────────────────────────────────── +// 1. Fresh open -> query with autoMaterialize -> results returned +// ──────────────────────────────────────────────────────────────────────────── + +describe('AP/LAZY/2: auto-materialize guards on query methods', () => { + describe('1. 
Fresh open with autoMaterialize: true -> query returns results', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: true, + }); + }); + + it('getNodes() returns empty array without explicit materialize()', async () => { + const nodes = await graph.getNodes(); + expect(nodes).toEqual([]); + }); + + it('hasNode() returns false without explicit materialize()', async () => { + const result = await graph.hasNode('test:x'); + expect(result).toBe(false); + }); + + it('getEdges() returns empty array without explicit materialize()', async () => { + const edges = await graph.getEdges(); + expect(edges).toEqual([]); + }); + + it('getNodeProps() returns null for non-existent node without explicit materialize()', async () => { + const props = await graph.getNodeProps('test:x'); + expect(props).toBe(null); + }); + + it('neighbors() returns empty array without explicit materialize()', async () => { + const result = await graph.neighbors('test:x'); + expect(result).toEqual([]); + }); + + it('_cachedState is populated after first query triggers auto-materialize', async () => { + expect(graph._cachedState).toBe(null); + await graph.getNodes(); + expect(graph._cachedState).not.toBe(null); + }); + }); + + // ──────────────────────────────────────────────────────────────────────── + // 2. Dirty state -> query -> auto-rematerializes -> fresh results + // ──────────────────────────────────────────────────────────────────────── + + describe('2. 
Dirty state triggers auto-rematerialization on query', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: true, + }); + }); + + it('eagerly applied commit keeps state clean, hasNode returns true', async () => { + // First materialize (empty state) + await graph.materialize(); + + // Commit a node — with _cachedState present, eager apply works + mockFirstCommit(persistence); + await (await graph.createPatch()).addNode('test:node').commit(); + + // State should still be fresh (eager re-materialize) + expect(graph._stateDirty).toBe(false); + expect(await graph.hasNode('test:node')).toBe(true); + }); + + it('dirty state auto-rematerializes on hasNode query', async () => { + // First materialize (empty state) + await graph.materialize(); + + // Commit a node eagerly + mockFirstCommit(persistence); + await (await graph.createPatch()).addNode('test:node').commit(); + + // Manually mark dirty to simulate external change + graph._stateDirty = true; + + // Mock listRefs to return the writer ref for rematerialization + const patchMessage = encodePatchMessage({ + graph: 'test', + writer: 'writer-1', + lamport: 1, + patchOid: FAKE_BLOB_OID, + schema: 2, + }); + persistence.listRefs.mockResolvedValue([ + 'refs/warp/test/writers/writer-1', + ]); + persistence.showNode.mockResolvedValue(patchMessage); + persistence.readBlob.mockResolvedValue( + // Empty patch ops — we just need the codec to not blow up + Buffer.from([0x80]), // CBOR empty array + ); + persistence.getNodeInfo.mockResolvedValue({ + parents: [], + message: patchMessage, + }); + + // Query should trigger auto-rematerialize (not throw) + const result = await graph.hasNode('test:node'); + expect(typeof result).toBe('boolean'); + }); + + it('auto-materialize is triggered when _stateDirty is true', async () => { + await graph.materialize(); + 
graph._stateDirty = true; + + const materializeSpy = vi.spyOn(graph, 'materialize'); + + await graph.getNodes(); + + expect(materializeSpy).toHaveBeenCalled(); + }); + + it('auto-materialize is triggered when _cachedState is null', async () => { + // Don't call materialize — _cachedState is null + const materializeSpy = vi.spyOn(graph, 'materialize'); + + await graph.getNodes(); + + expect(materializeSpy).toHaveBeenCalled(); + }); + + it('auto-materialize is NOT triggered when state is clean', async () => { + await graph.materialize(); + expect(graph._stateDirty).toBe(false); + expect(graph._cachedState).not.toBe(null); + + const materializeSpy = vi.spyOn(graph, 'materialize'); + + await graph.getNodes(); + + expect(materializeSpy).not.toHaveBeenCalled(); + }); + }); + + // ──────────────────────────────────────────────────────────────────────── + // 3. autoMaterialize off -> null state -> throws + // ──────────────────────────────────────────────────────────────────────── + + describe('3. 
autoMaterialize: false -> null state -> throws', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: false, + }); + }); + + it('hasNode throws without prior materialize()', async () => { + await expect(graph.hasNode('test:x')).rejects.toThrow(QueryError); + }); + + it('getNodes throws without prior materialize()', async () => { + await expect(graph.getNodes()).rejects.toThrow(QueryError); + }); + + it('getEdges throws without prior materialize()', async () => { + await expect(graph.getEdges()).rejects.toThrow(QueryError); + }); + + it('getNodeProps throws without prior materialize()', async () => { + await expect(graph.getNodeProps('test:x')).rejects.toThrow(QueryError); + }); + + it('neighbors throws without prior materialize()', async () => { + await expect(graph.neighbors('test:x')).rejects.toThrow(QueryError); + }); + }); + + // ──────────────────────────────────────────────────────────────────────── + // 4. autoMaterialize off -> materialize -> current behavior unchanged + // ──────────────────────────────────────────────────────────────────────── + + describe('4. 
autoMaterialize: false -> explicit materialize -> normal query behavior', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: false, + }); + }); + + it('getNodes works after explicit materialize()', async () => { + await graph.materialize(); + const nodes = await graph.getNodes(); + expect(nodes).toEqual([]); + }); + + it('hasNode works after explicit materialize()', async () => { + await graph.materialize(); + const result = await graph.hasNode('test:x'); + expect(result).toBe(false); + }); + + it('getEdges works after explicit materialize()', async () => { + await graph.materialize(); + const edges = await graph.getEdges(); + expect(edges).toEqual([]); + }); + + it('getNodeProps returns null for absent node after materialize()', async () => { + await graph.materialize(); + const props = await graph.getNodeProps('test:x'); + expect(props).toBe(null); + }); + + it('neighbors returns empty after materialize()', async () => { + await graph.materialize(); + const result = await graph.neighbors('test:x'); + expect(result).toEqual([]); + }); + + it('querying state with data works after materialize + manual seed', async () => { + await graph.materialize(); + const state = graph._cachedState; + orsetAdd(state.nodeAlive, 'test:alice', createDot('w1', 1)); + + expect(await graph.hasNode('test:alice')).toBe(true); + expect(await graph.getNodes()).toContain('test:alice'); + }); + }); + + // ──────────────────────────────────────────────────────────────────────── + // 5. All query methods respect autoMaterialize + // ──────────────────────────────────────────────────────────────────────── + + describe('5. 
All query methods respect autoMaterialize: true', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: true, + }); + }); + + it('hasNode auto-materializes and returns result', async () => { + expect(graph._cachedState).toBe(null); + const result = await graph.hasNode('test:x'); + expect(result).toBe(false); + expect(graph._cachedState).not.toBe(null); + }); + + it('getNodeProps auto-materializes and returns result', async () => { + expect(graph._cachedState).toBe(null); + const result = await graph.getNodeProps('test:x'); + expect(result).toBe(null); + expect(graph._cachedState).not.toBe(null); + }); + + it('neighbors auto-materializes and returns result', async () => { + expect(graph._cachedState).toBe(null); + const result = await graph.neighbors('test:x'); + expect(result).toEqual([]); + expect(graph._cachedState).not.toBe(null); + }); + + it('getNodes auto-materializes and returns result', async () => { + expect(graph._cachedState).toBe(null); + const result = await graph.getNodes(); + expect(result).toEqual([]); + expect(graph._cachedState).not.toBe(null); + }); + + it('getEdges auto-materializes and returns result', async () => { + expect(graph._cachedState).toBe(null); + const result = await graph.getEdges(); + expect(result).toEqual([]); + expect(graph._cachedState).not.toBe(null); + }); + + it('all methods return consistent data from auto-materialized state', async () => { + // First call triggers materialize; seed state for subsequent calls + await graph.getNodes(); + const state = graph._cachedState; + + // Seed data + orsetAdd(state.nodeAlive, 'test:alice', createDot('w1', 1)); + orsetAdd(state.nodeAlive, 'test:bob', createDot('w1', 2)); + orsetAdd(state.edgeAlive, encodeEdgeKey('test:alice', 'test:bob', 'knows'), createDot('w1', 3)); + const propKey = encodePropKey('test:alice', 
'name'); + state.prop.set(propKey, { value: 'Alice', lamport: 1, writerId: 'w1' }); + + // All methods work without re-materializing (state is clean) + expect(await graph.hasNode('test:alice')).toBe(true); + expect(await graph.hasNode('test:bob')).toBe(true); + expect(await graph.hasNode('test:nonexistent')).toBe(false); + + const nodes = await graph.getNodes(); + expect(nodes).toContain('test:alice'); + expect(nodes).toContain('test:bob'); + + const edges = await graph.getEdges(); + expect(edges).toHaveLength(1); + expect(edges[0]).toEqual({ from: 'test:alice', to: 'test:bob', label: 'knows' }); + + const props = await graph.getNodeProps('test:alice'); + expect(props.get('name')).toBe('Alice'); + + const outgoing = await graph.neighbors('test:alice', 'outgoing'); + expect(outgoing).toHaveLength(1); + expect(outgoing[0].nodeId).toBe('test:bob'); + + const incoming = await graph.neighbors('test:bob', 'incoming'); + expect(incoming).toHaveLength(1); + expect(incoming[0].nodeId).toBe('test:alice'); + }); + }); + + // ──────────────────────────────────────────────────────────────────────── + // 6. query().run() works with autoMaterialize + // ──────────────────────────────────────────────────────────────────────── + + describe('6. 
query().run() works with autoMaterialize: true', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: true, + }); + }); + + it('query().match("*").run() does not throw on null state', async () => { + const result = await graph.query().match('*').run(); + expect(result).toBeDefined(); + expect(result.nodes).toEqual([]); + }); + + it('query().match("test:*").run() does not throw on null state', async () => { + const result = await graph.query().match('test:*').run(); + expect(result).toBeDefined(); + expect(result.nodes).toEqual([]); + }); + + it('query().run() returns data after auto-materialize + seed', async () => { + // query().run() calls _materializeGraph() which calls materialize(). + // We need to mock materialize to return a pre-seeded state so it + // does not get overwritten on each call (same pattern as queryBuilder tests). + const state = createEmptyStateV5(); + orsetAdd(state.nodeAlive, 'test:alice', createDot('w1', 1)); + orsetAdd(state.nodeAlive, 'test:bob', createDot('w1', 2)); + orsetAdd(state.edgeAlive, encodeEdgeKey('test:alice', 'test:bob', 'follows'), createDot('w1', 3)); + + graph._cachedState = state; + graph.materialize = vi.fn().mockResolvedValue(state); + + const result = await graph.query().match('test:alice').outgoing().run(); + expect(result.nodes).toEqual([{ id: 'test:bob' }]); + }); + + it('query().run() auto-materializes when state is null', async () => { + expect(graph._cachedState).toBe(null); + const result = await graph.query().match('*').run(); + expect(graph._cachedState).not.toBe(null); + expect(result.nodes).toEqual([]); + }); + }); + + // ──────────────────────────────────────────────────────────────────────── + // 7. Concurrent auto-materialize calls + // ──────────────────────────────────────────────────────────────────────── + + describe('7. 
Concurrent auto-materialize calls', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: true, + }); + }); + + it('concurrent queries all resolve without errors', async () => { + const [nodes, edges, hasNode, props, neighbors] = await Promise.all([ + graph.getNodes(), + graph.getEdges(), + graph.hasNode('test:x'), + graph.getNodeProps('test:x'), + graph.neighbors('test:x'), + ]); + + expect(nodes).toEqual([]); + expect(edges).toEqual([]); + expect(hasNode).toBe(false); + expect(props).toBe(null); + expect(neighbors).toEqual([]); + }); + + it('materialize is called when state is null, regardless of concurrent callers', async () => { + const materializeSpy = vi.spyOn(graph, 'materialize'); + + await Promise.all([graph.getNodes(), graph.hasNode('test:x')]); + + // materialize should have been called (at least once, possibly twice + // if there is no coalescing). The important thing is no errors. + expect(materializeSpy).toHaveBeenCalled(); + }); + }); + + // ──────────────────────────────────────────────────────────────────────── + // 8. traverse methods work with autoMaterialize + // ──────────────────────────────────────────────────────────────────────── + + describe('8. 
traverse methods work with autoMaterialize: true', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: true, + }); + }); + + it('traverse.bfs does not throw on null state (node not found is OK)', async () => { + // bfs will auto-materialize, then throw NODE_NOT_FOUND for absent start node + // which is the expected behavior — not a "no cached state" error + await expect(graph.traverse.bfs('test:x')).rejects.toThrow('Start node not found'); + }); + + it('traverse.dfs does not throw on null state (node not found is OK)', async () => { + await expect(graph.traverse.dfs('test:x')).rejects.toThrow('Start node not found'); + }); + + it('traverse.shortestPath does not throw on null state (node not found is OK)', async () => { + await expect( + graph.traverse.shortestPath('test:x', 'test:y'), + ).rejects.toThrow('Start node not found'); + }); + + it('traverse.connectedComponent does not throw on null state (node not found is OK)', async () => { + await expect( + graph.traverse.connectedComponent('test:x'), + ).rejects.toThrow('Start node not found'); + }); + + it('traverse.bfs works with seeded data after auto-materialize', async () => { + // traverse._prepare() calls _materializeGraph() -> materialize(), so + // we mock materialize to return a pre-seeded state (same pattern as traverse tests). 
+ const state = createEmptyStateV5(); + orsetAdd(state.nodeAlive, 'test:a', createDot('w1', 1)); + orsetAdd(state.nodeAlive, 'test:b', createDot('w1', 2)); + orsetAdd(state.nodeAlive, 'test:c', createDot('w1', 3)); + orsetAdd(state.edgeAlive, encodeEdgeKey('test:a', 'test:b', 'x'), createDot('w1', 4)); + orsetAdd(state.edgeAlive, encodeEdgeKey('test:b', 'test:c', 'x'), createDot('w1', 5)); + + graph._cachedState = state; + graph.materialize = vi.fn().mockResolvedValue(state); + + const result = await graph.traverse.bfs('test:a', { dir: 'out' }); + expect(result).toEqual(['test:a', 'test:b', 'test:c']); + }); + + it('traverse.shortestPath works with seeded data after auto-materialize', async () => { + const state = createEmptyStateV5(); + orsetAdd(state.nodeAlive, 'test:a', createDot('w1', 1)); + orsetAdd(state.nodeAlive, 'test:b', createDot('w1', 2)); + orsetAdd(state.edgeAlive, encodeEdgeKey('test:a', 'test:b', 'x'), createDot('w1', 3)); + + graph._cachedState = state; + graph.materialize = vi.fn().mockResolvedValue(state); + + const result = await graph.traverse.shortestPath('test:a', 'test:b', { dir: 'out' }); + expect(result).toEqual({ found: true, path: ['test:a', 'test:b'], length: 1 }); + }); + + it('traverse errors are NODE_NOT_FOUND, not "No cached state"', async () => { + // The key behavior: with autoMaterialize on, the error should be about + // the missing node, NOT about missing cached state + try { + await graph.traverse.bfs('test:missing'); + expect.fail('should have thrown'); + } catch (err) { + expect(err.message).toContain('Start node not found'); + expect(err.message).not.toContain('No cached state'); + } + }); + }); + + // ──────────────────────────────────────────────────────────────────────── + // Edge cases + // ──────────────────────────────────────────────────────────────────────── + + describe('Edge cases', () => { + it('default autoMaterialize (undefined) behaves like false', async () => { + const persistence = createMockPersistence(); + 
const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + }); + + await expect(graph.hasNode('test:x')).rejects.toThrow(QueryError); + }); + + it('_ensureFreshState does not materialize when autoMaterialize is true and state is clean', async () => { + const persistence = createMockPersistence(); + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: true, + }); + + // First call materializes + await graph.getNodes(); + expect(graph._cachedState).not.toBe(null); + expect(graph._stateDirty).toBe(false); + + // Spy on materialize for subsequent call + const spy = vi.spyOn(graph, 'materialize'); + await graph.getNodes(); + + // Should NOT have called materialize (state is clean) + expect(spy).not.toHaveBeenCalled(); + }); + + it('_ensureFreshState materializes when autoMaterialize is true and _stateDirty', async () => { + const persistence = createMockPersistence(); + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: true, + }); + + await graph.materialize(); + graph._stateDirty = true; + + const spy = vi.spyOn(graph, 'materialize'); + await graph.getNodes(); + + expect(spy).toHaveBeenCalled(); + }); + + it('_ensureFreshState materializes when autoMaterialize is true and _cachedState is null', async () => { + const persistence = createMockPersistence(); + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: true, + }); + + expect(graph._cachedState).toBe(null); + const spy = vi.spyOn(graph, 'materialize'); + await graph.hasNode('test:x'); + + expect(spy).toHaveBeenCalled(); + }); + + it('autoMaterialize false with dirty state throws', async () => { + const persistence = createMockPersistence(); + const graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + autoMaterialize: false, + }); + + await 
graph.materialize(); + graph._stateDirty = true; + + await expect(graph.getNodes()).rejects.toThrow(QueryError); + }); + }); +}); diff --git a/test/unit/domain/WarpGraph.patchCount.test.js b/test/unit/domain/WarpGraph.patchCount.test.js new file mode 100644 index 0000000..048435a --- /dev/null +++ b/test/unit/domain/WarpGraph.patchCount.test.js @@ -0,0 +1,288 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import WarpGraph from '../../../src/domain/WarpGraph.js'; +import { encode } from '../../../src/infrastructure/codecs/CborCodec.js'; +import { encodePatchMessage } from '../../../src/domain/services/WarpMessageCodec.js'; + +function createMockPersistence() { + return { + readRef: vi.fn(), + showNode: vi.fn(), + writeBlob: vi.fn(), + writeTree: vi.fn(), + readBlob: vi.fn(), + readTreeOids: vi.fn(), + commitNode: vi.fn(), + commitNodeWithTree: vi.fn(), + updateRef: vi.fn(), + listRefs: vi.fn().mockResolvedValue([]), + getNodeInfo: vi.fn(), + ping: vi.fn().mockResolvedValue({ ok: true, latencyMs: 1 }), + configGet: vi.fn().mockResolvedValue(null), + configSet: vi.fn().mockResolvedValue(undefined), + }; +} + +/** + * Creates a minimal schema:2 patch object. + */ +function createPatch(writer, lamport, nodeId) { + return { + schema: 2, + writer, + lamport, + context: { [writer]: lamport }, + ops: [{ type: 'NodeAdd', nodeId, dot: { writer, counter: lamport } }], + }; +} + +/** + * A fake 40-char hex SHA for use in tests. + * Converts the input to a hex string padded/truncated to exactly 40 hex chars. + */ +function fakeSha(label) { + const hex = Buffer.from(String(label)).toString('hex'); + return hex.padEnd(40, 'a').slice(0, 40); +} + +/** + * Builds a chain of N patch commits for a given writer. Sets up + * persistence mocks so that _loadWriterPatches walks the chain. + * + * Returns the tip SHA so it can be wired to readRef. 
+ */ +function buildPatchChain(persistence, writer, count) { + const shas = []; + for (let i = 1; i <= count; i++) { + shas.push(fakeSha(`${writer}${i}`)); + } + + for (let i = 0; i < count; i++) { + const sha = shas[i]; + const lamport = i + 1; + const patch = createPatch(writer, lamport, `n:${writer}:${lamport}`); + const patchCbor = encode(patch); + const patchOid = fakeSha(`blob-${writer}-${lamport}`); + + const message = encodePatchMessage({ + graph: 'test', + writer, + lamport, + patchOid, + schema: 2, + }); + + const parents = i < count - 1 ? [shas[i + 1]] : []; + + // getNodeInfo returns commit info (message + parents) + persistence.getNodeInfo.mockImplementation((querySha) => { + // Find the matching SHA among all configured commits + for (let j = 0; j < count; j++) { + if (querySha === shas[j]) { + const l = j + 1; + const p = createPatch(writer, l, `n:${writer}:${l}`); + const po = fakeSha(`blob-${writer}-${l}`); + const m = encodePatchMessage({ + graph: 'test', + writer, + lamport: l, + patchOid: po, + schema: 2, + }); + const par = j < count - 1 ? 
[shas[j + 1]] : []; + return Promise.resolve({ message: m, parents: par }); + } + } + return Promise.resolve({ message: '', parents: [] }); + }); + + // readBlob returns CBOR for the patch + persistence.readBlob.mockImplementation((oid) => { + for (let j = 0; j < count; j++) { + const l = j + 1; + const po = fakeSha(`blob-${writer}-${l}`); + if (oid === po) { + const p = createPatch(writer, l, `n:${writer}:${l}`); + return Promise.resolve(encode(p)); + } + } + return Promise.resolve(Buffer.alloc(0)); + }); + } + + // tip is the newest commit (index 0) + return shas[0]; +} + +describe('AP/CKPT/2: _patchesSinceCheckpoint tracking', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'w1', + }); + }); + + it('starts at 0 after construction', () => { + expect(graph._patchesSinceCheckpoint).toBe(0); + }); + + it('remains 0 after materialize with no writers', async () => { + // listRefs returns [] (no writers) — default mock + // readRef returns null for checkpoint ref + persistence.readRef.mockResolvedValue(null); + + await graph.materialize(); + + expect(graph._patchesSinceCheckpoint).toBe(0); + }); + + it('equals total patch count after materialize without checkpoint', async () => { + const patchCount = 5; + const tipSha = buildPatchChain(persistence, 'w1', patchCount); + + // checkpoint ref returns null (no checkpoint) + persistence.readRef.mockImplementation((ref) => { + if (ref === 'refs/empty-graph/test/checkpoints/head') { + return Promise.resolve(null); + } + if (ref === 'refs/empty-graph/test/writers/w1') { + return Promise.resolve(tipSha); + } + return Promise.resolve(null); + }); + + // discoverWriters needs listRefs to return the writer ref + persistence.listRefs.mockResolvedValue([ + 'refs/empty-graph/test/writers/w1', + ]); + + await graph.materialize(); + + expect(graph._patchesSinceCheckpoint).toBe(patchCount); + }); 
+ + it('golden path: checkpoint then 10 patches yields count = 10', async () => { + const patchCount = 10; + + // We simulate materialization with a checkpoint by mocking _loadLatestCheckpoint + // and _loadPatchesSince. The simplest approach: spy on private methods. + const { createEmptyStateV5 } = await import( + '../../../src/domain/services/JoinReducer.js' + ); + + const checkpointState = createEmptyStateV5(); + + // Build 10 fake patch objects for _loadPatchesSince to return + const patches = []; + for (let i = 1; i <= patchCount; i++) { + patches.push({ + patch: createPatch('w1', i, `n:w1:${i}`), + sha: fakeSha(i), + }); + } + + // Mock _loadLatestCheckpoint to return a checkpoint + vi.spyOn(graph, '_loadLatestCheckpoint').mockResolvedValue({ + schema: 2, + state: checkpointState, + frontier: {}, + }); + + // Mock _loadPatchesSince to return the 10 patches + vi.spyOn(graph, '_loadPatchesSince').mockResolvedValue(patches); + + await graph.materialize(); + + expect(graph._patchesSinceCheckpoint).toBe(10); + }); + + it('increments by 1 after createPatch().commit()', async () => { + // readRef returns null (no existing writer ref) for _nextLamport + persistence.readRef.mockResolvedValue(null); + persistence.writeBlob.mockResolvedValue(fakeSha('blob')); + persistence.writeTree.mockResolvedValue(fakeSha('tree')); + persistence.commitNodeWithTree.mockResolvedValue(fakeSha('commit')); + persistence.updateRef.mockResolvedValue(undefined); + + expect(graph._patchesSinceCheckpoint).toBe(0); + + const builder = await graph.createPatch(); + await builder.addNode('n:1').commit(); + + expect(graph._patchesSinceCheckpoint).toBe(1); + }); + + it('accumulates: materialize sets base, then commit increments', async () => { + const patchCount = 3; + const tipSha = buildPatchChain(persistence, 'w1', patchCount); + + // Phase 1: materialize with 3 patches (no checkpoint) + persistence.readRef.mockImplementation((ref) => { + if (ref === 'refs/empty-graph/test/checkpoints/head') { + 
return Promise.resolve(null); + } + if (ref === 'refs/empty-graph/test/writers/w1') { + return Promise.resolve(tipSha); + } + return Promise.resolve(null); + }); + persistence.listRefs.mockResolvedValue([ + 'refs/empty-graph/test/writers/w1', + ]); + + await graph.materialize(); + expect(graph._patchesSinceCheckpoint).toBe(3); + + // Phase 2: commit a new patch + // After materialize, readRef for writer ref returns tipSha + // showNode needs to return a patch message for _nextLamport + const tipMessage = encodePatchMessage({ + graph: 'test', + writer: 'w1', + lamport: patchCount, + patchOid: fakeSha('blob-w1-last'), + schema: 2, + }); + persistence.showNode.mockResolvedValue(tipMessage); + persistence.writeBlob.mockResolvedValue(fakeSha('new-blob')); + persistence.writeTree.mockResolvedValue(fakeSha('new-tree')); + persistence.commitNodeWithTree.mockResolvedValue(fakeSha('new-commit')); + persistence.updateRef.mockResolvedValue(undefined); + + const builder = await graph.createPatch(); + await builder.addNode('n:new').commit(); + + expect(graph._patchesSinceCheckpoint).toBe(4); + }); + + it('subsequent commits each increment by 1', async () => { + persistence.readRef.mockResolvedValue(null); + persistence.writeBlob.mockResolvedValue(fakeSha('blob')); + persistence.writeTree.mockResolvedValue(fakeSha('tree')); + persistence.commitNodeWithTree.mockResolvedValue(fakeSha('commit')); + persistence.updateRef.mockResolvedValue(undefined); + + const builder1 = await graph.createPatch(); + await builder1.addNode('n:1').commit(); + expect(graph._patchesSinceCheckpoint).toBe(1); + + // For second commit, readRef now returns the previous commit SHA + persistence.readRef.mockResolvedValue(fakeSha('commit')); + const patchMsg = encodePatchMessage({ + graph: 'test', + writer: 'w1', + lamport: 1, + patchOid: fakeSha('blob'), + schema: 2, + }); + persistence.showNode.mockResolvedValue(patchMsg); + + const builder2 = await graph.createPatch(); + await 
builder2.addNode('n:2').commit(); + expect(graph._patchesSinceCheckpoint).toBe(2); + }); +}); diff --git a/test/unit/domain/WarpGraph.query.test.js b/test/unit/domain/WarpGraph.query.test.js index e97c2fb..0b0f04c 100644 --- a/test/unit/domain/WarpGraph.query.test.js +++ b/test/unit/domain/WarpGraph.query.test.js @@ -11,6 +11,7 @@ import { describe, it, expect, beforeEach, vi } from 'vitest'; import WarpGraph from '../../../src/domain/WarpGraph.js'; +import QueryError from '../../../src/domain/errors/QueryError.js'; import { createEmptyStateV5, encodeEdgeKey, encodePropKey } from '../../../src/domain/services/JoinReducer.js'; import { orsetAdd } from '../../../src/domain/crdt/ORSet.js'; import { createDot } from '../../../src/domain/crdt/Dot.js'; @@ -36,8 +37,8 @@ describe('WarpGraph Query API', () => { }); describe('hasNode()', () => { - it('throws if no cached state', () => { - expect(() => graph.hasNode('node-1')).toThrow('No cached state'); + it('throws if no cached state', async () => { + await expect(graph.hasNode('node-1')).rejects.toThrow(QueryError); }); it('returns true for existing nodes', async () => { @@ -48,23 +49,23 @@ describe('WarpGraph Query API', () => { const state = graph._cachedState; orsetAdd(state.nodeAlive, 'user:alice', createDot('w1', 1)); - expect(graph.hasNode('user:alice')).toBe(true); + expect(await graph.hasNode('user:alice')).toBe(true); }); it('returns false for non-existing nodes', async () => { await graph.materialize(); - expect(graph.hasNode('user:nonexistent')).toBe(false); + expect(await graph.hasNode('user:nonexistent')).toBe(false); }); }); describe('getNodeProps()', () => { - it('throws if no cached state', () => { - expect(() => graph.getNodeProps('node-1')).toThrow('No cached state'); + it('throws if no cached state', async () => { + await expect(graph.getNodeProps('node-1')).rejects.toThrow(QueryError); }); it('returns null for non-existing nodes', async () => { await graph.materialize(); - 
expect(graph.getNodeProps('user:nonexistent')).toBe(null); + expect(await graph.getNodeProps('user:nonexistent')).toBe(null); }); it('returns empty map for node with no props', async () => { @@ -72,7 +73,7 @@ describe('WarpGraph Query API', () => { const state = graph._cachedState; orsetAdd(state.nodeAlive, 'user:alice', createDot('w1', 1)); - const props = graph.getNodeProps('user:alice'); + const props = await graph.getNodeProps('user:alice'); expect(props).toBeInstanceOf(Map); expect(props.size).toBe(0); }); @@ -92,15 +93,15 @@ describe('WarpGraph Query API', () => { state.prop.set(propKey1, { value: 'Alice', lamport: 1, writerId: 'w1' }); state.prop.set(propKey2, { value: 30, lamport: 1, writerId: 'w1' }); - const props = graph.getNodeProps('user:alice'); + const props = await graph.getNodeProps('user:alice'); expect(props.get('name')).toBe('Alice'); expect(props.get('age')).toBe(30); }); }); describe('neighbors()', () => { - it('throws if no cached state', () => { - expect(() => graph.neighbors('node-1')).toThrow('No cached state'); + it('throws if no cached state', async () => { + await expect(graph.neighbors('node-1')).rejects.toThrow(QueryError); }); it('returns empty array for node with no edges', async () => { @@ -108,7 +109,7 @@ describe('WarpGraph Query API', () => { const state = graph._cachedState; orsetAdd(state.nodeAlive, 'user:alice', createDot('w1', 1)); - expect(graph.neighbors('user:alice')).toEqual([]); + expect(await graph.neighbors('user:alice')).toEqual([]); }); it('returns outgoing neighbors', async () => { @@ -123,7 +124,7 @@ describe('WarpGraph Query API', () => { const edgeKey = encodeEdgeKey('user:alice', 'user:bob', 'follows'); orsetAdd(state.edgeAlive, edgeKey, createDot('w1', 3)); - const outgoing = graph.neighbors('user:alice', 'outgoing'); + const outgoing = await graph.neighbors('user:alice', 'outgoing'); expect(outgoing).toHaveLength(1); expect(outgoing[0]).toEqual({ nodeId: 'user:bob', @@ -144,7 +145,7 @@ describe('WarpGraph 
Query API', () => { const edgeKey = encodeEdgeKey('user:alice', 'user:bob', 'follows'); orsetAdd(state.edgeAlive, edgeKey, createDot('w1', 3)); - const incoming = graph.neighbors('user:bob', 'incoming'); + const incoming = await graph.neighbors('user:bob', 'incoming'); expect(incoming).toHaveLength(1); expect(incoming[0]).toEqual({ nodeId: 'user:alice', @@ -167,7 +168,7 @@ describe('WarpGraph Query API', () => { // carol --follows--> alice orsetAdd(state.edgeAlive, encodeEdgeKey('user:carol', 'user:alice', 'follows'), createDot('w1', 5)); - const neighbors = graph.neighbors('user:alice'); + const neighbors = await graph.neighbors('user:alice'); expect(neighbors).toHaveLength(2); expect(neighbors.find(n => n.nodeId === 'user:bob' && n.direction === 'outgoing')).toBeDefined(); expect(neighbors.find(n => n.nodeId === 'user:carol' && n.direction === 'incoming')).toBeDefined(); @@ -187,7 +188,7 @@ describe('WarpGraph Query API', () => { // alice --blocks--> carol orsetAdd(state.edgeAlive, encodeEdgeKey('user:alice', 'user:carol', 'blocks'), createDot('w1', 5)); - const follows = graph.neighbors('user:alice', 'outgoing', 'follows'); + const follows = await graph.neighbors('user:alice', 'outgoing', 'follows'); expect(follows).toHaveLength(1); expect(follows[0].nodeId).toBe('user:bob'); }); @@ -203,19 +204,19 @@ describe('WarpGraph Query API', () => { orsetAdd(state.edgeAlive, encodeEdgeKey('user:alice', 'user:bob', 'follows'), createDot('w1', 2)); // Should not return bob since it doesn't exist - const neighbors = graph.neighbors('user:alice', 'outgoing'); + const neighbors = await graph.neighbors('user:alice', 'outgoing'); expect(neighbors).toHaveLength(0); }); }); describe('getNodes()', () => { - it('throws if no cached state', () => { - expect(() => graph.getNodes()).toThrow('No cached state'); + it('throws if no cached state', async () => { + await expect(graph.getNodes()).rejects.toThrow(QueryError); }); it('returns empty array for empty graph', async () => { await 
graph.materialize(); - expect(graph.getNodes()).toEqual([]); + expect(await graph.getNodes()).toEqual([]); }); it('returns all visible nodes', async () => { @@ -226,7 +227,7 @@ describe('WarpGraph Query API', () => { orsetAdd(state.nodeAlive, 'node-b', createDot('w1', 2)); orsetAdd(state.nodeAlive, 'node-c', createDot('w1', 3)); - const nodes = graph.getNodes(); + const nodes = await graph.getNodes(); expect(nodes).toHaveLength(3); expect(nodes).toContain('node-a'); expect(nodes).toContain('node-b'); @@ -235,13 +236,13 @@ describe('WarpGraph Query API', () => { }); describe('getEdges()', () => { - it('throws if no cached state', () => { - expect(() => graph.getEdges()).toThrow('No cached state'); + it('throws if no cached state', async () => { + await expect(graph.getEdges()).rejects.toThrow(QueryError); }); it('returns empty array for empty graph', async () => { await graph.materialize(); - expect(graph.getEdges()).toEqual([]); + expect(await graph.getEdges()).toEqual([]); }); it('returns all visible edges', async () => { @@ -257,7 +258,7 @@ describe('WarpGraph Query API', () => { orsetAdd(state.edgeAlive, encodeEdgeKey('a', 'b', 'e1'), createDot('w1', 4)); orsetAdd(state.edgeAlive, encodeEdgeKey('b', 'c', 'e2'), createDot('w1', 5)); - const edges = graph.getEdges(); + const edges = await graph.getEdges(); expect(edges).toHaveLength(2); expect(edges.find(e => e.from === 'a' && e.to === 'b' && e.label === 'e1')).toBeDefined(); expect(edges.find(e => e.from === 'b' && e.to === 'c' && e.label === 'e2')).toBeDefined(); @@ -273,7 +274,7 @@ describe('WarpGraph Query API', () => { // Add edge to non-existent 'b' orsetAdd(state.edgeAlive, encodeEdgeKey('a', 'b', 'e1'), createDot('w1', 2)); - const edges = graph.getEdges(); + const edges = await graph.getEdges(); expect(edges).toHaveLength(0); }); }); diff --git a/test/unit/domain/WarpGraph.writerInvalidation.test.js b/test/unit/domain/WarpGraph.writerInvalidation.test.js new file mode 100644 index 0000000..aa48ec9 --- 
/dev/null +++ b/test/unit/domain/WarpGraph.writerInvalidation.test.js @@ -0,0 +1,245 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import WarpGraph from '../../../src/domain/WarpGraph.js'; +import { encodePatchMessage } from '../../../src/domain/services/WarpMessageCodec.js'; + +/** + * AP/INVAL/3 — Writer.commitPatch() and PatchSession.commit() trigger + * the same eager re-materialize as the low-level createPatch() API. + * + * The Writer and PatchSession are higher-level APIs that delegate to + * PatchBuilderV2. The onCommitSuccess callback wired in WarpGraph.writer() + * and WarpGraph.createWriter() must trigger eager state update so that + * queries after a writer commit reflect the new state immediately. + */ + +const FAKE_BLOB_OID = 'a'.repeat(40); +const FAKE_TREE_OID = 'b'.repeat(40); +const FAKE_COMMIT_SHA = 'c'.repeat(40); +const FAKE_COMMIT_SHA_2 = 'd'.repeat(40); + +function createMockPersistence() { + return { + readRef: vi.fn(), + showNode: vi.fn(), + writeBlob: vi.fn(), + writeTree: vi.fn(), + readBlob: vi.fn(), + readTreeOids: vi.fn(), + commitNode: vi.fn(), + commitNodeWithTree: vi.fn(), + updateRef: vi.fn(), + listRefs: vi.fn().mockResolvedValue([]), + getNodeInfo: vi.fn(), + ping: vi.fn().mockResolvedValue({ ok: true, latencyMs: 1 }), + configGet: vi.fn().mockResolvedValue(null), + configSet: vi.fn().mockResolvedValue(undefined), + }; +} + +/** + * Configure the mock persistence so that a Writer-based first commit succeeds. + * + * Writer flow hits readRef 3 times for a first commit: + * 1. Writer.beginPatch() reads ref to get expectedOldHead + * 2. PatchSession.commit() reads ref for CAS pre-check + * 3. PatchBuilderV2.commit() reads ref for its own CAS check + * All return null for a first commit. 
+ */ +function mockWriterFirstCommit(persistence) { + persistence.readRef.mockResolvedValue(null); + persistence.writeBlob.mockResolvedValue(FAKE_BLOB_OID); + persistence.writeTree.mockResolvedValue(FAKE_TREE_OID); + persistence.commitNodeWithTree.mockResolvedValue(FAKE_COMMIT_SHA); + persistence.updateRef.mockResolvedValue(undefined); +} + +/** + * Configure the mock persistence so that a Writer-based second commit succeeds. + * + * After the first commit, the writer ref points to FAKE_COMMIT_SHA. + * readRef returns FAKE_COMMIT_SHA (3 times), and showNode returns a valid + * patch message so lamport can be extracted. + */ +function mockWriterSecondCommit(persistence) { + const patchMessage = encodePatchMessage({ + graph: 'test', + writer: 'writer-1', + lamport: 1, + patchOid: FAKE_BLOB_OID, + schema: 2, + }); + + persistence.readRef.mockResolvedValue(FAKE_COMMIT_SHA); + persistence.showNode.mockResolvedValue(patchMessage); + persistence.writeBlob.mockResolvedValue(FAKE_BLOB_OID); + persistence.writeTree.mockResolvedValue(FAKE_TREE_OID); + persistence.commitNodeWithTree.mockResolvedValue(FAKE_COMMIT_SHA_2); + persistence.updateRef.mockResolvedValue(undefined); +} + +describe('WarpGraph Writer invalidation (AP/INVAL/3)', () => { + let persistence; + let graph; + + beforeEach(async () => { + persistence = createMockPersistence(); + graph = await WarpGraph.open({ + persistence, + graphName: 'test', + writerId: 'writer-1', + }); + }); + + // ── writer.commitPatch() golden path ───────────────────────────── + + it('writer.commitPatch() followed by hasNode() returns true without explicit re-materialize', async () => { + await graph.materialize(); + + mockWriterFirstCommit(persistence); + const writer = await graph.writer('writer-1'); + await writer.commitPatch((p) => p.addNode('test:node')); + + // Query reflects the commit immediately — no explicit materialize needed + expect(await graph.hasNode('test:node')).toBe(true); + expect(graph._stateDirty).toBe(false); + }); + 
+ it('writer.commitPatch() keeps _stateDirty false when _cachedState exists', async () => { + await graph.materialize(); + expect(graph._stateDirty).toBe(false); + + mockWriterFirstCommit(persistence); + const writer = await graph.writer('writer-1'); + await writer.commitPatch((p) => p.addNode('test:node')); + + // Eager re-materialize applied the patch, so state is fresh + expect(graph._stateDirty).toBe(false); + }); + + // ── writer.beginPatch() / patch.commit() two-step API ──────────── + + it('beginPatch() + patch.commit() followed by hasNode() returns true', async () => { + await graph.materialize(); + + mockWriterFirstCommit(persistence); + const writer = await graph.writer('writer-1'); + const patch = await writer.beginPatch(); + patch.addNode('test:node'); + await patch.commit(); + + expect(await graph.hasNode('test:node')).toBe(true); + expect(graph._stateDirty).toBe(false); + }); + + it('beginPatch() + setProperty reflected in getNodeProps() after commit', async () => { + await graph.materialize(); + + mockWriterFirstCommit(persistence); + const writer = await graph.writer('writer-1'); + const patch = await writer.beginPatch(); + patch.addNode('test:node'); + patch.setProperty('test:node', 'name', 'Alice'); + await patch.commit(); + + const props = await graph.getNodeProps('test:node'); + expect(props).not.toBeNull(); + expect(props.get('name')).toBe('Alice'); + }); + + // ── Multiple sequential writer commits ─────────────────────────── + + it('multiple sequential writer commits keep state fresh', async () => { + await graph.materialize(); + + mockWriterFirstCommit(persistence); + const writer = await graph.writer('writer-1'); + await writer.commitPatch((p) => p.addNode('test:a')); + expect(graph._stateDirty).toBe(false); + expect(await graph.hasNode('test:a')).toBe(true); + + mockWriterSecondCommit(persistence); + const writer2 = await graph.writer('writer-1'); + await writer2.commitPatch((p) => p.addNode('test:b')); + 
expect(graph._stateDirty).toBe(false); + expect(await graph.hasNode('test:b')).toBe(true); + + // Both nodes should be present + expect(await graph.hasNode('test:a')).toBe(true); + }); + + // ── writer commit without prior materialize ────────────────────── + + it('writer commit without prior materialize sets _stateDirty to true', async () => { + // No materialize() call — _cachedState is null + mockWriterFirstCommit(persistence); + const writer = await graph.writer('writer-1'); + await writer.commitPatch((p) => p.addNode('test:node')); + + // No _cachedState, so can't eagerly apply — dirty + expect(graph._stateDirty).toBe(true); + }); + + // ── createWriter() path ────────────────────────────────────────── + + it('createWriter() path also triggers eager invalidation', async () => { + await graph.materialize(); + + mockWriterFirstCommit(persistence); + const writer = await graph.createWriter(); + + await writer.commitPatch((p) => p.addNode('test:node')); + + expect(await graph.hasNode('test:node')).toBe(true); + expect(graph._stateDirty).toBe(false); + }); + + // ── Failure cases ──────────────────────────────────────────────── + + it('writer commit failure (writeBlob rejects) does not corrupt state', async () => { + await graph.materialize(); + const stateBeforeAttempt = graph._cachedState; + + persistence.readRef.mockResolvedValue(null); + persistence.writeBlob.mockRejectedValue(new Error('disk full')); + + const writer = await graph.writer('writer-1'); + await expect(writer.commitPatch((p) => p.addNode('test:node'))).rejects.toThrow('disk full'); + + // State should be unchanged + expect(graph._stateDirty).toBe(false); + expect(graph._cachedState).toBe(stateBeforeAttempt); + }); + + it('writer commit failure (updateRef rejects) does not corrupt state', async () => { + await graph.materialize(); + const stateBeforeAttempt = graph._cachedState; + + persistence.readRef.mockResolvedValue(null); + persistence.writeBlob.mockResolvedValue(FAKE_BLOB_OID); + 
persistence.writeTree.mockResolvedValue(FAKE_TREE_OID); + persistence.commitNodeWithTree.mockResolvedValue(FAKE_COMMIT_SHA); + persistence.updateRef.mockRejectedValue(new Error('ref lock failed')); + + const writer = await graph.writer('writer-1'); + await expect(writer.commitPatch((p) => p.addNode('test:node'))).rejects.toThrow('ref lock failed'); + + expect(graph._stateDirty).toBe(false); + expect(graph._cachedState).toBe(stateBeforeAttempt); + }); + + it('writer commit failure (CAS race in PatchSession) does not corrupt state', async () => { + await graph.materialize(); + const stateBeforeAttempt = graph._cachedState; + + // beginPatch() sees null, but by the time PatchSession.commit() checks, ref has advanced + persistence.readRef + .mockResolvedValueOnce(null) // Writer.beginPatch() — get expectedOldHead + .mockResolvedValueOnce(FAKE_COMMIT_SHA); // PatchSession.commit() — CAS pre-check + + const writer = await graph.writer('writer-1'); + await expect(writer.commitPatch((p) => p.addNode('test:node'))).rejects.toThrow(); + + expect(graph._stateDirty).toBe(false); + expect(graph._cachedState).toBe(stateBeforeAttempt); + }); +}); diff --git a/test/unit/domain/services/GitGraphAdapter.test.js b/test/unit/domain/services/GitGraphAdapter.test.js index ce20b5b..7ee24bc 100644 --- a/test/unit/domain/services/GitGraphAdapter.test.js +++ b/test/unit/domain/services/GitGraphAdapter.test.js @@ -241,19 +241,20 @@ describe('GitGraphAdapter', () => { }); it('returns false when node does not exist', async () => { - mockPlumbing.execute.mockRejectedValue(new Error('fatal: Not a valid object name')); + const err = new Error('fatal: Not a valid object name'); + err.details = { code: 1 }; + mockPlumbing.execute.mockRejectedValue(err); const exists = await adapter.nodeExists('abc123def456789012345678901234567890abcd'); expect(exists).toBe(false); }); - it('returns false on any git error', async () => { + it('rethrows non-missing-object errors', async () => { 
mockPlumbing.execute.mockRejectedValue(new Error('some git error')); - const exists = await adapter.nodeExists('abc123'); - - expect(exists).toBe(false); + await expect(adapter.nodeExists('abc123')) + .rejects.toThrow('some git error'); }); it('validates OID before calling git', async () => { diff --git a/test/unit/domain/services/HookInstaller.test.js b/test/unit/domain/services/HookInstaller.test.js new file mode 100644 index 0000000..636e2d0 --- /dev/null +++ b/test/unit/domain/services/HookInstaller.test.js @@ -0,0 +1,345 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { HookInstaller, classifyExistingHook } from '../../../../src/domain/services/HookInstaller.js'; + +const VERSION = '7.1.0'; + +function makeFs(files = {}) { + const store = new Map(Object.entries(files)); + return { + readFileSync: vi.fn((p, _enc) => { + if (!store.has(p)) throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' }); + return store.get(p); + }), + writeFileSync: vi.fn((p, content) => { store.set(p, content); }), + existsSync: vi.fn((p) => store.has(p) || [...store.keys()].some(k => k.startsWith(p + '/'))), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + _store: store, + }; +} + +function makeExecGitConfig(overrides = {}) { + return vi.fn((repoPath, key) => { + if (key === '--git-dir') return overrides.gitDir || '.git'; + if (key === 'core.hooksPath') return overrides.hooksPath || null; + return null; + }); +} + +function makeInstaller(fsFiles = {}, gitOverrides = {}) { + const fs = makeFs(fsFiles); + const execGitConfig = makeExecGitConfig(gitOverrides); + const installer = new HookInstaller({ + fs, + execGitConfig, + version: VERSION, + templateDir: '/tmpl', + }); + return { installer, fs, execGitConfig }; +} + +const TEMPLATE = `#!/bin/sh +# --- @git-stunts/empty-graph post-merge hook __WARP_HOOK_VERSION__ --- +# warp-hook-version: __WARP_HOOK_VERSION__ +# body +echo "hello" +# --- end @git-stunts/empty-graph --- +`; + +const STAMPED = 
TEMPLATE.replaceAll('__WARP_HOOK_VERSION__', VERSION); + +// ── classifyExistingHook ──────────────────────────────────────────────────── + +describe('classifyExistingHook', () => { + it('returns none for null content', () => { + expect(classifyExistingHook(null)).toEqual({ kind: 'none' }); + }); + + it('returns none for empty string', () => { + expect(classifyExistingHook('')).toEqual({ kind: 'none' }); + }); + + it('returns none for whitespace-only', () => { + expect(classifyExistingHook(' \n ')).toEqual({ kind: 'none' }); + }); + + it('identifies our standalone hook', () => { + const result = classifyExistingHook(STAMPED); + expect(result.kind).toBe('ours'); + expect(result.version).toBe(VERSION); + expect(result.appended).toBe(true); + }); + + it('identifies our hook with version only (no delimiters)', () => { + const content = '#!/bin/sh\n# warp-hook-version: 7.0.0\necho hi\n'; + const result = classifyExistingHook(content); + expect(result.kind).toBe('ours'); + expect(result.version).toBe('7.0.0'); + expect(result.appended).toBe(false); + }); + + it('identifies appended hook', () => { + const content = [ + '#!/bin/sh', + 'echo "foreign stuff"', + '', + '# --- @git-stunts/empty-graph post-merge hook 7.1.0 ---', + '# warp-hook-version: 7.1.0', + 'echo "warp"', + '# --- end @git-stunts/empty-graph ---', + ].join('\n'); + const result = classifyExistingHook(content); + expect(result.kind).toBe('ours'); + expect(result.version).toBe('7.1.0'); + expect(result.appended).toBe(true); + }); + + it('identifies foreign hook', () => { + const content = '#!/bin/sh\necho "some other hook"\n'; + const result = classifyExistingHook(content); + expect(result.kind).toBe('foreign'); + }); + + it('ignores placeholder version', () => { + const content = '#!/bin/sh\n# warp-hook-version: __WARP_HOOK_VERSION__\n'; + const result = classifyExistingHook(content); + expect(result.kind).toBe('foreign'); + }); +}); + +// ── install() 
─────────────────────────────────────────────────────────────── + +describe('HookInstaller.install', () => { + it('fresh install writes hook with correct version', () => { + const { installer, fs } = makeInstaller({ + '/tmpl/post-merge.sh': TEMPLATE, + }); + + const result = installer.install('/repo', { strategy: 'install' }); + + expect(result.action).toBe('installed'); + expect(result.version).toBe(VERSION); + expect(result.hookPath).toContain('post-merge'); + expect(fs.writeFileSync).toHaveBeenCalled(); + + const written = fs._store.get(result.hookPath); + expect(written).toContain(`# warp-hook-version: ${VERSION}`); + expect(written).not.toContain('__WARP_HOOK_VERSION__'); + }); + + it('creates hooks directory if missing', () => { + const { installer, fs } = makeInstaller({ + '/tmpl/post-merge.sh': TEMPLATE, + }); + + installer.install('/repo', { strategy: 'install' }); + expect(fs.mkdirSync).toHaveBeenCalled(); + }); + + it('upgrade replaces standalone hook', () => { + const oldHook = TEMPLATE.replaceAll('__WARP_HOOK_VERSION__', '7.0.0'); + const { installer, fs } = makeInstaller({ + '/tmpl/post-merge.sh': TEMPLATE, + '/repo/.git/hooks/post-merge': oldHook, + }); + + const result = installer.install('/repo', { strategy: 'upgrade' }); + expect(result.action).toBe('upgraded'); + + const written = fs._store.get(result.hookPath); + expect(written).toContain(`# warp-hook-version: ${VERSION}`); + }); + + it('upgrade replaces delimited section in appended hook', () => { + const appended = [ + '#!/bin/sh', + 'echo "foreign"', + '', + '# --- @git-stunts/empty-graph post-merge hook 7.0.0 ---', + '# warp-hook-version: 7.0.0', + 'echo "old warp"', + '# --- end @git-stunts/empty-graph ---', + ].join('\n'); + + const { installer, fs } = makeInstaller({ + '/tmpl/post-merge.sh': TEMPLATE, + '/repo/.git/hooks/post-merge': appended, + }); + + const result = installer.install('/repo', { strategy: 'upgrade' }); + expect(result.action).toBe('upgraded'); + + const written = 
fs._store.get(result.hookPath); + expect(written).toContain('echo "foreign"'); + expect(written).toContain(`# warp-hook-version: ${VERSION}`); + expect(written).not.toContain('7.0.0'); + }); + + it('append adds delimited section to foreign hook', () => { + const foreign = '#!/bin/sh\necho "existing hook"\n'; + const { installer, fs } = makeInstaller({ + '/tmpl/post-merge.sh': TEMPLATE, + '/repo/.git/hooks/post-merge': foreign, + }); + + const result = installer.install('/repo', { strategy: 'append' }); + expect(result.action).toBe('appended'); + + const written = fs._store.get(result.hookPath); + expect(written).toContain('echo "existing hook"'); + expect(written).toContain(`# warp-hook-version: ${VERSION}`); + expect(written).toContain('# --- end @git-stunts/empty-graph ---'); + }); + + it('replace backs up existing hook', () => { + const foreign = '#!/bin/sh\necho "existing"\n'; + const { installer, fs } = makeInstaller({ + '/tmpl/post-merge.sh': TEMPLATE, + '/repo/.git/hooks/post-merge': foreign, + }); + + const result = installer.install('/repo', { strategy: 'replace' }); + expect(result.action).toBe('replaced'); + expect(result.backupPath).toContain('.backup'); + + const backup = fs._store.get(result.backupPath); + expect(backup).toBe(foreign); + + const written = fs._store.get(result.hookPath); + expect(written).toContain(`# warp-hook-version: ${VERSION}`); + }); + + it('replace with no existing hook skips backup', () => { + const { installer } = makeInstaller({ + '/tmpl/post-merge.sh': TEMPLATE, + }); + + const result = installer.install('/repo', { strategy: 'replace' }); + expect(result.action).toBe('replaced'); + expect(result.backupPath).toBeUndefined(); + }); + + it('throws on unknown strategy', () => { + const { installer } = makeInstaller({ + '/tmpl/post-merge.sh': TEMPLATE, + }); + + expect(() => installer.install('/repo', { strategy: 'bogus' })) + .toThrow('Unknown install strategy: bogus'); + }); +}); + +// ── getHookStatus 
─────────────────────────────────────────────────────────── + +describe('HookInstaller.getHookStatus', () => { + it('not installed when hook file missing', () => { + const { installer } = makeInstaller({}); + const status = installer.getHookStatus('/repo'); + expect(status.installed).toBe(false); + expect(status.hookPath).toContain('post-merge'); + }); + + it('installed and current', () => { + const { installer } = makeInstaller({ + '/repo/.git/hooks/post-merge': STAMPED, + }); + const status = installer.getHookStatus('/repo'); + expect(status.installed).toBe(true); + expect(status.version).toBe(VERSION); + expect(status.current).toBe(true); + }); + + it('installed but outdated', () => { + const old = TEMPLATE.replaceAll('__WARP_HOOK_VERSION__', '7.0.0'); + const { installer } = makeInstaller({ + '/repo/.git/hooks/post-merge': old, + }); + const status = installer.getHookStatus('/repo'); + expect(status.installed).toBe(true); + expect(status.version).toBe('7.0.0'); + expect(status.current).toBe(false); + }); + + it('foreign hook shows as not installed', () => { + const { installer } = makeInstaller({ + '/repo/.git/hooks/post-merge': '#!/bin/sh\necho "other"\n', + }); + const status = installer.getHookStatus('/repo'); + expect(status.installed).toBe(false); + expect(status.foreign).toBe(true); + }); +}); + +// ── Hooks directory resolution ────────────────────────────────────────────── + +describe('hooks directory resolution', () => { + it('uses core.hooksPath when set (absolute)', () => { + const { installer, execGitConfig } = makeInstaller( + { '/tmpl/post-merge.sh': TEMPLATE }, + { hooksPath: '/custom/hooks' }, + ); + + const result = installer.install('/repo', { strategy: 'install' }); + expect(result.hookPath).toBe('/custom/hooks/post-merge'); + expect(execGitConfig).toHaveBeenCalledWith('/repo', 'core.hooksPath'); + }); + + it('resolves relative core.hooksPath against repo root', () => { + const { installer } = makeInstaller( + { '/tmpl/post-merge.sh': 
TEMPLATE }, + { hooksPath: 'my-hooks' }, + ); + + const result = installer.install('/repo', { strategy: 'install' }); + expect(result.hookPath).toBe('/repo/my-hooks/post-merge'); + }); + + it('falls back to gitDir/hooks when no core.hooksPath', () => { + const { installer } = makeInstaller( + { '/tmpl/post-merge.sh': TEMPLATE }, + { gitDir: '.git' }, + ); + + const result = installer.install('/repo', { strategy: 'install' }); + expect(result.hookPath).toContain('.git/hooks/post-merge'); + }); +}); + +// ── Template integrity ────────────────────────────────────────────────────── + +describe('template integrity', () => { + let templateContent; + + beforeEach(async () => { + const { readFileSync } = await import('node:fs'); + const { resolve, dirname } = await import('node:path'); + const { fileURLToPath } = await import('node:url'); + const dir = dirname(fileURLToPath(import.meta.url)); + const templatePath = resolve(dir, '..', '..', '..', '..', 'src', 'hooks', 'post-merge.sh'); + templateContent = readFileSync(templatePath, 'utf8'); + }); + + it('has shebang line', () => { + expect(templateContent.startsWith('#!/bin/sh')).toBe(true); + }); + + it('contains version placeholder', () => { + expect(templateContent).toContain('__WARP_HOOK_VERSION__'); + }); + + it('contains start delimiter', () => { + expect(templateContent).toContain('# --- @git-stunts/empty-graph post-merge hook'); + }); + + it('contains end delimiter', () => { + expect(templateContent).toContain('# --- end @git-stunts/empty-graph ---'); + }); + + it('contains version marker line', () => { + expect(templateContent).toContain('# warp-hook-version: __WARP_HOOK_VERSION__'); + }); + + it('contains warp.autoMaterialize config check', () => { + expect(templateContent).toContain('warp.autoMaterialize'); + }); +}); diff --git a/tsconfig.json b/tsconfig.json new file mode 100644 index 0000000..de46fc4 --- /dev/null +++ b/tsconfig.json @@ -0,0 +1,19 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": 
"ES2022", + "moduleResolution": "bundler", + "allowJs": true, + "checkJs": true, + "noEmit": true, + "strict": true, + "skipLibCheck": true, + "resolveJsonModule": true + }, + "include": [ + "src/**/*.js", + "bin/**/*.js", + "scripts/**/*.js", + "test/**/*.js" + ] +}