diff --git a/.gitignore b/.gitignore
index 59a3935..2d29a5a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -141,3 +141,4 @@ analysis*.html
# Performance analysis results
dev/perf_results/
dev/perf_plots/
+.benchmarks/
diff --git a/Makefile b/Makefile
index 56076b8..ea33967 100644
--- a/Makefile
+++ b/Makefile
@@ -1,25 +1,30 @@
# NetGraph Development Makefile
# This Makefile provides convenient shortcuts for common development tasks
-.PHONY: help dev install check check-ci lint format test qt clean docs docs-serve docs-diagrams build check-dist publish-test publish validate perf info
+.PHONY: help venv clean-venv dev install check check-ci lint format test qt clean docs docs-serve docs-diagrams build check-dist publish-test publish validate perf info check-python hooks
# Default target - show help
.DEFAULT_GOAL := help
# Toolchain (prefer project venv if present)
-VENV_BIN := $(PWD)/ngraph-venv/bin
-PYTHON := $(if $(wildcard $(VENV_BIN)/python),$(VENV_BIN)/python,python3)
-PIP := $(PYTHON) -m pip
-PYTEST := $(PYTHON) -m pytest
-RUFF := $(PYTHON) -m ruff
-PRECOMMIT := $(PYTHON) -m pre_commit
+VENV_BIN := $(PWD)/venv/bin
+# Use dynamic (recursive) assignment so a newly created venv is picked up
+# Prefer the python3 on PATH (e.g., set by setup-python)
+PY_FIND := $(shell command -v python3 2>/dev/null || command -v python 2>/dev/null)
+PYTHON ?= $(if $(wildcard $(VENV_BIN)/python),$(VENV_BIN)/python,$(PY_FIND))
+PIP = $(PYTHON) -m pip
+PYTEST = $(PYTHON) -m pytest
+RUFF = $(PYTHON) -m ruff
+PRECOMMIT = $(PYTHON) -m pre_commit
help:
@echo "🔧 NetGraph Development Commands"
@echo ""
@echo "Setup & Installation:"
- @echo " make install - Install package for usage (no dev dependencies)"
+ @echo " make venv - Create a local virtualenv (./venv)"
@echo " make dev - Full development environment (package + dev deps + hooks)"
+ @echo " make install - Install package for usage (no dev dependencies)"
+ @echo " make clean-venv - Remove virtual environment"
@echo ""
@echo "Code Quality & Testing:"
@echo " make check - Run pre-commit (auto-fix) + schema + tests, then lint"
@@ -46,11 +51,48 @@ help:
@echo ""
@echo "Utilities:"
@echo " make info - Show project information"
+ @echo " make hooks - Run pre-commit on all files"
+ @echo " make check-python - Check if venv Python matches system Python"
# Setup and Installation
dev:
@echo "🚀 Setting up development environment..."
- @bash dev/setup-dev.sh
+ @if [ ! -x "$(VENV_BIN)/python" ]; then \
+ if [ -z "$(PY_FIND)" ]; then \
+ echo "❌ Error: No Python interpreter found (python3 or python)"; \
+ exit 1; \
+ fi; \
+ echo "🐍 Creating virtual environment with $(PY_FIND) ..."; \
+ $(PY_FIND) -m venv venv || { echo "❌ Failed to create venv"; exit 1; }; \
+ if [ ! -x "$(VENV_BIN)/python" ]; then \
+ echo "❌ Error: venv creation failed - $(VENV_BIN)/python not found"; \
+ exit 1; \
+ fi; \
+ $(VENV_BIN)/python -m pip install -U pip wheel; \
+ fi
+ @echo "📦 Installing dev dependencies..."
+ @$(VENV_BIN)/python -m pip install -e .'[dev]'
+ @echo "🔗 Installing pre-commit hooks..."
+ @$(VENV_BIN)/python -m pre_commit install --install-hooks
+ @echo "✅ Dev environment ready. Activate with: source venv/bin/activate"
+ @$(MAKE) check-python
+
+venv:
+ @echo "🐍 Creating virtual environment in ./venv ..."
+ @if [ -z "$(PY_FIND)" ]; then \
+ echo "❌ Error: No Python interpreter found (python3 or python)"; \
+ exit 1; \
+ fi
+ @$(PY_FIND) -m venv venv || { echo "❌ Failed to create venv"; exit 1; }
+ @if [ ! -x "$(VENV_BIN)/python" ]; then \
+ echo "❌ Error: venv creation failed - $(VENV_BIN)/python not found"; \
+ exit 1; \
+ fi
+ @$(VENV_BIN)/python -m pip install -U pip wheel
+ @echo "✅ venv ready. Activate with: source venv/bin/activate"
+
+clean-venv:
+ @rm -rf venv/
install:
@echo "📦 Installing package for usage (no dev dependencies)..."
@@ -184,7 +226,9 @@ info:
@echo "================================"
@echo ""
@echo "🐍 Python Environment:"
- @echo " Python version: $$($(PYTHON) --version)"
+ @echo " Python (active): $$($(PYTHON) --version)"
+ @echo " Python (system): $$($(PY_FIND) --version 2>/dev/null || echo 'missing')"
+ @$(MAKE) check-python
@echo " Package version: $$($(PYTHON) -c 'import importlib.metadata; print(importlib.metadata.version("ngraph"))' 2>/dev/null || echo 'Not installed')"
@echo " Virtual environment: $$(echo $$VIRTUAL_ENV | sed 's|.*/||' || echo 'None active')"
@echo ""
@@ -208,3 +252,17 @@ info:
echo " ... and $$(( $$(git status --porcelain | wc -l | tr -d ' ') - 5 )) more"; \
fi; \
fi
+
+hooks:
+ @echo "🔗 Running pre-commit on all files..."
+ @$(PRECOMMIT) run --all-files || (echo "Some pre-commit hooks failed. Fix and re-run." && exit 1)
+
+check-python:
+ @if [ -x "$(VENV_BIN)/python" ]; then \
+ VENV_VER=$$($(VENV_BIN)/python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')" 2>/dev/null || echo "unknown"); \
+ SYS_VER=$$($(PY_FIND) -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')" 2>/dev/null || echo "unknown"); \
+ if [ -n "$$VENV_VER" ] && [ -n "$$SYS_VER" ] && [ "$$VENV_VER" != "$$SYS_VER" ]; then \
+ echo "⚠️ WARNING: venv Python ($$VENV_VER) != system Python ($$SYS_VER)"; \
+ echo " Run 'make clean-venv && make dev' to recreate venv with system Python"; \
+ fi; \
+ fi
diff --git a/README.md b/README.md
index 63d5034..22818d1 100644
--- a/README.md
+++ b/README.md
@@ -2,137 +2,187 @@
[![Python-test](https://github.com/networmix/NetGraph/actions/workflows/python-test.yml/badge.svg)](https://github.com/networmix/NetGraph/actions/workflows/python-test.yml)
-NetGraph is a scenario-driven network modeling and analysis framework in Python.
-Define topology, traffic, failures, and workflows in YAML; run analyses from the
-CLI or Python API. It scales from small graphs to DC fabrics and WAN backbones.
+NetGraph is a scenario-driven network modeling and analysis framework combining Python's flexibility with high-performance C++ algorithms.
-## Highlights
+## Overview
-- Declarative DSL with schema validation (topology, failures, traffic, workflow)
-- Blueprints and adjacency rules for concise, reusable topologies
-- Strict multidigraph with unique and stable edge IDs; subclass of NetworkX's MultiDiGraph preserving NetworkX APIs
-- Read-only NetworkView overlays with node and link masking for failure simulation
-- Failure policies with weighted failure modes and multiple policy rules per mode
-- Max-flow and demand placement with configurable flow placement strategies to simulate ECMP/WCMP and TE behavior in IP/MPLS networks
-- Export of results to structured JSON for downstream analysis
-- CLI and complete Python API
-- High test coverage
+NetGraph enables declarative modeling of network topologies, traffic matrices, and failure scenarios. It delegates computationally intensive graph algorithms to [NetGraph-Core](https://github.com/networmix/NetGraph-Core) while providing a rich Python API and CLI for orchestration.
-See [Design](https://networmix.github.io/NetGraph/reference/design/) for more details on the internal design of NetGraph.
+## Architecture
-## Example Scenario (Excerpt)
+NetGraph employs a **hybrid Python+C++ architecture**:
-```yaml
-seed: 42
-blueprints:
- Clos_L16_S4:
- groups:
- spine: {node_count: 4, name_template: spine{node_num}}
- leaf: {node_count: 16, name_template: leaf{node_num}}
- adjacency:
- - source: /leaf
- target: /spine
- pattern: mesh
- link_params: {capacity: 3200, cost: 1}
- DCRegion:
- groups:
- dc: {node_count: 1, name_template: dc, attrs: {role: dc}}
-network:
- groups:
- metro1/pop[1-2]: {use_blueprint: Clos_L16_S4}
- metro1/dc[1-1]: {use_blueprint: DCRegion}
- adjacency:
- - source: {path: metro1/pop1}
- target: {path: metro1/dc1}
- pattern: one_to_one
- link_params: {capacity: 2000.0, cost: 1}
-workflow:
-- step_type: NetworkStats
- name: network_statistics
-- step_type: MaximumSupportedDemand
- name: msd_baseline
- matrix_name: baseline_traffic_matrix
-- step_type: TrafficMatrixPlacement
- name: tm_placement
- matrix_name: baseline_traffic_matrix
-```
+- **Python layer (NetGraph)**: Scenario DSL parsing, workflow orchestration, result aggregation, and high-level APIs.
+- **C++ layer (NetGraph-Core)**: Performance-critical graph algorithms (SPF, KSP, Max-Flow) executing in optimized C++ with the GIL released (see the sketch below).
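+
+A minimal sketch of the delegation pattern, assuming a `network` built from a scenario (the `netgraph_core` calls mirror those used in `dev/perf/runner.py`; treat exact signatures as illustrative):
+
+```python
+import netgraph_core
+
+# Python layer: convert the model into a Core graph handle plus ID mappers
+graph_handle, multidigraph, edge_mapper, node_mapper = network.build_core_graph()
+
+# C++ layer: run SPF against the Core backend (the GIL is released during the call)
+backend = netgraph_core.Backend.cpu()
+algs = netgraph_core.Algorithms(backend)
+selection = netgraph_core.EdgeSelection(
+    multi_edge=True,
+    require_capacity=False,
+    tie_break=netgraph_core.EdgeTieBreak.DETERMINISTIC,
+)
+result = algs.spf(graph_handle, 0, selection=selection)  # source node ID 0
+```
+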
-See the full scenario at [scenarios/backbone_clos.yml](scenarios/backbone_clos.yml).
+## Key Features
-## Quick Start
+### 1. Modeling & DSL
+
+- **Declarative Scenarios**: Define topology, traffic, and workflows in validated YAML.
+- **Blueprints**: Reusable topology templates (e.g., Clos fabrics, regions) with parameterized expansion.
+- **Strict Multigraph**: Deterministic graph representation with stable edge IDs.
+
+### 2. Failure Analysis
+
+- **Policy Engine**: Weighted failure modes with multiple policy rules per mode.
+- **Non-Destructive**: Runtime exclusions simulate failures without modifying the base topology (see the sketch below).
+- **Risk Groups**: Model shared fate (e.g., fiber cuts, power zones).
+
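+A minimal sketch of a runtime exclusion, assuming a loaded scenario's `network` (usage mirrors `docs/examples/basic.md`):
+
+```python
+from ngraph.solver.maxflow import max_flow
+
+# Exclude matching links at query time; the base topology is untouched
+failed = {link_id for link_id, link in network.links.items()
+          if link.source == "A" and link.target == "D"}
+degraded = max_flow(network, source_path="A", sink_path="C", excluded_links=failed)
+```
+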
+### 3. Traffic Engineering
+
+- **Routing Modes**: Unified modeling of **IP Routing** (static costs, oblivious to congestion) and **Traffic Engineering** (dynamic residuals, congestion-aware).
+- **Flow Placement**: Strategies for **ECMP** (Equal-Cost Multi-Path) and **WCMP** (Weighted Cost Multi-Path); see the sketch below.
+- **Capacity Analysis**: Compute max-flow envelopes and demand allocation with configurable placement policies.
+
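+A short sketch comparing the two placement strategies on the same query, assuming a `network` (usage mirrors `docs/examples/clos-fabric.md`):
+
+```python
+from ngraph.types.base import FlowPlacement
+from ngraph.solver.maxflow import max_flow
+
+# ECMP: split flow equally across equal-cost next hops
+ecmp = max_flow(network, source_path="A", sink_path="C",
+                shortest_path=True, flow_placement=FlowPlacement.EQUAL_BALANCED)
+# WCMP: split flow proportionally to link capacity
+wcmp = max_flow(network, source_path="A", sink_path="C",
+                shortest_path=True, flow_placement=FlowPlacement.PROPORTIONAL)
+```
+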
+### 4. Workflow & Integration
-### Install (PyPI package)
+- **Structured Results**: Export analysis artifacts to JSON for downstream processing.
+- **CLI**: Comprehensive command-line interface for validation and execution.
+- **Python API**: Full programmatic access to all modeling and solving capabilities.
+
+## Installation
+
+### From PyPI
```bash
pip install ngraph
```
-### Install (from source on GitHub)
+### From Source
```bash
git clone https://github.com/networmix/NetGraph
cd NetGraph
-make dev # install in editable mode
-make check # run all checks
+make dev # Install in editable mode with dev dependencies
+make check # Run pre-commit checks, schema validation, and tests
```
-### CLI
+## Quick Start
+
+### CLI Usage
```bash
-# Inspect a scenario (validate and preview)
+# Validate and inspect a scenario
ngraph inspect scenarios/backbone_clos.yml --detail
-# Run a scenario and save results
+# Run analysis workflow
ngraph run scenarios/backbone_clos.yml --results clos.results.json
```
-### Python API (MaxFlow quick demo)
+### Python API
```python
from ngraph.scenario import Scenario
-from ngraph.algorithms.base import FlowPlacement
+from ngraph.solver.maxflow import max_flow
-scenario_yaml = """
-seed: 1234
+# Load scenario
+scenario = Scenario.from_yaml("""
network:
- nodes: {A: {}, B: {}, C: {}, D: {}}
+ nodes: {A: {}, B: {}, C: {}}
links:
- - {source: A, target: B, link_params: {capacity: 1, cost: 1}}
- - {source: A, target: B, link_params: {capacity: 2, cost: 1}}
- - {source: B, target: C, link_params: {capacity: 1, cost: 1}}
- - {source: B, target: C, link_params: {capacity: 2, cost: 1}}
- - {source: A, target: D, link_params: {capacity: 3, cost: 2}}
- - {source: D, target: C, link_params: {capacity: 3, cost: 2}}
-"""
-scenario = Scenario.from_yaml(scenario_yaml)
-network = scenario.network
-
-print(network.max_flow("A", "C")) # {('A', 'C'): 6.0}
-print(network.max_flow("A", "C", shortest_path=True)) # {('A', 'C'): 3.0}
-print(
- network.max_flow(
- "A",
- "C",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
-) # {('A', 'C'): 2.0}
-
-res = network.max_flow_with_summary("A", "C")
-print({k: (v[0], v[1].cost_distribution) for k, v in res.items()})
-# {('A', 'C'): (6.0, {2.0: 3.0, 4.0: 3.0})}
+ - {source: A, target: B, link_params: {capacity: 10, cost: 1}}
+ - {source: B, target: C, link_params: {capacity: 10, cost: 1}}
+""")
+
+# Compute max flow
+flow = max_flow(scenario.network, source_path="A", sink_path="C", shortest_path=True)
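+# Expected result: {('A', 'C'): 10.0}, since both links have capacity 10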
+print(f"Max flow: {flow}")
```
+## Example Scenario
+
+NetGraph scenarios define topology, configuration, and analysis steps in a unified YAML file. This example demonstrates **blueprints** for modular topology definition:
+
+```yaml
+seed: 42
+
+# Define reusable topology templates
+blueprints:
+ Clos_Fabric:
+ groups:
+ spine: {node_count: 2, name_template: "spine{node_num}"}
+ leaf: {node_count: 4, name_template: "leaf{node_num}"}
+ adjacency:
+ - source: /leaf
+ target: /spine
+ pattern: mesh
+ link_params: {capacity: 100, cost: 1}
+ - source: /spine
+ target: /leaf
+ pattern: mesh
+ link_params: {capacity: 100, cost: 1}
+
+# Instantiate network from templates
+network:
+ groups:
+ site1: {use_blueprint: Clos_Fabric}
+ site2: {use_blueprint: Clos_Fabric}
+ adjacency:
+ - source: {path: site1/spine}
+ target: {path: site2/spine}
+ pattern: one_to_one
+ link_params: {capacity: 50, cost: 10}
+
+# Define traffic matrix
+traffic_matrix_set:
+ global_traffic:
+ - source_path: ^site1/leaf/
+ sink_path: ^site2/leaf/
+ demand: 100.0
+ mode: combine
+ flow_policy_config: SHORTEST_PATHS_ECMP
+
+# Define analysis workflow
+workflow:
+- step_type: NetworkStats
+ name: stats
+- step_type: MaxFlow
+ name: site_capacity
+ source_path: ^site1/leaf/
+ sink_path: ^site2/leaf/
+ mode: combine
+ shortest_path: false
+- step_type: MaximumSupportedDemand
+ name: max_demand
+ matrix_name: global_traffic
+```
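+
+Save this as a YAML file and execute its workflow with the CLI (the file name here is illustrative):
+
+```bash
+ngraph run my_scenario.yml --results my_scenario.results.json
+```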
+
+## Repository Structure
+
+```
+ngraph/ # Python package source
+ dsl/ # Scenario parsing and blueprint expansion
+ model/ # Network and flow domain models
+ solver/ # Algorithms and Core wrappers
+ workflow/ # Analysis steps and orchestration
+scenarios/ # Example scenario definitions
+tests/ # Pytest suite (unit and integration)
+docs/ # Documentation source (MkDocs)
+dev/ # Development tools and scripts
+```
+
+## Development
+
+```bash
+make dev # Set up development environment
+make check # Run pre-commit checks, schema validation, and tests
+make lint # Run linting only
+make test # Run tests only
+make docs-serve # Preview documentation
+```
+
+## Requirements
+
+- **Python**: 3.11+
+- **NetGraph-Core**: C++ backend (a compatible version is installed automatically as a dependency)
+
## Documentation
-- **Documentation site**: [networmix.github.io/NetGraph](https://networmix.github.io/NetGraph/)
-- **Installation**: [Getting started — Installation](https://networmix.github.io/NetGraph/getting-started/installation/)
-- **Tutorial**: [Getting started — Tutorial](https://networmix.github.io/NetGraph/getting-started/tutorial/)
-- **Basic example**: [Examples — Basic](https://networmix.github.io/NetGraph/examples/basic/)
-- **DSL reference**: [Reference — DSL](https://networmix.github.io/NetGraph/reference/dsl/)
-- **Workflow reference**: [Reference — Workflow](https://networmix.github.io/NetGraph/reference/workflow/)
-- **CLI reference**: [Reference — CLI](https://networmix.github.io/NetGraph/reference/cli/)
-- **API reference**: [Reference — API](https://networmix.github.io/NetGraph/reference/api/)
+- **Site**: [networmix.github.io/NetGraph](https://networmix.github.io/NetGraph/)
+- **Tutorial**: [Getting Started](https://networmix.github.io/NetGraph/getting-started/tutorial/)
+- **Reference**: [API](https://networmix.github.io/NetGraph/reference/api/) | [CLI](https://networmix.github.io/NetGraph/reference/cli/) | [DSL](https://networmix.github.io/NetGraph/reference/dsl/)
## License
diff --git a/dev/dev.md b/dev/dev.md
index 35369dd..6979a49 100644
--- a/dev/dev.md
+++ b/dev/dev.md
@@ -24,7 +24,6 @@ make docs-serve # Serve docs locally
pyproject.toml # Package config, dependencies, tool settings
Makefile # Development commands
.pre-commit-config.yaml # Code quality hooks
-dev/setup-dev.sh # Development environment setup script
dev/run-checks.sh # Manual code quality checks
```
diff --git a/dev/perf/runner.py b/dev/perf/runner.py
index b144c89..4a03a40 100644
--- a/dev/perf/runner.py
+++ b/dev/perf/runner.py
@@ -8,11 +8,9 @@
import time
from typing import Any, Callable
+import netgraph_core
import networkx as nx
-from ngraph.algorithms.max_flow import calc_max_flow
-from ngraph.algorithms.spf import spf
-
from .core import (
BenchmarkCase,
BenchmarkProfile,
@@ -75,9 +73,9 @@ def _time_func(func: Callable[[], Any], runs: int) -> dict[str, float]:
def _execute_spf_benchmark(case: BenchmarkCase, iterations: int) -> BenchmarkSample:
- """Execute SPF benchmark for a given case.
+ """Execute SPF benchmark for a given case using NetGraph-Core.
- Creates network/graph once and reuses it across iterations to reduce variance.
+ Creates network and Core graph once outside timing loop to reduce variance.
Uses the first node as the source for shortest path calculation.
Args:
@@ -89,16 +87,27 @@ def _execute_spf_benchmark(case: BenchmarkCase, iterations: int) -> BenchmarkSam
"""
topology: Topology = case.inputs["topology"]
- # Create network and graph once outside timing loop
+ # Create network and Core graph once outside timing loop
network = topology.create_network()
- graph = network.to_strict_multidigraph()
+ graph_handle, multidigraph, edge_mapper, node_mapper = network.build_core_graph()
- # Use first node as source for SPF
- source = next(iter(graph.nodes))
+ # Create Core backend and algorithms
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+
+ # Use first node (ID 0) as source for SPF
+ source_id = 0
+
+ # Create edge selection for all min-cost edges
+ edge_selection = netgraph_core.EdgeSelection(
+ multi_edge=True,
+ require_capacity=False,
+ tie_break=netgraph_core.EdgeTieBreak.DETERMINISTIC,
+ )
# Create a closure that captures the graph and source
def run_spf():
- return spf(graph, source)
+ return algs.spf(graph_handle, source_id, selection=edge_selection)
# Time the SPF execution
timing_stats = _time_func(run_spf, iterations)
@@ -119,10 +128,13 @@ def run_spf():
def _execute_spf_networkx_benchmark(
case: BenchmarkCase, iterations: int
) -> BenchmarkSample:
- """Execute SPF benchmark for a given case.
+ """Execute SPF benchmark using NetworkX for comparison.
- Creates network/graph once and reuses it across iterations to reduce variance.
- Uses the first node as the source for shortest path calculation.
+ Creates network and NetworkX MultiDiGraph once outside timing loop to reduce
+ variance. Uses the first node as the source for shortest path calculation.
+
+ Note: This benchmarks NetworkX's dijkstra_predecessor_and_distance for
+ direct comparison with NetGraph-Core's SPF implementation.
Args:
case: Benchmark case containing topology and configuration.
@@ -133,16 +145,35 @@ def _execute_spf_networkx_benchmark(
"""
topology: Topology = case.inputs["topology"]
- # Create network and graph once outside timing loop
+ # Create network once outside timing loop
network = topology.create_network()
- graph = network.to_strict_multidigraph()
+
+ # Build NetworkX MultiDiGraph manually for NetworkX algorithms
+ nx_graph = nx.MultiDiGraph()
+
+ # Add nodes
+ for node_name, node in network.nodes.items():
+ if not node.disabled:
+ nx_graph.add_node(node_name)
+
+ # Add edges (with reverse edges for bidirectional connectivity)
+ for _, link in network.links.items():
+ if not link.disabled:
+ # Forward edge
+ nx_graph.add_edge(
+ link.source, link.target, capacity=link.capacity, cost=link.cost
+ )
+ # Reverse edge
+ nx_graph.add_edge(
+ link.target, link.source, capacity=link.capacity, cost=link.cost
+ )
# Use first node as source for SPF
- source = next(iter(graph.nodes))
+ source = next(iter(nx_graph.nodes))
# Create a closure that captures the graph and source
def run_spf():
- return nx.dijkstra_predecessor_and_distance(graph, source)
+ return nx.dijkstra_predecessor_and_distance(nx_graph, source, weight="cost")
# Time the SPF execution
timing_stats = _time_func(run_spf, iterations)
@@ -163,19 +194,40 @@ def run_spf():
def _execute_max_flow_benchmark(
case: BenchmarkCase, iterations: int
) -> BenchmarkSample:
- """Execute max flow benchmark for a given case."""
+ """Execute max flow benchmark using NetGraph-Core.
+
+ Creates network and Core graph once outside timing loop to reduce variance.
+ Uses first node as source and last node as sink for maximum path length.
+
+ Args:
+ case: Benchmark case containing topology and configuration.
+ iterations: Number of timing iterations to perform.
+
+ Returns:
+ BenchmarkSample with timing statistics and metadata.
+ """
topology: Topology = case.inputs["topology"]
network = topology.create_network()
- graph = network.to_strict_multidigraph()
+ graph_handle, multidigraph, edge_mapper, node_mapper = network.build_core_graph()
- # Use first node as source and last node as sink
- nodes = list(graph.nodes)
- source = nodes[0]
- sink = nodes[-1]
+ # Create Core backend and algorithms
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
- # Create a closure that captures the graph and source
+ # Use first node as source and last node as sink for maximum path length
+ source_id = 0
+ sink_id = multidigraph.num_nodes() - 1
+
+ # Create a closure that captures the graph handle and node IDs
def run_max_flow():
- return calc_max_flow(graph, source, sink)
+ flow_value, _ = algs.max_flow(
+ graph_handle,
+ source_id,
+ sink_id,
+ flow_placement=netgraph_core.FlowPlacement.PROPORTIONAL,
+ shortest_path=False,
+ )
+ return flow_value
# Time the max flow execution
timing_stats = _time_func(run_max_flow, iterations)
diff --git a/dev/setup-dev.sh b/dev/setup-dev.sh
deleted file mode 100755
index 5506179..0000000
--- a/dev/setup-dev.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-# Setup script for developers
-
-set -euo pipefail
-
-echo "🔧 Setting up development environment..."
-
-# Choose Python interpreter
-PYTHON="python3"
-if ! command -v "$PYTHON" >/dev/null 2>&1; then
- echo "❌ python3 not found. Please install Python 3.11+ and re-run."
- exit 1
-fi
-
-# Create virtual environment if missing
-if [ ! -x "ngraph-venv/bin/python" ]; then
- echo "🧰 Creating virtual environment at ./ngraph-venv ..."
- "$PYTHON" -m venv ngraph-venv
-fi
-
-# Activate venv for this script and upgrade pip
-source ngraph-venv/bin/activate
-PYTHON_VENV="$(command -v python)"
-
-echo "⬆️ Upgrading pip/setuptools/wheel in venv..."
-"$PYTHON_VENV" -m pip install -U pip setuptools wheel
-
-# Install the package with dev dependencies in venv
-echo "📦 Installing package with dev dependencies..."
-"$PYTHON_VENV" -m pip install -e '.[dev]'
-
-# Install pre-commit hooks
-echo "🪝 Installing pre-commit hooks..."
-"$PYTHON_VENV" -m pre_commit install
-
-# Run pre-commit on all files to ensure everything is set up correctly
-echo "✅ Running pre-commit checks..."
-"$PYTHON_VENV" -m pre_commit run --all-files || true
-
-echo "🎉 Development environment setup complete!"
-echo ""
-echo "👉 To use the environment in your shell, run:"
-echo " source ngraph-venv/bin/activate # ✅ activates venv"
-echo ""
-echo "💡 Useful commands:"
-echo " make check # ✅ ruff + pyright + schema + tests"
-echo " make qt # ✅ quick tests (no slow/benchmark)"
-echo " make docs # ✅ regenerate API docs"
diff --git a/dev/validate_basic_example.py b/dev/validate_basic_example.py
deleted file mode 100644
index 895e8a1..0000000
--- a/dev/validate_basic_example.py
+++ /dev/null
@@ -1,122 +0,0 @@
-"""Validate docs/examples/basic.md code and expected outputs.
-
-Runs the example scenario, checks flow values and cost distribution, and
-verifies sensitivity helpers return sensible structures. Exits non-zero on
-assertion failure.
-"""
-
-from __future__ import annotations
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.algorithms.max_flow import run_sensitivity, saturated_edges
-from ngraph.scenario import Scenario
-
-
-def main() -> None:
- scenario_yaml = """
-seed: 1234
-
-network:
- name: "fundamentals_example"
-
- nodes:
- A: {}
- B: {}
- C: {}
- D: {}
-
- links:
- - source: A
- target: B
- link_params:
- capacity: 1
- cost: 1
- - source: A
- target: B
- link_params:
- capacity: 2
- cost: 1
- - source: B
- target: C
- link_params:
- capacity: 1
- cost: 1
- - source: B
- target: C
- link_params:
- capacity: 2
- cost: 1
- - source: A
- target: D
- link_params:
- capacity: 3
- cost: 2
- - source: D
- target: C
- link_params:
- capacity: 3
- cost: 2
-"""
-
- scenario = Scenario.from_yaml(scenario_yaml)
- network = scenario.network
-
- # 1) True maximum flow (all paths)
- max_flow_all = network.max_flow(source_path="A", sink_path="C")
- assert isinstance(max_flow_all, dict)
- assert len(max_flow_all) == 1
- value_all = float(next(iter(max_flow_all.values())))
- print("Maximum flow (all paths):", max_flow_all)
- assert value_all == 6.0
-
- # 2) Shortest paths only
- max_flow_shortest = network.max_flow(
- source_path="A", sink_path="C", shortest_path=True
- )
- assert isinstance(max_flow_shortest, dict)
- value_shortest = float(next(iter(max_flow_shortest.values())))
- print("Flow on shortest paths:", max_flow_shortest)
- assert value_shortest == 3.0
-
- # 3) Equal-balanced on shortest paths
- max_flow_balanced = network.max_flow(
- source_path="A",
- sink_path="C",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
- assert isinstance(max_flow_balanced, dict)
- value_balanced = float(next(iter(max_flow_balanced.values())))
- print("Equal-balanced flow:", max_flow_balanced)
- # Equal-split over two parallel edges limited by the smallest edge (1+1)
- assert value_balanced == 2.0
- assert value_balanced <= value_shortest
-
- # 4) Cost distribution under combine mode
- with_summary = network.max_flow_with_summary(
- source_path="A", sink_path="C", mode="combine"
- )
- ((_, _), (flow_value, summary)) = next(iter(with_summary.items()))
- assert abs(flow_value - value_all) < 1e-9
- cd = {float(k): float(v) for k, v in summary.cost_distribution.items()}
- print("Cost distribution:", cd)
- # Expect 3.0 at cost 2 (A-B-C), and 3.0 at cost 4 (A-D-C)
- assert cd == {2.0: 3.0, 4.0: 3.0}
-
- # 5) Sensitivity helpers
- graph = network.to_strict_multidigraph()
- bottlenecks = saturated_edges(graph, "A", "C")
- assert isinstance(bottlenecks, list)
- assert len(bottlenecks) > 0
-
- s_inc = run_sensitivity(graph, "A", "C", change_amount=1.0)
- s_dec = run_sensitivity(graph, "A", "C", change_amount=-1.0)
- assert isinstance(s_inc, dict) and isinstance(s_dec, dict)
- assert s_inc and s_dec
- # Increasing capacity should not reduce flow; decreasing should not increase
- assert all(v >= 0 for v in s_inc.values())
- assert all(v <= 0 for v in s_dec.values())
-
-
-if __name__ == "__main__":
- main()
diff --git a/dev/validate_clos_example.py b/dev/validate_clos_example.py
deleted file mode 100644
index 889a117..0000000
--- a/dev/validate_clos_example.py
+++ /dev/null
@@ -1,111 +0,0 @@
-"""Validate docs/examples/clos-fabric.md code and key outputs.
-
-Asserts the ECMP combine shortest-path example returns a positive flow
-and checks ECMP vs WCMP relationship for the constructed topology.
-"""
-
-from __future__ import annotations
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.scenario import Scenario
-
-
-def main() -> None:
- scenario_yaml = """
-blueprints:
- brick_2tier:
- groups:
- t1:
- node_count: 8
- name_template: t1-{node_num}
- t2:
- node_count: 8
- name_template: t2-{node_num}
-
- adjacency:
- - source: /t1
- target: /t2
- pattern: mesh
- link_params:
- capacity: 2
- cost: 1
-
- 3tier_clos:
- groups:
- b1:
- use_blueprint: brick_2tier
- b2:
- use_blueprint: brick_2tier
- spine:
- node_count: 64
- name_template: t3-{node_num}
-
- adjacency:
- - source: b1/t2
- target: spine
- pattern: one_to_one
- link_params:
- capacity: 2
- cost: 1
- - source: b2/t2
- target: spine
- pattern: one_to_one
- link_params:
- capacity: 2
- cost: 1
-
-network:
- name: "3tier_clos_network"
- version: 1.0
-
- groups:
- my_clos1:
- use_blueprint: 3tier_clos
-
- my_clos2:
- use_blueprint: 3tier_clos
-
- adjacency:
- - source: my_clos1/spine
- target: my_clos2/spine
- pattern: one_to_one
- link_count: 4
- link_params:
- capacity: 1
- cost: 1
-"""
-
- scenario = Scenario.from_yaml(scenario_yaml)
- network = scenario.network
-
- # ECMP on shortest paths, combine mode
- max_flow_ecmp = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
- assert isinstance(max_flow_ecmp, dict)
- assert len(max_flow_ecmp) == 1
- flow_ecmp = float(next(iter(max_flow_ecmp.values())))
- print("ECMP combine shortest flow:", max_flow_ecmp)
- assert flow_ecmp > 0
-
- # Compare ECMP vs WCMP (both on shortest paths, combine)
- max_flow_wcmp = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
- flow_wcmp = float(next(iter(max_flow_wcmp.values())))
- print("WCMP combine shortest flow:", max_flow_wcmp)
-
- # In general WCMP >= ECMP in capacity usage under equal-cost parallelism
- assert flow_wcmp >= flow_ecmp
-
-
-if __name__ == "__main__":
- main()
diff --git a/dev/validate_clos_failures_example.py b/dev/validate_clos_failures_example.py
deleted file mode 100644
index 0cb791f..0000000
--- a/dev/validate_clos_failures_example.py
+++ /dev/null
@@ -1,165 +0,0 @@
-"""Validate 'Impact of Link Failures' example for Clos fabric.
-
-We emulate partial link degradation on inter-spine links by setting uneven
-capacities within each parallel set to demonstrate ECMP vs WCMP difference.
-"""
-
-from __future__ import annotations
-
-from collections import defaultdict
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.scenario import Scenario
-
-
-def build_scenario() -> Scenario:
- scenario_yaml = """
-blueprints:
- brick_2tier:
- groups:
- t1:
- node_count: 8
- name_template: t1-{node_num}
- t2:
- node_count: 8
- name_template: t2-{node_num}
-
- adjacency:
- - source: /t1
- target: /t2
- pattern: mesh
- link_params:
- capacity: 2
- cost: 1
-
- 3tier_clos:
- groups:
- b1:
- use_blueprint: brick_2tier
- b2:
- use_blueprint: brick_2tier
- spine:
- node_count: 64
- name_template: t3-{node_num}
-
- adjacency:
- - source: b1/t2
- target: spine
- pattern: one_to_one
- link_params:
- capacity: 2
- cost: 1
- - source: b2/t2
- target: spine
- pattern: one_to_one
- link_params:
- capacity: 2
- cost: 1
-
-network:
- name: "3tier_clos_network"
- version: 1.0
-
- groups:
- my_clos1:
- use_blueprint: 3tier_clos
-
- my_clos2:
- use_blueprint: 3tier_clos
-
- adjacency:
- - source: my_clos1/spine
- target: my_clos2/spine
- pattern: one_to_one
- link_count: 4
- link_params:
- capacity: 1
- cost: 1
-"""
- return Scenario.from_yaml(scenario_yaml)
-
-
-def degrade_inter_spine_links(scenario: Scenario) -> None:
- """Make capacities uneven across parallel inter-spine links.
-
- For each (source,target) inter-spine pair, set one parallel link to
- capacity=4 and the remaining to capacity=1. This keeps equal-cost
- shortest paths but creates capacity disparity that ECMP vs WCMP handles
- differently.
- """
- net = scenario.network
- # Group parallel links by (source,target)
- groups: dict[tuple[str, str], list] = defaultdict(list)
- for link in net.links.values():
- s = link.source
- t = link.target
- if (s.startswith("my_clos1/spine") and t.startswith("my_clos2/spine")) or (
- s.startswith("my_clos2/spine") and t.startswith("my_clos1/spine")
- ):
- groups[(s, t)].append(link)
-
- # Deterministic ordering
- for key in sorted(groups.keys()):
- links = sorted(groups[key], key=lambda lk: (lk.source, lk.target, id(lk)))
- # Ensure we only modify groups that came from the inter-spine adjacency
- if not links:
- continue
- # First link gets higher capacity, rest get low capacity
- links[0].capacity = 4.0
- for lk in links[1:]:
- lk.capacity = 1.0
-
-
-def main() -> None:
- scenario = build_scenario()
- network = scenario.network
-
- # Baseline ECMP/WCMP with symmetric capacities
- base_ecmp = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
- base_wcmp = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
- v_base_ecmp = float(next(iter(base_ecmp.values())))
- v_base_wcmp = float(next(iter(base_wcmp.values())))
- print("Baseline ECMP:", v_base_ecmp)
- print("Baseline WCMP:", v_base_wcmp)
-
- # Apply uneven capacities across parallel inter-spine links
- degrade_inter_spine_links(scenario)
-
- # Recompute flows
- ecmp = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
- wcmp = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
- v_ecmp = float(next(iter(ecmp.values())))
- v_wcmp = float(next(iter(wcmp.values())))
- print("After uneven capacities - ECMP:", ecmp)
- print("After uneven capacities - WCMP:", wcmp)
-
- # Expect WCMP >= ECMP
- assert v_wcmp >= v_ecmp
-
-
-if __name__ == "__main__":
- main()
diff --git a/dev/validate_clos_wcmp_vs_ecmp.py b/dev/validate_clos_wcmp_vs_ecmp.py
deleted file mode 100644
index fd46989..0000000
--- a/dev/validate_clos_wcmp_vs_ecmp.py
+++ /dev/null
@@ -1,141 +0,0 @@
-from __future__ import annotations
-
-from collections import defaultdict
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.scenario import Scenario
-
-
-def build_scenario() -> Scenario:
- scenario_yaml = """
-blueprints:
- brick_2tier:
- groups:
- t1:
- node_count: 8
- name_template: t1-{node_num}
- t2:
- node_count: 8
- name_template: t2-{node_num}
-
- adjacency:
- - source: /t1
- target: /t2
- pattern: mesh
- link_params:
- capacity: 2
- cost: 1
-
- 3tier_clos:
- groups:
- b1:
- use_blueprint: brick_2tier
- b2:
- use_blueprint: brick_2tier
- spine:
- node_count: 64
- name_template: t3-{node_num}
-
- adjacency:
- - source: b1/t2
- target: spine
- pattern: one_to_one
- link_params:
- capacity: 2
- cost: 1
- - source: b2/t2
- target: spine
- pattern: one_to_one
- link_params:
- capacity: 2
- cost: 1
-
-network:
- name: "3tier_clos_network"
- version: 1.0
-
- groups:
- my_clos1:
- use_blueprint: 3tier_clos
-
- my_clos2:
- use_blueprint: 3tier_clos
-
- adjacency:
- - source: my_clos1/spine
- target: my_clos2/spine
- pattern: one_to_one
- link_count: 4
- link_params:
- capacity: 1
- cost: 1
-"""
- return Scenario.from_yaml(scenario_yaml)
-
-
-def make_uneven_parallel_caps(scenario: Scenario) -> None:
- net = scenario.network
- groups: dict[tuple[str, str], list] = defaultdict(list)
- for link in net.links.values():
- s, t = link.source, link.target
- if (s.startswith("my_clos1/spine") and t.startswith("my_clos2/spine")) or (
- s.startswith("my_clos2/spine") and t.startswith("my_clos1/spine")
- ):
- groups[(s, t)].append(link)
-
- for idx, key in enumerate(sorted(groups.keys())):
- links = sorted(groups[key], key=lambda lk: (lk.source, lk.target, id(lk)))
- # Create heavy imbalance among the four parallel links
- caps = [4.0, 0.25, 0.25, 0.25] if idx % 2 == 0 else [2.0, 1.0, 0.5, 0.25]
- for lk, cap in zip(links, caps, strict=False):
- lk.capacity = cap
-
-
-def main() -> None:
- scenario = build_scenario()
- network = scenario.network
-
- base_ecmp = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
- base_wcmp = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
- v_base_ecmp = float(next(iter(base_ecmp.values())))
- v_base_wcmp = float(next(iter(base_wcmp.values())))
- print("Baseline ECMP:", v_base_ecmp)
- print("Baseline WCMP:", v_base_wcmp)
-
- make_uneven_parallel_caps(scenario)
-
- ecmp = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
- wcmp = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
- v_ecmp = float(next(iter(ecmp.values())))
- v_wcmp = float(next(iter(wcmp.values())))
- print("Uneven ECMP:", v_ecmp)
- print("Uneven WCMP:", v_wcmp)
- assert v_wcmp >= v_ecmp
-
-
-if __name__ == "__main__":
- main()
diff --git a/docs/assets/diagrams/system_pipeline.dot b/docs/assets/diagrams/system_pipeline.dot
index 1dbc8eb..add6cc3 100644
--- a/docs/assets/diagrams/system_pipeline.dot
+++ b/docs/assets/diagrams/system_pipeline.dot
@@ -1,62 +1,76 @@
digraph SystemPipeline {
- rankdir=TB;
- graph [compound=true, newrank=true, splines=ortho, ranksep="0.5", nodesep=0.35, pad=0.04, labelloc="t"];
- node [shape=box, fontsize=10, margin="0.06,0.04"];
- edge [fontsize=9, arrowsize=0.65];
+ rankdir=LR;
+ graph [compound=true, splines=ortho, ranksep="1.0", nodesep="0.5"];
+ node [shape=box, fontsize=10, margin="0.08,0.05"];
+ edge [fontsize=9, arrowsize=0.7];
- // Input
+ // Input Layer
subgraph cluster_input {
- label="Input"; labelloc="t"; labeljust="l"; style="rounded,dashed";
- a [label="Scenario YAML"];
- b [label="Parse + Validate\n(JSON Schema)"];
- a -> b;
- ia [label="", shape=point, width=0, height=0, style=invis];
- { rank=min; ia; }
+ label="Input"; labeljust="l"; style="rounded,dashed";
+ yaml [label="Scenario YAML"];
+ parse [label="Parse & Validate"];
+ expand [label="DSL Expansion"];
+ yaml -> parse -> expand;
}
- // Model
- subgraph cluster_model {
- label="Model"; labelloc="t"; labeljust="l"; style="rounded,dashed";
- c [label="Network"];
- fp [label="FailurePolicySet"];
- tm [label="TrafficMatrixSet"];
- wf [label="Workflow Steps"];
- // vertical tighten: invisible ordering
- c -> wf [style=invis];
- wf -> fp [style=invis];
- fp -> tm [style=invis];
- ma [label="", shape=point, width=0, height=0, style=invis];
- }
+ // Python Layer - Model
+ subgraph cluster_python {
+ label="Python Layer"; labeljust="l"; style="rounded,dashed";
- // Orchestration
- subgraph cluster_orchestration {
- label="Orchestration"; labelloc="t"; labeljust="l"; style="rounded,dashed";
- fm [label="Failure Manager"];
- af [label="Analysis Function\n(TrafficManager)"];
- fm -> af;
- oa [label="", shape=point, width=0, height=0, style=invis];
- }
+ subgraph cluster_model {
+ label=""; style=invis;
+ scenario [label="Scenario\n·Network\n·FailurePolicySet\n·TrafficMatrixSet\n·Workflow", shape=box];
+ }
- // Results
- subgraph cluster_results {
- label="Results"; labelloc="t"; labeljust="l"; style="rounded,dashed";
- i [label="Results Store + Artifacts"];
- ra [label="", shape=point, width=0, height=0, style=invis];
+ subgraph cluster_exec {
+ label=""; style=invis;
+ engine [label="Scenario.run()"];
+ steps [label="WorkflowSteps"];
+ exec [label="Execution\n·FailureManager\n·DemandBuilder\n·Flow Solvers"];
+ engine -> steps -> exec;
+ }
+
+ adapter [label="Core Adapter\n(build_graph, mappers)"];
+
+ scenario -> engine;
+ exec -> adapter;
}
- // Layering (top→bottom)
- { rank=same; ma; }
- ia -> ma [style=invis, weight=100, minlen=3];
- ma -> oa [style=invis, weight=100, minlen=3];
- oa -> ra [style=invis, weight=100, minlen=3];
- { rank=max; ra; }
+ // C++ Core Layer
+ subgraph cluster_core {
+ label="NetGraph-Core (C++)"; labeljust="l"; style="rounded,filled"; fillcolor="#f0f0f0";
- // Input → Model
- b -> c [ltail=cluster_input, lhead=cluster_model, minlen=3];
+ subgraph cluster_graph {
+ label=""; style=invis;
+ core_backend [label="Backend"];
+ core_graph [label="StrictMultiDiGraph"];
+ { rank=same; core_backend; core_graph; }
+ }
- // Model → Orchestration
- wf -> fm [ltail=cluster_model, lhead=cluster_orchestration, minlen=3];
+ subgraph cluster_algs {
+ label=""; style=invis;
+ core_algs [label="Algorithms"];
+ core_spf [label="SPF"];
+ core_ksp [label="K-SP"];
+ core_maxf [label="Max-Flow"];
+ core_algs -> core_spf;
+ core_algs -> core_ksp;
+ core_algs -> core_maxf;
+ { rank=same; core_spf; core_ksp; core_maxf; }
+ }
+
+ core_backend -> core_algs;
+ core_graph -> core_algs [style=invis];
+ }
+
+ // Results Layer
+ subgraph cluster_results {
+ label="Results"; labeljust="l"; style="rounded,dashed";
+ results [label="Results Store"];
+ }
- // Orchestration → Results
- fm -> i [ltail=cluster_orchestration, lhead=cluster_results, minlen=3];
+ // Main flow
+ expand -> scenario [lhead=cluster_python, ltail=cluster_input];
+ adapter -> core_backend [lhead=cluster_core, ltail=cluster_python, xlabel="GIL released"];
+ core_maxf -> results [ltail=cluster_core, lhead=cluster_results];
}
diff --git a/docs/assets/diagrams/system_pipeline.dot.svg b/docs/assets/diagrams/system_pipeline.dot.svg
index ee3c91c..61b9717 100644
--- a/docs/assets/diagrams/system_pipeline.dot.svg
+++ b/docs/assets/diagrams/system_pipeline.dot.svg
diff --git a/docs/examples/basic.md b/docs/examples/basic.md
index 60b86fa..4a5a517 100644
--- a/docs/examples/basic.md
+++ b/docs/examples/basic.md
@@ -2,7 +2,7 @@
This example builds a tiny topology inline to show APIs. For real analysis, prefer running a provided scenario and generating metrics via the CLI.
-See Quickstart for CLI usage and bundled scenarios.
+See [Tutorial](../getting-started/tutorial.md) for CLI usage and bundled scenarios.
## Creating a Simple Network
@@ -23,7 +23,8 @@ Let's create this network by using NetGraph's scenario system:
```python
from ngraph.scenario import Scenario
-from ngraph.algorithms.base import FlowPlacement
+from ngraph.types.base import FlowPlacement
+from ngraph.solver.maxflow import max_flow, max_flow_with_details
# Define network topology with parallel paths
scenario_yaml = """
@@ -91,28 +92,30 @@ Now let's run MaxFlow using the high-level Network API:
```python
# 1. "True" maximum flow (uses all available paths)
-max_flow_all = network.max_flow(source_path="A", sink_path="C")
+max_flow_all = max_flow(network, source_path="A", sink_path="C")
print(f"Maximum flow (all paths): {max_flow_all}")
-# Result: 6.0 (uses both A→B→C path capacity of 3 and A→D→C path capacity of 3)
+# Result: {('A', 'C'): 6.0} (uses both A→B→C path capacity of 3 and A→D→C path capacity of 3)
# 2. Flow along shortest paths only
-max_flow_shortest = network.max_flow(
+max_flow_shortest = max_flow(
+ network,
source_path="A",
sink_path="C",
shortest_path=True
)
print(f"Flow on shortest paths: {max_flow_shortest}")
-# Result: 3.0 (only uses A→B→C path, ignoring higher-cost A→D→C path)
+# Result: {('A', 'C'): 3.0} (only uses A→B→C path, ignoring higher-cost A→D→C path)
# 3. Equal-balanced flow placement on shortest paths
-max_flow_shortest_balanced = network.max_flow(
+max_flow_shortest_balanced = max_flow(
+ network,
source_path="A",
sink_path="C",
shortest_path=True,
flow_placement=FlowPlacement.EQUAL_BALANCED
)
print(f"Equal-balanced flow: {max_flow_shortest_balanced}")
-# Result: 2.0 (splits flow equally across parallel edges in A→B and B→C)
+# Result: {('A', 'C'): 2.0} (splits flow equally across parallel edges in A→B and B→C)
```
## Results Interpretation
@@ -123,22 +126,23 @@ print(f"Equal-balanced flow: {max_flow_shortest_balanced}")
Note that `EQUAL_BALANCED` flow placement is only applicable when calculating MaxFlow on shortest paths.
-## Cost Distribution (concise)
+## Cost Distribution
-Cost distribution shows how flow splits across path costs for latency/span analysis.
+Cost distribution shows how flow splits across path costs for latency/span analysis:
```python
# Get flow analysis with cost distribution
-result = network.max_flow_with_summary(
+result = max_flow_with_details(
+ network,
source_path="A",
sink_path="C",
mode="combine"
)
# Extract flow value and summary
-(src_label, sink_label), (flow_value, summary) = next(iter(result.items()))
+(src_label, sink_label), summary = next(iter(result.items()))
-print(f"Total flow: {flow_value}")
+print(f"Total flow: {summary.total_flow}")
print(f"Cost distribution: {summary.cost_distribution}")
# Example output:
@@ -150,9 +154,9 @@ print(f"Cost distribution: {summary.cost_distribution}")
# - 3.0 units of flow use paths with total cost 4.0 (A→D→C path)
```
-### Latency Span (optional)
+### Latency Span Analysis
-If link costs approximate latency, you can derive a quick span summary:
+If link costs approximate latency, derive span summary from cost distribution:
```python
def analyze_latency_span(cost_distribution):
@@ -161,7 +165,9 @@ def analyze_latency_span(cost_distribution):
return "No flow paths available"
total_flow = sum(cost_distribution.values())
- weighted_avg_latency = sum(cost * flow for cost, flow in cost_distribution.items()) / total_flow
+ weighted_avg_latency = sum(
+ cost * flow for cost, flow in cost_distribution.items()
+ ) / total_flow
min_latency = min(cost_distribution.keys())
max_latency = max(cost_distribution.keys())
@@ -174,7 +180,7 @@ def analyze_latency_span(cost_distribution):
print(f" Flow distribution:")
for cost, flow in sorted(cost_distribution.items()):
percentage = (flow / total_flow) * 100
- print(f" {percentage:.1f}% of traffic uses paths with latency {cost:.1f}")
+ print(f" {percentage:.1f}% uses paths with latency {cost:.1f}")
# Example usage
analyze_latency_span(summary.cost_distribution)
@@ -182,34 +188,40 @@ analyze_latency_span(summary.cost_distribution)
This helps identify traffic concentration, latency span, and potential bottlenecks.
-## Advanced Analysis: Sensitivity Analysis
+## Advanced Analysis: Failure Simulation
-For network analysis, you can use the low-level graph algorithms to run sensitivity analysis and identify bottleneck edges:
+You can analyze the network under different failure scenarios by excluding nodes or links:
```python
-from ngraph.algorithms.max_flow import calc_max_flow, saturated_edges, run_sensitivity
-
-# Get the underlying graph for low-level analysis
-graph = network.to_strict_multidigraph()
-
-# Identify bottleneck (saturated) edges
-bottlenecks = saturated_edges(graph, "A", "C")
-print(f"Bottleneck edges: {bottlenecks}")
-
-# Perform sensitivity analysis - test increasing capacity by 1 unit
-sensitivity_increase = run_sensitivity(graph, "A", "C", change_amount=1.0)
-print(f"Sensitivity to capacity increases: {sensitivity_increase}")
+# Identify link to fail
+failed_links = set()
+for link_id, link in network.links.items():
+ if link.source == "A" and link.target == "D":
+ failed_links.add(link_id)
+ break
+
+# Compare flows: baseline vs. with failure
+baseline_flow_dict = max_flow(network, source_path="A", sink_path="C")
+baseline_flow = baseline_flow_dict[('A', 'C')]
+
+degraded_flow_dict = max_flow(
+ network,
+ source_path="A",
+ sink_path="C",
+ excluded_links=failed_links
+)
+degraded_flow = degraded_flow_dict[('A', 'C')]
-# Test sensitivity to capacity decreases
-sensitivity_decrease = run_sensitivity(graph, "A", "C", change_amount=-1.0)
-print(f"Sensitivity to capacity decreases: {sensitivity_decrease}")
+print(f"Baseline flow: {baseline_flow}")
+print(f"Flow with A->D link failed: {degraded_flow}")
+print(f"Impact: {baseline_flow - degraded_flow} units lost")
```
This analysis helps identify:
-- **Bottleneck edges**: Links that are fully utilized and limit overall flow
-- **High-impact upgrades**: Which capacity increases provide the most benefit
-- **Vulnerability assessment**: How flow decreases when links are degraded
+- **Critical links**: Links whose failure significantly impacts flow
+- **Redundancy**: How well the network handles failures
+- **Vulnerability assessment**: Network resilience under different failure scenarios
## Next Steps
diff --git a/docs/examples/clos-fabric.md b/docs/examples/clos-fabric.md
index 9e9955c..0c93b2f 100644
--- a/docs/examples/clos-fabric.md
+++ b/docs/examples/clos-fabric.md
@@ -2,7 +2,7 @@
This example demonstrates analysis of a 3-tier Clos fabric. For production use, run the bundled scenario and generate metrics via CLI, then iterate in Python if needed.
-Refer to Quickstart for running bundled scenarios via CLI.
+Refer to [Tutorial](../getting-started/tutorial.md) for running bundled scenarios via CLI.
## Scenario Overview
@@ -16,7 +16,8 @@ We'll create two separate 3-tier Clos networks and analyze the maximum flow capa
```python
from ngraph.scenario import Scenario
-from ngraph.algorithms.base import FlowPlacement
+from ngraph.types.base import FlowPlacement
+from ngraph.solver.maxflow import max_flow
scenario_yaml = """
blueprints:
@@ -24,10 +25,10 @@ blueprints:
groups:
t1:
node_count: 8
- name_template: t1-{node_num}
+ name_template: "t1-{node_num}"
t2:
node_count: 8
- name_template: t2-{node_num}
+ name_template: "t2-{node_num}"
adjacency:
- source: /t1
@@ -45,7 +46,7 @@ blueprints:
use_blueprint: brick_2tier
spine:
node_count: 64
- name_template: t3-{node_num}
+ name_template: "t3-{node_num}"
adjacency:
- source: b1/t2
@@ -87,7 +88,8 @@ scenario = Scenario.from_yaml(scenario_yaml)
network = scenario.network
# Calculate maximum flow with ECMP (Equal Cost Multi-Path)
-max_flow_ecmp = network.max_flow(
+max_flow_ecmp = max_flow(
+ network,
source_path=r"my_clos1.*(b[0-9]*)/t1",
sink_path=r"my_clos2.*(b[0-9]*)/t1",
mode="combine",
@@ -127,22 +129,23 @@ We emulate partial inter-spine degradation by making capacities uneven across th
the effect of the splitting policy.
```python
-from ngraph.algorithms.base import FlowPlacement
+from ngraph.types.base import FlowPlacement
from ngraph.scenario import Scenario
+from ngraph.solver.maxflow import max_flow
scenario_yaml = """
blueprints:
brick_2tier:
groups:
- t1: {node_count: 8, name_template: t1-{node_num}}
- t2: {node_count: 8, name_template: t2-{node_num}}
+ t1: {node_count: 8, name_template: "t1-{node_num}"}
+ t2: {node_count: 8, name_template: "t2-{node_num}"}
adjacency:
- {source: /t1, target: /t2, pattern: mesh, link_params: {capacity: 2, cost: 1}}
3tier_clos:
groups:
b1: {use_blueprint: brick_2tier}
b2: {use_blueprint: brick_2tier}
- spine: {node_count: 64, name_template: t3-{node_num}}
+ spine: {node_count: 64, name_template: "t3-{node_num}"}
adjacency:
- {source: b1/t2, target: spine, pattern: one_to_one, link_params: {capacity: 2, cost: 1}}
- {source: b2/t2, target: spine, pattern: one_to_one, link_params: {capacity: 2, cost: 1}}
@@ -159,13 +162,15 @@ scenario = Scenario.from_yaml(scenario_yaml)
network = scenario.network
# Baseline (symmetric)
-baseline_ecmp = network.max_flow(
+baseline_ecmp = max_flow(
+ network,
source_path=r"my_clos1.*(b[0-9]*)/t1",
sink_path=r"my_clos2.*(b[0-9]*)/t1",
mode="combine", shortest_path=True,
flow_placement=FlowPlacement.EQUAL_BALANCED,
)
-baseline_wcmp = network.max_flow(
+baseline_wcmp = max_flow(
+ network,
source_path=r"my_clos1.*(b[0-9]*)/t1",
sink_path=r"my_clos2.*(b[0-9]*)/t1",
mode="combine", shortest_path=True,
@@ -186,13 +191,15 @@ for i, key in enumerate(sorted(groups.keys())):
for lk, cap in zip(links, caps):
lk.capacity = cap
-ecmp = network.max_flow(
+ecmp = max_flow(
+ network,
source_path=r"my_clos1.*(b[0-9]*)/t1",
sink_path=r"my_clos2.*(b[0-9]*)/t1",
mode="combine", shortest_path=True,
flow_placement=FlowPlacement.EQUAL_BALANCED,
)
-wcmp = network.max_flow(
+wcmp = max_flow(
+ network,
source_path=r"my_clos1.*(b[0-9]*)/t1",
sink_path=r"my_clos2.*(b[0-9]*)/t1",
mode="combine", shortest_path=True,
@@ -217,7 +224,7 @@ As expected, WCMP achieves higher throughput than ECMP when parallel links withi
## Network Structure Analysis
-We can also analyze the network structure to understand the flow distribution.
+We can also analyze the network structure using the NetworkExplorer:
```python
from ngraph.explorer import NetworkExplorer
@@ -226,19 +233,9 @@ from ngraph.explorer import NetworkExplorer
explorer = NetworkExplorer.explore_network(network)
explorer.print_tree(skip_leaves=True, detailed=False)
-# Analyze specific paths between border nodes
-from ngraph.algorithms.spf import spf
-from ngraph.algorithms.paths import resolve_to_paths
-
-# Get border nodes from different segments
-border_nodes = [node for node in network.nodes.values() if '/b1/t1' in node.name or '/b2/t1' in node.name]
-src_node = next(node.name for node in border_nodes if "my_clos1/b1/t1" in node.name)
-dst_node = next(node.name for node in border_nodes if "my_clos2/b1/t1" in node.name)
-
-# Find shortest paths using SPF
-costs, pred = spf(network.to_strict_multidigraph(), src_node)
-paths = list(resolve_to_paths(src_node, dst_node, pred))
-print(f"Found {len(paths)} paths between segments")
+# The explorer shows hierarchical structure and connectivity patterns
+# For detailed path analysis, use max_flow_with_details to get flow details
+# including cost distribution and path information
```
## Next Steps
diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md
index 8451985..c15ffab 100644
--- a/docs/getting-started/installation.md
+++ b/docs/getting-started/installation.md
@@ -1,19 +1,65 @@
# Installation
-Use NetGraph as a Python package in your environment.
+NetGraph is a hybrid Python+C++ framework. The Python package (`ngraph`) automatically installs
+the C++ performance layer (`netgraph-core`) as a dependency.
-## Using the Python Package
+## Requirements
-- Requires Python 3.11 or higher. Use a virtual environment (`python -m venv .venv && source .venv/bin/activate`).
+- Python 3.11 or higher
+- C++ compiler (for building netgraph-core from source if needed)
+ - Linux: GCC 10+ or Clang 12+
+ - macOS: Xcode Command Line Tools (Apple Clang)
+ - Windows: Visual Studio 2019+ with C++ tools
+
+## From PyPI
+
+Create and activate a virtual environment:
+
+```bash
+python -m venv .venv
+source .venv/bin/activate # On Windows: .venv\Scripts\activate
+```
+
+Install NetGraph:
```bash
pip install ngraph
```
+This installs:
+
+1. The Python `ngraph` package
+2. `netgraph-core` (pre-built wheels for common platforms, or builds from source)
+3. Dependencies (networkx, pyyaml, pandas, jsonschema)
+
Verify installation:
```bash
ngraph --help
```
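+
+You can also confirm that the C++ backend was installed alongside it:
+
+```bash
+pip show netgraph-core
+```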
-Next: see Quickstart for running scenarios and minimal programmatic usage.
+## From Source
+
+For development or if you need the latest changes:
+
+```bash
+# Clone both repositories
+git clone https://github.com/networmix/NetGraph-Core
+git clone https://github.com/networmix/NetGraph
+
+# Install NetGraph-Core first
+cd NetGraph-Core
+pip install -e .
+
+# Install NetGraph
+cd ../NetGraph
+pip install -e .
+```
+
+## Platform Notes
+
+**Pre-built wheels**: Available for Linux (x86_64, aarch64), macOS (x86_64, arm64), and Windows (x86_64).
+
+**Building from source**: Requires CMake 3.15+. Builds automatically during `pip install` if no compatible wheel is available.
+
+**Next**: See [Tutorial](tutorial.md) for running scenarios and programmatic usage examples.
diff --git a/docs/getting-started/tutorial.md b/docs/getting-started/tutorial.md
index 5f243cf..5de7a4f 100644
--- a/docs/getting-started/tutorial.md
+++ b/docs/getting-started/tutorial.md
@@ -1,6 +1,6 @@
-# Quickstart
+# Tutorial
-This guide shows the fastest way to run a scenario from the CLI and a minimal programmatic example. Use examples for detailed scenarios and policies.
+This guide shows the fastest way to run a scenario from the CLI and a minimal programmatic example. See the Examples section for detailed scenarios and policies.
## CLI: run and inspect
@@ -41,11 +41,13 @@ exported = scenario.results.to_dict()
print(list(exported["steps"].keys()))
```
-## Results shape (high level)
+## Results structure
-Results are exported as a fixed shape with `workflow`, `steps`, and `scenario`. Steps such as `MaxFlow`, `TrafficMatrixPlacement`, and `MaximumSupportedDemand` write under their step name. See Reference → Workflow for exact fields.
+Results are exported with a fixed structure containing `workflow`, `steps`, and `scenario` sections. Steps such as `MaxFlow`, `TrafficMatrixPlacement`, and `MaximumSupportedDemand` write their outputs under their step name. See the Workflow Reference for field details.
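+
+A minimal sketch of the exported shape (step names and inner fields here are illustrative):
+
+```python
+exported = scenario.results.to_dict()
+# {
+#     "workflow": {...},           # metadata for each workflow step
+#     "steps": {"stats": {...}},   # outputs keyed by step name
+#     "scenario": {...},           # scenario-level information
+# }
+```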
## Next steps
-- Examples → Bundled Scenarios
-- Reference → DSL, Workflow, CLI
+- [Bundled Scenarios](../examples/bundled-scenarios.md) - Ready-to-run example scenarios
+- [DSL Reference](../reference/dsl.md) - YAML scenario syntax
+- [Workflow Reference](../reference/workflow.md) - Analysis step configuration
+- [CLI Reference](../reference/cli.md) - Command-line interface details
diff --git a/docs/index.md b/docs/index.md
index 727ac83..0c1ab24 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -2,25 +2,62 @@
[![Python-test](https://github.com/networmix/NetGraph/actions/workflows/python-test.yml/badge.svg)](https://github.com/networmix/NetGraph/actions/workflows/python-test.yml)
-NetGraph is a scenario-based network modeling and analysis library in Python. Define topology, failure policies, traffic demands, and workflows in one YAML file; run it programmatically or via CLI; inspect results and iterate.
+NetGraph is a scenario-driven network modeling and analysis framework that combines Python's flexibility with high-performance C++ algorithms.
+
+## Overview
+
+NetGraph enables declarative modeling of network topologies, traffic matrices, and failure scenarios. It delegates computationally intensive graph algorithms to [NetGraph-Core](https://github.com/networmix/NetGraph-Core) while providing a rich Python API and CLI for orchestration.
+
+## Architecture
+
+NetGraph employs a **hybrid Python+C++ architecture**:
+
+- **Python layer (NetGraph)**: Scenario DSL parsing, workflow orchestration, result aggregation, and high-level APIs.
+- **C++ layer (NetGraph-Core)**: Performance-critical graph algorithms (SPF, KSP, Max-Flow) executing in optimized C++ with the GIL released.
+
+## Key Features
+
+### Modeling & DSL
+
+- **Declarative Scenarios**: Define topology, traffic, and workflows in validated YAML.
+- **Blueprints**: Reusable topology templates (e.g., Clos fabrics, regions) with parameterized expansion.
+- **Strict Multigraph**: Deterministic graph representation with stable edge IDs.
+
+### Failure Analysis
+
+- **Policy Engine**: Weighted failure modes with multiple policy rules per mode.
+- **Non-Destructive**: Runtime exclusions simulate failures without modifying the base topology.
+- **Risk Groups**: Model shared fate (e.g., fiber cuts, power zones).
+
+### Traffic Engineering
+
+- **Routing Modes**: Unified modeling of **IP Routing** (static costs, oblivious to congestion) and **Traffic Engineering** (dynamic residuals, congestion-aware).
+- **Flow Placement**: Strategies for **ECMP** (Equal-Cost Multi-Path) and **WCMP** (Weighted Cost Multi-Path).
+- **Capacity Analysis**: Compute max-flow envelopes and demand allocation with configurable placement policies.
+
+### Workflow & Integration
+
+- **Structured Results**: Export analysis artifacts to JSON for downstream processing.
+- **CLI**: Comprehensive command-line interface for validation and execution.
+- **Python API**: Full programmatic access to all modeling and solving capabilities.
## Getting Started
- **[Installation Guide](getting-started/installation.md)** - Python package installation
-- **[Quickstart](getting-started/tutorial.md)** - Run scenarios (CLI) and minimal code
+- **[Tutorial](getting-started/tutorial.md)** - Run scenarios (CLI) and code examples
## Examples
- **[Bundled Scenarios](examples/bundled-scenarios.md)** - Ready-to-run scenarios (`square_mesh`, `backbone_clos`, `nsfnet`)
-- **[Basic Example](examples/basic.md)** - A simple graph example
+- **[Basic Example](examples/basic.md)** - Simple graph example
- **[Clos Fabric Analysis](examples/clos-fabric.md)** - Analyze a 3-tier Clos network
-## Documentation
+## Reference Documentation
- **[Design](reference/design.md)** - Architecture, model, algorithms, and workflow
- **[DSL Reference](reference/dsl.md)** - YAML syntax guide
- **[Workflow Reference](reference/workflow.md)** - Analysis workflow configuration
-- **[CLI Reference](reference/cli.md)** - Command line interface
+- **[CLI Reference](reference/cli.md)** - Command-line interface
- **[Schema Reference](reference/schemas.md)** - JSON Schema and validation
- **[API Reference](reference/api.md)** - Python API documentation
- **[Auto-Generated API Reference](reference/api-full.md)** - Complete API docs
diff --git a/docs/reference/api-full.md b/docs/reference/api-full.md
index bf83761..389fcda 100644
--- a/docs/reference/api-full.md
+++ b/docs/reference/api-full.md
@@ -12,9 +12,9 @@ Quick links:
- [CLI Reference](cli.md)
- [DSL Reference](dsl.md)
-Generated from source code on: August 24, 2025 at 05:06 UTC
+Generated from source code on: November 25, 2025 at 05:16 UTC
-Modules auto-discovered: 61
+Modules auto-discovered: 44
---
@@ -32,164 +32,6 @@ Args:
---
-## ngraph.components
-
-Component and ComponentsLibrary classes for hardware capex/power modeling.
-
-### Component
-
-A generic component that can represent chassis, line cards, optics, etc.
-Components can have nested children, each with their own capex, power, etc.
-
-Attributes:
- name (str): Name of the component (e.g., "SpineChassis" or "400G-LR4").
- component_type (str): A string label (e.g., "chassis", "linecard", "optic").
- description (str): A human-readable description of this component.
- capex (float): Monetary capex of a single instance of this component.
- power_watts (float): Typical/nominal power usage (watts) for one instance.
- power_watts_max (float): Maximum/peak power usage (watts) for one instance.
- capacity (float): A generic capacity measure (e.g., platform capacity).
- ports (int): Number of ports if relevant for this component.
- count (int): How many identical copies of this component are present.
- attrs (Dict[str, Any]): Arbitrary key-value attributes for extra metadata.
- children (Dict[str, Component]): Nested child components (e.g., line cards
- inside a chassis), keyed by child name.
-
-**Attributes:**
-
-- `name` (str)
-- `component_type` (str) = generic
-- `description` (str)
-- `capex` (float) = 0.0
-- `power_watts` (float) = 0.0
-- `power_watts_max` (float) = 0.0
-- `capacity` (float) = 0.0
-- `ports` (int) = 0
-- `count` (int) = 1
-- `attrs` (Dict[str, Any]) = {}
-- `children` (Dict[str, Component]) = {}
-
-**Methods:**
-
-- `as_dict(self, include_children: 'bool' = True) -> 'Dict[str, Any]'` - Returns a dictionary containing all properties of this component.
-- `total_capacity(self) -> 'float'` - Computes the total (recursive) capacity of this component,
-- `total_capex(self) -> 'float'` - Computes total capex including children, multiplied by count.
-- `total_power(self) -> 'float'` - Computes the total *typical* (recursive) power usage of this component,
-- `total_power_max(self) -> 'float'` - Computes the total *peak* (recursive) power usage of this component,
-
-### ComponentsLibrary
-
-Holds a collection of named Components. Each entry is a top-level "template"
-that can be referenced for cost/power/capacity lookups, possibly with nested children.
-
-Example (YAML-like):
- components:
- BigSwitch:
- component_type: chassis
- cost: 20000
- power_watts: 1750
- capacity: 25600
- children:
- PIM16Q-16x200G:
- component_type: linecard
- cost: 1000
- power_watts: 10
- ports: 16
- count: 8
- 200G-FR4:
- component_type: optic
- cost: 2000
- power_watts: 6
- power_watts_max: 6.5
-
-**Attributes:**
-
-- `components` (Dict[str, Component]) = {}
-
-**Methods:**
-
-- `clone(self) -> 'ComponentsLibrary'` - Creates a deep copy of this ComponentsLibrary.
-- `from_dict(data: 'Dict[str, Any]') -> 'ComponentsLibrary'` - Constructs a ComponentsLibrary from a dictionary of raw component definitions.
-- `from_yaml(yaml_str: 'str') -> 'ComponentsLibrary'` - Constructs a ComponentsLibrary from a YAML string. If the YAML contains
-- `get(self, name: 'str') -> 'Optional[Component]'` - Retrieves a Component by its name from the library.
-- `merge(self, other: 'ComponentsLibrary', override: 'bool' = True) -> 'ComponentsLibrary'` - Merges another ComponentsLibrary into this one. By default (override=True),
-
-### resolve_link_end_components(attrs: 'Dict[str, Any]', library: 'ComponentsLibrary') -> 'tuple[tuple[Optional[Component], float, bool], tuple[Optional[Component], float, bool], bool]'
-
-Resolve per-end hardware components for a link.
-
-Input format inside ``link.attrs``:
-
-Structured mapping under ``hardware`` key only:
- ``{"hardware": {"source": {"component": NAME, "count": N},
- "target": {"component": NAME, "count": N}}}``
-
-Args:
- attrs: Link attributes mapping.
- library: Components library for lookups.
-
-Exclusive usage:
-
-- Optional ``exclusive: true`` per end indicates unsharable usage.
-
- For exclusive ends, validation and BOM counting should round-up counts
- to integers.
-
-Returns:
- ((src_comp, src_count, src_exclusive), (dst_comp, dst_count, dst_exclusive), per_end_specified)
- where components may be ``None`` if name is absent/unknown. ``per_end_specified``
- is True when a structured per-end mapping is present.
-
-### resolve_node_hardware(attrs: 'Dict[str, Any]', library: 'ComponentsLibrary') -> 'Tuple[Optional[Component], float]'
-
-Resolve node hardware from ``attrs['hardware']``.
-
-Expects the mapping: ``{"hardware": {"component": NAME, "count": N}}``.
-``count`` defaults to 1 if missing or invalid. If ``component`` is missing
-or unknown, returns ``(None, 1.0)``.
-
-Args:
- attrs: Node attributes mapping.
- library: Component library used for lookups.
-
-Returns:
- Tuple of (component or None, positive multiplier).
-
-### totals_with_multiplier(comp: 'Component', hw_count: 'float') -> 'Tuple[float, float, float]'
-
-Return (capex, power_watts, capacity) totals multiplied by ``hw_count``.
-
-Args:
- comp: Component definition (may include nested children and internal ``count``).
- hw_count: External multiplier (e.g., number of modules used for a link or node).
-
-Returns:
- Tuple of total capex, total power (typical), and total capacity as floats.
-
----
-
-## ngraph.config
-
-Configuration classes for NetGraph components.
-
-### TrafficManagerConfig
-
-Configuration for traffic demand placement estimation.
-
-**Attributes:**
-
-- `default_rounds` (int) = 5
-- `min_rounds` (int) = 5
-- `max_rounds` (int) = 100
-- `ratio_base` (int) = 5
-- `ratio_multiplier` (int) = 5
-
-**Methods:**
-
-- `estimate_rounds(self, demand_capacity_ratio: float) -> int` - Calculate placement rounds based on demand to capacity ratio.
-
----
-
## ngraph.explorer
NetworkExplorer class for analyzing network hierarchy and structure.
@@ -425,1280 +267,826 @@ Typical usage example:
---
-## ngraph.seed_manager
-
-Deterministic seed derivation to avoid global random.seed() order dependencies.
+## ngraph.model.components
-### SeedManager
+Component and ComponentsLibrary classes for hardware capex/power modeling.
-Manages deterministic seed derivation for isolated component reproducibility.
+### Component
-Global random.seed() creates order dependencies and component interference.
-SeedManager derives unique seeds per component from a master seed using SHA-256,
-ensuring reproducible results regardless of execution order or parallelism.
+A generic component that can represent chassis, line cards, optics, etc.
+Components can have nested children, each with their own capex, power, etc.
-Usage:
- seed_mgr = SeedManager(42)
- failure_seed = seed_mgr.derive_seed("failure_policy", "default")
+Attributes:
+ name (str): Name of the component (e.g., "SpineChassis" or "400G-LR4").
+ component_type (str): A string label (e.g., "chassis", "linecard", "optic").
+ description (str): A human-readable description of this component.
+ capex (float): Monetary capex of a single instance of this component.
+ power_watts (float): Typical/nominal power usage (watts) for one instance.
+ power_watts_max (float): Maximum/peak power usage (watts) for one instance.
+ capacity (float): A generic capacity measure (e.g., platform capacity).
+ ports (int): Number of ports if relevant for this component.
+ count (int): How many identical copies of this component are present.
+ attrs (Dict[str, Any]): Arbitrary key-value attributes for extra metadata.
+ children (Dict[str, Component]): Nested child components (e.g., line cards
+ inside a chassis), keyed by child name.
-**Methods:**
+**Attributes:**
-- `create_random_state(self, *components: 'Any') -> 'random.Random'` - Create a new Random instance with derived seed.
-- `derive_seed(self, *components: 'Any') -> 'Optional[int]'` - Derive a deterministic seed from master seed and component identifiers.
-- `seed_global_random(self, *components: 'Any') -> 'None'` - Seed the global random module with derived seed.
+- `name` (str)
+- `component_type` (str) = generic
+- `description` (str)
+- `capex` (float) = 0.0
+- `power_watts` (float) = 0.0
+- `power_watts_max` (float) = 0.0
+- `capacity` (float) = 0.0
+- `ports` (int) = 0
+- `count` (int) = 1
+- `attrs` (Dict[str, Any]) = {}
+- `children` (Dict[str, Component]) = {}
----
+**Methods:**
-## ngraph.yaml_utils
+- `as_dict(self, include_children: 'bool' = True) -> 'Dict[str, Any]'` - Returns a dictionary containing all properties of this component.
+- `total_capacity(self) -> 'float'` - Computes the total (recursive) capacity of this component,
+- `total_capex(self) -> 'float'` - Computes total capex including children, multiplied by count.
+- `total_power(self) -> 'float'` - Computes the total *typical* (recursive) power usage of this component,
+- `total_power_max(self) -> 'float'` - Computes the total *peak* (recursive) power usage of this component,
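+
+A minimal usage sketch (assuming dataclass-style construction, which the attribute defaults above suggest):
+
+```python
+from ngraph.model.components import Component
+
+optic = Component(name="400G-LR4", component_type="optic", capex=2000.0, power_watts=6.0)
+chassis = Component(
+    name="SpineChassis",
+    component_type="chassis",
+    capex=20000.0,
+    power_watts=1750.0,
+    children={"400G-LR4": optic},
+)
+# total_capex/total_power recurse into children and multiply by each count
+print(chassis.total_capex(), chassis.total_power())
+```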
-Utilities for handling YAML parsing quirks and common operations.
+### ComponentsLibrary
-### normalize_yaml_dict_keys(data: Dict[Any, ~V]) -> Dict[str, ~V]
+Holds a collection of named Components. Each entry is a top-level "template"
+that can be referenced for cost/power/capacity lookups, possibly with nested children.
-Normalize dictionary keys from YAML parsing to ensure consistent string keys.
+Example (YAML-like):
+ components:
+ BigSwitch:
+ component_type: chassis
+ cost: 20000
+ power_watts: 1750
+ capacity: 25600
+ children:
+ PIM16Q-16x200G:
+ component_type: linecard
+ cost: 1000
+ power_watts: 10
+ ports: 16
+ count: 8
+ 200G-FR4:
+ component_type: optic
+ cost: 2000
+ power_watts: 6
+ power_watts_max: 6.5
-YAML 1.1 boolean keys (e.g., true, false, yes, no, on, off) get converted to
-Python True/False boolean values. This function converts them to predictable
-string representations ("True"/"False") and ensures all keys are strings.
+**Attributes:**
-Args:
- data: Dictionary that may contain boolean or other non-string keys from YAML parsing
+- `components` (Dict[str, Component]) = {}
-Returns:
- Dictionary with all keys converted to strings, boolean keys converted to "True"/"False"
+**Methods:**
-Examples:
- >>> normalize_yaml_dict_keys({True: "value1", False: "value2", "normal": "value3"})
- {"True": "value1", "False": "value2", "normal": "value3"}
+- `clone(self) -> 'ComponentsLibrary'` - Creates a deep copy of this ComponentsLibrary.
+- `from_dict(data: 'Dict[str, Any]') -> 'ComponentsLibrary'` - Constructs a ComponentsLibrary from a dictionary of raw component definitions.
+- `from_yaml(yaml_str: 'str') -> 'ComponentsLibrary'` - Constructs a ComponentsLibrary from a YAML string. If the YAML contains
+- `get(self, name: 'str') -> 'Optional[Component]'` - Retrieves a Component by its name from the library.
+- `merge(self, other: 'ComponentsLibrary', override: 'bool' = True) -> 'ComponentsLibrary'` - Merges another ComponentsLibrary into this one. By default (override=True),
- >>> # In YAML: true:, yes:, on: all become Python True
- >>> # In YAML: false:, no:, off: all become Python False
+### resolve_link_end_components(attrs: 'Dict[str, Any]', library: 'ComponentsLibrary') -> 'tuple[tuple[Optional[Component], float, bool], tuple[Optional[Component], float, bool], bool]'
----
+Resolve per-end hardware components for a link.
-## ngraph.graph.convert
+Input format inside ``link.attrs``:
-Graph conversion utilities between StrictMultiDiGraph and NetworkX graphs.
+Structured mapping under ``hardware`` key only:
+ ``{"hardware": {"source": {"component": NAME, "count": N},
+ "target": {"component": NAME, "count": N}}}``
-Functions in this module consolidate or expand multi-edges and can preserve
-original edge data for reversion through a special ``_uv_edges`` attribute.
+Args:
+ attrs: Link attributes mapping.
+ library: Components library for lookups.
-### from_digraph(nx_graph: networkx.classes.digraph.DiGraph) -> ngraph.graph.strict_multidigraph.StrictMultiDiGraph
+Exclusive usage:
-Convert a revertible NetworkX DiGraph to a StrictMultiDiGraph.
+- Optional ``exclusive: true`` per end indicates unsharable usage.
-This function reconstructs the original StrictMultiDiGraph by restoring
-multi-edge information from the '_uv_edges' attribute of each edge.
-
-Args:
- nx_graph: A revertible NetworkX DiGraph with ``_uv_edges`` attributes.
-
-Returns:
- A StrictMultiDiGraph reconstructed from the input DiGraph.
-
-### from_graph(nx_graph: networkx.classes.graph.Graph) -> ngraph.graph.strict_multidigraph.StrictMultiDiGraph
-
-Convert a revertible NetworkX Graph to a StrictMultiDiGraph.
-
-Restores the original multi-edge structure from the '_uv_edges' attribute stored
-in each consolidated edge.
-
-Args:
- nx_graph: A revertible NetworkX Graph with ``_uv_edges`` attributes.
+ For exclusive ends, validation and BOM counting should round-up counts
+ to integers.
Returns:
- A StrictMultiDiGraph reconstructed from the input Graph.
+ ((src_comp, src_count, src_exclusive), (dst_comp, dst_count, dst_exclusive), per_end_specified)
+ where components may be ``None`` if name is absent/unknown. ``per_end_specified``
+ is True when a structured per-end mapping is present.
-### to_digraph(graph: ngraph.graph.strict_multidigraph.StrictMultiDiGraph, edge_func: Optional[Callable[[ngraph.graph.strict_multidigraph.StrictMultiDiGraph, Hashable, Hashable, dict], dict]] = None, revertible: bool = True) -> networkx.classes.digraph.DiGraph
+### resolve_node_hardware(attrs: 'Dict[str, Any]', library: 'ComponentsLibrary') -> 'Tuple[Optional[Component], float]'
-Convert a StrictMultiDiGraph to a NetworkX DiGraph.
+Resolve node hardware from ``attrs['hardware']``.
-This function consolidates multi-edges between nodes into a single edge.
-Optionally, a custom edge function can be provided to compute edge attributes.
-If `revertible` is True, the original multi-edge data is stored in the '_uv_edges'
-attribute of each consolidated edge, allowing for later reversion.
+Expects the mapping: ``{"hardware": {"component": NAME, "count": N}}``.
+``count`` defaults to 1 if missing or invalid. If ``component`` is missing
+or unknown, returns ``(None, 1.0)``.
Args:
- graph: The StrictMultiDiGraph to convert.
- edge_func: Optional function to compute consolidated edge attributes.
- The callable receives ``(graph, u, v, edges)`` and returns a dict.
- revertible: If True, store the original multi-edge data for reversion.
+ attrs: Node attributes mapping.
+ library: Component library used for lookups.
Returns:
- A NetworkX DiGraph representing the input graph.
-
-### to_graph(graph: ngraph.graph.strict_multidigraph.StrictMultiDiGraph, edge_func: Optional[Callable[[ngraph.graph.strict_multidigraph.StrictMultiDiGraph, Hashable, Hashable, dict], dict]] = None, revertible: bool = True) -> networkx.classes.graph.Graph
+ Tuple of (component or None, positive multiplier).
-Convert a StrictMultiDiGraph to a NetworkX Graph.
+### totals_with_multiplier(comp: 'Component', hw_count: 'float') -> 'Tuple[float, float, float]'
-This function works similarly to `to_digraph` but returns an undirected graph.
+Return (capex, power_watts, capacity) totals multiplied by ``hw_count``.
Args:
- graph: The StrictMultiDiGraph to convert.
- edge_func: Optional function to compute consolidated edge attributes.
- revertible: If True, store the original multi-edge data for reversion.
+ comp: Component definition (may include nested children and internal ``count``).
+ hw_count: External multiplier (e.g., number of modules used for a link or node).
Returns:
- A NetworkX Graph representing the input graph.
+ Tuple of total capex, total power (typical), and total capacity as floats.
---
-## ngraph.graph.io
+## ngraph.model.demand.matrix
-Graph serialization functions for node-link and edge-list formats.
+Traffic matrix containers.
-### edgelist_to_graph(lines: 'Iterable[str]', columns: 'List[str]', separator: 'str' = ' ', graph: 'Optional[StrictMultiDiGraph]' = None, source: 'str' = 'src', target: 'str' = 'dst', key: 'str' = 'key') -> 'StrictMultiDiGraph'
+Provides `TrafficMatrixSet`, a named collection of `TrafficDemand` lists
+used as input to demand expansion and placement. This module contains input
+containers, not analysis results.
-Build or update a StrictMultiDiGraph from an edge list.
+### TrafficMatrixSet
-Each line in the input is split by the specified separator into tokens. These tokens
-are mapped to column names provided in `columns`. The tokens corresponding to `source`
-and `target` become the node IDs. If a `key` column exists, its token is used as the edge
-ID; remaining tokens are added as edge attributes.
+Named collection of TrafficDemand lists.
-Args:
- lines: An iterable of strings, each representing one edge.
- columns: A list of column names, e.g. ["src", "dst", "cost"].
- separator: The separator used to split each line (default is a space).
- graph: An existing StrictMultiDiGraph to update; if None, a new graph is created.
- source: The column name for the source node ID.
- target: The column name for the target node ID.
- key: The column name for a custom edge ID (if present).
+This mutable container maps scenario names to lists of TrafficDemand objects,
+allowing management of multiple traffic matrices for analysis.
-Returns:
- The updated (or newly created) StrictMultiDiGraph.
+Attributes:
+ matrices: Dictionary mapping scenario names to TrafficDemand lists.
-Raises:
- RuntimeError: If a line does not match the expected number of columns.
+**Attributes:**
-### graph_to_edgelist(graph: 'StrictMultiDiGraph', columns: 'Optional[List[str]]' = None, separator: 'str' = ' ', source_col: 'str' = 'src', target_col: 'str' = 'dst', key_col: 'str' = 'key') -> 'List[str]'
+- `matrices` (dict[str, list[TrafficDemand]]) = {}
-Convert a StrictMultiDiGraph into an edge-list text representation.
+**Methods:**
-Each line in the output represents one edge with tokens joined by the given separator.
-By default, the output columns are:
- [source_col, target_col, key_col] + sorted(edge_attribute_names)
+- `add(self, name: 'str', demands: 'list[TrafficDemand]') -> 'None'` - Add a traffic matrix to the collection.
+- `get_all_demands(self) -> 'list[TrafficDemand]'` - Get all traffic demands from all matrices combined.
+- `get_default_matrix(self) -> 'list[TrafficDemand]'` - Get default traffic matrix.
+- `get_matrix(self, name: 'str') -> 'list[TrafficDemand]'` - Get a specific traffic matrix by name.
+- `to_dict(self) -> 'dict[str, Any]'` - Convert to dictionary for JSON serialization.
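+
+A minimal usage sketch (assuming dataclass-style construction of `TrafficDemand`, documented under `ngraph.model.demand.spec` below):
+
+```python
+from ngraph.model.demand.matrix import TrafficMatrixSet
+from ngraph.model.demand.spec import TrafficDemand
+
+tms = TrafficMatrixSet()
+tms.add("default", [TrafficDemand(source_path="dc1/.*", sink_path="dc2/.*", demand=10.0)])
+print(len(tms.get_matrix("default")))  # 1
+```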
-If an explicit list of columns is provided, those columns (in that order) are used,
-and any missing values are output as an empty string.
+---
-Args:
- graph: The StrictMultiDiGraph to export.
- columns: Optional list of column names. If None, they are auto-generated.
- separator: The string used to join tokens (default is a space).
- source_col: The column name for the source node (default "src").
- target_col: The column name for the target node (default "dst").
- key_col: The column name for the edge key (default "key").
+## ngraph.model.demand.spec
-Returns:
- A list of strings, each representing one edge in the specified column format.
-
-### graph_to_node_link(graph: 'StrictMultiDiGraph') -> 'Dict[str, Any]'
-
-Convert a StrictMultiDiGraph into a node-link dict representation.
-
-This representation is suitable for JSON serialization (e.g., for D3.js or Nx formats).
-
-The returned dict has the following structure:
- {
- "graph": { ... top-level graph attributes ... },
- "nodes": [
- {"id": node_id, "attr": { ... node attributes ... }},
- ...
- ],
- "links": [
- {
- "source": ,
- "target": ,
- "key": ,
- "attr": { ... edge attributes ... }
- },
- ...
- ]
- }
+Traffic demand specification.
-Args:
- graph: The StrictMultiDiGraph to convert.
+Defines `TrafficDemand`, a user-facing specification used by demand expansion
+and placement. It can carry either a concrete `FlowPolicy` instance or a
+`FlowPolicyPreset` enum to construct one.
-Returns:
- A dict containing the 'graph' attributes, list of 'nodes', and list of 'links'.
-
-### node_link_to_graph(data: 'Dict[str, Any]') -> 'StrictMultiDiGraph'
-
-Reconstruct a StrictMultiDiGraph from its node-link dict representation.
-
-Expected input format:
- {
- "graph": { ... graph attributes ... },
- "nodes": [
- {"id": , "attr": { ... node attributes ... }},
- ...
- ],
- "links": [
- {
- "source": ,
- "target": ,
- "key": ,
- "attr": { ... edge attributes ... }
- },
- ...
- ]
- }
+### TrafficDemand
-Args:
- data: A dict representing the node-link structure.
+Single traffic demand input.
-Returns:
- A StrictMultiDiGraph reconstructed from the provided data.
+Attributes:
+ source_path: Regex string selecting source nodes.
+ sink_path: Regex string selecting sink nodes.
+ priority: Priority class for this demand (lower value = higher priority).
+ demand: Total demand volume.
+ demand_placed: Portion of this demand placed so far.
+ flow_policy_config: Policy preset (FlowPolicyPreset enum) used to build
+ a `FlowPolicy` if ``flow_policy`` is not provided.
+ flow_policy: Concrete policy instance. If set, it overrides
+ ``flow_policy_config``.
+ mode: Expansion mode, ``"combine"`` or ``"pairwise"``.
+ attrs: Arbitrary user metadata.
+ id: Unique identifier assigned at initialization.
-Raises:
- KeyError: If required keys (e.g., "id" or "attr" on nodes) are missing.
+**Attributes:**
+
+- `source_path` (str)
+- `sink_path` (str)
+- `priority` (int) = 0
+- `demand` (float) = 0.0
+- `demand_placed` (float) = 0.0
+- `flow_policy_config` (Optional)
+- `flow_policy` (Optional)
+- `mode` (str) = combine
+- `attrs` (Dict) = {}
+- `id` (str)
---
-## ngraph.graph.strict_multidigraph
+## ngraph.model.failure.conditions
-Strict multi-directed graph with validation and convenience APIs.
+Shared condition primitives and evaluators.
-`StrictMultiDiGraph` extends `networkx.MultiDiGraph` to enforce explicit node
-management, unique edge identifiers, and predictable error handling. It exposes
-helpers to access nodes/edges as dictionaries and to serialize in node-link
-format via `to_dict()`.
+This module provides a small, dependency-free condition evaluation utility
+that can be reused by failure policies and DSL selection filters.
-### StrictMultiDiGraph
+Operators supported:
-A custom multi-directed graph with strict rules and unique edge IDs.
+- ==, !=, <, <=, >, >=
+- contains, not_contains
+- any_value, no_value
-This class enforces:
+The evaluator operates on a flat attribute mapping for an entity. Callers are
+responsible for constructing that mapping (e.g. merging top-level fields with
+``attrs`` and ensuring appropriate precedence rules).
-- No automatic creation of missing nodes when adding an edge.
-- No duplicate nodes (raises ValueError on duplicates).
-- No duplicate edges by key (raises ValueError on duplicates).
-- Removing non-existent nodes or edges raises ValueError.
-- Each edge key must be unique; by default, a Base64-UUID is generated
+### FailureCondition
- if none is provided.
+A single condition for matching an entity attribute.
-- ``copy()`` can perform a pickle-based deep copy that may be faster
+Args:
+ attr: Attribute name to inspect in the entity mapping.
+ operator: Comparison operator. See module docstring for the list.
+ value: Right-hand operand for the comparison (unused for any_value/no_value).
- than the NetworkX default.
+**Attributes:**
-Inherits from:
- networkx.MultiDiGraph
+- `attr` (str)
+- `operator` (str)
+- `value` (Any | None)
-**Methods:**
+### evaluate_condition(entity_attrs: 'dict[str, Any]', cond: 'FailureCondition') -> 'bool'
-- `add_edge(self, u_for_edge: 'NodeID', v_for_edge: 'NodeID', key: 'Optional[EdgeID]' = None, **attr: 'Any') -> 'EdgeID'` - Add a directed edge from u_for_edge to v_for_edge.
-- `add_edges_from(self, ebunch_to_add, **attr)` - Add all the edges in ebunch_to_add.
-- `add_node(self, node_for_adding: 'NodeID', **attr: 'Any') -> 'None'` - Add a single node, disallowing duplicates.
-- `add_nodes_from(self, nodes_for_adding, **attr)` - Add multiple nodes.
-- `add_weighted_edges_from(self, ebunch_to_add, weight='weight', **attr)` - Add weighted edges in `ebunch_to_add` with specified weight attr
-- `adjacency(self)` - Returns an iterator over (node, adjacency dict) tuples for all nodes.
-- `clear(self)` - Remove all nodes and edges from the graph.
-- `clear_edges(self)` - Remove all edges from the graph without altering nodes.
-- `copy(self, as_view: 'bool' = False, pickle: 'bool' = True) -> 'StrictMultiDiGraph'` - Create a copy of this graph.
-- `edge_subgraph(self, edges)` - Returns the subgraph induced by the specified edges.
-- `edges_between(self, u: 'NodeID', v: 'NodeID') -> 'List[EdgeID]'` - List all edge keys from node u to node v.
-- `get_edge_attr(self, key: 'EdgeID') -> 'AttrDict'` - Retrieve the attribute dictionary of a specific edge.
-- `get_edge_data(self, u, v, key=None, default=None)` - Returns the attribute dictionary associated with edge (u, v,
-- `get_edges(self) -> 'Dict[EdgeID, EdgeTuple]'` - Retrieve a dictionary of all edges by their keys.
-- `get_nodes(self) -> 'Dict[NodeID, AttrDict]'` - Retrieve all nodes and their attributes as a dictionary.
-- `has_edge(self, u, v, key=None)` - Returns True if the graph has an edge between nodes u and v.
-- `has_edge_by_id(self, key: 'EdgeID') -> 'bool'` - Check whether an edge with the given key exists.
-- `has_node(self, n)` - Returns True if the graph contains the node n.
-- `has_predecessor(self, u, v)` - Returns True if node u has predecessor v.
-- `has_successor(self, u, v)` - Returns True if node u has successor v.
-- `is_directed(self)` - Returns True if graph is directed, False otherwise.
-- `is_multigraph(self)` - Returns True if graph is a multigraph, False otherwise.
-- `nbunch_iter(self, nbunch=None)` - Returns an iterator over nodes contained in nbunch that are
-- `neighbors(self, n)` - Returns an iterator over successor nodes of n.
-- `new_edge_key(self, u: 'NodeID', v: 'NodeID', key: 'Optional[int]' = None) -> 'int'` - Return a new unique integer edge ID.
-- `number_of_edges(self, u=None, v=None)` - Returns the number of edges between two nodes.
-- `number_of_nodes(self)` - Returns the number of nodes in the graph.
-- `order(self)` - Returns the number of nodes in the graph.
-- `predecessors(self, n)` - Returns an iterator over predecessor nodes of n.
-- `remove_edge(self, u: 'NodeID', v: 'NodeID', key: 'Optional[EdgeID]' = None) -> 'None'` - Remove an edge (or edges) between nodes u and v.
-- `remove_edge_by_id(self, key: 'EdgeID') -> 'None'` - Remove a directed edge by its unique key.
-- `remove_edges_from(self, ebunch)` - Remove all edges specified in ebunch.
-- `remove_node(self, n: 'NodeID') -> 'None'` - Remove a single node and all incident edges.
-- `remove_nodes_from(self, nodes)` - Remove multiple nodes.
-- `reverse(self, copy=True)` - Returns the reverse of the graph.
-- `size(self, weight=None)` - Returns the number of edges or total of all edge weights.
-- `subgraph(self, nodes)` - Returns a SubGraph view of the subgraph induced on `nodes`.
-- `successors(self, n)` - Returns an iterator over successor nodes of n.
-- `to_dict(self) -> 'Dict[str, Any]'` - Convert the graph to a dictionary representation suitable for JSON serialization.
-- `to_directed(self, as_view=False)` - Returns a directed representation of the graph.
-- `to_directed_class(self)` - Returns the class to use for empty directed copies.
-- `to_undirected(self, reciprocal=False, as_view=False)` - Returns an undirected representation of the digraph.
-- `to_undirected_class(self)` - Returns the class to use for empty undirected copies.
-- `update(self, edges=None, nodes=None)` - Update the graph using nodes/edges/graphs as input.
-- `update_edge_attr(self, key: 'EdgeID', **attr: 'Any') -> 'None'` - Update attributes on an existing edge by key.
+Evaluate a single condition against an entity attribute mapping.
----
+Args:
+ entity_attrs: Flat mapping of attributes for the entity.
+ cond: Condition to evaluate.
-## ngraph.model.network
+Returns:
+ True if the condition passes, False otherwise.
-Network topology modeling with Node, Link, RiskGroup, and Network classes.
+### evaluate_conditions(entity_attrs: 'dict[str, Any]', conditions: 'Iterable[FailureCondition]', logic: 'str') -> 'bool'
-### Link
+Evaluate multiple conditions with AND/OR logic.
-Represents one directed link between two nodes.
+Args:
+ entity_attrs: Flat mapping of attributes for the entity.
+ conditions: Iterable of conditions to evaluate.
+ logic: "and" or "or".
-The model stores a single direction (``source`` -> ``target``). When building
-the working graph for analysis, a reverse edge is added by default to provide
-bidirectional connectivity. Disable with ``add_reverse=False`` in
-``Network.to_strict_multidigraph``.
+Returns:
+ True if the combined predicate passes, False otherwise.
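+
+A minimal sketch of the evaluator (the module path comes from the heading above; `FailureCondition` is the shared condition dataclass):
+
+```python
+from ngraph.model.failure.conditions import FailureCondition, evaluate_conditions
+
+attrs = {"region": "southwest", "installation": "underground"}
+conditions = [
+    FailureCondition(attr="region", operator="==", value="southwest"),
+    FailureCondition(attr="installation", operator="==", value="underground"),
+]
+print(evaluate_conditions(attrs, conditions, "and"))  # True
+```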
-Attributes:
- source (str): Name of the source node.
- target (str): Name of the target node.
- capacity (float): Link capacity (default 1.0).
- cost (float): Link cost (default 1.0).
- disabled (bool): Whether the link is disabled.
- risk_groups (Set[str]): Set of risk group names this link belongs to.
- attrs (Dict[str, Any]): Additional metadata (e.g., distance).
- id (str): Auto-generated unique identifier: "{source}|{target}|".
+---
-**Attributes:**
+## ngraph.model.failure.parser
-- `source` (str)
-- `target` (str)
-- `capacity` (float) = 1.0
-- `cost` (float) = 1.0
-- `disabled` (bool) = False
-- `risk_groups` (Set[str]) = set()
-- `attrs` (Dict[str, Any]) = {}
-- `id` (str)
+Parsers for FailurePolicySet and related failure modeling structures.
-### Network
+### build_failure_policy(fp_data: 'Dict[str, Any]', *, policy_name: 'str', derive_seed) -> 'FailurePolicy'
-A container for network nodes and links.
+No documentation available.
-Network represents the scenario-level topology with persistent state (nodes/links
-that are disabled in the scenario configuration). For temporary exclusion of
-nodes/links during analysis (e.g., failure simulation), use NetworkView instead
-of modifying the Network's disabled states.
+### build_failure_policy_set(raw: 'Dict[str, Any]', *, derive_seed) -> 'FailurePolicySet'
-Attributes:
- nodes (Dict[str, Node]): Mapping from node name -> Node object.
- links (Dict[str, Link]): Mapping from link ID -> Link object.
- risk_groups (Dict[str, RiskGroup]): Top-level risk groups by name.
- attrs (Dict[str, Any]): Optional metadata about the network.
+No documentation available.
-**Attributes:**
+### build_risk_groups(rg_data: 'List[Dict[str, Any]]') -> 'List[RiskGroup]'
-- `nodes` (Dict[str, Node]) = {}
-- `links` (Dict[str, Link]) = {}
-- `risk_groups` (Dict[str, RiskGroup]) = {}
-- `attrs` (Dict[str, Any]) = {}
+No documentation available.
-**Methods:**
+---
-- `add_link(self, link: 'Link') -> 'None'` - Add a link to the network (keyed by the link's auto-generated ID).
-- `add_node(self, node: 'Node') -> 'None'` - Add a node to the network (keyed by node.name).
-- `disable_all(self) -> 'None'` - Mark all nodes and links as disabled.
-- `disable_link(self, link_id: 'str') -> 'None'` - Mark a link as disabled.
-- `disable_node(self, node_name: 'str') -> 'None'` - Mark a node as disabled.
-- `disable_risk_group(self, name: 'str', recursive: 'bool' = True) -> 'None'` - Disable all nodes/links that have 'name' in their risk_groups.
-- `enable_all(self) -> 'None'` - Mark all nodes and links as enabled.
-- `enable_link(self, link_id: 'str') -> 'None'` - Mark a link as enabled.
-- `enable_node(self, node_name: 'str') -> 'None'` - Mark a node as enabled.
-- `enable_risk_group(self, name: 'str', recursive: 'bool' = True) -> 'None'` - Enable all nodes/links that have 'name' in their risk_groups.
-- `find_links(self, source_regex: 'Optional[str]' = None, target_regex: 'Optional[str]' = None, any_direction: 'bool' = False) -> 'List[Link]'` - Search for links using optional regex patterns for source or target node names.
-- `get_links_between(self, source: 'str', target: 'str') -> 'List[str]'` - Retrieve all link IDs that connect the specified source node
-- `k_shortest_paths(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'pairwise', *, max_k: 'int' = 3, max_path_cost: 'float' = inf, max_path_cost_factor: 'Optional[float]' = None, split_parallel_edges: 'bool' = False) -> 'Dict[Tuple[str, str], List[_NGPath]]'` - Return up to K shortest paths per group pair.
-- `max_flow(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> 'Dict[Tuple[str, str], float]'` - Compute maximum flow between node groups in this network.
-- `max_flow_detailed(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> 'Dict[Tuple[str, str], Tuple[float, FlowSummary, StrictMultiDiGraph]]'` - Compute maximum flow with both analytics summary and graph.
-- `max_flow_with_graph(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> 'Dict[Tuple[str, str], Tuple[float, StrictMultiDiGraph]]'` - Compute maximum flow and return flow-assigned graphs.
-- `max_flow_with_summary(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> 'Dict[Tuple[str, str], Tuple[float, FlowSummary]]'` - Compute maximum flow and return per-pair analytics summary.
-- `saturated_edges(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', tolerance: 'float' = 1e-10, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> 'Dict[Tuple[str, str], List[Tuple[str, str, str]]]'` - Identify saturated edges in max flow solutions.
-- `select_node_groups_by_path(self, path: 'str') -> 'Dict[str, List[Node]]'` - Select and group nodes by regex on name or by attribute directive.
-- `sensitivity_analysis(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', change_amount: 'float' = 1.0, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> 'Dict[Tuple[str, str], Dict[Tuple[str, str, str], float]]'` - Perform sensitivity analysis for capacity changes.
-- `shortest_path_costs(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine') -> 'Dict[Tuple[str, str], float]'` - Return minimal path costs between node groups in this network.
-- `shortest_paths(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', *, split_parallel_edges: 'bool' = False) -> 'Dict[Tuple[str, str], List[_NGPath]]'` - Return concrete shortest path(s) between selected node groups.
-- `to_strict_multidigraph(self, add_reverse: 'bool' = True, *, compact: 'bool' = False) -> 'StrictMultiDiGraph'` - Create a StrictMultiDiGraph representation of this Network.
+## ngraph.model.failure.policy
-### Node
+Failure policy primitives.
-Represents a node in the network.
+Defines `FailureCondition`, `FailureRule`, and `FailurePolicy` for expressing
+how nodes, links, and risk groups fail in analyses. Conditions match on
+top-level attributes with simple operators; rules select matches using
+"all", probabilistic "random" (with `probability`), or fixed-size "choice"
+(with `count`). Policies can optionally expand failures by shared risk groups
+or by risk-group children.
-Each node is uniquely identified by its name, which is used as
-the key in the Network's node dictionary.
+### FailureCondition
-Attributes:
- name (str): Unique identifier for the node.
- disabled (bool): Whether the node is disabled in the scenario configuration.
- risk_groups (Set[str]): Set of risk group names this node belongs to.
- attrs (Dict[str, Any]): Additional metadata (e.g., coordinates, region).
+Alias to the shared condition dataclass.
+
+This maintains a consistent import path within the failure policy module.
**Attributes:**
-- `name` (str)
-- `disabled` (bool) = False
-- `risk_groups` (Set[str]) = set()
-- `attrs` (Dict[str, Any]) = {}
+- `attr` (str)
+- `operator` (str)
+- `value` (Any | None)
-### RiskGroup
+### FailureMode
-Represents a shared-risk or failure domain, which may have nested children.
+A weighted mode that encapsulates a set of rules applied together.
+
+Exactly one mode is selected per failure iteration according to the
+mode weights. Within a mode, all contained rules are applied and their
+selections are unioned into the failure set.
Attributes:
- name (str): Unique name of this risk group.
- children (List[RiskGroup]): Subdomains in a nested structure.
- disabled (bool): Whether this group was declared disabled on load.
- attrs (Dict[str, Any]): Additional metadata for the risk group.
+ weight: Non-negative weight used for mode selection. All weights are
+ normalized internally. Modes with zero weight are never selected.
+ rules: A list of `FailureRule` applied together when this mode is chosen.
+ attrs: Optional metadata.
**Attributes:**
-- `name` (str)
-- `children` (List[RiskGroup]) = []
-- `disabled` (bool) = False
+- `weight` (float)
+- `rules` (List[FailureRule]) = []
- `attrs` (Dict[str, Any]) = {}
----
-
-## ngraph.model.view
+### FailurePolicy
-Read-only view of a ``Network`` with temporary exclusions.
+A container of weighted failure modes (each holding FailureRules) plus optional metadata in `attrs`.
-This module defines a view over ``Network`` objects that can exclude nodes and
-links for analysis without mutating the base network. It supports what-if
-analysis, including failure simulations.
+The main entry point is `apply_failures`, which:
+ 1) Selects one failure mode according to the normalized mode weights.
+ 2) For each rule in the chosen mode, gathers the relevant entities (node, link, or risk_group).
+ 3) Matches them against the rule conditions using 'and' or 'or' logic.
+ 4) Applies the selection strategy (all, random, or choice).
+ 5) Collects the union of all failed entities across the mode's rules.
+ 6) Optionally expands failures by shared-risk groups or sub-risks.
-### NetworkView
-Read-only overlay that hides selected nodes/links from a base Network.
-NetworkView provides filtered access to a Network where both scenario-disabled
-elements (Node.disabled, Link.disabled) and analysis-excluded elements are
-hidden from algorithms. This enables failure simulation and what-if analysis
-without mutating the base Network.
-Multiple NetworkView instances can safely operate on the same base Network
-concurrently, each with different exclusion sets.
-Example:
- ```python
- # Create view excluding specific nodes for failure analysis
- view = NetworkView.from_excluded_sets(
- base_network,
- excluded_nodes=["node1", "node2"],
- excluded_links=["link1"]
- )
-
- # Run analysis on filtered topology
- flows = view.max_flow("source.*", "sink.*")
- ```
-Attributes:
- _base: The underlying Network object.
- _excluded_nodes: Frozen set of node names to exclude from analysis.
- _excluded_links: Frozen set of link IDs to exclude from analysis.
+Example YAML configuration:
+
+```yaml
+failure_policy:
+  attrs:
+    description: "Regional power grid failure affecting telecom infrastructure"
+  fail_risk_groups: true
+  rules:
+    # Fail all nodes in Texas electrical grid
+    - entity_scope: "node"
+      conditions:
+        - attr: "electric_grid"
+          operator: "=="
+          value: "texas"
+      logic: "and"
+      rule_type: "all"
+
+    # Randomly fail 40% of underground fiber links in affected region
+    - entity_scope: "link"
+      conditions:
+        - attr: "region"
+          operator: "=="
+          value: "southwest"
+        - attr: "installation"
+          operator: "=="
+          value: "underground"
+      logic: "and"
+      rule_type: "random"
+      probability: 0.4
+
+    # Choose exactly 2 risk groups to fail (e.g., data centers)
+    # Note: logic defaults to "or" when not specified
+    - entity_scope: "risk_group"
+      rule_type: "choice"
+      count: 2
+```
+
+Attributes:
+ modes (List[FailureMode]):
+ Weighted failure modes. Exactly one mode is selected per iteration;
+ its rules are applied together.
+ attrs (Dict[str, Any]):
+ Arbitrary metadata about this policy (e.g. "name", "description").
+ fail_risk_groups (bool):
+ If True, after initial selection, expand failures among any
+ node/link that shares a risk group with a failed entity.
+ fail_risk_group_children (bool):
+ If True, and if a risk_group is marked as failed, expand to
+ children risk_groups recursively.
+ seed (Optional[int]):
+ Seed for reproducible random operations. If None, operations
+ will be non-deterministic.
**Attributes:**
-- `_base` ('Network')
-- `_excluded_nodes` (frozenset[str]) = frozenset()
-- `_excluded_links` (frozenset[str]) = frozenset()
+- `attrs` (Dict[str, Any]) = {}
+- `fail_risk_groups` (bool) = False
+- `fail_risk_group_children` (bool) = False
+- `seed` (Optional[int])
+- `modes` (List[FailureMode]) = []
**Methods:**
-- `from_excluded_sets(base: "'Network'", excluded_nodes: 'Iterable[str]' = (), excluded_links: 'Iterable[str]' = ()) -> "'NetworkView'"` - Create a NetworkView with specified exclusions.
-- `is_link_hidden(self, link_id: 'str') -> 'bool'` - Check if a link is hidden in this view.
-- `is_node_hidden(self, name: 'str') -> 'bool'` - Check if a node is hidden in this view.
-- `k_shortest_paths(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'pairwise', *, max_k: 'int' = 3, max_path_cost: 'float' = inf, max_path_cost_factor: 'Optional[float]' = None, split_parallel_edges: 'bool' = False) -> 'Dict[Tuple[str, str], List[_NGPath]]'` - Return up to K shortest paths per group pair.
-- `max_flow(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: "Optional['FlowPlacement']" = None) -> 'Dict[Tuple[str, str], float]'` - Compute maximum flow between node groups in this view.
-- `max_flow_detailed(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: "Optional['FlowPlacement']" = None) -> "Dict[Tuple[str, str], Tuple[float, 'FlowSummary', 'StrictMultiDiGraph']]"` - Compute maximum flow with complete analytics and graph.
-- `max_flow_with_graph(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: "Optional['FlowPlacement']" = None) -> "Dict[Tuple[str, str], Tuple[float, 'StrictMultiDiGraph']]"` - Compute maximum flow and return flow-assigned graph.
-- `max_flow_with_summary(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: "Optional['FlowPlacement']" = None) -> "Dict[Tuple[str, str], Tuple[float, 'FlowSummary']]"` - Compute maximum flow with detailed analytics summary.
-- `saturated_edges(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', tolerance: 'float' = 1e-10, shortest_path: 'bool' = False, flow_placement: "Optional['FlowPlacement']" = None) -> 'Dict[Tuple[str, str], List[Tuple[str, str, str]]]'` - Identify saturated edges in max flow solutions.
-- `select_node_groups_by_path(self, path: 'str') -> "Dict[str, List['Node']]"` - Select and group visible nodes by regex or attribute directive.
-- `sensitivity_analysis(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', change_amount: 'float' = 1.0, shortest_path: 'bool' = False, flow_placement: "Optional['FlowPlacement']" = None) -> 'Dict[Tuple[str, str], Dict[Tuple[str, str, str], float]]'` - Perform sensitivity analysis on capacity changes.
-- `shortest_path_costs(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine') -> 'Dict[Tuple[str, str], float]'` - Return minimal path costs between node groups in this view.
-- `shortest_paths(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', *, split_parallel_edges: 'bool' = False) -> 'Dict[Tuple[str, str], List[_NGPath]]'` - Return concrete shortest path(s) between selected node groups.
-- `to_strict_multidigraph(self, add_reverse: 'bool' = True, *, compact: 'bool' = False) -> "'StrictMultiDiGraph'"` - Create a StrictMultiDiGraph representation of this view.
+- `apply_failures(self, network_nodes: 'Dict[str, Any]', network_links: 'Dict[str, Any]', network_risk_groups: 'Dict[str, Any] | None' = None, *, seed: 'Optional[int]' = None) -> 'List[str]'` - Identify which entities fail for this iteration.
+- `to_dict(self) -> 'Dict[str, Any]'` - Convert to dictionary for JSON serialization.
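+
+A minimal sketch (assuming dataclass-style construction; the node/link attribute mappings are caller-supplied, as `apply_failures` documents):
+
+```python
+from ngraph.model.failure.policy import FailureMode, FailurePolicy, FailureRule
+
+policy = FailurePolicy(
+    modes=[
+        FailureMode(
+            weight=1.0,
+            rules=[FailureRule(entity_scope="node", rule_type="choice", count=1)],
+        )
+    ],
+    seed=42,
+)
+failed = policy.apply_failures({"n1": {}, "n2": {}}, {})
+print(failed)  # one node name, deterministic for the given seed
+```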
----
+### FailureRule
-## ngraph.algorithms.base
+Defines how to match and then select entities for failure.
-Base classes and enums for network analysis algorithms.
+Attributes:
+ entity_scope (EntityScope):
+ The type of entities this rule applies to: "node", "link", or "risk_group".
+ conditions (List[FailureCondition]):
+ A list of conditions to filter matching entities.
+ logic (Literal["and", "or"]):
+ "and": All conditions must be true for a match.
+ "or": At least one condition is true for a match (default).
+ rule_type (Literal["random", "choice", "all"]):
+ The selection strategy among the matched set:
-### EdgeSelect
+- "random": each matched entity is chosen with probability = `probability`.
+- "choice": pick exactly `count` items from the matched set (random sample).
+- "all": select every matched entity in the matched set.
-Edge selection criteria.
+ probability (float):
+ Probability in [0,1], used if `rule_type="random"`.
+ count (int):
+ Number of entities to pick if `rule_type="choice"`.
-Determines which edges are considered for path-finding between a node and
-its neighbor(s).
+**Attributes:**
-### FlowPlacement
+- `entity_scope` (EntityScope)
+- `conditions` (List[FailureCondition]) = []
+- `logic` (Literal['and', 'or']) = or
+- `rule_type` (Literal['random', 'choice', 'all']) = all
+- `probability` (float) = 1.0
+- `count` (int) = 1
+- `weight_by` (Optional[str])
-Strategies to distribute flow across parallel equal-cost paths.
+---
-### PathAlg
+## ngraph.model.failure.policy_set
-Path-finding algorithm types.
+Failure policy containers.
----
+Provides `FailurePolicySet`, a named collection of `FailurePolicy` objects
+used as input to failure analysis workflows. This module contains input
+containers, not analysis results.
-## ngraph.algorithms.capacity
+### FailurePolicySet
-Capacity calculation algorithms for network analysis.
+Named collection of FailurePolicy objects.
-This module computes feasible flow given a predecessor DAG from a shortest-path
-routine and supports two placement strategies: proportional and equal-balanced
-in reversed orientation. Functions follow a Dinic-like blocking-flow approach
-for proportional placement.
+This mutable container maps failure policy names to FailurePolicy objects,
+allowing management of multiple failure policies for analysis.
-### calc_graph_capacity(flow_graph: 'StrictMultiDiGraph', src_node: 'NodeID', dst_node: 'NodeID', pred: 'Dict[NodeID, Dict[NodeID, List[EdgeID]]]', flow_placement: 'FlowPlacement' = , capacity_attr: 'str' = 'capacity', flow_attr: 'str' = 'flow') -> 'Tuple[float, Dict[NodeID, Dict[NodeID, float]]]'
+Attributes:
+ policies: Dictionary mapping failure policy names to FailurePolicy objects.
-Calculate feasible flow and flow fractions between two nodes.
+**Attributes:**
-In PROPORTIONAL mode (similar to Dinic in reversed orientation):
+- `policies` (dict[str, FailurePolicy]) = {}
-1. Build the reversed residual graph from dst_node (via `_init_graph_data`).
-2. Use BFS (in `_set_levels_bfs`) to build a level graph and DFS (`_push_flow_dfs`)
+**Methods:**
- to push blocking flows, repeating until no more flow can be pushed.
+- `add(self, name: 'str', policy: 'FailurePolicy') -> 'None'` - Add a failure policy to the collection.
+- `get_all_policies(self) -> 'list[FailurePolicy]'` - Get all failure policies from the collection.
+- `get_policy(self, name: 'str') -> 'FailurePolicy'` - Get a specific failure policy by name.
+- `to_dict(self) -> 'dict[str, Any]'` - Convert to dictionary for JSON serialization.
-3. The net flow found is stored in reversed orientation. Convert final flows
+---
- to forward orientation by negating and normalizing by the total.
+## ngraph.model.flow.policy_config
-In EQUAL_BALANCED mode:
+Flow policy preset configurations for NetGraph.
-1. Build reversed adjacency from dst_node (also via `_init_graph_data`),
+Provides convenient factory functions to create common FlowPolicy configurations
+using NetGraph-Core's FlowPolicy and FlowPolicyConfig.
- ignoring capacity checks in that BFS.
+### FlowPolicyPreset
-2. Perform a BFS pass from src_node (`_equal_balance_bfs`) to distribute a
+Enumerates common flow policy presets for traffic routing.
- nominal flow of 1.0 equally among parallel edges.
+These presets map to specific combinations of path algorithms, flow placement
+strategies, and edge selection modes provided by NetGraph-Core.
-3. Determine the scaling ratio so that no edge capacity is exceeded.
+### create_flow_policy(algorithms: 'netgraph_core.Algorithms', graph: 'netgraph_core.Graph', preset: 'FlowPolicyPreset', node_mask=None, edge_mask=None) -> 'netgraph_core.FlowPolicy'
- Scale the flow assignments accordingly, then normalize to the forward sense.
+Create a FlowPolicy instance from a preset configuration.
Args:
- flow_graph: The multigraph with capacity and flow attributes.
- src_node: The source node in the forward graph.
- dst_node: The destination node in the forward graph.
- pred: Forward adjacency mapping (node -> (adjacent node -> list of EdgeIDs)),
- typically produced by `spf(..., multipath=True)`. Must be a DAG.
- flow_placement: The flow distribution strategy (PROPORTIONAL or EQUAL_BALANCED).
- capacity_attr: Name of the capacity attribute on edges.
- flow_attr: Name of the flow attribute on edges.
+ algorithms: NetGraph-Core Algorithms instance.
+ graph: NetGraph-Core Graph handle.
+ preset: FlowPolicyPreset enum value specifying the desired policy.
+ node_mask: Optional numpy bool array for node exclusions (True = include).
+ edge_mask: Optional numpy bool array for edge exclusions (True = include).
Returns:
- tuple[float, dict[NodeID, dict[NodeID, float]]]:
-
-- Total feasible flow from ``src_node`` to ``dst_node``.
-- Normalized flow fractions in forward orientation (``[u][v]`` >= 0).
+ netgraph_core.FlowPolicy: Configured policy instance.
Raises:
- ValueError: If src_node or dst_node is not in the graph, or the flow_placement
- is unsupported.
-
----
-
-## ngraph.algorithms.edge_select
+ ValueError: If an unknown FlowPolicyPreset value is provided.
-Edge selection algorithms for routing.
+Example:
+ >>> backend = netgraph_core.Backend.cpu()
+ >>> algs = netgraph_core.Algorithms(backend)
+ >>> graph = algs.build_graph(strict_multidigraph)
+ >>> policy = create_flow_policy(algs, graph, FlowPolicyPreset.SHORTEST_PATHS_ECMP)
-Provides selection routines used by SPF to choose candidate edges between
-neighbors according to cost and capacity constraints.
+---
-### edge_select_fabric(edge_select: ngraph.algorithms.base.EdgeSelect, select_value: Optional[Any] = None, edge_select_func: Optional[Callable[[ngraph.graph.strict_multidigraph.StrictMultiDiGraph, Hashable, Hashable, Dict[Hashable, Dict[str, Any]], Optional[Set[Hashable]], Optional[Set[Hashable]]], Tuple[Union[int, float], List[Hashable]]]] = None, excluded_edges: Optional[Set[Hashable]] = None, excluded_nodes: Optional[Set[Hashable]] = None, cost_attr: str = 'cost', capacity_attr: str = 'capacity', flow_attr: str = 'flow') -> Callable[[ngraph.graph.strict_multidigraph.StrictMultiDiGraph, Hashable, Hashable, Dict[Hashable, Dict[str, Any]], Optional[Set[Hashable]], Optional[Set[Hashable]]], Tuple[Union[int, float], List[Hashable]]]
+## ngraph.model.network
-Create an edge-selection callable for SPF.
+Network topology modeling with Node, Link, RiskGroup, and Network classes.
-Args:
- edge_select: An EdgeSelect enum specifying the selection strategy.
- select_value: An optional numeric threshold or scaling factor for capacity checks.
- edge_select_func: A user-supplied function if edge_select=USER_DEFINED.
- excluded_edges: A set of edges to ignore entirely.
- excluded_nodes: A set of nodes to skip (if the destination node is in this set).
- cost_attr: The edge attribute name representing cost.
- capacity_attr: The edge attribute name representing capacity.
- flow_attr: The edge attribute name representing current flow.
+### Link
-Returns:
- Callable: Function with signature
- ``(graph, src, dst, edges_dict, excluded_edges, excluded_nodes) ->
- (selected_cost, [edge_ids])``.
+Represents one directed link between two nodes.
----
+The model stores a single direction (``source`` -> ``target``). When building
+the working graph for analysis, a reverse edge is added by default to provide
+bidirectional connectivity. Disable with ``add_reverse=False`` in
+``Network.to_strict_multidigraph``.
-## ngraph.algorithms.flow_init
+Attributes:
+ source (str): Name of the source node.
+ target (str): Name of the target node.
+ capacity (float): Link capacity (default 1.0).
+ cost (float): Link cost (default 1.0).
+ disabled (bool): Whether the link is disabled.
+ risk_groups (Set[str]): Set of risk group names this link belongs to.
+ attrs (Dict[str, Any]): Additional metadata (e.g., distance).
+ id (str): Auto-generated unique identifier: "{source}|{target}|".
-Flow graph initialization utilities.
+**Attributes:**
-Ensures nodes and edges carry aggregate (``flow_attr``) and per-flow
-(``flows_attr``) attributes, optionally resetting existing values.
+- `source` (str)
+- `target` (str)
+- `capacity` (float) = 1.0
+- `cost` (float) = 1.0
+- `disabled` (bool) = False
+- `risk_groups` (Set[str]) = set()
+- `attrs` (Dict[str, Any]) = {}
+- `id` (str)
-### init_flow_graph(flow_graph: 'StrictMultiDiGraph', flow_attr: 'str' = 'flow', flows_attr: 'str' = 'flows', reset_flow_graph: 'bool' = True) -> 'StrictMultiDiGraph'
+### Network
-Ensure that nodes and edges expose flow-related attributes.
+A container for network nodes and links.
-For each node and edge:
+Network represents the scenario-level topology with persistent state (nodes/links
+that are disabled in the scenario configuration). For temporary exclusion of
+nodes/links during analysis (e.g., failure simulation), use node_mask and edge_mask
+parameters when calling NetGraph-Core algorithms.
-- The attribute named `flow_attr` (default: "flow") is set to 0.
-- The attribute named `flows_attr` (default: "flows") is set to an empty dict.
+Attributes:
+ nodes (Dict[str, Node]): Mapping from node name -> Node object.
+ links (Dict[str, Link]): Mapping from link ID -> Link object.
+ risk_groups (Dict[str, RiskGroup]): Top-level risk groups by name.
+ attrs (Dict[str, Any]): Optional metadata about the network.
-If `reset_flow_graph` is True, any existing flow values in these attributes
-are overwritten; otherwise they are only created if missing.
+**Attributes:**
-Args:
- flow_graph: The StrictMultiDiGraph whose nodes and edges should be
- prepared for flow assignment.
- flow_attr: The attribute name to track a numeric flow value per node/edge.
- flows_attr: The attribute name to track multiple flow identifiers (and flows).
- reset_flow_graph: If True, reset existing flows (set to 0). If False, do not overwrite.
+- `nodes` (Dict[str, Node]) = {}
+- `links` (Dict[str, Link]) = {}
+- `risk_groups` (Dict[str, RiskGroup]) = {}
+- `attrs` (Dict[str, Any]) = {}
+- `_selection_cache` (Dict[str, Dict[str, List[Node]]]) = {}
-Returns:
- StrictMultiDiGraph: The same graph instance after attribute checks.
+**Methods:**
----
+- `add_link(self, link: 'Link') -> 'None'` - Add a link to the network (keyed by the link's auto-generated ID).
+- `add_node(self, node: 'Node') -> 'None'` - Add a node to the network (keyed by node.name).
+- `build_core_graph(self, add_reverse: 'bool' = True, augmentations: 'Optional[List]' = None, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Tuple[Any, Any, Any, Any]'` - Build NetGraph-Core graph representation.
+- `disable_all(self) -> 'None'` - Mark all nodes and links as disabled.
+- `disable_link(self, link_id: 'str') -> 'None'` - Mark a link as disabled.
+- `disable_node(self, node_name: 'str') -> 'None'` - Mark a node as disabled.
+- `disable_risk_group(self, name: 'str', recursive: 'bool' = True) -> 'None'` - Disable all nodes/links that have 'name' in their risk_groups.
+- `enable_all(self) -> 'None'` - Mark all nodes and links as enabled.
+- `enable_link(self, link_id: 'str') -> 'None'` - Mark a link as enabled.
+- `enable_node(self, node_name: 'str') -> 'None'` - Mark a node as enabled.
+- `enable_risk_group(self, name: 'str', recursive: 'bool' = True) -> 'None'` - Enable all nodes/links that have 'name' in their risk_groups.
+- `find_links(self, source_regex: 'Optional[str]' = None, target_regex: 'Optional[str]' = None, any_direction: 'bool' = False) -> 'List[Link]'` - Search for links using optional regex patterns for source or target node names.
+- `get_links_between(self, source: 'str', target: 'str') -> 'List[str]'` - Retrieve all link IDs that connect the specified source node to the target node.
+- `select_node_groups_by_path(self, path: 'str') -> 'Dict[str, List[Node]]'` - Select and group nodes by regex on name or by attribute directive.
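+
+A minimal construction sketch (assuming `Node`, `Link`, and `Network` are imported
+from `ngraph.model.network`; node names are illustrative):
+
+ ```python
+ from ngraph.model.network import Link, Network, Node
+
+ net = Network()
+ net.add_node(Node(name="A"))
+ net.add_node(Node(name="B"))
+ net.add_link(Link(source="A", target="B", capacity=10.0, cost=1.0))
+
+ # Links are keyed by their auto-generated IDs.
+ link_ids = net.get_links_between("A", "B")
+ ```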
-## ngraph.algorithms.max_flow
+### Node
-Maximum-flow computation via iterative shortest-path augmentation.
+Represents a node in the network.
-Implements a practical Edmonds-Karp-like procedure using SPF with capacity
-constraints and configurable flow-splitting across equal-cost parallel edges.
-Provides helpers for saturated-edge detection and simple sensitivity analysis.
+Each node is uniquely identified by its name, which is used as
+the key in the Network's node dictionary.
-### calc_max_flow(graph: ngraph.graph.strict_multidigraph.StrictMultiDiGraph, src_node: Hashable, dst_node: Hashable, *, return_summary: bool = False, return_graph: bool = False, flow_placement: ngraph.algorithms.base.FlowPlacement = , shortest_path: bool = False, reset_flow_graph: bool = False, capacity_attr: str = 'capacity', flow_attr: str = 'flow', flows_attr: str = 'flows', copy_graph: bool = True, tolerance: float = 1e-10) -> Union[float, tuple]
+Attributes:
+ name (str): Unique identifier for the node.
+ disabled (bool): Whether the node is disabled in the scenario configuration.
+ risk_groups (Set[str]): Set of risk group names this node belongs to.
+ attrs (Dict[str, Any]): Additional metadata (e.g., coordinates, region).
-Compute max flow between two nodes in a directed multi-graph.
+**Attributes:**
-Uses iterative shortest-path augmentation with capacity-aware SPF and
-configurable flow placement.
+- `name` (str)
+- `disabled` (bool) = False
+- `risk_groups` (Set[str]) = set()
+- `attrs` (Dict[str, Any]) = {}
-By default, this function:
+### RiskGroup
-1. Creates or re-initializes a flow-aware copy of the graph (via ``init_flow_graph``).
-2. Repeatedly finds a path from ``src_node`` to ``dst_node`` using ``spf`` with
+Represents a shared-risk or failure domain, which may have nested children.
- capacity constraints (``EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING``).
+Attributes:
+ name (str): Unique name of this risk group.
+ children (List[RiskGroup]): Subdomains in a nested structure.
+ disabled (bool): Whether this group was declared disabled on load.
+ attrs (Dict[str, Any]): Additional metadata for the risk group.
-3. Places flow along that path (via ``place_flow_on_graph``) until no augmenting path
+**Attributes:**
- remains or the capacities are exhausted.
+- `name` (str)
+- `children` (List[RiskGroup]) = []
+- `disabled` (bool) = False
+- `attrs` (Dict[str, Any]) = {}
-If ``shortest_path=True``, the function performs only one iteration (single augmentation)
-and returns the flow placed along that single path (not the true max flow).
+---
-Args:
- graph (StrictMultiDiGraph):
- The original graph containing capacity/flow attributes on each edge.
- src_node (NodeID):
- The source node for flow.
- dst_node (NodeID):
- The destination node for flow.
- return_summary (bool):
- If True, return a FlowSummary with detailed flow analytics.
- Defaults to False.
- return_graph (bool):
- If True, return the mutated flow graph along with other results.
- Defaults to False.
- flow_placement (FlowPlacement):
- Determines how flow is split among parallel edges of equal cost.
- Defaults to ``FlowPlacement.PROPORTIONAL``.
- shortest_path (bool):
- If True, place flow only once along the first shortest path found and return
- immediately, rather than iterating for the true max flow.
- reset_flow_graph (bool):
- If True, reset any existing flow data (e.g., ``flow_attr``, ``flows_attr``).
- Defaults to False.
- capacity_attr (str):
- The name of the capacity attribute on edges. Defaults to "capacity".
- flow_attr (str):
- The name of the aggregated flow attribute on edges. Defaults to "flow".
- flows_attr (str):
- The name of the per-flow dictionary attribute on edges. Defaults to "flows".
- copy_graph (bool):
- If True, work on a copy of the original graph so it remains unmodified.
- Defaults to True.
- tolerance (float):
- Tolerance for floating-point comparisons when determining saturated edges
- and residual capacity. Defaults to 1e-10.
+## ngraph.model.path
-Returns:
- Union[float, tuple]:
+Lightweight representation of a single routing path.
-- If neither flag: ``float`` total flow.
-- If return_summary only: ``tuple[float, FlowSummary]``.
-- If both flags: ``tuple[float, FlowSummary, StrictMultiDiGraph]``.
+The ``Path`` dataclass stores a node-and-parallel-edges sequence and a numeric
+cost. Cached properties expose derived sequences for nodes and edges, and
+helpers provide equality, ordering by cost, and sub-path extraction with cost
+recalculation.
-Notes:
+Breaking change from v1.x: Edge references now use EdgeRef (link_id + direction)
+instead of integer edge keys for stable scenario-level edge identification.
-- When using return_summary or return_graph, the return value is a tuple.
+### Path
-Examples:
- >>> g = StrictMultiDiGraph()
- >>> g.add_node('A')
- >>> g.add_node('B')
- >>> g.add_node('C')
- >>> g.add_edge('A', 'B', capacity=10.0, flow=0.0, flows={}, cost=1)
- >>> g.add_edge('B', 'C', capacity=5.0, flow=0.0, flows={}, cost=1)
- >>>
- >>> # Basic usage (scalar return)
- >>> max_flow_value = calc_max_flow(g, 'A', 'C')
- >>> print(max_flow_value)
- 5.0
- >>>
- >>> # With flow summary analytics
- >>> flow, summary = calc_max_flow(g, 'A', 'C', return_summary=True)
- >>> print(f"Min-cut edges: {summary.min_cut}")
- >>>
- >>> # With both summary and mutated graph
- >>> flow, summary, flow_graph = calc_max_flow(
- ... g, 'A', 'C', return_summary=True, return_graph=True
- ... )
- >>> # flow_graph contains the flow assignments
-
-### run_sensitivity(graph: ngraph.graph.strict_multidigraph.StrictMultiDiGraph, src_node: Hashable, dst_node: Hashable, *, capacity_attr: str = 'capacity', flow_attr: str = 'flow', change_amount: float = 1.0, **kwargs) -> dict[tuple, float]
-
-Simple sensitivity analysis for per-edge capacity changes.
-
-Tests changing each saturated edge capacity by change_amount and measures
-the resulting change in total flow. Positive values increase capacity,
-negative values decrease capacity (with validation to prevent negative capacities).
+Represents a single path in the network.
-Args:
- graph: The graph to analyze
- src_node: Source node
- dst_node: Destination node
- capacity_attr: Name of capacity attribute
- flow_attr: Name of flow attribute
- change_amount: Amount to change capacity for testing (positive=increase, negative=decrease)
- **kwargs: Additional arguments passed to calc_max_flow
+Breaking change from v1.x: path field now uses EdgeRef (link_id + direction)
+instead of integer edge keys for stable scenario-level edge identification.
-Returns:
- dict[tuple, float]: Flow delta per modified edge.
+Attributes:
+ path: Sequence of (node_name, (edge_refs...)) tuples representing the path.
+ The final element typically has an empty tuple of edge refs.
+ cost: Total numeric cost (e.g., distance or metric) of the path.
+ edges: Set of all EdgeRefs encountered in the path.
+ nodes: Set of all node names encountered in the path.
+ edge_tuples: Set of all tuples of parallel EdgeRefs from each path element.
-### saturated_edges(graph: ngraph.graph.strict_multidigraph.StrictMultiDiGraph, src_node: Hashable, dst_node: Hashable, *, capacity_attr: str = 'capacity', flow_attr: str = 'flow', tolerance: float = 1e-10, **kwargs) -> list[tuple]
+**Attributes:**
-Identify saturated edges in the max-flow solution.
+- `path` (Tuple[Tuple[str, Tuple[EdgeRef, ...]], ...])
+- `cost` (Cost)
+- `edges` (Set[EdgeRef]) = set()
+- `nodes` (Set[str]) = set()
+- `edge_tuples` (Set[Tuple[EdgeRef, ...]]) = set()
-Args:
- graph: The graph to analyze
- src_node: Source node
- dst_node: Destination node
- capacity_attr: Name of capacity attribute
- flow_attr: Name of flow attribute
- tolerance: Tolerance for considering an edge saturated
- **kwargs: Additional arguments passed to calc_max_flow
+**Methods:**
-Returns:
- list[tuple]: Edges ``(u, v, k)`` with residual capacity <= ``tolerance``.
+- `get_sub_path(self, dst_node: 'str', graph: 'StrictMultiDiGraph | None' = None, cost_attr: 'str' = 'cost') -> 'Path'` - Create a sub-path ending at the specified destination node.
---
-## ngraph.algorithms.paths
-
-Path manipulation utilities.
-
-Provides helpers to enumerate realized paths from a predecessor map produced by
-SPF/KSP, with optional expansion of parallel edges into distinct paths.
-
-### resolve_to_paths(src_node: 'NodeID', dst_node: 'NodeID', pred: 'Dict[NodeID, Dict[NodeID, List[EdgeID]]]', split_parallel_edges: 'bool' = False) -> 'Iterator[PathTuple]'
-
-Enumerate all paths from a predecessor map.
+## ngraph.solver.maxflow
-Args:
- src_node: Source node ID.
- dst_node: Destination node ID.
- pred: Predecessor map from SPF or KSP.
- split_parallel_edges: If True, expand parallel edges into distinct paths.
+Max-flow computation between node groups with NetGraph-Core integration.
-Yields:
- PathTuple: Sequence of ``(node_id, (edge_ids,))`` pairs from source to dest.
+This module provides max-flow analysis for Network models by transforming
+multi-source/multi-sink problems into single-source/single-sink problems
+using pseudo nodes.
----
+Key functions:
-## ngraph.algorithms.placement
+- max_flow(): Compute max flow values between node groups
+- max_flow_with_details(): Max flow with cost distribution details
+- sensitivity_analysis(): Identify critical edges and flow reduction
+- build_maxflow_cache(): Build cache for efficient repeated analysis
-Flow placement for routing over equal-cost predecessor DAGs.
+Graph caching (via MaxFlowGraphCache) enables efficient repeated analysis with
+different exclusion sets by building the graph with pseudo nodes once and using
+O(|excluded|) masks for exclusions. Disabled nodes/links are automatically
+handled via the underlying GraphCache from ngraph.adapters.core.
-Places feasible flow on a graph given predecessor relations and a placement
-strategy, updating aggregate and per-flow attributes.
+### MaxFlowGraphCache
-### FlowPlacementMeta
+Pre-built graph with pseudo nodes for efficient repeated max-flow analysis.
-Metadata describing how flow was placed on the graph.
+Composes a GraphCache with additional pseudo node mappings for max-flow.
Attributes:
- placed_flow: The amount of flow actually placed.
- remaining_flow: The portion of flow that could not be placed due to capacity limits.
- nodes: Set of node IDs that participated in the flow.
- edges: Set of edge IDs that carried some portion of this flow.
+ base_cache: Underlying GraphCache with graph, mappers, and disabled topology.
+ pair_to_pseudo_ids: Mapping from (src_label, snk_label) to (pseudo_src_id, pseudo_snk_id).
**Attributes:**
-- `placed_flow` (float)
-- `remaining_flow` (float)
-- `nodes` (Set[NodeID]) = set()
-- `edges` (Set[EdgeID]) = set()
+- `base_cache` (GraphCache)
+- `pair_to_pseudo_ids` (Dict[Tuple[str, str], Tuple[int, int]]) = {}
-### place_flow_on_graph(flow_graph: 'StrictMultiDiGraph', src_node: 'NodeID', dst_node: 'NodeID', pred: 'Dict[NodeID, Dict[NodeID, List[EdgeID]]]', flow: 'float' = inf, flow_index: 'Optional[Hashable]' = None, flow_placement: 'FlowPlacement' = , capacity_attr: 'str' = 'capacity', flow_attr: 'str' = 'flow', flows_attr: 'str' = 'flows') -> 'FlowPlacementMeta'
+### build_maxflow_cache(network: 'Network', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine') -> 'MaxFlowGraphCache'
-Place flow from ``src_node`` to ``dst_node`` on ``flow_graph``.
+Build cached graph with pseudo nodes for efficient repeated max-flow analysis.
-Uses a precomputed `flow_dict` from `calc_graph_capacity` to figure out how
-much flow can be placed. Updates the graph's edges and nodes with the placed flow.
+Constructs a single graph with all pseudo source/sink nodes for all
+source/sink pairs, enabling O(|excluded|) mask building per iteration
+instead of O(V+E) graph reconstruction.
Args:
- flow_graph: The graph on which flow will be placed.
- src_node: The source node.
- dst_node: The destination node.
- pred: A dictionary of node->(adj_node->list_of_edge_IDs) giving path adjacency.
- flow: Requested flow amount; can be infinite.
- flow_index: Identifier for this flow (used to track multiple flows).
- flow_placement: Strategy for distributing flow among parallel equal cost paths.
- capacity_attr: Attribute name on edges for capacity.
- flow_attr: Attribute name on edges/nodes for aggregated flow.
- flows_attr: Attribute name on edges/nodes for per-flow tracking.
+ network: Network instance.
+ source_path: Selection expression for source node groups.
+ sink_path: Selection expression for sink node groups.
+ mode: "combine" (single pair) or "pairwise" (N×M pairs).
Returns:
- FlowPlacementMeta: Amount placed, remaining amount, and touched nodes/edges.
-
-### remove_flow_from_graph(flow_graph: 'StrictMultiDiGraph', flow_index: 'Optional[Hashable]' = None, flow_attr: 'str' = 'flow', flows_attr: 'str' = 'flows') -> 'None'
-
-Remove one or all flows from the graph.
+ MaxFlowGraphCache with pre-built graph and pseudo node mappings.
-Args:
- flow_graph: Graph whose edge flow attributes will be modified.
- flow_index: If provided, remove only the specified flow; otherwise remove all.
- flow_attr: Aggregate flow attribute name on edges.
- flows_attr: Per-flow attribute name on edges.
-
----
+Raises:
+ ValueError: If no matching sources or sinks are found.
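+
+A sketch of the intended reuse pattern (the selection regexes and the iterable of
+failed link-ID sets are illustrative):
+
+ ```python
+ from ngraph.solver.maxflow import build_maxflow_cache, max_flow
+
+ # Build the graph with pseudo nodes once...
+ cache = build_maxflow_cache(network, "^dc/.*", "^edge/.*", mode="combine")
+
+ # ...then reuse it; each call builds only O(|excluded|) masks.
+ for failed_links in failure_iterations:  # hypothetical iterable of link-ID sets
+     flows = max_flow(
+         network, "^dc/.*", "^edge/.*",
+         excluded_links=failed_links, _cache=cache,
+     )
+ ```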
-## ngraph.algorithms.spf
+### max_flow(network: 'Network', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = FlowPlacement.PROPORTIONAL, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None, _cache: 'Optional[MaxFlowGraphCache]' = None) -> 'Dict[Tuple[str, str], float]'
-Shortest-path-first (SPF) algorithms.
+Compute max flow between node groups in a network.
-Implements Dijkstra-like SPF with pluggable edge-selection policies and a
-Yen-like KSP generator. Specialized fast paths exist for common selection
-strategies without exclusions.
+This function calculates the maximum flow from a set of source nodes
+to a set of sink nodes within the provided network.
-Notes:
- When a destination node is known, SPF supports an optimized mode that
- terminates once the destination's minimal distance is settled. In this mode:
+When `_cache` is provided, uses O(|excluded|) mask building instead of
+O(V+E) graph reconstruction for efficient repeated analysis.
-- The destination node is not expanded (no neighbor relaxation from ``dst``).
-- The algorithm continues processing any nodes with equal distance to capture
+Args:
+ network: Network instance containing topology and node/link data.
+ source_path: Selection expression for source node groups.
+ sink_path: Selection expression for sink node groups.
+ mode: "combine" (all sources to all sinks) or "pairwise" (each pair separately).
+ shortest_path: If True, restricts flow to shortest paths only.
+ flow_placement: Strategy for distributing flow among equal-cost edges.
+ excluded_nodes: Optional set of node names to exclude.
+ excluded_links: Optional set of link IDs to exclude.
+ _cache: Pre-built cache for efficient repeated analysis.
- equal-cost predecessors (needed by proportional flow placement).
+Returns:
+ Dict mapping (source_label, sink_label) to total flow value.
-### ksp(graph: ngraph.graph.strict_multidigraph.StrictMultiDiGraph, src_node: Hashable, dst_node: Hashable, edge_select: ngraph.algorithms.base.EdgeSelect = , edge_select_func: Optional[Callable[[ngraph.graph.strict_multidigraph.StrictMultiDiGraph, Hashable, Hashable, Dict[Hashable, Dict[str, Any]], Set[Hashable], Set[Hashable]], Tuple[Union[int, float], List[Hashable]]]] = None, max_k: Optional[int] = None, max_path_cost: Union[int, float] = inf, max_path_cost_factor: Optional[float] = None, multipath: bool = True, excluded_edges: Optional[Set[Hashable]] = None, excluded_nodes: Optional[Set[Hashable]] = None) -> Iterator[Tuple[Dict[Hashable, Union[int, float]], Dict[Hashable, Dict[Hashable, List[Hashable]]]]]
+Raises:
+ ValueError: If no matching sources or sinks are found.
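+
+Example sketch (assumes a populated `Network`; group regexes are illustrative):
+
+ ```python
+ from ngraph.solver.maxflow import max_flow
+
+ flows = max_flow(network, "^datacenter/.*", "^edge/.*", mode="combine")
+ for (src_label, snk_label), value in flows.items():
+     print(f"{src_label} -> {snk_label}: {value}")
+ ```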
-Yield up to k shortest paths using a Yen-like algorithm.
+### max_flow_with_details(network: 'Network', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = FlowPlacement.PROPORTIONAL, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None, _cache: 'Optional[MaxFlowGraphCache]' = None) -> 'Dict[Tuple[str, str], FlowSummary]'
-The initial SPF (shortest path) is computed; subsequent paths are found by systematically
-excluding edges/nodes used by previously generated paths. Each iteration yields a
-(costs, pred) describing one path. Stops if there are no more valid paths or if max_k
-is reached.
+Compute max flow with detailed results including cost distribution.
-Args:
- graph: The directed graph (StrictMultiDiGraph).
- src_node: The source node.
- dst_node: The destination node.
- edge_select: The edge selection strategy. Defaults to ALL_MIN_COST.
- edge_select_func: Optional override of the default edge selection function.
- max_k: If set, yields at most k distinct paths.
- max_path_cost: If set, do not yield any path whose total cost > max_path_cost.
- max_path_cost_factor: If set, updates max_path_cost to:
- min(max_path_cost, best_path_cost * max_path_cost_factor).
- multipath: Whether to consider multiple same-cost expansions in SPF.
- excluded_edges: Set of edge IDs to exclude globally.
- excluded_nodes: Set of node IDs to exclude globally.
-
-Yields:
- Tuple of ``(costs, pred)`` per discovered path in ascending cost order.
-
-### spf(graph: ngraph.graph.strict_multidigraph.StrictMultiDiGraph, src_node: Hashable, edge_select: ngraph.algorithms.base.EdgeSelect = , edge_select_func: Optional[Callable[[ngraph.graph.strict_multidigraph.StrictMultiDiGraph, Hashable, Hashable, Dict[Hashable, Dict[str, Any]], Set[Hashable], Set[Hashable]], Tuple[Union[int, float], List[Hashable]]]] = None, multipath: bool = True, excluded_edges: Optional[Set[Hashable]] = None, excluded_nodes: Optional[Set[Hashable]] = None, dst_node: Optional[Hashable] = None) -> Tuple[Dict[Hashable, Union[int, float]], Dict[Hashable, Dict[Hashable, List[Hashable]]]]
-
-Compute shortest paths from a source node.
-
-By default, uses EdgeSelect.ALL_MIN_COST. If multipath=True, multiple equal-cost
-paths to the same node will be recorded in the predecessor structure. If no
-excluded edges/nodes are given and edge_select is one of the specialized
-(ALL_MIN_COST or ALL_MIN_COST_WITH_CAP_REMAINING), it uses a fast specialized
-routine.
+When `_cache` is provided, uses O(|excluded|) mask building instead of
+O(V+E) graph reconstruction for efficient repeated analysis.
Args:
- graph: The directed graph (StrictMultiDiGraph).
- src_node: The source node from which to compute shortest paths.
- edge_select: The edge selection strategy. Defaults to ALL_MIN_COST.
- edge_select_func: If provided, overrides the default edge selection function.
- Must return (cost, list_of_edges) for the given node->neighbor adjacency.
- multipath: Whether to record multiple same-cost paths.
- excluded_edges: A set of edge IDs to ignore in the graph.
- excluded_nodes: A set of node IDs to ignore in the graph.
- dst_node: Optional destination node. If provided, SPF avoids expanding
- from the destination and performs early termination once the next
- candidate in the heap would exceed the settled distance for
- ``dst_node``. This preserves equal-cost predecessors while avoiding
- unnecessary relaxations beyond the destination.
+ network: Network instance.
+ source_path: Selection expression for source groups.
+ sink_path: Selection expression for sink groups.
+ mode: "combine" or "pairwise".
+ shortest_path: If True, restricts flow to shortest paths.
+ flow_placement: Flow placement strategy.
+ excluded_nodes: Optional set of node names to exclude.
+ excluded_links: Optional set of link IDs to exclude.
+ _cache: Pre-built cache for efficient repeated analysis.
Returns:
- tuple[dict[NodeID, Cost], dict[NodeID, dict[NodeID, list[EdgeID]]]]:
- Costs and predecessor mapping.
-
-Raises:
- KeyError: If src_node does not exist in graph.
-
----
+ Dict mapping (source_label, sink_label) to FlowSummary.
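+
+A hedged usage sketch (assumes `FlowSummary` exposes `total_flow` and
+`cost_distribution`, as in the earlier API):
+
+ ```python
+ from ngraph.solver.maxflow import max_flow_with_details
+
+ summaries = max_flow_with_details(network, "^datacenter/.*", "^edge/.*")
+ for pair, summary in summaries.items():
+     print(pair, summary.total_flow, summary.cost_distribution)
+ ```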
-## ngraph.algorithms.types
+### sensitivity_analysis(network: 'Network', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = FlowPlacement.PROPORTIONAL, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None, _cache: 'Optional[MaxFlowGraphCache]' = None) -> 'Dict[Tuple[str, str], Dict[str, float]]'
-Types and data structures for algorithm analytics.
+Analyze sensitivity of max flow to edge failures.
-Defines immutable summary containers and aliases for algorithm outputs.
+Identifies critical edges and computes the flow reduction caused by
+removing each one.
-### FlowSummary
+When `_cache` is provided, uses O(|excluded|) mask building instead of
+O(V+E) graph reconstruction for efficient repeated analysis.
-Summary of max-flow computation results.
+The `shortest_path` parameter controls routing semantics:
-Captures edge flows, residual capacities, reachable set, and min-cut.
+- shortest_path=False (default): Full max-flow; reports all saturated edges.
+- shortest_path=True: Shortest-path-only (IP/IGP); reports only edges
-Attributes:
- total_flow: Maximum flow value achieved.
- edge_flow: Flow amount per edge, indexed by ``(src, dst, key)``.
- residual_cap: Remaining capacity per edge after placement.
- reachable: Nodes reachable from source in residual graph.
- min_cut: Saturated edges crossing the s-t cut.
- cost_distribution: Mapping of path cost to flow volume placed at that cost.
+ used under ECMP routing.
-**Attributes:**
+Args:
+ network: Network instance.
+ source_path: Selection expression for source groups.
+ sink_path: Selection expression for sink groups.
+ mode: "combine" or "pairwise".
+ shortest_path: If True, use single-tier shortest-path flow (IP/IGP).
+ If False, use full iterative max-flow (SDN/TE).
+ flow_placement: Flow placement strategy.
+ excluded_nodes: Optional set of node names to exclude.
+ excluded_links: Optional set of link IDs to exclude.
+ _cache: Pre-built cache for efficient repeated analysis.
-- `total_flow` (float)
-- `edge_flow` (Dict[Edge, float])
-- `residual_cap` (Dict[Edge, float])
-- `reachable` (Set[str])
-- `min_cut` (List[Edge])
-- `cost_distribution` (Dict[Cost, float])
+Returns:
+ Dict mapping (source_label, sink_label) to {link_id: flow_reduction}.
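+
+Example sketch of ranking critical links (selections are illustrative):
+
+ ```python
+ from ngraph.solver.maxflow import sensitivity_analysis
+
+ results = sensitivity_analysis(network, "^datacenter/.*", "^edge/.*", mode="combine")
+ for pair, reductions in results.items():
+     if reductions:
+         # The largest flow reduction marks the most critical link for this pair.
+         worst_link = max(reductions, key=reductions.get)
+         print(pair, worst_link, reductions[worst_link])
+ ```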
---
-## ngraph.paths.bundle
+## ngraph.solver.paths
-Utilities for compact representation of equal-cost path sets.
+Shortest-path solver wrappers bound to the model layer.
-This module defines ``PathBundle``, a structure that represents one or more
-equal-cost paths between two nodes using a predecessor map. It supports
-concatenation, containment checks, sub-bundle extraction with cost
-recalculation, and enumeration into concrete ``Path`` instances.
+Expose convenience functions for computing shortest paths between node groups
+selected from a ``Network`` context. Selection semantics mirror the max-flow
+wrappers with ``mode`` in {"combine", "pairwise"}.
-### PathBundle
+Functions return minimal costs or concrete ``Path`` objects built from SPF
+predecessor maps. Parallel equal-cost edges can be expanded into distinct
+paths.
-A collection of equal-cost paths between two nodes.
-
-This class encapsulates one or more parallel paths (all of the same cost)
-between `src_node` and `dst_node`. The predecessor map `pred` associates
-each node with the node(s) from which it can be reached, along with a list
-of edge IDs used in that step. The constructor performs a reverse traversal
-from `dst_node` to `src_node` to collect all edges, nodes, and store them
-in this bundle.
-
-The constructor assumes the predecessor relation forms a DAG between
-``src_node`` and ``dst_node``. No cycle detection is performed. If cycles
-are present, traversal may not terminate.
-
-**Methods:**
-
-- `add(self, other: 'PathBundle') -> 'PathBundle'` - Concatenate this bundle with another bundle (end-to-start).
-- `contains(self, other: 'PathBundle') -> 'bool'` - Check if this bundle's edge set contains all edges of `other`.
-- `from_path(path: 'Path', resolve_edges: 'bool' = False, graph: 'Optional[StrictMultiDiGraph]' = None, edge_select: 'Optional[EdgeSelect]' = None, cost_attr: 'str' = 'cost', capacity_attr: 'str' = 'capacity') -> 'PathBundle'` - Construct a PathBundle from a single `Path` object.
-- `get_sub_path_bundle(self, new_dst_node: 'NodeID', graph: 'StrictMultiDiGraph', cost_attr: 'str' = 'cost') -> 'PathBundle'` - Create a sub-bundle ending at `new_dst_node` with correct minimal cost.
-- `is_disjoint_from(self, other: 'PathBundle') -> 'bool'` - Check if this bundle shares no edges with `other`.
-- `is_subset_of(self, other: 'PathBundle') -> 'bool'` - Check if this bundle's edge set is contained in `other`'s edge set.
-- `resolve_to_paths(self, split_parallel_edges: 'bool' = False) -> 'Iterator[Path]'` - Generate all concrete `Path` objects contained in this PathBundle.
-
----
-
-## ngraph.paths.path
-
-Lightweight representation of a single routing path.
-
-The ``Path`` dataclass stores a node-and-parallel-edges sequence and a numeric
-cost. Cached properties expose derived sequences for nodes and edges, and
-helpers provide equality, ordering by cost, and sub-path extraction with cost
-recalculation.
-
-### Path
-
-Represents a single path in the network.
-
-Attributes:
- path_tuple (PathTuple):
- A sequence of path elements. Each element is a tuple of the form
- (node_id, (edge_id_1, edge_id_2, ...)), where the final element typically has an empty tuple.
- cost (Cost):
- The total numeric cost (e.g., distance or metric) of the path.
- edges (Set[EdgeID]):
- A set of all edge IDs encountered in the path.
- nodes (Set[NodeID]):
- A set of all node IDs encountered in the path.
- edge_tuples (Set[Tuple[EdgeID, ...]]):
- A set of all tuples of parallel edges from each path element (including the final empty tuple).
-
-**Attributes:**
-
-- `path_tuple` (PathTuple)
-- `cost` (Cost)
-- `edges` (Set[EdgeID]) = set()
-- `nodes` (Set[NodeID]) = set()
-- `edge_tuples` (Set[Tuple[EdgeID, ...]]) = set()
-
-**Methods:**
-
-- `get_sub_path(self, dst_node: 'NodeID', graph: 'StrictMultiDiGraph', cost_attr: 'str' = 'cost') -> 'Path'` - Create a sub-path ending at the specified destination node, recalculating the cost.
-
----
-
-## ngraph.flows.flow
-
-Flow and FlowIndex classes for traffic flow representation.
-
-### Flow
-
-Represents a fraction of demand routed along a given PathBundle.
-
-In traffic-engineering scenarios, a `Flow` object can model:
-
-- MPLS LSPs/tunnels with explicit paths,
-- IP forwarding behavior (with ECMP or WCMP),
-- Or anything that follows a specific set of paths.
-
-**Methods:**
-
-- `place_flow(self, flow_graph: 'StrictMultiDiGraph', to_place: 'float', flow_placement: 'FlowPlacement') -> 'Tuple[float, float]'` - Place or update this flow on the graph.
-- `remove_flow(self, flow_graph: 'StrictMultiDiGraph') -> 'None'` - Remove this flow from the graph.
-
-### FlowIndex
-
-Unique identifier for a flow.
-
-Attributes:
- src_node: Source node.
- dst_node: Destination node.
- flow_class: Flow class label (hashable).
- flow_id: Monotonic integer id for this flow.
-
----
-
-## ngraph.flows.policy
-
-FlowPolicy and FlowPolicyConfig classes for traffic routing algorithms.
-
-### FlowPolicy
-
-Create, place, rebalance, and remove flows on a network graph.
-
-Converts a demand into one or more `Flow` objects subject to capacity
-constraints and configuration: path selection, edge selection, and flow
-placement method.
-
-**Methods:**
-
-- `deep_copy(self) -> 'FlowPolicy'` - Return a deep copy of this policy including flows.
-- `get_metrics(self) -> 'Dict[str, float]'` - Return cumulative placement metrics for this policy instance.
-- `place_demand(self, flow_graph: 'StrictMultiDiGraph', src_node: 'NodeID', dst_node: 'NodeID', flow_class: 'Hashable', volume: 'float', target_flow_volume: 'Optional[float]' = None, min_flow: 'Optional[float]' = None) -> 'Tuple[float, float]'` - Place demand volume on the graph by splitting or creating flows as needed.
-- `rebalance_demand(self, flow_graph: 'StrictMultiDiGraph', src_node: 'NodeID', dst_node: 'NodeID', flow_class: 'Hashable', target_flow_volume: 'float') -> 'Tuple[float, float]'` - Rebalance demand across existing flows towards the target volume per flow.
-- `remove_demand(self, flow_graph: 'StrictMultiDiGraph') -> 'None'` - Removes all flows from the network graph without clearing internal state.
-
-### FlowPolicyConfig
-
-Enumerates supported flow policy configurations.
-
-### get_flow_policy(flow_policy_config: 'FlowPolicyConfig') -> 'FlowPolicy'
-
-Create a policy instance from a configuration preset.
-
-Args:
- flow_policy_config: A FlowPolicyConfig enum value specifying the desired policy.
-
-Returns:
- FlowPolicy: Pre-configured policy instance.
-
-Raises:
- ValueError: If an unknown FlowPolicyConfig value is provided.
-
----
-
-## ngraph.solver.helpers
-
----
-
-## ngraph.solver.maxflow
-
-Problem-level max-flow API bound to the model layer.
-
-Functions here operate on a model context that provides:
-
-- to_strict_multidigraph(add_reverse: bool = True) -> StrictMultiDiGraph
-- select_node_groups_by_path(path: str) -> dict[str, list[Node]]
-
-They accept either a `Network` or a `NetworkView`. The input context is not
-mutated. Pseudo source and sink nodes are attached on a working graph when
-computing flows between groups.
-
-### max_flow(context: 'Any', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> 'Dict[Tuple[str, str], float]'
-
-Compute max flow between groups selected from the context.
-
-Creates a working graph from the context, adds a pseudo source attached to
-the selected source nodes and a pseudo sink attached to the selected sink
-nodes, then runs the max-flow routine.
-
-Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
- source_path: Selection expression for source groups.
- sink_path: Selection expression for sink groups.
- mode: Aggregation strategy. "combine" considers all sources as one
- group and all sinks as one group. "pairwise" evaluates each
- source-label and sink-label pair separately.
- shortest_path: If True, perform a single augmentation along the first
- shortest path instead of the full max-flow.
- flow_placement: Strategy for splitting flow among equal-cost parallel
- edges.
-
-Returns:
- Dict[Tuple[str, str], float]: Total flow per (source_label, sink_label).
-
-Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is not one of {"combine", "pairwise"}.
-
-### max_flow_detailed(context: 'Any', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> "Dict[Tuple[str, str], Tuple[float, FlowSummary, 'StrictMultiDiGraph']]"
-
-Compute max flow, return summary and flow graph for each pair.
-
-Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
- source_path: Selection expression for source groups.
- sink_path: Selection expression for sink groups.
- mode: "combine" or "pairwise". See ``max_flow``.
- shortest_path: If True, perform only one augmentation step.
- flow_placement: Strategy for splitting among equal-cost parallel edges.
-
-Returns:
- Dict[Tuple[str, str], Tuple[float, FlowSummary, StrictMultiDiGraph]]:
- For each (source_label, sink_label), the total flow, a summary, and the
- flow-assigned graph.
-
-Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is invalid.
-
-### max_flow_with_graph(context: 'Any', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> "Dict[Tuple[str, str], Tuple[float, 'StrictMultiDiGraph']]"
-
-Compute max flow and return the mutated flow graph for each pair.
-
-Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
- source_path: Selection expression for source groups.
- sink_path: Selection expression for sink groups.
- mode: "combine" or "pairwise". See ``max_flow``.
- shortest_path: If True, perform only one augmentation step.
- flow_placement: Strategy for splitting among equal-cost parallel edges.
-
-Returns:
- Dict[Tuple[str, str], Tuple[float, StrictMultiDiGraph]]: For each
- (source_label, sink_label), the total flow and the flow-assigned graph.
-
-Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is invalid.
-
-### max_flow_with_summary(context: 'Any', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> 'Dict[Tuple[str, str], Tuple[float, FlowSummary]]'
-
-Compute max flow and return a summary for each group pair.
-
-The summary includes total flow, per-edge flow, residual capacity,
-reachable set from the source in the residual graph, min-cut edges, and a
-cost distribution over augmentation steps.
-
-Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
- source_path: Selection expression for source groups.
- sink_path: Selection expression for sink groups.
- mode: "combine" or "pairwise". See ``max_flow``.
- shortest_path: If True, perform only one augmentation step.
- flow_placement: Strategy for splitting among equal-cost parallel edges.
-
-Returns:
- Dict[Tuple[str, str], Tuple[float, FlowSummary]]: For each
- (source_label, sink_label), the total flow and the associated summary.
-
-Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is invalid.
-
-### saturated_edges(context: 'Any', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', tolerance: 'float' = 1e-10, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> 'Dict[Tuple[str, str], List[Tuple[str, str, str]]]'
-
-Identify saturated edges for each selected group pair.
-
-Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
- source_path: Selection expression for source groups.
- sink_path: Selection expression for sink groups.
- mode: "combine" or "pairwise". See ``max_flow``.
- tolerance: Residual capacity threshold to consider an edge saturated.
- shortest_path: If True, perform only one augmentation step.
- flow_placement: Strategy for splitting among equal-cost parallel edges.
-
-Returns:
- Dict[Tuple[str, str], list[tuple[str, str, str]]]: For each
- (source_label, sink_label), a list of saturated edges ``(u, v, k)``.
-
-Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is invalid.
-
-### sensitivity_analysis(context: 'Any', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', change_amount: 'float' = 1.0, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = ) -> 'Dict[Tuple[str, str], Dict[Tuple[str, str, str], float]]'
-
-Perform a simple sensitivity analysis per saturated edge.
-
-For each saturated edge, test a capacity change of ``change_amount`` and
-report the change in total flow. Positive amounts increase capacity; negative
-amounts decrease capacity (with lower bound at zero).
-
-Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
- source_path: Selection expression for source groups.
- sink_path: Selection expression for sink groups.
- mode: "combine" or "pairwise". See ``max_flow``.
- change_amount: Capacity delta to apply when testing each saturated edge.
- shortest_path: If True, perform only one augmentation step.
- flow_placement: Strategy for splitting among equal-cost parallel edges.
-
-Returns:
- Dict[Tuple[str, str], Dict[Tuple[str, str, str], float]]: For each
- (source_label, sink_label), a mapping from saturated edge ``(u, v, k)``
- to the change in total flow after applying the capacity delta.
-
-Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is invalid.
-
----
-
-## ngraph.solver.paths
-
-Shortest-path solver wrappers bound to the model layer.
-
-Expose convenience functions for computing shortest paths between node groups
-selected from a ``Network`` or ``NetworkView`` context. Selection semantics
-mirror the max-flow wrappers with ``mode`` in {"combine", "pairwise"}.
-
-Functions return minimal costs or concrete ``Path`` objects built from SPF
-predecessor maps. Parallel equal-cost edges can be expanded into distinct
-paths.
+Graph caching is used internally for efficient mask-based exclusions. For
+repeated queries with different exclusions, consider using the lower-level
+adapters/core.py functions with explicit cache management.
All functions fail fast on invalid selection inputs and do not mutate the
input context.
@@ -1707,12 +1095,12 @@ Note:
For path queries, overlapping source/sink membership is treated as
unreachable.
-### k_shortest_paths(context: 'Any', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'pairwise', max_k: 'int' = 3, edge_select: 'EdgeSelect' = , max_path_cost: 'float' = inf, max_path_cost_factor: 'Optional[float]' = None, split_parallel_edges: 'bool' = False) -> 'Dict[Tuple[str, str], List[Path]]'
+### k_shortest_paths(network: 'Network', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'pairwise', max_k: 'int' = 3, edge_select: 'EdgeSelect' = EdgeSelect.ALL_MIN_COST, max_path_cost: 'float' = inf, max_path_cost_factor: 'Optional[float]' = None, split_parallel_edges: 'bool' = False, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], List[Path]]'
Return up to K shortest paths per group pair.
Args:
- context: Network or NetworkView.
+ network: Network instance.
source_path: Selection expression for source groups.
sink_path: Selection expression for sink groups.
mode: "pairwise" (default) or "combine".
@@ -1721,6 +1109,8 @@ Args:
max_path_cost: Absolute cost threshold.
max_path_cost_factor: Relative threshold versus best path.
split_parallel_edges: Expand parallel edges into distinct paths when True.
+ excluded_nodes: Optional set of node names to exclude temporarily.
+ excluded_links: Optional set of link IDs to exclude temporarily.
Returns:
Mapping from (source_label, sink_label) to list of Path (<= max_k).
@@ -1730,16 +1120,18 @@ Raises:
ValueError: If no sink nodes match ``sink_path``.
ValueError: If ``mode`` is not "combine" or "pairwise".
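+
+Example sketch (selections are illustrative; `cost` is documented on `Path`):
+
+ ```python
+ from ngraph.solver.paths import k_shortest_paths
+
+ paths = k_shortest_paths(network, "^dc1/.*", "^dc2/.*", max_k=3)
+ for (src_label, snk_label), path_list in paths.items():
+     for p in path_list:  # up to max_k paths per pair
+         print(src_label, snk_label, p.cost)
+ ```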
-### shortest_path_costs(context: 'Any', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', edge_select: 'EdgeSelect' = ) -> 'Dict[Tuple[str, str], float]'
+### shortest_path_costs(network: 'Network', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', edge_select: 'EdgeSelect' = EdgeSelect.ALL_MIN_COST, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], float]'
Return minimal path cost(s) between selected node groups.
Args:
- context: Network or NetworkView.
+ network: Network instance.
source_path: Selection expression for source groups.
sink_path: Selection expression for sink groups.
mode: "combine" or "pairwise".
edge_select: SPF edge selection strategy.
+ excluded_nodes: Optional set of node names to exclude temporarily.
+ excluded_links: Optional set of link IDs to exclude temporarily.
Returns:
Mapping from (source_label, sink_label) to minimal cost; ``inf`` if no
@@ -1750,17 +1142,19 @@ Raises:
ValueError: If no sink nodes match ``sink_path``.
ValueError: If ``mode`` is not "combine" or "pairwise".
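+
+Example sketch with a temporary exclusion (the node name is hypothetical):
+
+ ```python
+ from ngraph.solver.paths import shortest_path_costs
+
+ costs = shortest_path_costs(
+     network, "^dc1/.*", "^dc2/.*",
+     mode="combine", excluded_nodes={"dc1/spine-1"},
+ )
+ # Minimal cost per (source_label, sink_label); `inf` when unreachable.
+ ```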
-### shortest_paths(context: 'Any', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', edge_select: 'EdgeSelect' = , split_parallel_edges: 'bool' = False) -> 'Dict[Tuple[str, str], List[Path]]'
+### shortest_paths(network: 'Network', source_path: 'str', sink_path: 'str', *, mode: 'str' = 'combine', edge_select: 'EdgeSelect' = EdgeSelect.ALL_MIN_COST, split_parallel_edges: 'bool' = False, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], List[Path]]'
Return concrete shortest path(s) between selected node groups.
Args:
- context: Network or NetworkView.
+ network: Network instance.
source_path: Selection expression for source groups.
sink_path: Selection expression for sink groups.
mode: "combine" or "pairwise".
edge_select: SPF edge selection strategy.
split_parallel_edges: Expand parallel edges into distinct paths when True.
+ excluded_nodes: Optional set of node names to exclude temporarily.
+ excluded_links: Optional set of link IDs to exclude temporarily.
Returns:
Mapping from (source_label, sink_label) to list of Path. Empty if
@@ -1773,1481 +1167,1470 @@ Raises:
---
-## ngraph.demand.manager.builder
-
-Builders for traffic matrices.
+## ngraph.workflow.base
-Construct `TrafficMatrixSet` from raw dictionaries (e.g. parsed YAML).
-This logic was previously embedded in `Scenario.from_yaml`.
+Base classes for workflow automation.
-### build_traffic_matrix_set(raw: 'Dict[str, List[dict]]') -> 'TrafficMatrixSet'
+Defines the workflow step abstraction, registration decorator, and execution
+wrapper that adds timing and logging. Steps implement `run()` and are executed
+via `execute()` which records metadata and re-raises failures.
-Build a `TrafficMatrixSet` from a mapping of name -> list of dicts.
+### WorkflowStep
-Args:
- raw: Mapping where each key is a matrix name and each value is a list of
- dictionaries with `TrafficDemand` constructor fields.
+Base class for all workflow steps.
-Returns:
- Initialized `TrafficMatrixSet` with constructed `TrafficDemand` objects.
+All workflow steps are automatically logged with execution timing information.
+All workflow steps support seeding for reproducible random operations.
+Workflow metadata is automatically stored in scenario.results for analysis.
-Raises:
- ValueError: If ``raw`` is not a mapping of name -> list[dict].
+YAML Configuration:
+ ```yaml
+ workflow:
+   - step_type:
----
+     name: "optional_step_name" # Optional: Custom name for this step instance
+     seed: 42 # Optional: Seed for reproducible random operations
+     # ... step-specific parameters ...
+ ```
-## ngraph.demand.manager.expand
+Attributes:
+ name: Optional custom identifier for this workflow step instance,
+ used for logging and result storage purposes.
+ seed: Optional seed for reproducible random operations. If None,
+ random operations will be non-deterministic.
-Expansion helpers for traffic demand specifications.
+**Attributes:**
-Public functions here convert user-facing `TrafficDemand` specifications into
-concrete `Demand` objects that can be placed on a `StrictMultiDiGraph`.
+- `name` (str)
+- `seed` (Optional[int])
+- `_seed_source` (str)
-This module provides the pure expansion logic that was previously embedded in
-`TrafficManager`.
+**Methods:**
-### expand_demands(network: "Union[Network, 'NetworkView']", graph: 'StrictMultiDiGraph | None', traffic_demands: 'List[TrafficDemand]', default_flow_policy_config: 'FlowPolicyConfig') -> 'Tuple[List[Demand], Dict[str, List[Demand]]]'
+- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
+- `run(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step logic.
-Expand traffic demands into concrete `Demand` objects.
+### register_workflow_step(step_type: 'str')
-The result is a flat list of `Demand` plus a mapping from
-``TrafficDemand.id`` to the list of expanded demands for that entry.
+Return a decorator that registers a `WorkflowStep` subclass.
Args:
- network: Network or NetworkView used for node group selection.
- graph: Flow graph to operate on. If ``None``, expansion that requires
- graph mutation (pseudo nodes/edges) is skipped.
- traffic_demands: List of high-level traffic demand specifications.
- default_flow_policy_config: Default policy to apply when a demand does
- not specify an explicit `flow_policy`.
+ step_type: Registry key used to instantiate steps from configuration.
Returns:
- A tuple ``(expanded, td_map)`` where:
-
-- ``expanded`` is the flattened, sorted list of all expanded demands
-
- (sorted by ascending ``demand_class``).
-
-- ``td_map`` maps ``TrafficDemand.id`` to its expanded demands.
+ A class decorator that adds the class to `WORKFLOW_STEP_REGISTRY`.
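+
+A minimal registration sketch (the step type key and logic are illustrative):
+
+ ```python
+ from ngraph.workflow.base import WorkflowStep, register_workflow_step
+
+ @register_workflow_step("NoOp")
+ class NoOp(WorkflowStep):
+     def run(self, scenario) -> None:
+         # Step logic goes here; execute() wraps run() with timing and metadata.
+         pass
+ ```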
---
-## ngraph.demand.manager.manager
-
-Traffic demand management and placement.
-
-`TrafficManager` expands `TrafficDemand` specs into concrete `Demand` objects,
-builds a working `StrictMultiDiGraph` from a `Network`, and places flows via
-per-demand `FlowPolicy` instances.
-
-### TrafficManager
-
-Manage expansion and placement of traffic demands on a `Network`.
-
- This class:
+## ngraph.workflow.build_graph
- 1) Builds (or rebuilds) a StrictMultiDiGraph from the given Network.
- 2) Expands each TrafficDemand into one or more Demand objects based
- on a configurable 'mode' ("combine" or "pairwise").
- 3) Each Demand is associated with a FlowPolicy, which handles how flows
- are placed (split across paths, balancing, etc.).
- 4) Provides methods to place all demands incrementally with optional
- re-optimization, reset usage, and retrieve flow/usage summaries.
+Graph building workflow component.
- Auto rounds semantics:
+Validates and exports network topology as a node-link representation using NetworkX.
+After NetGraph-Core integration, actual graph building happens in analysis
+functions. This step primarily validates the network and stores a serializable
+representation for inspection.
-- placement_rounds="auto" performs up to a small number of fairness passes
+YAML Configuration Example:
+ ```yaml
+ workflow:
+   - step_type: BuildGraph
- (at most 3), with early stop when diminishing returns are detected. Each
- pass asks the scheduler to place full leftovers without step splitting.
+     name: "build_network_graph" # Optional: Custom name for this step
+     add_reverse: true # Optional: Add reverse edges (default: true)
+ ```
-In particular:
+The `add_reverse` parameter controls whether a reverse edge is added for each link.
+When `True` (default), each Link (A→B) yields both a forward (A→B) and a reverse
+(B→A) edge for bidirectional connectivity. Set to `False` for directed-only graphs.
-- 'combine' mode:
-- Combine all matched sources into a single pseudo-source node, and all
+Results stored in `scenario.results` under the step name as two keys:
- matched sinks into a single pseudo-sink node (named using the traffic
- demand's `source_path` and `sink_path`). A single Demand is created
- from the pseudo-source to the pseudo-sink, with the full volume.
+- metadata: Step-level execution metadata (node/link counts)
+- data: { graph: node-link JSON dict, context: { add_reverse: bool } }
-- 'pairwise' mode:
-- All matched sources form one group, all matched sinks form another group.
+### BuildGraph
- A separate Demand is created for each (src_node, dst_node) pair,
- skipping self-pairs. The total volume is split evenly across the pairs.
+Validates network topology and stores node-link representation.
-The sum of volumes of all expanded Demands for a given TrafficDemand matches
-that TrafficDemand's `demand` value (unless no valid node pairs exist, in which
-case no demands are created).
+After NetGraph-Core integration, this step validates the network structure
+and stores a JSON-serializable node-link representation using NetworkX.
+Actual Core graph building happens in analysis functions as needed.
Attributes:
- network (Union[Network, NetworkView]): The underlying network or view object.
- traffic_matrix_set (TrafficMatrixSet): Traffic matrices containing demands.
- matrix_name (Optional[str]): Name of specific matrix to use, or None for default.
- default_flow_policy_config (FlowPolicyConfig): Default FlowPolicy if
- a TrafficDemand does not specify one.
- graph (StrictMultiDiGraph): Active graph built from the network.
- demands (List[Demand]): All expanded demands from the active matrix.
- _td_to_demands (Dict[str, List[Demand]]): Internal mapping from
- TrafficDemand.id to its expanded Demand objects.
+ add_reverse: If True, adds reverse edges for bidirectional connectivity.
+ Defaults to True for backward compatibility.
**Attributes:**
-- `network` (Union[Network, 'NetworkView'])
-- `traffic_matrix_set` ('TrafficMatrixSet')
-- `matrix_name` (Optional[str])
-- `default_flow_policy_config` (FlowPolicyConfig) = 1
-- `graph` (Optional[StrictMultiDiGraph])
-- `demands` (List[Demand]) = []
-- `_td_to_demands` (Dict[str, List[Demand]]) = {}
+- `name` (str)
+- `seed` (Optional[int])
+- `_seed_source` (str)
+- `add_reverse` (bool) = True
**Methods:**
-- `build_graph(self, add_reverse: 'bool' = True) -> 'None'` - Build or rebuild the internal `StrictMultiDiGraph` from ``network``.
-- `expand_demands(self) -> 'None'` - Expand each `TrafficDemand` into one or more `Demand` objects.
-- `get_flow_details(self) -> 'Dict[Tuple[int, int], Dict[str, object]]'` - Summarize flows from each demand's policy.
-- `get_traffic_results(self, detailed: 'bool' = False) -> 'List[TrafficResult]'` - Return traffic demand summaries.
-- `place_all_demands(self, placement_rounds: 'Union[int, str]' = 'auto', reoptimize_after_each_round: 'bool' = False) -> 'float'` - Place all expanded demands in ascending priority order.
-- `reset_all_flow_usages(self) -> 'None'` - Remove flow usage for each demand and reset placements to 0.
-- `summarize_link_usage(self) -> 'Dict[str, float]'` - Return total flow usage per edge in the graph.
-
-### TrafficResult
-
-Traffic demand result entry.
-
-Attributes:
- priority: Demand priority class (lower value is more critical).
- total_volume: Total traffic volume for this entry.
- placed_volume: Volume actually placed in the flow graph.
- unplaced_volume: Volume not placed (``total_volume - placed_volume``).
- src: Source node or path.
- dst: Destination node or path.
+- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
+- `run(self, scenario: 'Scenario') -> 'None'` - Validate network and store node-link representation.
---
-## ngraph.demand.manager.schedule
-
-Scheduling utilities for demand placement rounds.
+## ngraph.workflow.cost_power
-Provides the simple priority-aware round-robin scheduler that was previously
-implemented in `TrafficManager`.
+CostPower workflow step: collect capex and power by hierarchy level.
-### place_demands_round_robin(graph: 'StrictMultiDiGraph', demands: 'List[Demand]', placement_rounds: 'int', reoptimize_after_each_round: 'bool' = False) -> 'float'
+This step aggregates capex and power from the network hardware inventory without
+performing any normalization or reporting. It separates contributions into two
+categories:
-Place demands using priority buckets and round-robin within each bucket.
+- platform_*: node hardware (e.g., chassis, linecards) resolved from node attrs
+- optics_*: per-end link hardware (e.g., optics) resolved from link attrs
-Args:
- graph: Active flow graph.
- demands: Expanded demands to place.
- placement_rounds: Number of passes per priority class.
- reoptimize_after_each_round: Whether to re-run placement for each demand
- after a round to better share capacity.
+Aggregation is computed at hierarchy levels 0..N where level 0 is the global
+root (path ""), and higher levels correspond to prefixes of node names split by
+"/". For example, for node "dc1/plane1/leaf/leaf-1":
-Returns:
- Total volume successfully placed across all demands.
+- level 1 path is "dc1"
+- level 2 path is "dc1/plane1"
+- etc.
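+
+A short sketch of how level paths derive from a node name (pure illustration):
+
+ ```python
+ name = "dc1/plane1/leaf/leaf-1"
+ parts = name.split("/")
+ level_paths = {level: "/".join(parts[:level]) for level in range(3)}
+ # {0: "", 1: "dc1", 2: "dc1/plane1"}
+ ```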
----
+Disabled handling:
-## ngraph.demand.matrix
+- When include_disabled is False, only enabled nodes and links are considered.
+- Optics are counted only when the endpoint node has platform hardware.
-Traffic matrix containers.
+YAML Configuration Example:
+ ```yaml
+ workflow:
+   - step_type: CostPower
-Provides `TrafficMatrixSet`, a named collection of `TrafficDemand` lists
-used as input to demand expansion and placement. This module contains input
-containers, not analysis results.
+     name: "cost_power" # Optional custom name
+     include_disabled: false # Default: only enabled nodes/links
+     aggregation_level: 2 # Produce levels: 0, 1, 2
+ ```
-### TrafficMatrixSet
+Results stored in `scenario.results` under this step namespace:
-Named collection of TrafficDemand lists.
-This mutable container maps scenario names to lists of TrafficDemand objects,
-allowing management of multiple traffic matrices for analysis.
+
+ ```yaml
+ data:
+   context:
+     include_disabled: bool
+     aggregation_level: int
+   levels:
+     "0":
+       - path: ""
+         platform_capex: float
+         platform_power_watts: float
+         optics_capex: float
+         optics_power_watts: float
+         capex_total: float
+         power_total_watts: float
+     "1": [ ... ]
+     "2": [ ... ]
+ ```
+
+### CostPower
+
+Collect platform and optics capex/power by aggregation level.
Attributes:
- matrices: Dictionary mapping scenario names to TrafficDemand lists.
+ include_disabled: If True, include disabled nodes and links.
+ aggregation_level: Inclusive depth for aggregation. 0=root only.
**Attributes:**
-- `matrices` (dict[str, list[TrafficDemand]]) = {}
+- `name` (str)
+- `seed` (Optional[int])
+- `_seed_source` (str)
+- `include_disabled` (bool) = False
+- `aggregation_level` (int) = 2
**Methods:**
-- `add(self, name: 'str', demands: 'list[TrafficDemand]') -> 'None'` - Add a traffic matrix to the collection.
-- `get_all_demands(self) -> 'list[TrafficDemand]'` - Get all traffic demands from all matrices combined.
-- `get_default_matrix(self) -> 'list[TrafficDemand]'` - Get default traffic matrix.
-- `get_matrix(self, name: 'str') -> 'list[TrafficDemand]'` - Get a specific traffic matrix by name.
-- `to_dict(self) -> 'dict[str, Any]'` - Convert to dictionary for JSON serialization.
+- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
+- `run(self, scenario: 'Any') -> 'None'` - Aggregate capex and power by hierarchy levels 0..N.
---
-## ngraph.demand.spec
+## ngraph.workflow.max_flow_step
-Traffic demand specification.
+MaxFlow workflow step.
-Defines `TrafficDemand`, a user-facing specification used by demand expansion
-and placement. It can carry either a concrete `FlowPolicy` instance or a
-`FlowPolicyConfig` enum to construct one.
+Monte Carlo analysis of maximum flow capacity between node groups using FailureManager.
+Produces unified `flow_results` per iteration under `data.flow_results`.
-### TrafficDemand
+YAML Configuration Example:
-Single traffic demand input.
+    ```yaml
+    workflow:
+      - step_type: MaxFlow
+        name: "maxflow_dc_to_edge"
+        source_path: "^datacenter/.*"
+        sink_path: "^edge/.*"
+        mode: "combine"
+        failure_policy: "random_failures"
+        iterations: 100
+        parallelism: auto
+        shortest_path: false
+        flow_placement: "PROPORTIONAL"
+        baseline: false
+        seed: 42
+        store_failure_patterns: false
+        include_flow_details: false   # collect cost_distribution per flow
+        include_min_cut: false        # include min-cut edge list per flow
+    ```
+
+### MaxFlow
+
+Maximum flow Monte Carlo workflow step.
Attributes:
- source_path: Regex string selecting source nodes.
- sink_path: Regex string selecting sink nodes.
- priority: Priority class for this demand (lower value = higher priority).
- demand: Total demand volume.
- demand_placed: Portion of this demand placed so far.
- flow_policy_config: Policy configuration used to build a `FlowPolicy` if
- ``flow_policy`` is not provided.
- flow_policy: Concrete policy instance. If set, it overrides
- ``flow_policy_config``.
- mode: Expansion mode, ``"combine"`` or ``"pairwise"``.
- attrs: Arbitrary user metadata.
- id: Unique identifier assigned at initialization.
+ source_path: Regex pattern for source node groups.
+ sink_path: Regex pattern for sink node groups.
+ mode: Flow analysis mode ("combine" or "pairwise").
+ failure_policy: Name of failure policy in scenario.failure_policy_set.
+ iterations: Number of Monte Carlo trials.
+ parallelism: Number of parallel worker processes.
+ shortest_path: Whether to use shortest paths only.
+ flow_placement: Flow placement strategy.
+ baseline: Whether to run first iteration without failures as baseline.
+ seed: Optional seed for reproducible results.
+ store_failure_patterns: Whether to store failure patterns in results.
+ include_flow_details: Whether to collect cost distribution per flow.
+ include_min_cut: Whether to include min-cut edges per flow.
**Attributes:**
+- `name` (str)
+- `seed` (int | None)
+- `_seed_source` (str)
- `source_path` (str)
- `sink_path` (str)
-- `priority` (int) = 0
-- `demand` (float) = 0.0
-- `demand_placed` (float) = 0.0
-- `flow_policy_config` (Optional)
-- `flow_policy` (Optional)
- `mode` (str) = combine
-- `attrs` (Dict) = {}
-- `id` (str)
-
----
+- `failure_policy` (str | None)
+- `iterations` (int) = 1
+- `parallelism` (int | str) = auto
+- `shortest_path` (bool) = False
+- `flow_placement` (FlowPlacement | str) = PROPORTIONAL
+- `baseline` (bool) = False
+- `store_failure_patterns` (bool) = False
+- `include_flow_details` (bool) = False
+- `include_min_cut` (bool) = False
-## ngraph.failure.conditions
+**Methods:**
-Shared condition primitives and evaluators.
+- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
+- `run(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step logic.
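+
+A minimal sketch of consuming the per-iteration `flow_results` (assumes the
+step name from the YAML example above; entry shapes follow
+`FlowIterationResult.to_dict()` in `ngraph.results.flow`):
+
+```python
+step = scenario.results.get_step("maxflow_dc_to_edge")
+for iteration in step["data"]["flow_results"]:
+    summary = iteration["summary"]
+    print(iteration["failure_id"], summary["total_placed"], summary["overall_ratio"])
+```
+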
-This module provides a small, dependency-free condition evaluation utility
-that can be reused by failure policies and DSL selection filters.
+---
-Operators supported:
+## ngraph.workflow.maximum_supported_demand_step
-- ==, !=, <, <=, >, >=
-- contains, not_contains
-- any_value, no_value
+Maximum Supported Demand (MSD) workflow step.
-The evaluator operates on a flat attribute mapping for an entity. Callers are
-responsible for constructing that mapping (e.g. merging top-level fields with
-``attrs`` and ensuring appropriate precedence rules).
+Searches for the maximum uniform traffic multiplier `alpha_star` that is fully
+placeable for a given matrix. Stores results under `data` as:
-### FailureCondition
+- `alpha_star`: float
+- `context`: parameters used for the search
+- `base_demands`: serialized base demand specs
+- `probes`: bracket/bisect evaluations with feasibility
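+
+An illustrative sketch of the bracket-then-bisect search (not the ngraph
+implementation; `feasible` stands in for a full placement check at a given
+multiplier, and the real step additionally bounds the search with `alpha_max`,
+`max_bracket_iters`, and `max_bisect_iters`):
+
+```python
+def find_alpha_star(feasible, alpha_start=1.0, growth_factor=2.0, resolution=0.01):
+    lo, hi = 0.0, alpha_start
+    while feasible(hi):                # bracket: grow until placement fails
+        lo, hi = hi, hi * growth_factor
+    while hi - lo > resolution:        # bisect down to the requested resolution
+        mid = (lo + hi) / 2.0
+        lo, hi = (mid, hi) if feasible(mid) else (lo, mid)
+    return lo                          # largest multiplier known to be feasible
+```
+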
-A single condition for matching an entity attribute.
+### MaximumSupportedDemand
-Args:
- attr: Attribute name to inspect in the entity mapping.
- operator: Comparison operator. See module docstring for the list.
- value: Right-hand operand for the comparison (unused for any_value/no_value).
+MaximumSupportedDemand(name: 'str' = '', seed: 'Optional[int]' = None, _seed_source: 'str' = '', matrix_name: 'str' = 'default', acceptance_rule: 'str' = 'hard', alpha_start: 'float' = 1.0, growth_factor: 'float' = 2.0, alpha_min: 'float' = 1e-06, alpha_max: 'float' = 1000000000.0, resolution: 'float' = 0.01, max_bracket_iters: 'int' = 32, max_bisect_iters: 'int' = 32, seeds_per_alpha: 'int' = 1, placement_rounds: 'int | str' = 'auto')
**Attributes:**
-- `attr` (str)
-- `operator` (str)
-- `value` (Any | None)
-
-### evaluate_condition(entity_attrs: 'dict[str, Any]', cond: 'FailureCondition') -> 'bool'
+- `name` (str)
+- `seed` (Optional[int])
+- `_seed_source` (str)
+- `matrix_name` (str) = default
+- `acceptance_rule` (str) = hard
+- `alpha_start` (float) = 1.0
+- `growth_factor` (float) = 2.0
+- `alpha_min` (float) = 1e-06
+- `alpha_max` (float) = 1000000000.0
+- `resolution` (float) = 0.01
+- `max_bracket_iters` (int) = 32
+- `max_bisect_iters` (int) = 32
+- `seeds_per_alpha` (int) = 1
+- `placement_rounds` (int | str) = auto
-Evaluate a single condition against an entity attribute mapping.
+**Methods:**
-Args:
- entity_attrs: Flat mapping of attributes for the entity.
- cond: Condition to evaluate.
+- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
+- `run(self, scenario: "'Any'") -> 'None'` - Execute the workflow step logic.
-Returns:
- True if the condition passes, False otherwise.
+---
-### evaluate_conditions(entity_attrs: 'dict[str, Any]', conditions: 'Iterable[FailureCondition]', logic: 'str') -> 'bool'
+## ngraph.workflow.network_stats
-Evaluate multiple conditions with AND/OR logic.
+Workflow step for basic node and link statistics.
-Args:
- entity_attrs: Flat mapping of attributes for the entity.
- conditions: Iterable of conditions to evaluate.
- logic: "and" or "or".
+Computes and stores network statistics including node/link counts,
+capacity distributions, cost distributions, and degree distributions. Supports
+optional exclusion simulation and disabled entity handling.
-Returns:
- True if the combined predicate passes, False otherwise.
+YAML Configuration Example:
+ ```yaml
+ workflow:
+ - step_type: NetworkStats
----
+ name: "network_statistics" # Optional: Custom name for this step
+ include_disabled: false # Include disabled nodes/links in stats
+ excluded_nodes: ["node1", "node2"] # Optional: Temporary node exclusions
+ excluded_links: ["link1", "link3"] # Optional: Temporary link exclusions
+ ```
-## ngraph.failure.manager.aggregate
+Results stored in `scenario.results`:
-Aggregation helpers for failure analysis results.
+- Node statistics: node_count
+- Link statistics: link_count, total_capacity, mean_capacity, median_capacity,
-Utilities in this module group and summarize outputs produced by
-`FailureManager` runs. Functions are factored here to keep `manager.py`
-focused on orchestration. This module intentionally avoids importing heavy
-dependencies to keep import cost low in the common path.
+ min_capacity, max_capacity, mean_cost, median_cost, min_cost, max_cost
----
+- Degree statistics: mean_degree, median_degree, min_degree, max_degree
-## ngraph.failure.manager.enumerate
+### NetworkStats
-Failure pattern enumeration helpers.
+Compute basic node and link statistics for the network.
-Hosts utilities for generating or iterating over failure patterns for testing
-and analysis workflows. These helpers are separate from the Monte Carlo engine
-to keep the main manager small and focused.
+Supports optional exclusion simulation without modifying the base network.
----
+Attributes:
+ include_disabled: If True, include disabled nodes and links in statistics.
+ If False, only consider enabled entities.
+ excluded_nodes: Optional list of node names to exclude (temporary exclusion).
+ excluded_links: Optional list of link IDs to exclude (temporary exclusion).
-## ngraph.failure.manager.manager
+**Attributes:**
-FailureManager for Monte Carlo failure analysis.
+- `name` (str)
+- `seed` (Optional[int])
+- `_seed_source` (str)
+- `include_disabled` (bool) = False
+- `excluded_nodes` (Iterable[str]) = ()
+- `excluded_links` (Iterable[str]) = ()
-Provides the failure analysis engine for NetGraph. Supports parallel
-processing, per-worker caching, and failure policy handling for workflow steps
-and direct programmatic use.
+**Methods:**
-Performance characteristics:
-Time complexity: O(I × A / P), where I is iteration count, A is analysis cost,
-and P is parallelism. Worker-local caching reduces repeated work when exclusion
-sets repeat across iterations. Network serialization happens once per worker,
-not per iteration.
+- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
+- `run(self, scenario: 'Scenario') -> 'None'` - Compute and store network statistics.
-Space complexity: O(V + E + I × R + C), where V and E are node and link counts,
-R is result size per iteration, and C is cache size. The per-worker cache is
-bounded and evicts in FIFO order after 1000 unique patterns.
+---
-Parallelism: For small iteration counts, serial execution avoids IPC overhead.
-For larger workloads, parallel execution benefits from worker caching and CPU
-utilization. Optimal parallelism is the number of CPU cores for analysis-bound
-workloads.
+## ngraph.workflow.parse
-### AnalysisFunction
+Workflow parsing helpers.
-Protocol for analysis functions used with FailureManager.
+Converts a normalized workflow section (list[dict]) into WorkflowStep
+instances using the WORKFLOW_STEP_REGISTRY and attaches unique names/seeds.
-Analysis functions should take a NetworkView and any additional
-keyword arguments, returning analysis results of any type.
+### build_workflow_steps(workflow_data: 'List[Dict[str, Any]]', derive_seed) -> 'List[WorkflowStep]'
-### FailureManager
+Instantiate workflow steps from normalized dictionaries.
-Failure analysis engine with Monte Carlo capabilities.
+Args:
+ workflow_data: List of step dicts; each must have "step_type".
+ derive_seed: Callable(name: str) -> int | None, used to derive step seeds.
-This is the component for failure analysis in NetGraph.
-Provides parallel processing, worker caching, and failure
-policy handling for workflow steps and direct notebook usage.
+Returns:
+ A list of WorkflowStep instances with unique names and optional seeds.
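+
+A minimal usage sketch (the import path follows this module's name; the
+`derive_seed` lambda is illustrative):
+
+```python
+from ngraph.workflow.parse import build_workflow_steps
+
+workflow_data = [
+    {"step_type": "BuildGraph"},
+    {"step_type": "NetworkStats", "name": "stats"},
+]
+steps = build_workflow_steps(workflow_data, derive_seed=lambda name: None)
+```
+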
-The FailureManager can execute any analysis function that takes a NetworkView
-and returns results, making it generic for different types of
-failure analysis (capacity, traffic, connectivity, etc.).
+---
-Attributes:
- network: The underlying network (not modified during analysis).
- failure_policy_set: Set of named failure policies.
- policy_name: Name of specific failure policy to use.
+## ngraph.workflow.traffic_matrix_placement_step
-**Methods:**
+TrafficMatrixPlacement workflow step.
-- `compute_exclusions(self, policy: "'FailurePolicy | None'" = None, seed_offset: 'int | None' = None) -> 'tuple[set[str], set[str]]'` - Compute set of nodes and links to exclude for a failure iteration.
-- `create_network_view(self, excluded_nodes: 'set[str] | None' = None, excluded_links: 'set[str] | None' = None) -> 'NetworkView'` - Create NetworkView with specified exclusions.
-- `get_failure_policy(self) -> "'FailurePolicy | None'"` - Get failure policy for analysis.
-- `run_demand_placement_monte_carlo(self, demands_config: 'list[dict[str, Any]] | Any', iterations: 'int' = 100, parallelism: 'int' = 1, placement_rounds: 'int | str' = 'auto', baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_details: 'bool' = False, include_used_edges: 'bool' = False, **kwargs) -> 'Any'` - Analyze traffic demand placement success under failures.
-- `run_max_flow_monte_carlo(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement | str' = , baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_summary: 'bool' = False, **kwargs) -> 'Any'` - Analyze maximum flow capacity envelopes between node groups under failures.
-- `run_monte_carlo_analysis(self, analysis_func: 'AnalysisFunction', iterations: 'int' = 1, parallelism: 'int' = 1, baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, **analysis_kwargs) -> 'dict[str, Any]'` - Run Monte Carlo failure analysis with any analysis function.
-- `run_sensitivity_monte_carlo(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement | str' = , baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, **kwargs) -> 'Any'` - Analyze component criticality for flow capacity under failures.
-- `run_single_failure_scenario(self, analysis_func: 'AnalysisFunction', **kwargs) -> 'Any'` - Run a single failure scenario for convenience.
+Runs Monte Carlo demand placement using a named traffic matrix and produces
+unified `flow_results` per iteration under `data.flow_results`.
----
+### TrafficMatrixPlacement
-## ngraph.failure.manager.simulate
+Monte Carlo demand placement using a named traffic matrix.
-Simulation helpers for failure analyses.
+Attributes:
+ matrix_name: Name of the traffic matrix to analyze.
+ failure_policy: Optional policy name in scenario.failure_policy_set.
+ iterations: Number of Monte Carlo iterations.
+ parallelism: Number of parallel worker processes.
+ placement_rounds: Placement optimization rounds (int or "auto").
+ baseline: Include baseline iteration without failures first.
+ seed: Optional seed for reproducibility.
+ store_failure_patterns: Whether to store failure pattern results.
+ include_flow_details: When True, include cost_distribution per flow.
+ include_used_edges: When True, include set of used edges per demand in entry data.
+ alpha: Numeric scale for demands in the matrix.
+ alpha_from_step: Optional producer step name to read alpha from.
+ alpha_from_field: Dotted field path in producer step (default: "data.alpha_star").
+
+**Attributes:**
+
+- `name` (str)
+- `seed` (int | None)
+- `_seed_source` (str)
+- `matrix_name` (str)
+- `failure_policy` (str | None)
+- `iterations` (int) = 1
+- `parallelism` (int | str) = auto
+- `placement_rounds` (int | str) = auto
+- `baseline` (bool) = False
+- `store_failure_patterns` (bool) = False
+- `include_flow_details` (bool) = False
+- `include_used_edges` (bool) = False
+- `alpha` (float) = 1.0
+- `alpha_from_step` (str | None)
+- `alpha_from_field` (str) = data.alpha_star
-Contains small helpers used to drive simulations in tests and examples. The
-main orchestration lives in `manager.py`.
+**Methods:**
+
+- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
+- `run(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step logic.
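+
+A hypothetical YAML configuration chaining this step to an MSD producer (step
+names are illustrative):
+
+```yaml
+workflow:
+  - step_type: MaximumSupportedDemand
+    name: "msd"
+    matrix_name: "default"
+  - step_type: TrafficMatrixPlacement
+    name: "placement_at_alpha_star"
+    matrix_name: "default"
+    iterations: 100
+    alpha_from_step: "msd"                # read alpha from the MSD step
+    alpha_from_field: "data.alpha_star"   # default dotted field path
+```
+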
---
-## ngraph.failure.policy
+## ngraph.dsl.blueprints.expand
-Failure policy primitives.
+Network topology blueprints and generation.
-Defines `FailureCondition`, `FailureRule`, and `FailurePolicy` for expressing
-how nodes, links, and risk groups fail in analyses. Conditions match on
-top-level attributes with simple operators; rules select matches using
-"all", probabilistic "random" (with `probability`), or fixed-size "choice"
-(with `count`). Policies can optionally expand failures by shared risk groups
-or by risk-group children.
+### Blueprint
-### FailureCondition
+Represents a reusable blueprint for hierarchical sub-topologies.
-Alias to the shared condition dataclass.
+A blueprint may contain multiple groups of nodes (each can have a node_count
+and a name_template), plus adjacency rules describing how those groups connect.
-This maintains a consistent import path within the failure policy module.
+Attributes:
+ name (str): Unique identifier of this blueprint.
+ groups (Dict[str, Any]): A mapping of group_name -> group definition.
+ Allowed top-level keys in each group definition here are the same
+ as in normal group definitions (e.g. node_count, name_template,
+ attrs, disabled, risk_groups, or nested use_blueprint references, etc.).
+ adjacency (List[Dict[str, Any]]): A list of adjacency definitions
+ describing how these groups are linked, using the DSL fields
+ (source, target, pattern, link_params, etc.).
**Attributes:**
-- `attr` (str)
-- `operator` (str)
-- `value` (Any | None)
-
-### FailureMode
+- `name` (str)
+- `groups` (Dict[str, Any])
+- `adjacency` (List[Dict[str, Any]])
-A weighted mode that encapsulates a set of rules applied together.
+### DSLExpansionContext
-Exactly one mode is selected per failure iteration according to the
-mode weights. Within a mode, all contained rules are applied and their
-selections are unioned into the failure set.
+Carries the blueprint definitions and the final Network instance
+to be populated during DSL expansion.
Attributes:
- weight: Non-negative weight used for mode selection. All weights are
- normalized internally. Modes with zero weight are never selected.
- rules: A list of `FailureRule` applied together when this mode is chosen.
- attrs: Optional metadata.
+ blueprints (Dict[str, Blueprint]): Dictionary of blueprint-name -> Blueprint.
+ network (Network): The Network into which expanded nodes/links are inserted.
+ pending_bp_adj (List[tuple[Dict[str, Any], str]]): Deferred blueprint adjacency
+ expansions collected as (adj_def, parent_path) to be processed later.
**Attributes:**
-- `weight` (float)
-- `rules` (List[FailureRule]) = []
-- `attrs` (Dict[str, Any]) = {}
+- `blueprints` (Dict[str, Blueprint])
+- `network` (Network)
+- `pending_bp_adj` (List[tuple[Dict[str, Any], str]]) = []
-### FailurePolicy
+### expand_network_dsl(data: 'Dict[str, Any]') -> 'Network'
-A container for multiple FailureRules plus optional metadata in `attrs`.
+Expands a combined blueprint + network DSL into a complete Network object.
-The main entry point is `apply_failures`, which:
- 1) For each rule, gather the relevant entities (node, link, or risk_group).
- 2) Match them based on rule conditions using 'and' or 'or' logic.
- 3) Apply the selection strategy (all, random, or choice).
- 4) Collect the union of all failed entities across all rules.
- 5) Optionally expand failures by shared-risk groups or sub-risks.
+Overall flow:
+ 1) Parse "blueprints" into Blueprint objects.
+ 2) Build a new Network from "network" metadata (e.g. name, version).
+ 3) Expand 'network["groups"]' (collect blueprint adjacencies for later).
-Example YAML configuration:
- ```yaml
- failure_policy:
- attrs:
- description: "Regional power grid failure affecting telecom infrastructure"
- fail_risk_groups: true
- rules:
- # Fail all nodes in Texas electrical grid
- - entity_scope: "node"
+- If a group references a blueprint, incorporate that blueprint's subgroups
- conditions:
- - attr: "electric_grid"
+ while merging parent's attrs + disabled + risk_groups into subgroups.
+ Blueprint adjacency is deferred and processed after node overrides.
- operator: "=="
- value: "texas"
- logic: "and"
- rule_type: "all"
+- Otherwise, directly create nodes (a "direct node group").
- # Randomly fail 40% of underground fiber links in affected region
- - entity_scope: "link"
+ 4) Process any direct node definitions (network["nodes"]).
+ 5) Process node overrides (in order if multiple overrides match).
+ 6) Expand deferred blueprint adjacencies.
+ 7) Expand adjacency definitions in 'network["adjacency"]'.
+ 8) Process any direct link definitions (network["links"]).
+ 9) Process link overrides (in order if multiple overrides match).
- conditions:
- - attr: "region"
+Validation rules:
- operator: "=="
- value: "southwest"
- - attr: "installation"
+- Only certain top-level fields are permitted in each structure. Any extra
- operator: "=="
- value: "underground"
- logic: "and"
- rule_type: "random"
- probability: 0.4
+ keys raise a ValueError. "attrs" is where arbitrary user fields go.
- # Choose exactly 2 risk groups to fail (e.g., data centers)
- # Note: logic defaults to "or" when not specified
- - entity_scope: "risk_group"
+- For link_params, recognized fields are "capacity", "cost", "disabled",
- rule_type: "choice"
- count: 2
- ```
+ "risk_groups", "attrs". Everything else must go inside link_params["attrs"].
-Attributes:
- rules (List[FailureRule]):
- A list of FailureRules to apply.
- attrs (Dict[str, Any]):
- Arbitrary metadata about this policy (e.g. "name", "description").
- fail_risk_groups (bool):
- If True, after initial selection, expand failures among any
- node/link that shares a risk group with a failed entity.
- fail_risk_group_children (bool):
- If True, and if a risk_group is marked as failed, expand to
- children risk_groups recursively.
- seed (Optional[int]):
- Seed for reproducible random operations. If None, operations
- will be non-deterministic.
+- For node/group definitions, recognized fields include "node_count",
-**Attributes:**
+ "name_template", "attrs", "disabled", "risk_groups" or "use_blueprint"
+ for blueprint-based groups.
-- `attrs` (Dict[str, Any]) = {}
-- `fail_risk_groups` (bool) = False
-- `fail_risk_group_children` (bool) = False
-- `seed` (Optional[int])
-- `modes` (List[FailureMode]) = []
+Args:
+ data (Dict[str, Any]): The YAML-parsed dictionary containing
+ optional "blueprints" + "network".
-**Methods:**
+Returns:
+ Network: The expanded Network object with all nodes and links.
-- `apply_failures(self, network_nodes: 'Dict[str, Any]', network_links: 'Dict[str, Any]', network_risk_groups: 'Dict[str, Any] | None' = None, *, seed: 'Optional[int]' = None) -> 'List[str]'` - Identify which entities fail for this iteration.
-- `to_dict(self) -> 'Dict[str, Any]'` - Convert to dictionary for JSON serialization.
+---
-### FailureRule
+## ngraph.dsl.blueprints.parser
-Defines how to match and then select entities for failure.
+Parsing helpers for the network DSL.
-Attributes:
- entity_scope (EntityScope):
- The type of entities this rule applies to: "node", "link", or "risk_group".
- conditions (List[FailureCondition]):
- A list of conditions to filter matching entities.
- logic (Literal["and", "or"]):
- "and": All conditions must be true for a match.
- "or": At least one condition is true for a match (default).
- rule_type (Literal["random", "choice", "all"]):
- The selection strategy among the matched set:
+This module factors out pure parsing/validation helpers from the expansion
+module so they can be tested independently and reused.
-- "random": each matched entity is chosen with probability = `probability`.
-- "choice": pick exactly `count` items from the matched set (random sample).
-- "all": select every matched entity in the matched set.
+### check_adjacency_keys(adj_def: 'Dict[str, Any]', context: 'str') -> 'None'
- probability (float):
- Probability in [0,1], used if `rule_type="random"`.
- count (int):
- Number of entities to pick if `rule_type="choice"`.
+Ensure adjacency definitions only contain recognized keys.
-**Attributes:**
+### check_link_params(link_params: 'Dict[str, Any]', context: 'str') -> 'None'
-- `entity_scope` (EntityScope)
-- `conditions` (List[FailureCondition]) = []
-- `logic` (Literal['and', 'or']) = or
-- `rule_type` (Literal['random', 'choice', 'all']) = all
-- `probability` (float) = 1.0
-- `count` (int) = 1
-- `weight_by` (Optional[str])
+Ensure link_params contain only recognized keys.
----
+Link attributes may include a per-end "hardware" mapping when set under
+link_params.attrs. This function validates only the top-level link_params keys.
-## ngraph.failure.policy_set
+### check_no_extra_keys(data_dict: 'Dict[str, Any]', allowed: 'set[str]', context: 'str') -> 'None'
-Failure policy containers.
+Raise if ``data_dict`` contains keys outside ``allowed``.
-Provides `FailurePolicySet`, a named collection of `FailurePolicy` objects
-used as input to failure analysis workflows. This module contains input
-containers, not analysis results.
+Args:
+ data_dict: The dict to check.
+ allowed: Set of recognized keys.
+ context: Short description used in error messages.
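+
+An illustrative sketch of the described behavior (the exact error message is
+an assumption, not the library source):
+
+```python
+from typing import Any, Dict, Set
+
+def check_no_extra_keys(data_dict: Dict[str, Any], allowed: Set[str], context: str) -> None:
+    extra = set(data_dict) - allowed
+    if extra:
+        raise ValueError(f"Unrecognized keys in {context}: {sorted(extra)}")
+```
+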
-### FailurePolicySet
+### expand_name_patterns(name: 'str') -> 'List[str]'
-Named collection of FailurePolicy objects.
+Expand bracket expressions in a group name.
-This mutable container maps failure policy names to FailurePolicy objects,
-allowing management of multiple failure policies for analysis.
+Examples:
-Attributes:
- policies: Dictionary mapping failure policy names to FailurePolicy objects.
+- "fa[1-3]" -> ["fa1", "fa2", "fa3"]
+- "dc[1,3,5-6]" -> ["dc1", "dc3", "dc5", "dc6"]
+- "fa[1-2]_plane[5-6]" -> ["fa1_plane5", "fa1_plane6", "fa2_plane5", "fa2_plane6"]
-**Attributes:**
+### join_paths(parent_path: 'str', rel_path: 'str') -> 'str'
-- `policies` (dict[str, FailurePolicy]) = {}
+Join two path segments according to the DSL conventions.
-**Methods:**
+---
-- `add(self, name: 'str', policy: 'FailurePolicy') -> 'None'` - Add a failure policy to the collection.
-- `get_all_policies(self) -> 'list[FailurePolicy]'` - Get all failure policies from the collection.
-- `get_policy(self, name: 'str') -> 'FailurePolicy'` - Get a specific failure policy by name.
-- `to_dict(self) -> 'dict[str, Any]'` - Convert to dictionary for JSON serialization.
+## ngraph.dsl.loader
----
+YAML loader + schema validation for Scenario DSL.
-## ngraph.workflow.base
+Provides a single entrypoint to parse a YAML string, normalize keys where
+needed, validate against the packaged JSON schema, and return a canonical
+dictionary suitable for downstream expansion/parsing.
-Base classes for workflow automation.
+### load_scenario_yaml(yaml_str: 'str') -> 'Dict[str, Any]'
-Defines the workflow step abstraction, registration decorator, and execution
-wrapper that adds timing and logging. Steps implement `run()` and are executed
-via `execute()` which records metadata and re-raises failures.
+Load, normalize, and validate a Scenario YAML string.
-### WorkflowStep
+Returns a canonical dictionary representation that downstream parsers can
+consume without worrying about YAML-specific quirks (e.g., boolean-like
+keys) and with schema shape already enforced.
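+
+A minimal usage sketch (the import path follows this module's name; it assumes
+schema violations surface as exceptions):
+
+```python
+from pathlib import Path
+
+from ngraph.dsl.loader import load_scenario_yaml
+
+yaml_text = Path("scenario.yaml").read_text()
+data = load_scenario_yaml(yaml_text)   # normalized, schema-validated dict
+network_section = data.get("network", {})
+```
+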
-Base class for all workflow steps.
+---
-All workflow steps are automatically logged with execution timing information.
-All workflow steps support seeding for reproducible random operations.
-Workflow metadata is automatically stored in scenario.results for analysis.
+## ngraph.results.artifacts
-YAML Configuration:
- ```yaml
- workflow:
- - step_type:
+Serializable result artifacts for analysis workflows.
- name: "optional_step_name" # Optional: Custom name for this step instance
- seed: 42 # Optional: Seed for reproducible random operations
- # ... step-specific parameters ...
- ```
+This module defines dataclasses that capture outputs from analyses and
+simulations in a JSON-serializable form:
-Attributes:
- name: Optional custom identifier for this workflow step instance,
- used for logging and result storage purposes.
- seed: Optional seed for reproducible random operations. If None,
- random operations will be non-deterministic.
+- `CapacityEnvelope`: frequency-based capacity distributions and optional
-**Attributes:**
+ aggregated flow statistics
-- `name` (str)
-- `seed` (Optional[int])
-- `_seed_source` (str)
+- `FailurePatternResult`: capacity results for specific failure patterns
+- `PlacementEnvelope`: per-demand placement envelopes
-**Methods:**
+### CapacityEnvelope
-- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
-- `run(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step logic.
+Frequency-based capacity envelope that stores capacity values as frequencies.
-### register_workflow_step(step_type: 'str')
+This approach is memory-efficient for Monte Carlo analysis where we care
+about statistical distributions rather than individual sample order.
-Return a decorator that registers a `WorkflowStep` subclass.
+Attributes:
+ source_pattern: Regex pattern used to select source nodes.
+ sink_pattern: Regex pattern used to select sink nodes.
+ mode: Flow analysis mode ("combine" or "pairwise").
+ frequencies: Dictionary mapping capacity values to their occurrence counts.
+ min_capacity: Minimum observed capacity.
+ max_capacity: Maximum observed capacity.
+ mean_capacity: Mean capacity across all samples.
+ stdev_capacity: Standard deviation of capacity values.
+ total_samples: Total number of samples represented.
+ flow_summary_stats: Optional dictionary with aggregated FlowSummary statistics.
+ Contains cost_distribution_stats and other flow analytics.
-Args:
- step_type: Registry key used to instantiate steps from configuration.
+**Attributes:**
-Returns:
- A class decorator that adds the class to `WORKFLOW_STEP_REGISTRY`.
+- `source_pattern` (str)
+- `sink_pattern` (str)
+- `mode` (str)
+- `frequencies` (Dict[float, int])
+- `min_capacity` (float)
+- `max_capacity` (float)
+- `mean_capacity` (float)
+- `stdev_capacity` (float)
+- `total_samples` (int)
+- `flow_summary_stats` (Dict[str, Any]) = {}
----
+**Methods:**
-## ngraph.workflow.build_graph
+- `expand_to_values(self) -> 'List[float]'` - Expand frequency map back to individual values.
+- `from_dict(data: 'Dict[str, Any]') -> "'CapacityEnvelope'"` - Construct a CapacityEnvelope from a dictionary.
+- `from_values(source_pattern: 'str', sink_pattern: 'str', mode: 'str', values: 'List[float]', flow_summaries: 'List[Any] | None' = None) -> "'CapacityEnvelope'"` - Create envelope from capacity values and optional flow summaries.
+- `get_percentile(self, percentile: 'float') -> 'float'` - Calculate percentile from frequency distribution.
+- `to_dict(self) -> 'Dict[str, Any]'` - Convert to dictionary for JSON serialization.
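+
+A minimal sketch using the documented constructors (the capacity values are
+made up):
+
+```python
+from ngraph.results.artifacts import CapacityEnvelope
+
+env = CapacityEnvelope.from_values(
+    source_pattern="^dc/.*",
+    sink_pattern="^edge/.*",
+    mode="combine",
+    values=[100.0, 100.0, 80.0, 60.0],
+)
+print(env.total_samples)             # 4
+print(env.get_percentile(50.0))      # percentile from the frequency map
+round_trip = CapacityEnvelope.from_dict(env.to_dict())
+```
+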
-Graph building workflow component.
+### FailurePatternResult
-Converts scenario network definitions into StrictMultiDiGraph structures suitable
-for analysis algorithms. No additional parameters required beyond basic workflow step options.
+Result for a unique failure pattern with associated capacity matrix.
-YAML Configuration Example:
- ```yaml
- workflow:
- - step_type: BuildGraph
+Attributes:
+ excluded_nodes: List of failed node IDs.
+ excluded_links: List of failed link IDs.
+ capacity_matrix: Dictionary mapping flow keys to capacity values.
+ count: Number of times this pattern occurred.
+ is_baseline: Whether this represents the baseline (no failures) case.
- name: "build_network_graph" # Optional: Custom name for this step
- ```
+**Attributes:**
-Results stored in `scenario.results` under the step name as two keys:
+- `excluded_nodes` (List[str])
+- `excluded_links` (List[str])
+- `capacity_matrix` (Dict[str, float])
+- `count` (int)
+- `is_baseline` (bool) = False
+- `_pattern_key_cache` (str)
-- metadata: Step-level execution metadata (empty dict)
-- data: { graph: node-link JSON dict, context: { add_reverse: bool } }
+**Methods:**
-### BuildGraph
+- `from_dict(data: 'Dict[str, Any]') -> "'FailurePatternResult'"` - Construct FailurePatternResult from a dictionary.
+- `to_dict(self) -> 'Dict[str, Any]'` - Convert to dictionary for JSON serialization.
-A workflow step that builds a StrictMultiDiGraph from scenario.network.
+### PlacementEnvelope
-This step converts the scenario's network definition into a graph structure
-suitable for analysis algorithms. No additional parameters are required.
+Per-demand placement envelope keyed like capacity envelopes.
+
+Each envelope captures the frequency distribution of placement ratios for a
+specific demand definition across Monte Carlo iterations.
+
+Attributes:
+ source: Source selection regex or node label.
+ sink: Sink selection regex or node label.
+ mode: Demand expansion mode ("combine" or "pairwise").
+ priority: Demand priority class.
+ frequencies: Mapping of placement ratio to occurrence count.
+ min: Minimum observed placement ratio.
+ max: Maximum observed placement ratio.
+ mean: Mean placement ratio.
+ stdev: Standard deviation of placement ratio.
+ total_samples: Number of iterations represented.
**Attributes:**
-- `name` (str)
-- `seed` (Optional[int])
-- `_seed_source` (str)
+- `source` (str)
+- `sink` (str)
+- `mode` (str)
+- `priority` (int)
+- `frequencies` (Dict[float, int])
+- `min` (float)
+- `max` (float)
+- `mean` (float)
+- `stdev` (float)
+- `total_samples` (int)
**Methods:**
-- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
-- `run(self, scenario: 'Scenario') -> 'None'` - Build the network graph and store it in results.
+- `from_dict(data: 'Dict[str, Any]') -> "'PlacementEnvelope'"` - Construct a PlacementEnvelope from a dictionary.
+- `from_values(source: 'str', sink: 'str', mode: 'str', priority: 'int', ratios: 'List[float]', rounding_decimals: 'int' = 4) -> "'PlacementEnvelope'"`
+- `to_dict(self) -> 'Dict[str, Any]'`
---
-## ngraph.workflow.cost_power
-
-CostPower workflow step: collect capex and power by hierarchy level.
+## ngraph.results.flow
-This step aggregates capex and power from the network hardware inventory without
-performing any normalization or reporting. It separates contributions into two
-categories:
+Unified flow result containers for failure-analysis iterations.
-- platform_*: node hardware (e.g., chassis, linecards) resolved from node attrs
-- optics_*: per-end link hardware (e.g., optics) resolved from link attrs
+Defines small, serializable dataclasses that capture per-iteration outcomes
+for capacity and demand-placement style analyses in a unit-agnostic form.
-Aggregation is computed at hierarchy levels 0..N where level 0 is the global
-root (path ""), and higher levels correspond to prefixes of node names split by
-"/". For example, for node "dc1/plane1/leaf/leaf-1":
+Objects expose `to_dict()` that returns JSON-safe primitives. Float-keyed
+distributions are normalized to string keys, and arbitrary `data` payloads are
+sanitized. These dicts are written under `data.flow_results` by steps.
-- level 1 path is "dc1"
-- level 2 path is "dc1/plane1"
-- etc.
+### FlowEntry
-Disabled handling:
+Represents a single source→destination flow outcome within an iteration.
-- When include_disabled is False, only enabled nodes and links are considered.
-- Optics are counted only when the endpoint node has platform hardware.
+Fields are unit-agnostic. Callers can interpret numbers as needed for
+presentation (e.g., Gbit/s).
-YAML Configuration Example:
- ```yaml
- workflow:
- - step_type: CostPower
+Args:
+ source: Source identifier.
+ destination: Destination identifier.
+ priority: Priority/class for traffic placement scenarios. Zero when not applicable.
+ demand: Requested volume for this flow.
+ placed: Delivered volume for this flow.
+ dropped: Unmet volume (``demand - placed``).
+ cost_distribution: Optional distribution of placed volume by path cost.
+ data: Optional per-flow details (e.g., min-cut edges, used edges).
- name: "cost_power" # Optional custom name
- include_disabled: false # Default: only enabled nodes/links
- aggregation_level: 2 # Produce levels: 0, 1, 2
- ```
+**Attributes:**
-Results stored in `scenario.results` under this step namespace:
- data:
- context:
- include_disabled: bool
- aggregation_level: int
- levels:
- "0":
+- `source` (str)
+- `destination` (str)
+- `priority` (int)
+- `demand` (float)
+- `placed` (float)
+- `dropped` (float)
+- `cost_distribution` (Dict[float, float]) = {}
+- `data` (Dict[str, Any]) = {}
-- path: ""
+**Methods:**
- platform_capex: float
- platform_power_watts: float
- optics_capex: float
- optics_power_watts: float
- capex_total: float
- power_total_watts: float
- "1": [ ... ]
- "2": [ ... ]
+- `to_dict(self) -> 'Dict[str, Any]'` - Return a JSON-serializable dictionary representation.
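+
+A minimal construction sketch (the import path follows this module's name;
+numbers are illustrative):
+
+```python
+from ngraph.results.flow import FlowEntry
+
+entry = FlowEntry(
+    source="dc1",
+    destination="edge1",
+    priority=0,
+    demand=10.0,
+    placed=8.0,
+    dropped=2.0,                             # demand - placed
+    cost_distribution={2.0: 5.0, 4.0: 3.0},  # placed volume by path cost
+)
+payload = entry.to_dict()                    # float cost keys become strings
+```
+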
-### CostPower
+### FlowIterationResult
-Collect platform and optics capex/power by aggregation level.
+Container for per-iteration analysis results.
-Attributes:
- include_disabled: If True, include disabled nodes and links.
- aggregation_level: Inclusive depth for aggregation. 0=root only.
+Args:
+ failure_id: Stable identifier for the failure scenario (e.g., "baseline" or a hash).
+ failure_state: Optional excluded components for the iteration.
+ flows: List of flow entries for this iteration.
+ summary: Aggregated summary across ``flows``.
+ data: Optional per-iteration extras.
**Attributes:**
-- `name` (str)
-- `seed` (Optional[int])
-- `_seed_source` (str)
-- `include_disabled` (bool) = False
-- `aggregation_level` (int) = 2
+- `failure_id` (str)
+- `failure_state` (Optional[Dict[str, List[str]]])
+- `flows` (List[FlowEntry]) = []
+- `summary` (FlowSummary) = FlowSummary(total_demand=0.0, total_placed=0.0, overall_ratio=1.0, dropped_flows=0, num_flows=0)
+- `data` (Dict[str, Any]) = {}
**Methods:**
-- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
-- `run(self, scenario: 'Any') -> 'None'` - Aggregate capex and power by hierarchy levels 0..N.
+- `to_dict(self) -> 'Dict[str, Any]'` - Return a JSON-serializable dictionary representation.
----
+### FlowSummary
-## ngraph.workflow.max_flow_step
+Aggregated metrics across all flows in one iteration.
-MaxFlow workflow step.
+Args:
+ total_demand: Sum of all demands in this iteration.
+ total_placed: Sum of all delivered volumes in this iteration.
+ overall_ratio: ``total_placed / total_demand`` when demand > 0, else 1.0.
+ dropped_flows: Number of flow entries with non-zero drop.
+ num_flows: Total number of flows considered.
-Monte Carlo analysis of maximum flow capacity between node groups using FailureManager.
-Produces unified `flow_results` per iteration under `data.flow_results`.
+**Attributes:**
-YAML Configuration Example:
+- `total_demand` (float)
+- `total_placed` (float)
+- `overall_ratio` (float)
+- `dropped_flows` (int)
+- `num_flows` (int)
- workflow:
+**Methods:**
-- step_type: MaxFlow
+- `to_dict(self) -> 'Dict[str, Any]'` - Return a JSON-serializable dictionary representation.
- name: "maxflow_dc_to_edge"
- source_path: "^datacenter/.*"
- sink_path: "^edge/.*"
- mode: "combine"
- failure_policy: "random_failures"
- iterations: 100
- parallelism: auto
- shortest_path: false
- flow_placement: "PROPORTIONAL"
- baseline: false
- seed: 42
- store_failure_patterns: false
- include_flow_details: false # cost_distribution
- include_min_cut: false # min-cut edges list
+---
-### MaxFlow
+## ngraph.results.snapshot
-Maximum flow Monte Carlo workflow step.
+Scenario snapshot helpers.
-Attributes:
- source_path: Regex pattern for source node groups.
- sink_path: Regex pattern for sink node groups.
- mode: Flow analysis mode ("combine" or "pairwise").
- failure_policy: Name of failure policy in scenario.failure_policy_set.
- iterations: Number of Monte Carlo trials.
- parallelism: Number of parallel worker processes.
- shortest_path: Whether to use shortest paths only.
- flow_placement: Flow placement strategy.
- baseline: Whether to run first iteration without failures as baseline.
- seed: Optional seed for reproducible results.
- store_failure_patterns: Whether to store failure patterns in results.
- include_flow_details: Whether to collect cost distribution per flow.
- include_min_cut: Whether to include min-cut edges per flow.
+Build a concise dictionary snapshot of failure policies and traffic matrices for
+export into results without keeping heavy domain objects.
-**Attributes:**
+### build_scenario_snapshot(*, seed: 'int | None', failure_policy_set, traffic_matrix_set) -> 'Dict[str, Any]'
-- `name` (str)
-- `seed` (int | None)
-- `_seed_source` (str)
-- `source_path` (str)
-- `sink_path` (str)
-- `mode` (str) = combine
-- `failure_policy` (str | None)
-- `iterations` (int) = 1
-- `parallelism` (int | str) = auto
-- `shortest_path` (bool) = False
-- `flow_placement` (FlowPlacement | str) = 1
-- `baseline` (bool) = False
-- `store_failure_patterns` (bool) = False
-- `include_flow_details` (bool) = False
-- `include_min_cut` (bool) = False
+Build a dictionary snapshot of the scenario seed, failure policies, and
+traffic matrices for export into results.
-**Methods:**
+---
-- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
-- `run(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step logic.
+## ngraph.results.store
----
+Generic results store for workflow steps and their metadata.
-## ngraph.workflow.maximum_supported_demand_step
+`Results` organizes outputs by workflow step name and records
+`WorkflowStepMetadata` for execution context. Storage is strictly
+step-scoped: steps must write two keys under their namespace:
-Maximum Supported Demand (MSD) workflow step.
+- ``metadata``: step-level metadata (dict)
+- ``data``: step-specific payload (dict)
-Searches for the maximum uniform traffic multiplier `alpha_star` that is fully
-placeable for a given matrix. Stores results under `data` as:
+Export with :meth:`Results.to_dict`, which returns a JSON-safe structure
+with shape ``{workflow, steps, scenario}``. During export, objects with a
+``to_dict()`` method are converted, dictionary keys are coerced to strings,
+tuples are emitted as lists, and only JSON primitives are produced.
-- `alpha_star`: float
-- `context`: parameters used for the search
-- `base_demands`: serialized base demand specs
-- `probes`: bracket/bisect evaluations with feasibility
+### Results
-### MaximumSupportedDemand
+Step-scoped results container with deterministic export shape.
-MaximumSupportedDemand(name: 'str' = '', seed: 'Optional[int]' = None, _seed_source: 'str' = '', matrix_name: 'str' = 'default', acceptance_rule: 'str' = 'hard', alpha_start: 'float' = 1.0, growth_factor: 'float' = 2.0, alpha_min: 'float' = 1e-06, alpha_max: 'float' = 1000000000.0, resolution: 'float' = 0.01, max_bracket_iters: 'int' = 32, max_bisect_iters: 'int' = 32, seeds_per_alpha: 'int' = 1, placement_rounds: 'int | str' = 'auto')
+Structure:
+
+- workflow: step metadata registry
+- steps: per-step results with enforced keys {"metadata", "data"}
+- scenario: optional scenario snapshot set once at load time
**Attributes:**
-- `name` (str)
-- `seed` (Optional[int])
-- `_seed_source` (str)
-- `matrix_name` (str) = default
-- `acceptance_rule` (str) = hard
-- `alpha_start` (float) = 1.0
-- `growth_factor` (float) = 2.0
-- `alpha_min` (float) = 1e-06
-- `alpha_max` (float) = 1000000000.0
-- `resolution` (float) = 0.01
-- `max_bracket_iters` (int) = 32
-- `max_bisect_iters` (int) = 32
-- `seeds_per_alpha` (int) = 1
-- `placement_rounds` (int | str) = auto
+- `_store` (Dict) = {}
+- `_metadata` (Dict) = {}
+- `_active_step` (Optional)
+- `_scenario` (Dict) = {}
**Methods:**
-- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
-- `run(self, scenario: "'Any'") -> 'None'` - Execute the workflow step logic.
+- `enter_step(self, step_name: str) -> None` - Enter step scope. Subsequent put/get are scoped to this step.
+- `exit_step(self) -> None` - Exit step scope.
+- `get(self, key: str, default: Any = None) -> Any` - Get a value from the active step scope.
+- `get_all_step_metadata(self) -> Dict[str, ngraph.results.store.WorkflowStepMetadata]` - Get metadata for all workflow steps.
+- `get_step(self, step_name: str) -> Dict[str, Any]` - Return the raw dict for a given step name (for cross-step reads).
+- `get_step_metadata(self, step_name: str) -> Optional[ngraph.results.store.WorkflowStepMetadata]` - Get metadata for a workflow step.
+- `get_steps_by_execution_order(self) -> list[str]` - Get step names ordered by their execution order.
+- `put(self, key: str, value: Any) -> None` - Store a value in the active step under an allowed key.
+- `put_step_metadata(self, step_name: str, step_type: str, execution_order: int, *, scenario_seed: Optional[int] = None, step_seed: Optional[int] = None, seed_source: str = 'none', active_seed: Optional[int] = None) -> None` - Store metadata for a workflow step.
+- `set_scenario_snapshot(self, snapshot: Dict[str, Any]) -> None` - Attach a normalized scenario snapshot for export.
+- `to_dict(self) -> Dict[str, Any]` - Return exported results with shape: {workflow, steps, scenario}.
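+
+A minimal sketch of the step-scoped write protocol described above
+(constructing `Results()` directly is assumed here; workflow execution
+normally manages the step scope):
+
+```python
+from ngraph.results.store import Results
+
+results = Results()
+results.enter_step("my_step")
+results.put("metadata", {})             # one of the two allowed keys
+results.put("data", {"answer": 42})     # step-specific payload
+results.exit_step()
+exported = results.to_dict()            # {workflow, steps, scenario}
+```
+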
----
+### WorkflowStepMetadata
-## ngraph.workflow.network_stats
+Metadata for a workflow step execution.
-Workflow step for basic node and link statistics.
+Attributes:
+ step_type: The workflow step class name (e.g., 'CapacityEnvelopeAnalysis').
+ step_name: The instance name of the step.
+ execution_order: Order in which this step was executed (0-based).
+ scenario_seed: Scenario-level seed provided in the YAML (if any).
+ step_seed: Seed assigned to this step (explicit or scenario-derived).
+ seed_source: Source for the step seed. One of:
-Computes and stores network statistics including node/link counts,
-capacity distributions, cost distributions, and degree distributions. Supports
-optional exclusion simulation and disabled entity handling.
+- "scenario-derived": seed was derived from scenario.seed
+- "explicit-step": seed was explicitly provided for the step
+- "none": no seed provided/active for this step
-YAML Configuration Example:
- ```yaml
- workflow:
- - step_type: NetworkStats
+ active_seed: The effective base seed used by the step, if any. For steps
+ that use Monte Carlo execution, per-iteration seeds are derived from
+ active_seed (e.g., active_seed + iteration_index).
- name: "network_statistics" # Optional: Custom name for this step
- include_disabled: false # Include disabled nodes/links in stats
- excluded_nodes: ["node1", "node2"] # Optional: Temporary node exclusions
- excluded_links: ["link1", "link3"] # Optional: Temporary link exclusions
- ```
+**Attributes:**
-Results stored in `scenario.results`:
+- `step_type` (str)
+- `step_name` (str)
+- `execution_order` (int)
+- `scenario_seed` (Optional)
+- `step_seed` (Optional)
+- `seed_source` (str) = none
+- `active_seed` (Optional)
-- Node statistics: node_count
-- Link statistics: link_count, total_capacity, mean_capacity, median_capacity,
+---
+
+## ngraph.profiling.profiler
+
+Profiling for NetGraph workflow execution.
+
+Provides CPU and wall-clock timing per workflow step using ``cProfile`` and
+optionally peak memory via ``tracemalloc``. Aggregates results into structured
+summaries and identifies time-dominant steps (bottlenecks).
- min_capacity, max_capacity, mean_cost, median_cost, min_cost, max_cost
+### PerformanceProfiler
-- Degree statistics: mean_degree, median_degree, min_degree, max_degree
+CPU profiler for NetGraph workflow execution.
-### NetworkStats
+Profiles workflow steps using cProfile and identifies bottlenecks.
-Compute basic node and link statistics for the network.
+**Methods:**
-Supports optional exclusion simulation using NetworkView without modifying the base network.
+- `analyze_performance(self) -> 'None'` - Analyze profiling results and identify bottlenecks.
+- `end_scenario(self) -> 'None'` - End profiling for the entire scenario execution.
+- `get_top_functions(self, step_name: 'str', limit: 'int' = 10) -> 'List[Tuple[str, float, int]]'` - Get the top CPU-consuming functions for a specific step.
+- `merge_child_profiles(self, profile_dir: 'Path', step_name: 'str') -> 'None'` - Merge child worker profiles into the parent step profile.
+- `profile_step(self, step_name: 'str', step_type: 'str') -> 'Generator[None, None, None]'` - Context manager for profiling individual workflow steps.
+- `save_detailed_profile(self, output_path: 'Path', step_name: 'Optional[str]' = None) -> 'None'` - Save detailed profiling data to a file.
+- `start_scenario(self) -> 'None'` - Start profiling for the entire scenario execution.
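+
+A minimal usage sketch (the import path follows the module name; a
+no-argument constructor is assumed):
+
+```python
+from ngraph.profiling.profiler import PerformanceProfiler
+
+profiler = PerformanceProfiler()
+profiler.start_scenario()
+with profiler.profile_step("build_graph", "BuildGraph"):
+    pass  # the step body executes under cProfile here
+profiler.end_scenario()
+profiler.analyze_performance()
+top = profiler.get_top_functions("build_graph", limit=5)
+```
+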
-Attributes:
- include_disabled: If True, include disabled nodes and links in statistics.
- If False, only consider enabled entities.
- excluded_nodes: Optional list of node names to exclude (temporary exclusion).
- excluded_links: Optional list of link IDs to exclude (temporary exclusion).
+### PerformanceReporter
-**Attributes:**
+Format and render performance profiling results.
-- `name` (str)
-- `seed` (Optional[int])
-- `_seed_source` (str)
-- `include_disabled` (bool) = False
-- `excluded_nodes` (Iterable[str]) = ()
-- `excluded_links` (Iterable[str]) = ()
+Generates plain-text reports with timing analysis, bottleneck identification,
+and practical performance tuning suggestions.
**Methods:**
-- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
-- `run(self, scenario: 'Scenario') -> 'None'` - Compute and store network statistics.
+- `generate_report(self) -> 'str'` - Generate performance report.
----
+### ProfileResults
-## ngraph.workflow.traffic_matrix_placement_step
+Profiling results for a scenario execution.
-TrafficMatrixPlacement workflow step.
+Attributes:
+ step_profiles: List of individual step performance profiles.
+ total_wall_time: Total wall-clock time for entire scenario.
+ total_cpu_time: Total CPU time across all steps.
+ total_function_calls: Total function calls across all steps.
+ bottlenecks: List of performance bottlenecks (>10% execution time).
+ analysis_summary: Performance metrics and statistics.
-Runs Monte Carlo demand placement using a named traffic matrix and produces
-unified `flow_results` per iteration under `data.flow_results`.
+**Attributes:**
-### TrafficMatrixPlacement
+- `step_profiles` (List[StepProfile]) = []
+- `total_wall_time` (float) = 0.0
+- `total_cpu_time` (float) = 0.0
+- `total_function_calls` (int) = 0
+- `bottlenecks` (List[Dict[str, Any]]) = []
+- `analysis_summary` (Dict[str, Any]) = {}
-Monte Carlo demand placement using a named traffic matrix.
+### StepProfile
+
+Performance profile data for a single workflow step.
Attributes:
- matrix_name: Name of the traffic matrix to analyze.
- failure_policy: Optional policy name in scenario.failure_policy_set.
- iterations: Number of Monte Carlo iterations.
- parallelism: Number of parallel worker processes.
- placement_rounds: Placement optimization rounds (int or "auto").
- baseline: Include baseline iteration without failures first.
- seed: Optional seed for reproducibility.
- store_failure_patterns: Whether to store failure pattern results.
- include_flow_details: When True, include cost_distribution per flow.
- include_used_edges: When True, include set of used edges per demand in entry data.
- alpha: Numeric scale for demands in the matrix.
- alpha_from_step: Optional producer step name to read alpha from.
- alpha_from_field: Dotted field path in producer step (default: "data.alpha_star").
+ step_name: Name of the workflow step.
+ step_type: Type/class name of the workflow step.
+ wall_time: Total wall-clock time in seconds.
+ cpu_time: CPU time spent in step execution.
+ function_calls: Number of function calls during execution.
+ memory_peak: Peak memory usage during step in bytes (if available).
+ cprofile_stats: Detailed cProfile statistics object.
+ worker_profiles_merged: Number of worker profiles merged into this step.
**Attributes:**
-- `name` (str)
-- `seed` (int | None)
-- `_seed_source` (str)
-- `matrix_name` (str)
-- `failure_policy` (str | None)
-- `iterations` (int) = 1
-- `parallelism` (int | str) = auto
-- `placement_rounds` (int | str) = auto
-- `baseline` (bool) = False
-- `store_failure_patterns` (bool) = False
-- `include_flow_details` (bool) = False
-- `include_used_edges` (bool) = False
-- `alpha` (float) = 1.0
-- `alpha_from_step` (str | None)
-- `alpha_from_field` (str) = data.alpha_star
+- `step_name` (str)
+- `step_type` (str)
+- `wall_time` (float)
+- `cpu_time` (float)
+- `function_calls` (int)
+- `memory_peak` (Optional[float])
+- `cprofile_stats` (Optional[pstats.Stats])
+- `worker_profiles_merged` (int) = 0
-**Methods:**
+---
-- `execute(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step with logging and metadata storage.
-- `run(self, scenario: "'Scenario'") -> 'None'` - Execute the workflow step logic.
+## ngraph.types.base
----
+Base classes and enums for network analysis algorithms.
-## ngraph.dsl.blueprints.expand
+### EdgeSelect
-Network topology blueprints and generation.
+Edge selection criteria.
-### Blueprint
+Determines which edges are considered for path-finding between a node and
+its neighbor(s).
-Represents a reusable blueprint for hierarchical sub-topologies.
+### FlowPlacement
-A blueprint may contain multiple groups of nodes (each can have a node_count
-and a name_template), plus adjacency rules describing how those groups connect.
+Strategies to distribute flow across parallel equal-cost paths.
-Attributes:
- name (str): Unique identifier of this blueprint.
- groups (Dict[str, Any]): A mapping of group_name -> group definition.
- Allowed top-level keys in each group definition here are the same
- as in normal group definitions (e.g. node_count, name_template,
- attrs, disabled, risk_groups, or nested use_blueprint references, etc.).
- adjacency (List[Dict[str, Any]]): A list of adjacency definitions
- describing how these groups are linked, using the DSL fields
- (source, target, pattern, link_params, etc.).
+### PathAlg
-**Attributes:**
+Path-finding algorithm types.
-- `name` (str)
-- `groups` (Dict[str, Any])
-- `adjacency` (List[Dict[str, Any]])
+---
-### DSLExpansionContext
+## ngraph.types.dto
-Carries the blueprint definitions and the final Network instance
-to be populated during DSL expansion.
+Types and data structures for algorithm analytics.
-Attributes:
- blueprints (Dict[str, Blueprint]): Dictionary of blueprint-name -> Blueprint.
- network (Network): The Network into which expanded nodes/links are inserted.
- pending_bp_adj (List[tuple[Dict[str, Any], str]]): Deferred blueprint adjacency
- expansions collected as (adj_def, parent_path) to be processed later.
+Defines immutable summary containers and aliases for algorithm outputs.
-**Attributes:**
+### EdgeRef
-- `blueprints` (Dict[str, Blueprint])
-- `network` (Network)
-- `pending_bp_adj` (List[tuple[Dict[str, Any], str]]) = []
+Reference to a directed edge via scenario link_id and direction.
-### expand_network_dsl(data: 'Dict[str, Any]') -> 'Network'
+Replaces the old Edge = Tuple[str, str, Hashable] to provide stable,
+scenario-native edge identification across Core reorderings.
-Expands a combined blueprint + network DSL into a complete Network object.
+Attributes:
+ link_id: Scenario link identifier (matches Network.links keys)
+ direction: 'fwd' for source→target as defined in Link; 'rev' for reverse
-Overall flow:
- 1) Parse "blueprints" into Blueprint objects.
- 2) Build a new Network from "network" metadata (e.g. name, version).
- 3) Expand 'network["groups"]' (collect blueprint adjacencies for later).
+**Attributes:**
-- If a group references a blueprint, incorporate that blueprint's subgroups
+- `link_id` (str)
+- `direction` (EdgeDir)
- while merging parent's attrs + disabled + risk_groups into subgroups.
- Blueprint adjacency is deferred and processed after node overrides.
+### FlowSummary
-- Otherwise, directly create nodes (a "direct node group").
+Summary of max-flow computation results.
- 4) Process any direct node definitions (network["nodes"]).
- 5) Process node overrides (in order if multiple overrides match).
- 6) Expand deferred blueprint adjacencies.
- 7) Expand adjacency definitions in 'network["adjacency"]'.
- 8) Process any direct link definitions (network["links"]).
- 9) Process link overrides (in order if multiple overrides match).
+Captures edge flows, residual capacities, reachable set, and min-cut.
-Under the new rules:
+Breaking change from v1.x: Fields now use EdgeRef instead of (src, dst, key) tuples
+for stable scenario-level edge identification.
-- Only certain top-level fields are permitted in each structure. Any extra
+Attributes:
+ total_flow: Maximum flow value achieved.
+ cost_distribution: Mapping of path cost to flow volume placed at that cost.
+ min_cut: Saturated edges crossing the s-t cut.
+ reachable_nodes: Nodes reachable from source in residual graph (optional).
+ edge_flow: Flow amount per edge (optional, only populated when requested).
+ residual_cap: Remaining capacity per edge after placement (optional).
- keys raise a ValueError. "attrs" is where arbitrary user fields go.
+**Attributes:**
-- For link_params, recognized fields are "capacity", "cost", "disabled",
+- `total_flow` (float)
+- `cost_distribution` (Dict[Cost, float])
+- `min_cut` (Tuple[EdgeRef, ...])
+- `reachable_nodes` (Tuple[str, ...] | None)
+- `edge_flow` (Dict[EdgeRef, float] | None)
+- `residual_cap` (Dict[EdgeRef, float] | None)
- "risk_groups", "attrs". Everything else must go inside link_params["attrs"].
+---
-- For node/group definitions, recognized fields include "node_count",
+## ngraph.utils.ids
- "name_template", "attrs", "disabled", "risk_groups" or "use_blueprint"
- for blueprint-based groups.
+### new_base64_uuid() -> 'str'
-Args:
- data (Dict[str, Any]): The YAML-parsed dictionary containing
- optional "blueprints" + "network".
+Return a 22-character URL-safe Base64-encoded UUID without padding.
+
+The function generates a random version 4 UUID, encodes the 16 raw bytes
+using URL-safe Base64, removes the two trailing padding characters, and
+decodes to ASCII. The resulting string length is 22 characters.
Returns:
- Network: The expanded Network object with all nodes and links.
+ A 22-character URL-safe Base64 representation of a UUID4 without
+ padding.
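+
+A minimal sketch of the described encoding (standard-library only):
+
+```python
+import base64
+import uuid
+
+# 16 random bytes -> 24-char URL-safe Base64 -> strip "==" padding -> 22 chars.
+def new_base64_uuid_sketch() -> str:
+    return base64.urlsafe_b64encode(uuid.uuid4().bytes).rstrip(b"=").decode("ascii")
+
+assert len(new_base64_uuid_sketch()) == 22
+```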
---
-## ngraph.dsl.blueprints.parse
+## ngraph.utils.output_paths
-Parsing helpers for the network DSL.
+Utilities for building CLI artifact output paths.
-This module factors out pure parsing/validation helpers from the expansion
-module so they can be tested independently and reused.
+This module centralizes logic for composing file and directory paths for
+artifacts produced by the NetGraph CLI. Paths are built from an optional
+output directory, a prefix (usually derived from the scenario file or
+results file), and a per-artifact suffix.
-### check_adjacency_keys(adj_def: 'Dict[str, Any]', context: 'str') -> 'None'
+### build_artifact_path(output_dir: 'Optional[Path]', prefix: 'str', suffix: 'str') -> 'Path'
-Ensure adjacency definitions only contain recognized keys.
+Compose an artifact path as output_dir / (prefix + suffix).
-### check_link_params(link_params: 'Dict[str, Any]', context: 'str') -> 'None'
+If ``output_dir`` is None, the path is created relative to the current
+working directory.
-Ensure link_params contain only recognized keys.
+Args:
+ output_dir: Base directory for outputs; if None, use CWD.
+ prefix: Filename prefix; usually derived from scenario or results stem.
+ suffix: Per-artifact suffix including the dot (e.g. ".results.json").
-Link attributes may include "hardware" per-end mapping when set under
-link_params.attrs. This function only validates top-level link_params keys.
+Returns:
+ The composed path.
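+
+A minimal sketch of the composition rule (illustrative only):
+
+```python
+from pathlib import Path
+from typing import Optional
+
+# output_dir / (prefix + suffix); fall back to CWD when output_dir is None.
+def build_artifact_path_sketch(output_dir: Optional[Path], prefix: str, suffix: str) -> Path:
+    base = output_dir if output_dir is not None else Path.cwd()
+    return base / f"{prefix}{suffix}"
+
+print(build_artifact_path_sketch(Path("out"), "scenario1", ".results.json"))
+# -> out/scenario1.results.json
+```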
-### check_no_extra_keys(data_dict: 'Dict[str, Any]', allowed: 'set[str]', context: 'str') -> 'None'
+### ensure_parent_dir(path: 'Path') -> 'None'
-Raise if ``data_dict`` contains keys outside ``allowed``.
+Ensure the parent directory exists for a file path.
-Args:
- data_dict: The dict to check.
- allowed: Set of recognized keys.
- context: Short description used in error messages.
+### profiles_dir_for_run(scenario_path: 'Path', output_dir: 'Optional[Path]') -> 'Path'
-### expand_name_patterns(name: 'str') -> 'List[str]'
+Return the directory for child worker profiles for ``run --profile``.
-Expand bracket expressions in a group name.
+Args:
+ scenario_path: The scenario YAML path.
+ output_dir: Optional base output directory.
-Examples:
+Returns:
+ Directory path where worker profiles should be stored.
-- "fa[1-3]" -> ["fa1", "fa2", "fa3"]
-- "dc[1,3,5-6]" -> ["dc1", "dc3", "dc5", "dc6"]
-- "fa[1-2]_plane[5-6]" -> ["fa1_plane5", "fa1_plane6", "fa2_plane5", "fa2_plane6"]
+### resolve_override_path(override: 'Optional[Path]', output_dir: 'Optional[Path]') -> 'Optional[Path]'
-### join_paths(parent_path: 'str', rel_path: 'str') -> 'str'
+Resolve an override path with respect to an optional output directory.
-Join two path segments according to the DSL conventions.
+- Absolute override paths are returned as-is.
+- Relative override paths are interpreted as relative to ``output_dir``
----
+ when provided; otherwise relative to the current working directory.
-## ngraph.results.artifacts
+Args:
+ override: Path provided by the user to override the default.
+ output_dir: Optional base directory for relative overrides.
-Serializable result artifacts for analysis workflows.
+Returns:
+ The resolved path or None if no override was provided.
-This module defines dataclasses that capture outputs from analyses and
-simulations in a JSON-serializable form:
+### results_path_for_run(scenario_path: 'Path', output_dir: 'Optional[Path]', results_override: 'Optional[Path]') -> 'Path'
-- `PlacementResultSet`: aggregated placement results and statistics
-- `CapacityEnvelope`: frequency-based capacity distributions and optional
+Determine the results JSON path for the ``run`` command.
- aggregated flow statistics
+Behavior:
-- `FailurePatternResult`: capacity results for specific failure patterns
+- If ``results_override`` is provided, return it (resolved relative to
-### CapacityEnvelope
+ ``output_dir`` when that is specified, otherwise as-is).
-Frequency-based capacity envelope that stores capacity values as frequencies.
+- Else if ``output_dir`` is provided, return ``output_dir/<scenario_stem>.results.json``.
+- Else, return ``<scenario_stem>.results.json`` in the current working directory.
-This approach is memory-efficient for Monte Carlo analysis where we care
-about statistical distributions rather than individual sample order.
+Args:
+ scenario_path: The scenario YAML file path.
+ output_dir: Optional base output directory.
+ results_override: Optional explicit results file path.
-Attributes:
- source_pattern: Regex pattern used to select source nodes.
- sink_pattern: Regex pattern used to select sink nodes.
- mode: Flow analysis mode ("combine" or "pairwise").
- frequencies: Dictionary mapping capacity values to their occurrence counts.
- min_capacity: Minimum observed capacity.
- max_capacity: Maximum observed capacity.
- mean_capacity: Mean capacity across all samples.
- stdev_capacity: Standard deviation of capacity values.
- total_samples: Total number of samples represented.
- flow_summary_stats: Optional dictionary with aggregated FlowSummary statistics.
- Contains cost_distribution_stats and other flow analytics.
+Returns:
+ The path where results should be written.
-**Attributes:**
+### scenario_prefix_from_path(scenario_path: 'Path') -> 'str'
-- `source_pattern` (str)
-- `sink_pattern` (str)
-- `mode` (str)
-- `frequencies` (Dict[float, int])
-- `min_capacity` (float)
-- `max_capacity` (float)
-- `mean_capacity` (float)
-- `stdev_capacity` (float)
-- `total_samples` (int)
-- `flow_summary_stats` (Dict[str, Any]) = {}
+Return a safe prefix derived from a scenario file path.
-**Methods:**
+Args:
+ scenario_path: The scenario YAML file path.
-- `expand_to_values(self) -> 'List[float]'` - Expand frequency map back to individual values.
-- `from_dict(data: 'Dict[str, Any]') -> "'CapacityEnvelope'"` - Construct a CapacityEnvelope from a dictionary.
-- `from_values(source_pattern: 'str', sink_pattern: 'str', mode: 'str', values: 'List[float]', flow_summaries: 'List[Any] | None' = None) -> "'CapacityEnvelope'"` - Create envelope from capacity values and optional flow summaries.
-- `get_percentile(self, percentile: 'float') -> 'float'` - Calculate percentile from frequency distribution.
-- `to_dict(self) -> 'Dict[str, Any]'` - Convert to dictionary for JSON serialization.
+Returns:
+ The scenario filename stem, trimmed of extensions.
-### FailurePatternResult
+---
-Result for a unique failure pattern with associated capacity matrix.
+## ngraph.utils.seed_manager
-Attributes:
- excluded_nodes: List of failed node IDs.
- excluded_links: List of failed link IDs.
- capacity_matrix: Dictionary mapping flow keys to capacity values.
- count: Number of times this pattern occurred.
- is_baseline: Whether this represents the baseline (no failures) case.
+Deterministic seed derivation to avoid global random.seed() order dependencies.
-**Attributes:**
+### SeedManager
-- `excluded_nodes` (List[str])
-- `excluded_links` (List[str])
-- `capacity_matrix` (Dict[str, float])
-- `count` (int)
-- `is_baseline` (bool) = False
-- `_pattern_key_cache` (str)
+Manages deterministic seed derivation for isolated component reproducibility.
-**Methods:**
+Global random.seed() creates order dependencies and component interference.
+SeedManager derives unique seeds per component from a master seed using SHA-256,
+ensuring reproducible results regardless of execution order or parallelism.
-- `from_dict(data: 'Dict[str, Any]') -> "'FailurePatternResult'"` - Construct FailurePatternResult from a dictionary.
-- `to_dict(self) -> 'Dict[str, Any]'` - Convert to dictionary for JSON serialization.
+Usage:
+ seed_mgr = SeedManager(42)
+ failure_seed = seed_mgr.derive_seed("failure_policy", "default")
-### PlacementEnvelope
+**Methods:**
-Per-demand placement envelope keyed like capacity envelopes.
+- `create_random_state(self, *components: 'Any') -> 'random.Random'` - Create a new Random instance with derived seed.
+- `derive_seed(self, *components: 'Any') -> 'Optional[int]'` - Derive a deterministic seed from master seed and component identifiers.
+- `seed_global_random(self, *components: 'Any') -> 'None'` - Seed the global random module with derived seed.
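+
+A minimal sketch of SHA-256 based derivation (assumed behavior for
+illustration; the actual SeedManager may differ in hashing details):
+
+```python
+import hashlib
+
+# Derive a stable seed from a master seed plus component identifiers.
+def derive_seed_sketch(master_seed: int, *components: object) -> int:
+    key = ":".join([str(master_seed), *map(str, components)])
+    digest = hashlib.sha256(key.encode("utf-8")).digest()
+    return int.from_bytes(digest[:8], "big")  # 64-bit deterministic seed
+
+# Same inputs always yield the same seed, regardless of call order.
+assert derive_seed_sketch(42, "failure_policy", "default") == \
+    derive_seed_sketch(42, "failure_policy", "default")
+```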
-Each envelope captures frequency distribution of placement ratio for a
-specific demand definition across Monte Carlo iterations.
+---
-Attributes:
- source: Source selection regex or node label.
- sink: Sink selection regex or node label.
- mode: Demand expansion mode ("combine" or "pairwise").
- priority: Demand priority class.
- frequencies: Mapping of placement ratio to occurrence count.
- min: Minimum observed placement ratio.
- max: Maximum observed placement ratio.
- mean: Mean placement ratio.
- stdev: Standard deviation of placement ratio.
- total_samples: Number of iterations represented.
+## ngraph.utils.yaml_utils
-**Attributes:**
+Utilities for handling YAML parsing quirks and common operations.
-- `source` (str)
-- `sink` (str)
-- `mode` (str)
-- `priority` (int)
-- `frequencies` (Dict[float, int])
-- `min` (float)
-- `max` (float)
-- `mean` (float)
-- `stdev` (float)
-- `total_samples` (int)
+### normalize_yaml_dict_keys(data: Dict[Any, ~V]) -> Dict[str, ~V]
-**Methods:**
+Normalize dictionary keys from YAML parsing to ensure consistent string keys.
-- `from_dict(data: 'Dict[str, Any]') -> "'PlacementEnvelope'"` - Construct a PlacementEnvelope from a dictionary.
-- `from_values(source: 'str', sink: 'str', mode: 'str', priority: 'int', ratios: 'List[float]', rounding_decimals: 'int' = 4) -> "'PlacementEnvelope'"`
-- `to_dict(self) -> 'Dict[str, Any]'`
+YAML 1.1 boolean keys (e.g., true, false, yes, no, on, off) are converted to
+Python True/False boolean values. This function converts them to predictable
+string representations ("True"/"False") and ensures all keys are strings.
-### PlacementResultSet
+Args:
+ data: Dictionary that may contain boolean or other non-string keys from YAML parsing
-Aggregated traffic placement results from one or many runs.
+Returns:
+ Dictionary with all keys converted to strings, boolean keys converted to "True"/"False"
-This immutable dataclass stores traffic placement results organized by case,
-with overall statistics and per-demand statistics.
+Examples:
+ >>> normalize_yaml_dict_keys({True: "value1", False: "value2", "normal": "value3"})
+ {"True": "value1", "False": "value2", "normal": "value3"}
-Attributes:
- results_by_case: Dictionary mapping case names to TrafficResult lists.
- overall_stats: Dictionary of overall statistics.
- demand_stats: Dictionary mapping demand keys to per-demand statistics.
+ >>> # In YAML: true:, yes:, on: all become Python True
+ >>> # In YAML: false:, no:, off: all become Python False
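+
+A minimal sketch of this normalization (illustrative; ``str(True)`` is
+already ``"True"``, so a plain string conversion covers boolean keys):
+
+```python
+from typing import Any, Dict, TypeVar
+
+V = TypeVar("V")
+
+# Convert every key to a string; YAML 1.1 booleans become "True"/"False".
+def normalize_keys_sketch(data: Dict[Any, V]) -> Dict[str, V]:
+    return {str(key): value for key, value in data.items()}
+
+assert normalize_keys_sketch({True: "v1", "normal": "v3"}) == {"True": "v1", "normal": "v3"}
+```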
-**Attributes:**
+---
-- `results_by_case` (dict[str, list[TrafficResult]]) = {}
-- `overall_stats` (dict[str, float]) = {}
-- `demand_stats` (dict[tuple[str, str, int], dict[str, float]]) = {}
+## ngraph.adapters.core
-**Methods:**
+Adapter layer for NetGraph-Core integration.
-- `to_dict(self) -> 'dict[str, Any]'` - Convert to dictionary for JSON serialization.
+Provides graph building, node/edge ID mapping, and result translation between
+NetGraph's scenario-level types and NetGraph-Core's internal representations.
----
+Key components:
-## ngraph.results.flow
+- build_graph(): One-shot graph construction with exclusions
+- build_graph_cache(): Cached graph for repeated analysis with masks
+- build_node_mask() / build_edge_mask(): O(|excluded|) mask construction
+- get_disabled_exclusions(): Helper to collect disabled topology for exclusions
-Unified flow result containers for failure-analysis iterations.
+Graph caching enables efficient repeated analysis with different exclusion sets
+by building the graph once and using lightweight masks for exclusions. Disabled
+nodes and links are automatically included in masks built from a GraphCache.
-Defines small, serializable dataclasses that capture per-iteration outcomes
-for capacity and demand-placement style analyses in a unit-agnostic form.
+### AugmentationEdge
-Objects expose `to_dict()` that returns JSON-safe primitives. Float-keyed
-distributions are normalized to string keys, and arbitrary `data` payloads are
-sanitized. These dicts are written under `data.flow_results` by steps.
+Edge specification for graph augmentation.
-### FlowEntry
+Augmentation edges are added to the graph as-is (unidirectional).
+Nodes referenced in augmentations that don't exist in the network
+are automatically treated as pseudo/virtual nodes.
-Represents a single source→destination flow outcome within an iteration.
+Attributes:
+ source: Source node name (real or pseudo)
+ target: Target node name (real or pseudo)
+ capacity: Edge capacity
+ cost: Edge cost (converted to int64 for Core)
-Fields are unit-agnostic. Callers can interpret numbers as needed for
-presentation (e.g., Gbit/s).
+### EdgeMapper
-Args:
- source: Source identifier.
- destination: Destination identifier.
- priority: Priority/class for traffic placement scenarios. Zero when not applicable.
- demand: Requested volume for this flow.
- placed: Delivered volume for this flow.
- dropped: Unmet volume (``demand - placed``).
- cost_distribution: Optional distribution of placed volume by path cost.
- data: Optional per-flow details (e.g., min-cut edges, used edges).
+Bidirectional mapping between external edge IDs and EdgeRef (link_id + direction).
-**Attributes:**
+External edge ID encoding: (linkIndex << 1) | dirBit
-- `source` (str)
-- `destination` (str)
-- `priority` (int)
-- `demand` (float)
-- `placed` (float)
-- `dropped` (float)
-- `cost_distribution` (Dict[float, float]) = {}
-- `data` (Dict[str, Any]) = {}
+- linkIndex: stable sorted index of link_id in Network.links
+- dirBit: 0 for forward ('fwd'), 1 for reverse ('rev')
**Methods:**
-- `to_dict(self) -> 'Dict[str, Any]'` - Return a JSON-serializable dictionary representation.
+- `decode_ext_id(self, ext_id: 'int') -> 'Optional[EdgeRef]'` - Decode external edge ID to EdgeRef.
+- `encode_ext_id(self, link_id: 'str', direction: 'str') -> 'int'` - Encode (link_id, direction) to external edge ID.
+- `to_name(self, ext_id: 'int') -> 'Optional[str]'` - Map external edge ID to link ID (name).
+- `to_ref(self, core_edge_id: 'int', multidigraph: 'netgraph_core.StrictMultiDiGraph') -> 'Optional[EdgeRef]'` - Map Core EdgeId to EdgeRef using the Core graph's ext_edge_ids.
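+
+A minimal sketch of this bit layout (illustrative helper names, not the
+EdgeMapper implementation itself):
+
+```python
+# Pack/unpack the external edge ID described above: (linkIndex << 1) | dirBit.
+def encode_ext_id(link_index: int, direction: str) -> int:
+    return (link_index << 1) | (1 if direction == "rev" else 0)
+
+def decode_ext_id(ext_id: int) -> tuple[int, str]:
+    return ext_id >> 1, ("rev" if ext_id & 1 else "fwd")
+
+assert decode_ext_id(encode_ext_id(7, "rev")) == (7, "rev")
+```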
-### FlowIterationResult
+### GraphCache
-Container for per-iteration analysis results.
+Pre-built graph components for efficient repeated analysis.
-Args:
- failure_id: Stable identifier for the failure scenario (e.g., "baseline" or a hash).
- failure_state: Optional excluded components for the iteration.
- flows: List of flow entries for this iteration.
- summary: Aggregated summary across ``flows``.
- data: Optional per-iteration extras.
+Holds all components needed for running analysis with different exclusion
+sets without rebuilding the graph. Use build_graph_cache() to create.
+
+Attributes:
+ graph_handle: Core Graph handle for algorithm execution.
+ multidigraph: Core StrictMultiDiGraph with topology data.
+ edge_mapper: Mapper for link_id <-> edge_id translation.
+ node_mapper: Mapper for node_name <-> node_id translation.
+ algorithms: Core Algorithms instance for running computations.
+ disabled_node_ids: Pre-computed set of disabled node IDs.
+ disabled_link_ids: Pre-computed set of disabled link IDs.
+ link_id_to_edge_indices: Mapping from link_id to edge array indices.
**Attributes:**
-- `failure_id` (str)
-- `failure_state` (Optional[Dict[str, List[str]]])
-- `flows` (List[FlowEntry]) = []
-- `summary` (FlowSummary) = FlowSummary(total_demand=0.0, total_placed=0.0, overall_ratio=1.0, dropped_flows=0, num_flows=0)
-- `data` (Dict[str, Any]) = {}
+- `graph_handle` (netgraph_core.Graph)
+- `multidigraph` (netgraph_core.StrictMultiDiGraph)
+- `edge_mapper` (EdgeMapper)
+- `node_mapper` (NodeMapper)
+- `algorithms` (netgraph_core.Algorithms)
+- `disabled_node_ids` (Set[int]) = set()
+- `disabled_link_ids` (Set[str]) = set()
+- `link_id_to_edge_indices` (Dict[str, List[int]]) = {}
+
+### NodeMapper
+
+Bidirectional mapping between NetGraph node names (str) and Core NodeId (int).
**Methods:**
-- `to_dict(self) -> 'Dict[str, Any]'` - Return a JSON-serializable dictionary representation.
+- `to_id(self, name: 'str') -> 'int'` - Map node name to Core NodeId.
+- `to_name(self, node_id: 'int') -> 'str'` - Map Core NodeId to node name.
-### FlowSummary
+### build_edge_mask(cache: 'GraphCache', excluded_links: 'Optional[Set[str]]' = None) -> 'np.ndarray'
-Aggregated metrics across all flows in one iteration.
+Build an edge mask array for Core algorithms.
+
+Uses O(|excluded| + |disabled|) time complexity by using the pre-computed
+link_id -> edge_indices mapping, rather than iterating all edges.
+
+Core semantics: True = include, False = exclude.
Args:
- total_demand: Sum of all demands in this iteration.
- total_placed: Sum of all delivered volumes in this iteration.
- overall_ratio: ``total_placed / total_demand`` when demand > 0, else 1.0.
- dropped_flows: Number of flow entries with non-zero drop.
- num_flows: Total number of flows considered.
+ cache: GraphCache with pre-computed edge index mapping.
+ excluded_links: Optional set of link IDs to exclude.
-**Attributes:**
+Returns:
+ Boolean numpy array of shape (num_edges,) where True means included.
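+
+A minimal sketch of this mask construction (assumed shape of the logic):
+
+```python
+import numpy as np
+
+# Start fully included, then flip only excluded/disabled edges to False.
+def build_edge_mask_sketch(num_edges, link_id_to_edge_indices,
+                           excluded_links=(), disabled_links=()):
+    mask = np.ones(num_edges, dtype=bool)  # Core semantics: True = include
+    for link_id in set(excluded_links) | set(disabled_links):
+        for idx in link_id_to_edge_indices.get(link_id, ()):
+            mask[idx] = False
+    return mask
+
+mask = build_edge_mask_sketch(4, {"L1": [0, 1], "L2": [2, 3]}, excluded_links={"L2"})
+print(mask)  # [ True  True False False]
+```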
-- `total_demand` (float)
-- `total_placed` (float)
-- `overall_ratio` (float)
-- `dropped_flows` (int)
-- `num_flows` (int)
+### build_graph(network: "'Network'", *, add_reverse: 'bool' = True, augmentations: 'Optional[List[AugmentationEdge]]' = None, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'tuple[netgraph_core.Graph, netgraph_core.StrictMultiDiGraph, EdgeMapper, NodeMapper]'
-**Methods:**
+Build Core graph with optional augmentations and exclusions.
-- `to_dict(self) -> 'Dict[str, Any]'` - Return a JSON-serializable dictionary representation.
+This is the unified graph builder for all analysis functions. It supports:
----
+- Standard network topology
+- Pseudo/virtual nodes (via augmentations)
+- Filtered topology (via exclusions)
-## ngraph.results.store
+For repeated analysis with different exclusions, use build_graph_cache()
+with build_node_mask()/build_edge_mask() for better performance.
-Generic results store for workflow steps and their metadata.
+Args:
+ network: NetGraph Network instance.
+ add_reverse: If True, add reverse edges for network links.
+ augmentations: Optional list of edges to add (for pseudo nodes, etc.).
+ excluded_nodes: Optional set of node names to exclude.
+ excluded_links: Optional set of link IDs to exclude.
-`Results` organizes outputs by workflow step name and records
-`WorkflowStepMetadata` for execution context. Storage is strictly
-step-scoped: steps must write two keys under their namespace:
+Returns:
+ Tuple of (graph_handle, multidigraph, edge_mapper, node_mapper).
-- ``metadata``: step-level metadata (dict)
-- ``data``: step-specific payload (dict)
+Pseudo Nodes:
+ Any node name in augmentations that doesn't exist in network.nodes
+ is automatically treated as a pseudo node and assigned a node ID.
-Export with :meth:`Results.to_dict`, which returns a JSON-safe structure
-with shape ``{workflow, steps, scenario}``. During export, objects with a
-``to_dict()`` method are converted, dictionary keys are coerced to strings,
-tuples are emitted as lists, and only JSON primitives are produced.
+Augmentation Edges:
-### Results
+- Added unidirectionally as specified
+- Assigned ext_edge_id of -1 (sentinel for non-network edges)
+- Not included in edge_mapper translation
-Step-scoped results container with deterministic export shape.
+Node ID Assignment:
+ Real nodes (sorted): IDs 0..(num_real-1)
+ Pseudo nodes (sorted): IDs num_real..(num_real+num_pseudo-1)
-Structure:
+### build_graph_cache(network: "'Network'", *, add_reverse: 'bool' = True, augmentations: 'Optional[List[AugmentationEdge]]' = None) -> 'GraphCache'
-- workflow: step metadata registry
-- steps: per-step results with enforced keys {"metadata", "data"}
-- scenario: optional scenario snapshot set once at load time
+Build cached graph components for efficient repeated analysis.
-**Attributes:**
+Constructs the graph once and pre-computes mappings needed for fast
+mask building. Use with build_node_mask() and build_edge_mask() for
+O(|excluded|) exclusion handling instead of O(V+E).
-- `_store` (Dict) = {}
-- `_metadata` (Dict) = {}
-- `_active_step` (Optional)
-- `_scenario` (Dict) = {}
+Args:
+ network: NetGraph Network instance.
+ add_reverse: If True, add reverse edges for network links.
+ augmentations: Optional list of edges to add (for pseudo nodes, etc.).
-**Methods:**
+Returns:
+ GraphCache with all pre-built components.
-- `enter_step(self, step_name: str) -> None` - Enter step scope. Subsequent put/get are scoped to this step.
-- `exit_step(self) -> None` - Exit step scope.
-- `get(self, key: str, default: Any = None) -> Any` - Get a value from the active step scope.
-- `get_all_step_metadata(self) -> Dict[str, ngraph.results.store.WorkflowStepMetadata]` - Get metadata for all workflow steps.
-- `get_step(self, step_name: str) -> Dict[str, Any]` - Return the raw dict for a given step name (for cross-step reads).
-- `get_step_metadata(self, step_name: str) -> Optional[ngraph.results.store.WorkflowStepMetadata]` - Get metadata for a workflow step.
-- `get_steps_by_execution_order(self) -> list[str]` - Get step names ordered by their execution order.
-- `put(self, key: str, value: Any) -> None` - Store a value in the active step under an allowed key.
-- `put_step_metadata(self, step_name: str, step_type: str, execution_order: int, *, scenario_seed: Optional[int] = None, step_seed: Optional[int] = None, seed_source: str = 'none', active_seed: Optional[int] = None) -> None` - Store metadata for a workflow step.
-- `set_scenario_snapshot(self, snapshot: Dict[str, Any]) -> None` - Attach a normalized scenario snapshot for export.
-- `to_dict(self) -> Dict[str, Any]` - Return exported results with shape: {workflow, steps, scenario}.
+Example:
+ >>> cache = build_graph_cache(network)
+ >>> for excluded_nodes, excluded_links in failure_patterns:
+ ... node_mask = build_node_mask(cache, excluded_nodes)
+ ... edge_mask = build_edge_mask(cache, excluded_links)
+ ... result = cache.algorithms.max_flow(
+ ... cache.graph_handle, src, dst,
+ ... node_mask=node_mask, edge_mask=edge_mask
+ ... )
-### WorkflowStepMetadata
+### build_node_mask(cache: 'GraphCache', excluded_nodes: 'Optional[Set[str]]' = None) -> 'np.ndarray'
-Metadata for a workflow step execution.
+Build a node mask array for Core algorithms.
-Attributes:
- step_type: The workflow step class name (e.g., 'CapacityEnvelopeAnalysis').
- step_name: The instance name of the step.
- execution_order: Order in which this step was executed (0-based).
- scenario_seed: Scenario-level seed provided in the YAML (if any).
- step_seed: Seed assigned to this step (explicit or scenario-derived).
- seed_source: Source for the step seed. One of:
+Uses O(|excluded| + |disabled|) time complexity by only setting
+excluded/disabled nodes to False, rather than iterating all nodes.
-- "scenario-derived": seed was derived from scenario.seed
-- "explicit-step": seed was explicitly provided for the step
-- "none": no seed provided/active for this step
+Core semantics: True = include, False = exclude.
- active_seed: The effective base seed used by the step, if any. For steps
- that use Monte Carlo execution, per-iteration seeds are derived from
- active_seed (e.g., active_seed + iteration_index).
+Args:
+ cache: GraphCache with pre-computed disabled node IDs.
+ excluded_nodes: Optional set of node names to exclude.
-**Attributes:**
+Returns:
+ Boolean numpy array of shape (num_nodes,) where True means included.
-- `step_type` (str)
-- `step_name` (str)
-- `execution_order` (int)
-- `scenario_seed` (Optional)
-- `step_seed` (Optional)
-- `seed_source` (str) = none
-- `active_seed` (Optional)
+### get_disabled_exclusions(network: "'Network'", excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'tuple[Optional[Set[str]], Optional[Set[str]]]'
+
+Merge user exclusions with disabled nodes/links from the network.
+
+Use this when calling build_graph() to ensure disabled topology is excluded.
+
+Args:
+ network: Network instance.
+ excluded_nodes: User-provided node exclusions (or None).
+ excluded_links: User-provided link exclusions (or None).
+
+Returns:
+ Tuple of (full_excluded_nodes, full_excluded_links) including disabled.
+ Returns None for either if empty (for efficient build_graph calls).
---
-## ngraph.monte_carlo.functions
+## ngraph.exec.analysis.flow
-Picklable Monte Carlo analysis functions for FailureManager simulations.
+Flow analysis functions for network evaluation.
-These functions are designed for use with FailureManager.run_monte_carlo_analysis()
-and follow the pattern: analysis_func(network_view: NetworkView, **kwargs) -> Any.
+These functions are designed for use with FailureManager and follow the
+AnalysisFunction protocol: analysis_func(network: Network, excluded_nodes: Set[str],
+excluded_links: Set[str], **kwargs) -> Any.
All functions accept only simple, hashable parameters to ensure compatibility
-with FailureManager's caching and multiprocessing systems for Monte Carlo
-failure analysis scenarios.
+with FailureManager's caching and multiprocessing systems.
+
+Graph caching enables efficient repeated analysis with different exclusion
+sets by building the graph once and using O(|excluded|) masks for exclusions.
-This module provides only computation functions. Visualization and notebook
-analysis live in external packages.
+### build_demand_graph_cache(network: "'Network'", demands_config: 'list[dict[str, Any]]') -> 'GraphCache'
-### demand_placement_analysis(network_view: "'NetworkView'", demands_config: 'list[dict[str, Any]]', placement_rounds: 'int | str' = 'auto', include_flow_details: 'bool' = False, include_used_edges: 'bool' = False, **kwargs) -> 'FlowIterationResult'
+Build a graph cache for repeated demand placement analysis.
-Analyze traffic demand placement success rates.
+Pre-computes the graph with augmentations (pseudo source/sink nodes) for
+efficient repeated analysis with different exclusion sets.
-Produces per-demand FlowEntry records and an iteration-level summary suitable
-for downstream statistics (e.g., delivered percentiles) without reconstructing
-joint distributions.
+Args:
+ network: Network instance.
+ demands_config: List of demand configurations (same format as demand_placement_analysis).
+
+Returns:
+ GraphCache ready for use with demand_placement_analysis.
+
+### build_maxflow_graph_cache(network: "'Network'", source_regex: 'str', sink_regex: 'str', mode: 'str' = 'combine') -> 'MaxFlowGraphCache'
+
+Build a graph cache for repeated max-flow analysis.
+
+Pre-computes the graph with pseudo source/sink nodes for all source/sink
+pairs, enabling O(|excluded|) mask building per iteration.
+
+Args:
+ network: Network instance.
+ source_regex: Regex pattern for source node groups.
+ sink_regex: Regex pattern for sink node groups.
+ mode: Flow analysis mode ("combine" or "pairwise").
-Additionally exposes placement engine counters to aid performance analysis:
+Returns:
+ MaxFlowGraphCache ready for use with max_flow_analysis or sensitivity_analysis.
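+
+An illustrative pairing of cache construction and repeated analysis
+(`network` and `failure_patterns` are assumed to exist):
+
+```python
+from ngraph.exec.analysis.flow import build_maxflow_graph_cache, max_flow_analysis
+
+cache = build_maxflow_graph_cache(network, source_regex="^dc1/.*", sink_regex="^dc2/.*")
+for excluded_nodes, excluded_links in failure_patterns:
+    result = max_flow_analysis(
+        network, excluded_nodes, excluded_links,
+        source_regex="^dc1/.*", sink_regex="^dc2/.*",
+        _graph_cache=cache,  # O(|excluded|) masks instead of graph rebuild
+    )
+```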
-- Per-demand: ``FlowEntry.data.policy_metrics`` (dict) with totals collected by
+### demand_placement_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', demands_config: 'list[dict[str, Any]]', placement_rounds: 'int | str' = 'auto', include_flow_details: 'bool' = False, include_used_edges: 'bool' = False, _graph_cache: 'Optional[GraphCache]' = None, **kwargs) -> 'FlowIterationResult'
- the active FlowPolicy (e.g., ``spf_calls_total``, ``flows_created_total``,
- ``reopt_calls_total``, ``place_iterations_total``).
+Analyze traffic demand placement success rates using Core directly.
-- Per-iteration: ``FlowIterationResult.data.iteration_metrics`` aggregating the
+This function:
- same counters across all demands in the iteration. Use
- ``FlowIterationResult.summary.total_placed`` for placed volume totals.
+1. Builds Core infrastructure (graph, algorithms, flow_graph) or reuses the provided cache
+2. Expands demands into concrete (src, dst, volume) tuples
+3. Places each demand using Core's FlowPolicy with exclusion masks
+4. Aggregates results into FlowIterationResult
Args:
- network_view: NetworkView with potential exclusions applied.
+ network: Network instance.
+ excluded_nodes: Set of node names to exclude temporarily.
+ excluded_links: Set of link IDs to exclude temporarily.
demands_config: List of demand configurations (serializable dicts).
- placement_rounds: Number of placement optimization rounds.
+ placement_rounds: Number of placement optimization rounds (unused - Core handles internally).
include_flow_details: When True, include cost_distribution per flow.
- include_used_edges: When True, include set of used edges per demand in entry data
- as ``FlowEntry.data.edges`` with ``edges_kind='used'``.
+ include_used_edges: When True, include set of used edges per demand in entry data.
+ _graph_cache: Pre-built graph cache for fast repeated analysis.
**kwargs: Ignored. Accepted for interface compatibility.
Returns:
- FlowIterationResult describing this iteration. The ``data`` field contains
- ``{"iteration_metrics": { ... }}``.
+ FlowIterationResult describing this iteration.
-### max_flow_analysis(network_view: "'NetworkView'", source_regex: 'str', sink_regex: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = , include_flow_details: 'bool' = False, include_min_cut: 'bool' = False, **kwargs) -> 'FlowIterationResult'
+### max_flow_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', source_regex: 'str', sink_regex: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = FlowPlacement.PROPORTIONAL, include_flow_details: 'bool' = False, include_min_cut: 'bool' = False, _graph_cache: 'Optional[MaxFlowGraphCache]' = None, **kwargs) -> 'FlowIterationResult'
Analyze maximum flow capacity between node groups.
+When `_graph_cache` is provided, uses O(|excluded|) mask building instead
+of O(V+E) graph reconstruction for efficient repeated analysis.
+
Args:
- network_view: NetworkView with potential exclusions applied.
+ network: Network instance.
+ excluded_nodes: Set of node names to exclude temporarily.
+ excluded_links: Set of link IDs to exclude temporarily.
source_regex: Regex pattern for source node groups.
sink_regex: Regex pattern for sink node groups.
mode: Flow analysis mode ("combine" or "pairwise").
@@ -3255,22 +2638,34 @@ Args:
flow_placement: Flow placement strategy.
include_flow_details: Whether to collect cost distribution and similar details.
include_min_cut: Whether to include min-cut edge list in entry data.
+ _graph_cache: Pre-built cache for efficient repeated analysis.
**kwargs: Ignored. Accepted for interface compatibility.
Returns:
FlowIterationResult describing this iteration.
-### sensitivity_analysis(network_view: "'NetworkView'", source_regex: 'str', sink_regex: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = , **kwargs) -> 'dict[str, dict[str, float]]'
+### sensitivity_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', source_regex: 'str', sink_regex: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = FlowPlacement.PROPORTIONAL, _graph_cache: 'Optional[MaxFlowGraphCache]' = None, **kwargs) -> 'dict[str, dict[str, float]]'
Analyze component sensitivity to failures.
+Identifies critical edges (saturated edges) and computes the flow reduction
+caused by removing each one.
+
+When `_graph_cache` is provided, uses O(|excluded|) mask building instead
+of O(V+E) graph reconstruction for efficient repeated analysis.
+
Args:
- network_view: NetworkView with potential exclusions applied.
+ network: Network instance.
+ excluded_nodes: Set of node names to exclude temporarily.
+ excluded_links: Set of link IDs to exclude temporarily.
source_regex: Regex pattern for source node groups.
sink_regex: Regex pattern for sink node groups.
mode: Flow analysis mode ("combine" or "pairwise").
- shortest_path: Whether to use shortest paths only.
+ shortest_path: If True, use single-tier shortest-path flow (IP/IGP mode).
+ Reports only edges used under ECMP routing. If False (default), use
+ full iterative max-flow (SDN/TE mode) and report all saturated edges.
flow_placement: Flow placement strategy.
+ _graph_cache: Pre-built cache for efficient repeated analysis.
**kwargs: Ignored. Accepted for interface compatibility.
Returns:
@@ -3279,88 +2674,12 @@ Returns:
---
-## ngraph.monte_carlo.results
-
-Structured result objects for FailureManager analysis functions.
-
-These classes provide interfaces for accessing Monte Carlo analysis
-results from FailureManager convenience methods. Visualization is handled by
-specialized analyzer classes in the workflow.analysis module.
-
-### CapacityEnvelopeResults
-
-CapacityEnvelopeResults(envelopes: 'Dict[str, CapacityEnvelope]', failure_patterns: 'Dict[str, FailurePatternResult]', source_pattern: 'str', sink_pattern: 'str', mode: 'str', iterations: 'int', metadata: 'Dict[str, Any]')
-
-**Attributes:**
-
-- `envelopes` (Dict[str, CapacityEnvelope])
-- `failure_patterns` (Dict[str, FailurePatternResult])
-- `source_pattern` (str)
-- `sink_pattern` (str)
-- `mode` (str)
-- `iterations` (int)
-- `metadata` (Dict[str, Any])
-
-**Methods:**
-
-- `export_summary(self) -> 'Dict[str, Any]'`
-
-### DemandPlacementResults
-
-DemandPlacementResults(raw_results: 'dict[str, Any]', iterations: 'int', baseline: 'Optional[dict[str, Any]]' = None, failure_patterns: 'Optional[Dict[str, Any]]' = None, metadata: 'Optional[Dict[str, Any]]' = None)
-
-**Attributes:**
-
-- `raw_results` (dict[str, Any])
-- `iterations` (int)
-- `baseline` (Optional[dict[str, Any]])
-- `failure_patterns` (Optional[Dict[str, Any]])
-- `metadata` (Optional[Dict[str, Any]])
-
-### SensitivityResults
-
-Results from sensitivity Monte Carlo analysis.
-
-Attributes:
- raw_results: Raw results from FailureManager
- iterations: Number of Monte Carlo iterations
- baseline: Optional baseline result (no failures)
- component_scores: Aggregated component impact scores by flow
- failure_patterns: Dictionary mapping pattern keys to failure pattern results
- source_pattern: Source node regex pattern used in analysis
- sink_pattern: Sink node regex pattern used in analysis
- mode: Flow analysis mode ("combine" or "pairwise")
- metadata: Additional analysis metadata from FailureManager
-
-**Attributes:**
-
-- `raw_results` (dict[str, Any])
-- `iterations` (int)
-- `baseline` (Optional[dict[str, Any]])
-- `component_scores` (Optional[Dict[str, Dict[str, Dict[str, float]]]])
-- `failure_patterns` (Optional[Dict[str, Any]])
-- `source_pattern` (Optional[str])
-- `sink_pattern` (Optional[str])
-- `mode` (Optional[str])
-- `metadata` (Optional[Dict[str, Any]])
-
-**Methods:**
-
-- `component_impact_distribution(self) -> 'pd.DataFrame'` - Get component impact distribution as DataFrame.
-- `export_summary(self) -> 'Dict[str, Any]'` - Export summary for serialization.
-- `flow_keys(self) -> 'List[str]'` - Get list of all flow keys in results.
-- `get_failure_pattern_summary(self) -> 'pd.DataFrame'` - Get summary of failure patterns if available.
-- `get_flow_sensitivity(self, flow_key: 'str') -> 'Dict[str, Dict[str, float]]'` - Get component sensitivity scores for a specific flow.
-- `summary_statistics(self) -> 'Dict[str, Dict[str, float]]'` - Get summary statistics for component impact across all flows.
-- `to_dataframe(self) -> 'pd.DataFrame'` - Convert sensitivity results to DataFrame for analysis.
-
----
-
-## ngraph.monte_carlo.types
+## ngraph.exec.analysis.types
-Typed protocols for Monte Carlo analysis IPC payloads.
+Typed protocols for analysis IPC payloads.
-Defines lightweight, serializable structures used across worker boundaries.
+Defines lightweight, serializable structures used across worker boundaries
+during parallel analysis execution.
### FlowResult
@@ -3386,194 +2705,155 @@ Keys:
---
-## ngraph.profiling.profiler
-
-Profiling for NetGraph workflow execution.
+## ngraph.exec.demand.builder
-Provides CPU and wall-clock timing per workflow step using ``cProfile`` and
-optionally peak memory via ``tracemalloc``. Aggregates results into structured
-summaries and identifies time-dominant steps (bottlenecks).
+Builders for traffic matrices.
-### PerformanceProfiler
+Construct `TrafficMatrixSet` from raw dictionaries (e.g. parsed YAML).
+This logic was previously embedded in `Scenario.from_yaml`.
-CPU profiler for NetGraph workflow execution.
+### build_traffic_matrix_set(raw: 'Dict[str, List[dict]]') -> 'TrafficMatrixSet'
-Profiles workflow steps using cProfile and identifies bottlenecks.
+Build a `TrafficMatrixSet` from a mapping of name -> list of dicts.
-**Methods:**
+Args:
+ raw: Mapping where each key is a matrix name and each value is a list of
+ dictionaries with `TrafficDemand` constructor fields.
-- `analyze_performance(self) -> 'None'` - Analyze profiling results and identify bottlenecks.
-- `end_scenario(self) -> 'None'` - End profiling for the entire scenario execution.
-- `get_top_functions(self, step_name: 'str', limit: 'int' = 10) -> 'List[Tuple[str, float, int]]'` - Get the top CPU-consuming functions for a specific step.
-- `merge_child_profiles(self, profile_dir: 'Path', step_name: 'str') -> 'None'` - Merge child worker profiles into the parent step profile.
-- `profile_step(self, step_name: 'str', step_type: 'str') -> 'Generator[None, None, None]'` - Context manager for profiling individual workflow steps.
-- `save_detailed_profile(self, output_path: 'Path', step_name: 'Optional[str]' = None) -> 'None'` - Save detailed profiling data to a file.
-- `start_scenario(self) -> 'None'` - Start profiling for the entire scenario execution.
+Returns:
+ Initialized `TrafficMatrixSet` with constructed `TrafficDemand` objects.
-### PerformanceReporter
+Raises:
+ ValueError: If ``raw`` is not a mapping of name -> list[dict].
-Format and render performance profiling results.
+---
-Generates plain-text reports with timing analysis, bottleneck identification,
-and practical performance tuning suggestions.
+## ngraph.exec.demand.expand
-**Methods:**
+Demand expansion: converts TrafficDemand specs into concrete placement demands.
-- `generate_report(self) -> 'str'` - Generate performance report.
+Supports both pairwise and combine modes; combine mode uses augmentation-based pseudo nodes.
-### ProfileResults
+### DemandExpansion
-Profiling results for a scenario execution.
+Demand expansion result.
Attributes:
- step_profiles: List of individual step performance profiles.
- total_wall_time: Total wall-clock time for entire scenario.
- total_cpu_time: Total CPU time across all steps.
- total_function_calls: Total function calls across all steps.
- bottlenecks: List of performance bottlenecks (>10% execution time).
- analysis_summary: Performance metrics and statistics.
+ demands: Concrete demands ready for placement (sorted by priority).
+ augmentations: Augmentation edges for pseudo nodes (empty for pairwise).
**Attributes:**
-- `step_profiles` (List[StepProfile]) = []
-- `total_wall_time` (float) = 0.0
-- `total_cpu_time` (float) = 0.0
-- `total_function_calls` (int) = 0
-- `bottlenecks` (List[Dict[str, Any]]) = []
-- `analysis_summary` (Dict[str, Any]) = {}
+- `demands` (List[ExpandedDemand])
+- `augmentations` (List[AugmentationEdge])
-### StepProfile
+### ExpandedDemand
-Performance profile data for a single workflow step.
+Concrete demand ready for placement.
+
+Uses node names (not IDs) so expansion happens before graph building.
+Node IDs are resolved after the graph is built with pseudo nodes.
Attributes:
- step_name: Name of the workflow step.
- step_type: Type/class name of the workflow step.
- wall_time: Total wall-clock time in seconds.
- cpu_time: CPU time spent in step execution.
- function_calls: Number of function calls during execution.
- memory_peak: Peak memory usage during step in bytes (if available).
- cprofile_stats: Detailed cProfile statistics object.
- worker_profiles_merged: Number of worker profiles merged into this step.
+ src_name: Source node name (real or pseudo).
+ dst_name: Destination node name (real or pseudo).
+ volume: Traffic volume to place.
+ priority: Priority class (lower is higher priority).
+ policy_preset: FlowPolicy configuration preset.
+ demand_id: Parent TrafficDemand ID (for tracking).
**Attributes:**
-- `step_name` (str)
-- `step_type` (str)
-- `wall_time` (float)
-- `cpu_time` (float)
-- `function_calls` (int)
-- `memory_peak` (Optional[float])
-- `cprofile_stats` (Optional[pstats.Stats])
-- `worker_profiles_merged` (int) = 0
-
----
-
-## ngraph.profiling.reporter
-
----
-
-## ngraph.utils.ids
-
-### new_base64_uuid() -> 'str'
-
-Return a 22-character URL-safe Base64-encoded UUID without padding.
-
-The function generates a random version 4 UUID, encodes the 16 raw bytes
-using URL-safe Base64, removes the two trailing padding characters, and
-decodes to ASCII. The resulting string length is 22 characters.
-
-Returns:
- A 22-character URL-safe Base64 representation of a UUID4 without
- padding.
-
----
-
-## ngraph.utils.output_paths
-
-Utilities for building CLI artifact output paths.
-
-This module centralizes logic for composing file and directory paths for
-artifacts produced by the NetGraph CLI. Paths are built from an optional
-output directory, a prefix (usually derived from the scenario file or
-results file), and a per-artifact suffix.
-
-### build_artifact_path(output_dir: 'Optional[Path]', prefix: 'str', suffix: 'str') -> 'Path'
-
-Compose an artifact path as output_dir / (prefix + suffix).
-
-If ``output_dir`` is None, the path is created relative to the current
-working directory.
-
-Args:
- output_dir: Base directory for outputs; if None, use CWD.
- prefix: Filename prefix; usually derived from scenario or results stem.
- suffix: Per-artifact suffix including the dot (e.g. ".results.json").
+- `src_name` (str)
+- `dst_name` (str)
+- `volume` (float)
+- `priority` (int)
+- `policy_preset` (FlowPolicyPreset)
+- `demand_id` (str)
-Returns:
- The composed path.
+### expand_demands(network: 'Network', traffic_demands: 'List[TrafficDemand]', default_policy_preset: 'FlowPolicyPreset' = ) -> 'DemandExpansion'
-### ensure_parent_dir(path: 'Path') -> 'None'
+Expand TrafficDemand specifications into concrete demands with augmentations.
-Ensure the parent directory exists for a file path.
+Pure function that:
-### profiles_dir_for_run(scenario_path: 'Path', output_dir: 'Optional[Path]') -> 'Path'
+1. Selects node groups using Network's selection API
+2. Distributes volume based on mode (combine/pairwise)
+3. Generates augmentation edges for combine mode (pseudo nodes)
+4. Returns demands (node names) + augmentations
-Return the directory for child worker profiles for ``run --profile``.
+Node names are used (not IDs) so expansion happens BEFORE graph building.
+IDs are resolved after the graph is built with augmentations.
Args:
- scenario_path: The scenario YAML path.
- output_dir: Optional base output directory.
+ network: Network for node selection.
+ traffic_demands: High-level demand specifications.
+ default_policy_preset: Default policy if demand doesn't specify one.
Returns:
- Directory path where worker profiles should be stored.
+ DemandExpansion with demands and augmentations.
-### resolve_override_path(override: 'Optional[Path]', output_dir: 'Optional[Path]') -> 'Optional[Path]'
+Raises:
+ ValueError: If no demands could be expanded or unsupported mode.
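+
+An illustrative call (assumes `network` and a list of `TrafficDemand`
+objects are already constructed):
+
+```python
+expansion = expand_demands(network, traffic_demands)
+for demand in expansion.demands:  # sorted by priority
+    print(demand.src_name, demand.dst_name, demand.volume, demand.priority)
+print(len(expansion.augmentations))  # 0 for pairwise-only demands
+```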
-Resolve an override path with respect to an optional output directory.
+---
-- Absolute override paths are returned as-is.
-- Relative override paths are interpreted as relative to ``output_dir``
+## ngraph.exec.failure.manager
- when provided; otherwise relative to the current working directory.
+FailureManager for Monte Carlo failure analysis.
-Args:
- override: Path provided by the user to override the default.
- output_dir: Optional base directory for relative overrides.
+Provides the failure analysis engine for NetGraph. Supports parallel
+processing, graph caching, and failure policy handling for workflow steps
+and direct programmatic use.
-Returns:
- The resolved path or None if no override was provided.
+Performance characteristics:
+Time complexity: O(S + I × A / P), where S is one-time graph setup cost,
+I is iteration count, A is per-iteration analysis cost, and P is parallelism.
+Graph caching amortizes expensive graph construction across all iterations,
+and O(|excluded|) mask building replaces O(V+E) iteration.
-### results_path_for_run(scenario_path: 'Path', output_dir: 'Optional[Path]', results_override: 'Optional[Path]') -> 'Path'
+Space complexity: O(V + E + I × R), where V and E are node and link counts,
+and R is result size per iteration. The pre-built graph is shared across
+all iterations.
-Determine the results JSON path for the ``run`` command.
+Parallelism: The C++ Core backend releases the GIL during computation,
+enabling true parallelism with Python threads. With graph caching, most
+per-iteration work happens in GIL-free C++ code, achieving near-linear
+scaling with thread count.
-Behavior:
+### AnalysisFunction
-- If ``results_override`` is provided, return it (resolved relative to
+Protocol for analysis functions used with FailureManager.
- ``output_dir`` when that is specified, otherwise as-is).
+Analysis functions should take a Network, exclusion sets, and any additional
+keyword arguments, returning analysis results of any type.
-- Else if ``output_dir`` is provided, return ``output_dir/.results.json``.
-- Else, return ``.results.json`` in the current working directory.
+### FailureManager
-Args:
- scenario_path: The scenario YAML file path.
- output_dir: Optional base output directory.
- results_override: Optional explicit results file path.
+Failure analysis engine with Monte Carlo capabilities.
-Returns:
- The path where results should be written.
+This is the primary component for failure analysis in NetGraph.
+Provides parallel processing, worker caching, and failure
+policy handling for workflow steps and direct notebook usage.
-### scenario_prefix_from_path(scenario_path: 'Path') -> 'str'
+The FailureManager can execute any analysis function that takes a Network
+with exclusion sets and returns results, making it generic for different
+types of failure analysis (capacity, traffic, connectivity, etc.).
-Return a safe prefix derived from a scenario file path.
+Attributes:
+ network: The underlying network (not modified during analysis).
+ failure_policy_set: Set of named failure policies.
+ policy_name: Name of specific failure policy to use.
-Args:
- scenario_path: The scenario YAML file path.
+**Methods:**
-Returns:
- The scenario filename stem, trimmed of extensions.
+- `compute_exclusions(self, policy: "'FailurePolicy | None'" = None, seed_offset: 'int | None' = None) -> 'tuple[set[str], set[str]]'` - Compute set of nodes and links to exclude for a failure iteration.
+- `get_failure_policy(self) -> "'FailurePolicy | None'"` - Get failure policy for analysis.
+- `run_demand_placement_monte_carlo(self, demands_config: 'list[dict[str, Any]] | Any', iterations: 'int' = 100, parallelism: 'int' = 1, placement_rounds: 'int | str' = 'auto', baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_details: 'bool' = False, include_used_edges: 'bool' = False, **kwargs) -> 'Any'` - Analyze traffic demand placement success under failures.
+- `run_max_flow_monte_carlo(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement | str' = FlowPlacement.PROPORTIONAL, baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_summary: 'bool' = False, **kwargs) -> 'Any'` - Analyze maximum flow capacity envelopes between node groups under failures.
+- `run_monte_carlo_analysis(self, analysis_func: 'AnalysisFunction', iterations: 'int' = 1, parallelism: 'int' = 1, baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, **analysis_kwargs) -> 'dict[str, Any]'` - Run Monte Carlo failure analysis with any analysis function.
+- `run_sensitivity_monte_carlo(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement | str' = FlowPlacement.PROPORTIONAL, baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, **kwargs) -> 'dict[str, Any]'` - Analyze component criticality for flow capacity under failures.
+- `run_single_failure_scenario(self, analysis_func: 'AnalysisFunction', **kwargs) -> 'Any'` - Run a single failure scenario for convenience.
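+
+A hedged usage sketch (assumes `network` and `failure_policy_set` already
+exist and that the constructor takes the attributes listed above):
+
+```python
+from ngraph.exec.failure.manager import FailureManager
+
+# Run a Monte Carlo max-flow study under the named failure policy.
+fm = FailureManager(network, failure_policy_set, policy_name="default")
+results = fm.run_max_flow_monte_carlo(
+    source_path="^dc1/.*",   # illustrative regex
+    sink_path="^dc2/.*",
+    mode="combine",
+    iterations=100,
+    parallelism=4,
+    seed=42,                 # deterministic per-iteration seeds
+)
+```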
---
diff --git a/docs/reference/api.md b/docs/reference/api.md
index 50439ad..20c8204 100644
--- a/docs/reference/api.md
+++ b/docs/reference/api.md
@@ -10,15 +10,27 @@ Quick links:
This section provides a curated guide to NetGraph's Python API, organized by typical usage patterns.
+## Performance Notes
+
+NetGraph uses a **hybrid Python+C++ architecture**:
+
+- **High-level APIs** (Network, Scenario, Workflow) are pure Python with ergonomic interfaces
+- **Core algorithms** (shortest paths, max-flow, K-shortest paths) execute in optimized C++ via NetGraph-Core
+- **GIL released** during algorithm execution for true parallel processing
+- **Transparent integration**: You work with Python objects; Core acceleration is automatic
+
+All public APIs accept and return Python types (Network, Node, Link, FlowSummary, etc.).
+The C++ layer is an implementation detail you generally don't interact with directly.
+
## 1. Fundamentals
The core components that form the foundation of most NetGraph programs.
### Scenario
-**Purpose:** The main orchestrator that coordinates network topology, analysis workflows, and result collection.
+**Purpose:** Coordinates network topology, workflow execution, and result storage for complete analysis pipelines.
-**When to use:** Every NetGraph program starts with a Scenario - either loaded from YAML or built programmatically.
+**When to use:** Entry point for analysis workflows - load from YAML for declarative scenarios or construct programmatically for direct API access.
```python
from pathlib import Path
@@ -38,19 +50,20 @@ print(exported["workflow"].keys())
**Key Methods:**
-- `from_yaml(yaml_content)` - Load scenario from YAML string/file
-- `run()` - Execute the complete analysis workflow
+- `from_yaml(yaml_str, default_components=None)` - Parse scenario from YAML string (use `Path.read_text()` for file loading)
+- `run()` - Execute workflow steps in sequence
**Integration:** Scenario coordinates Network topology, workflow execution, and Results collection. Components can also be used independently for direct programmatic access.
### Network
-**Purpose:** Represents network topology and provides fundamental analysis capabilities like maximum flow calculation.
+**Purpose:** Represents network topology.
**When to use:** Core component for representing network structure. Used directly for programmatic topology creation or accessed via `scenario.network`.
```python
from ngraph.model.network import Network, Node, Link
+from ngraph.solver.maxflow import max_flow
# Create a tiny network
network = Network()
@@ -58,32 +71,31 @@ network.add_node(Node(name="n1"))
network.add_node(Node(name="n2"))
network.add_link(Link(source="n1", target="n2", capacity=100.0))
-# Calculate maximum flow (returns dict)
-max_flow = network.max_flow(
+# Calculate maximum flow (returns Dict[Tuple[str, str], float])
+flow_result = max_flow(
+ network,
source_path="n1",
sink_path="n2"
)
-print(max_flow)
+print(flow_result) # {("n1", "n2"): 100.0}
```
**Key Methods:**
- `add_node(node)`, `add_link(link)` - Build topology programmatically
-- `max_flow(source_path, sink_path, **options)` - Calculate maximum flow (returns dict)
- `nodes`, `links` - Access topology as dictionaries
**Key Concepts:**
-- **Node.disabled/Link.disabled:** Scenario-level configuration that persists across analyses
-- **Node selection:** Use regex patterns like `"datacenter.*"` or attribute directives like `attr:role` to group nodes by attribute (see DSL Node Selection)
+- **disabled flags:** Node.disabled and Link.disabled mark components as inactive in the scenario topology (use `excluded_nodes`/`excluded_links` parameters for temporary analysis-time exclusion)
+- **Node selection:** Use regex patterns anchored at start (e.g., `"^datacenter.*"`) or attribute directives (`"attr:role"`) to select and group nodes (see DSL Node Selection)
-**Integration:** Foundation for all analysis. Used directly or through NetworkView for filtered analysis.
### Results
**Purpose:** Centralized container for storing and retrieving analysis results from workflow steps.
-**When to use:** Automatically managed by `scenario.results`. Used for storing custom analysis results and retrieving outputs from workflow steps.
+**When to use:** Managed by Scenario; stores workflow step outputs with metadata. Access via `scenario.results` for result retrieval and custom step implementation.
```python
# Access results from scenario
@@ -96,10 +108,11 @@ print(list(all_data["steps"].keys()))
**Key Methods:**
-- `enter_step(step_name)` / `exit_step()` - Scope mutations to a step (managed by WorkflowStep)
-- `put(key, value)` - Store under active step; key is `"metadata"` or `"data"`
-- `get_step(step_name)` - Read a step’s raw dict (for explicit cross-step reads)
-- `to_dict()` - Export with shape `{workflow, steps, scenario}` (JSON-safe)
+- `enter_step(step_name)` / `exit_step()` - Scope writes to a step (managed by WorkflowStep.execute())
+- `put(key, value)` - Store value under active step; key must be `"metadata"` or `"data"`
+- `get(key, default=None)` - Retrieve value from active step scope
+- `get_step(step_name)` - Retrieve complete step dict for cross-step reads
+- `to_dict()` - Export results with shape `{workflow, steps, scenario}` (JSON-serializable)
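+
+A short sketch of step-scoped writes (normally performed inside
+`WorkflowStep.execute()`; `results` here is a `Results` instance):
+
+```python
+# Writes are only valid inside an active step scope.
+results.enter_step("my_step")
+results.put("metadata", {"step_type": "CustomStep"})
+results.put("data", {"value": 42})
+results.exit_step()
+
+print(results.get_step("my_step")["data"])  # {'value': 42}
+```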
**Integration:** Used by all workflow steps for result storage. Provides consistent access pattern for analysis outputs.
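+
+A minimal sketch of this step-scoping flow (normally driven by `WorkflowStep.execute()`; `scenario` is assumed to exist):
+
+```python
+results = scenario.results
+
+results.enter_step("custom_step")               # open the step scope
+results.put("metadata", {"duration_sec": 1.2})  # key must be "metadata" or "data"
+results.put("data", {"total_flow": 100.0})
+print(results.get("data"))                      # read within the active scope
+results.exit_step()                             # close the step scope
+
+print(results.get_step("custom_step"))          # explicit cross-step read
+```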
@@ -111,26 +124,31 @@ Essential analysis capabilities for network evaluation.
**Purpose:** Calculate network flows between source and sink groups with various policies and constraints.
-**When to use:** Fundamental analysis for understanding network capacity, bottlenecks, and traffic engineering scenarios.
+**When to use:** Compute network capacity between source and sink groups. Supports multiple flow placement policies and failure scenarios.
+
+**Performance:** Max-flow computation executes in C++ with the GIL released, enabling concurrent execution. The algorithm uses successive shortest-path augmentation with blocking flows; see the design document for complexity analysis.
```python
-from ngraph.algorithms.base import FlowPlacement
+from ngraph.types.base import FlowPlacement
+from ngraph.solver.maxflow import max_flow, max_flow_with_details
# Maximum flow between group patterns (combine all sources/sinks)
-max_flow = network.max_flow(
+flow_result = max_flow(
+ network,
source_path="^metro1/.*",
sink_path="^metro5/.*",
mode="combine"
)
# Detailed flow analysis with cost distribution
-result = network.max_flow_with_summary(
+result = max_flow_with_details(
+ network,
source_path="^metro1/.*",
sink_path="^metro5/.*",
mode="combine"
)
-(src_label, sink_label), (flow_value, summary) = next(iter(result.items()))
-print(summary.cost_distribution)
+(src_label, sink_label), summary = next(iter(result.items()))
+print(summary.cost_distribution) # Dict[float, float] mapping cost to flow volume
```
**Key Options:**
@@ -141,39 +159,40 @@ print(summary.cost_distribution)
**Advanced Features:**
-- **Cost Distribution**: `FlowSummary.cost_distribution` provides flow volume breakdown by path cost for latency span analysis and performance characterization
-- **Analytics**: Edge flows, residual capacities, min-cut analysis, and reachability information
+- **Cost Distribution**: `FlowSummary.cost_distribution` maps path cost to flow volume at that cost tier
+- **Min-cut**: `FlowSummary.min_cut` contains saturated edges crossing the source-sink cut
-**Integration:** Available on both Network and NetworkView objects. Foundation for FailureManager Monte Carlo analysis.
+**Integration:** Uses `excluded_nodes` and `excluded_links` parameters for filtered analysis. Foundation for FailureManager Monte Carlo analysis.
-### NetworkView
+### Filtered Analysis (Failure Simulation)
-**Purpose:** Provides filtered view of network topology for failure analysis without modifying the base network.
+**Purpose:** Execute analysis on filtered topology views using exclusion sets rather than graph mutation.
-**When to use:** Simulate component failures, analyze degraded network states, or perform parallel analysis with different exclusions.
+**When to use:** Failure simulation, sensitivity analysis, or concurrent evaluation of multiple degraded states.
```python
-from ngraph.model.view import NetworkView
-
-# Create view with failed components (for failure simulation)
-failed_view = NetworkView.from_excluded_sets(
+# Identify links to fail (e.g., all links from "n2")
+failed_links = {
+ l.id for l in network.links.values()
+ if l.source == "n2" or l.target == "n2"
+}
+
+# Analyze degraded network by passing excluded_links
+degraded_flow = max_flow(
network,
- excluded_nodes={"n2"},
- excluded_links=set()
+ source_path="n1",
+ sink_path="n3",
+ excluded_links=failed_links
)
-
-# Analyze degraded network
-degraded_flow = failed_view.max_flow("n1", "n2")
print(degraded_flow)
```
**Key Features:**
-- **Read-only overlay:** Combines scenario-disabled and analysis-excluded elements
-- **Concurrent analysis:** Supports different failure scenarios in parallel
-- **Identical API:** Same analysis methods as Network
+- **Read-only filtering:** Uses analysis-time exclusion lists without mutating the Network
+- **Concurrent analysis:** Supports different failure scenarios in parallel (thread-safe)
+- **Identical API:** Uses the same solver functions (`max_flow`, etc.) with optional exclusion arguments
-**Integration:** Used internally by FailureManager for Monte Carlo analysis. Enables concurrent failure simulations without network state conflicts.
## 3. Advanced Analysis
@@ -181,24 +200,23 @@ Sophisticated analysis capabilities using Monte Carlo methods and parallel proce
### FailureManager
-**Purpose:** Authoritative Monte Carlo failure analysis engine with parallel processing and result aggregation.
+**Purpose:** Monte Carlo failure analysis engine with parallel execution and automatic result aggregation.
-**When to use:** Capacity envelope analysis, demand placement studies, component sensitivity analysis, or custom Monte Carlo simulations.
+**When to use:** Monte Carlo failure analysis for capacity distribution, demand placement, or component criticality studies.
```python
-from ngraph.failure.manager.manager import FailureManager
-from ngraph.failure.policy import FailurePolicy, FailureRule
-from ngraph.failure.policy_set import FailurePolicySet
+from ngraph.exec.failure.manager import FailureManager
+from ngraph.model.failure.policy import FailurePolicy, FailureMode, FailureRule
+from ngraph.model.failure.policy_set import FailurePolicySet
policy_set = FailurePolicySet()
policy = FailurePolicy(modes=[
- # One mode with a single random link failure
- {
- "weight": 1.0,
- "rules": [FailureRule(entity_scope="link", rule_type="choice", count=1)]
- }
+ FailureMode(
+ weight=1.0,
+ rules=[FailureRule(entity_scope="link", rule_type="choice", count=1)]
+ )
])
-policy_set.add("one_link", policy)
+policy_set.policies["one_link"] = policy
manager = FailureManager(
network=network,
@@ -229,30 +247,34 @@ results = manager.run_max_flow_monte_carlo(
- **Reproducible results** with seed support
- **Failure policy integration** for realistic failure scenarios
-**Integration:** Uses NetworkView for isolated failure simulation. Returns specialized result objects for statistical analysis.
+**Integration:** Uses `excluded_nodes`/`excluded_links` for isolated failure simulation. Returns specialized result objects for statistical analysis.
### Monte Carlo Results
**Purpose:** Rich result objects with statistical analysis and visualization capabilities.
-**When to use:** Analyzing outputs from FailureManager convenience methods - provides pandas integration and statistical summaries.
+**When to use:** Process results from FailureManager Monte Carlo methods. Provides statistical aggregation and serialization.
```python
from ngraph.results.flow import FlowEntry, FlowIterationResult, FlowSummary
-flows = [
- FlowEntry(
- source="n1", destination="n2", priority=0,
- demand=10.0, placed=10.0, dropped=0.0,
- cost_distribution={2.0: 6.0, 4.0: 4.0}, data={}
- )
-]
+# Construct a flow entry for a single iteration
+flow = FlowEntry(
+ source="n1", destination="n2", priority=0,
+ demand=10.0, placed=10.0, dropped=0.0,
+ cost_distribution={2.0: 6.0, 4.0: 4.0}
+)
summary = FlowSummary(
total_demand=10.0, total_placed=10.0, overall_ratio=1.0,
- dropped_flows=0, num_flows=len(flows)
+ dropped_flows=0, num_flows=1
)
-iteration = FlowIterationResult(flows=flows, summary=summary)
-iteration_dict = iteration.to_dict()
+iteration = FlowIterationResult(
+ failure_id="baseline",
+ flows=[flow],
+ summary=summary
+)
+# Export to JSON-serializable dict
+result_dict = iteration.to_dict()
```
**Key Result Types:**
@@ -275,11 +297,11 @@ Working with analysis outputs and implementing custom result storage.
**When to use:** Working with stored analysis results, implementing custom workflow steps, or exporting data for external analysis.
```python
-from ngraph.results.store import Results
+from ngraph.results import Results
-# Access exported results
+# Access results after scenario execution
exported = scenario.results.to_dict()
-print(exported["steps"].keys())
+print(list(exported["steps"].keys()))
```
**Key Classes:**
@@ -292,7 +314,7 @@ print(exported["steps"].keys())
### Export Patterns
-**Purpose:** Best practices for storing results in custom workflow steps and analysis functions.
+**Purpose:** Patterns for result storage in custom workflow steps with consistent serialization.
```python
from ngraph.workflow.base import WorkflowStep
@@ -311,10 +333,10 @@ class CustomAnalysis(WorkflowStep):
**Storage Conventions:**
-- Use `self.name` as step identifier for result storage
-- Convert complex objects using `to_dict()` before storage
-- Use descriptive keys like `"capacity_envelopes"`, `"network_statistics"`
-- Results are automatically serialized via `results.to_dict()`
+- Store step metadata using `results.put("metadata", {})`
+- Store step data using `results.put("data", {...})`
+- Convert complex objects to dicts via `to_dict()` before storage
+- Export complete results via `scenario.results.to_dict()`
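+
+A hedged sketch of a step following these conventions (the abstract method name `run` is an assumption; adjust to the real `WorkflowStep` interface):
+
+```python
+from ngraph.workflow.base import WorkflowStep
+
+class NodeCountStep(WorkflowStep):
+    """Illustrative step: stores the scenario's node count."""
+
+    def run(self, scenario) -> None:  # method name assumed for this sketch
+        # WorkflowStep.execute() scopes these writes to this step's name
+        scenario.results.put("metadata", {"description": "counts nodes"})
+        scenario.results.put("data", {"node_count": len(scenario.network.nodes)})
+```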
## 5. Automation
@@ -324,7 +346,7 @@ Workflow orchestration and reusable network templates.
**Purpose:** Automated analysis sequences with standardized result storage and execution order.
-**When to use:** Complex multi-step analysis, reproducible analysis pipelines, or when you need automatic result collection and metadata tracking.
+**When to use:** Multi-step analysis pipelines with automatic result storage, execution ordering, and metadata tracking.
Available workflow steps:
@@ -340,7 +362,7 @@ Available workflow steps:
**Purpose:** Reusable network topology templates defined in YAML for complex, hierarchical network structures.
-**When to use:** Creating standardized network architectures, multi-pod topologies, or when you need parameterized network generation.
+**When to use:** Define reusable topology templates with parameterization. Common for data center fabrics and hierarchical network structures.
```python
# Blueprints are typically defined in YAML and used via Scenario
@@ -355,24 +377,31 @@ Advanced capabilities for custom analysis and low-level operations.
### Utilities & Helpers
-**Purpose:** Graph format conversion and direct access to low-level algorithms.
+**Purpose:** Graph format conversion and access to adapter layer for advanced use cases.
+
+**When to use:** Custom analysis requiring direct access to Core graphs, or when built-in analysis methods are insufficient.
-**When to use:** Custom analysis requiring NetworkX integration, performance-critical algorithms, or when you need direct control over graph operations.
+**Note:** For most use cases, use the high-level Network API. The adapter layer (`ngraph.adapters.core`) is available for advanced scenarios requiring direct Core graph access.
```python
-from ngraph.graph.convert import to_digraph, from_digraph
-from ngraph.algorithms.spf import spf
-from ngraph.algorithms.max_flow import calc_max_flow
+from ngraph.adapters.core import build_graph
+import netgraph_core
+
+# Access Core layer for custom algorithm implementation
+graph_handle, multidigraph, edge_mapper, node_mapper = build_graph(
+ network, add_reverse=True
+)
-# Convert to NetworkX for custom algorithms
-nx_graph = to_digraph(scenario.network.to_strict_multidigraph())
+# Execute Core algorithms directly (operate on int node IDs, return NumPy arrays)
+backend = netgraph_core.Backend.cpu()
+algs = netgraph_core.Algorithms(backend)
-# Direct algorithm access
-costs, predecessors = spf(graph, source_node)
-max_flow_value = calc_max_flow(graph, source, sink)
+src_id = node_mapper.to_id("A")
+costs, predecessors = algs.spf(graph_handle, src_id)
+# costs: numpy array of float64 distances from src_id to each node
```
-**Integration:** Provides bridge between NetGraph and NetworkX ecosystems. Used when built-in analysis methods are insufficient.
+**Integration:** Direct Core access for custom algorithm implementation. Built-in solver functions (`max_flow`, etc.) already provide Core integration.
### Error Handling
@@ -395,4 +424,6 @@ except Exception as e:
**Common Patterns:**
-- Use `results.get()` with `default`
+- Catch `ValueError` for YAML schema validation failures
+- Use `results.get(key, default=None)` for optional result values
+- Validate presence of expected workflow steps in exported results
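+
+A short sketch combining these patterns (the `Scenario` import path and the step name `"capacity_analysis"` are illustrative):
+
+```python
+from pathlib import Path
+
+from ngraph.scenario import Scenario  # import path assumed
+
+yaml_text = Path("scenario.yaml").read_text()
+try:
+    scenario = Scenario.from_yaml(yaml_text)
+except ValueError as exc:  # YAML schema validation failure
+    raise SystemExit(f"Invalid scenario: {exc}")
+
+scenario.run()
+exported = scenario.results.to_dict()
+if "capacity_analysis" not in exported["steps"]:
+    raise KeyError("Expected workflow step missing from results")
+```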
diff --git a/docs/reference/design.md b/docs/reference/design.md
index 359a1cf..b63e90e 100644
--- a/docs/reference/design.md
+++ b/docs/reference/design.md
@@ -4,29 +4,42 @@ This document describes NetGraph's internal design: scenario DSL, data models, e
## Overview
-NetGraph is a network scenario analysis engine. It takes a scenario (defined in a YAML DSL) as input, builds a directed multigraph model of the network, and runs a configurable workflow of analysis steps (like traffic placement or max-flow capacity) to produce structured results. Key components include:
+NetGraph is a network scenario analysis engine using a **hybrid Python+C++ architecture**. It takes a scenario (defined in a YAML DSL) as input, builds a directed multigraph model of the network, and runs a configurable workflow of analysis steps (like traffic placement or max-flow capacity) to produce structured results.
-- CLI and API: Entry points to load scenarios and invoke analyses.
+### Architecture Layers
-- Scenario DSL Parser: Validates and expands the YAML scenario into an internal model.
+**Python Layer (NetGraph):**
-- Domain Model: In-memory representation of nodes, links, risk groups, etc., with selection and grouping utilities.
+- CLI and API: Entry points to load scenarios and invoke analyses
+- Scenario DSL Parser: Validates and expands the YAML scenario into an internal model
+- Domain Model: In-memory representation of nodes, links, risk groups, etc., with selection and grouping utilities
+- Managers: Orchestrators for higher-level behaviors (demand expansion, failure enumeration)
+- Workflow Engine: Composes steps into end-to-end analyses, storing outputs in a results store
+- Results Store: Collects outputs and metadata from each step, enabling structured JSON export
+- Adapter Layer: Translates between Python domain objects and C++ graph representations
-- NetworkView: A read-only overlay on the model to simulate failures or what-if scenarios without altering the base network.
+**C++ Layer (NetGraph-Core):**
-- Graph construction: Builds a `StrictMultiDiGraph` (a `networkx.MultiDiGraph` subclass) from the `Network` or `NetworkView` for consumption by SPF/KSP/MaxFlow (compact or full, optional reverse edges).
+- StrictMultiDiGraph: Immutable directed multigraph with CSR adjacency representation
+- Shortest Paths (SPF): Dijkstra's algorithm with multipath support and configurable edge selection
+- K-Shortest Paths: Yen's algorithm for finding k-shortest simple paths
+- Max-Flow: Successive shortest paths with blocking flow augmentation and configurable flow placement policies
+- Backend Interface: Abstraction for algorithm execution (CPU backend provided)
-- Algorithms: Core graph algorithms: shortest paths (SPF and KSP, k-shortest paths) and max-flow, with configurable edge selection and flow splitting strategies.
+### Integration Points
-- Managers: Orchestrators for higher-level behaviors, e.g., expanding demands over time or enumerating failure cases.
+The Python layer uses an adapter (`ngraph.adapters.core`) to:
-- Workflow Engine: Composes steps (graph build, demand placement, max flow, etc.) into end-to-end analyses, storing outputs in a results store.
+1. Build Core graphs from Network instances with optional exclusions
+2. Map node names (str) ↔ NodeId (int32)
+3. Map link IDs (str) ↔ EdgeId (int32) and ext_edge_id (int64)
+4. Translate results (costs, flows, paths) back to scenario-level objects
-- Results Store: Collects outputs and metadata from each step, enabling structured JSON export for post-analysis.
+Core algorithms release the GIL during execution, enabling concurrent Python threads to execute analysis in parallel with minimal Python-level overhead.
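+
+Because the GIL is released, plain Python threads scale for independent analyses. A sketch (assuming `network` is already built):
+
+```python
+from concurrent.futures import ThreadPoolExecutor
+
+from ngraph.solver.maxflow import max_flow
+
+pairs = [("^metro1/.*", "^metro5/.*"), ("^metro2/.*", "^metro5/.*")]
+with ThreadPoolExecutor() as pool:
+    futures = [
+        pool.submit(max_flow, network, source_path=s, sink_path=t, mode="combine")
+        for s, t in pairs
+    ]
+    flows = [f.result() for f in futures]  # each is Dict[Tuple[str, str], float]
+```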
### Execution Flow
-The diagram below shows simplified end-to-end execution flow from scenario input to results.
+The diagram below shows the architecture and end-to-end execution flow from scenario input through both Python and C++ layers to final results. The Python layer handles scenario loading, workflow orchestration, and result aggregation, while compute-intensive graph algorithms execute in C++ with the GIL released for parallel execution.

@@ -60,7 +73,7 @@ The loader validates and expands DSL definitions into concrete nodes and links.
## Data Model
-Once the scenario is parsed and expanded, NetGraph represents the network with a set of core model classes. These define the authoritative in-memory representation of the scenario and enforce invariants (like unique names).
+Once the scenario is parsed and expanded, NetGraph represents the network with a set of core model classes. These define the in-memory representation of the scenario topology and enforce structural invariants (unique node names, valid link endpoints).
### Node
@@ -118,7 +131,7 @@ A Network is the container class that holds all nodes, links, and top-level risk
- risk_groups: Dict[name, RiskGroup],
-The Network is the authoritative source of truth for the topology. It provides methods to add nodes and links, enforcing invariants. For example, adding a link checks that the source and target nodes exist in the network, and prevents duplicate node additions. The Network also never removes nodes or links; instead, disabled flags are used to mark elements inactive.
+Network is the container for scenario topology. It enforces invariants during construction: adding a link validates that source and target nodes exist; adding a node rejects duplicates by name. Components are never removed from the Network; the `disabled` flag marks them inactive. The Network also maintains a selection cache for `select_node_groups_by_path` to avoid repeated regex/attribute queries.
### Node and Link Selection
@@ -132,258 +145,523 @@ This selection mechanism allows workflow steps and API calls to refer to nodes f
### Disabled Elements
-Nodes or links marked as disabled=True represent elements present in the design but out of service for the analysis. The base model keeps them in the collection but they will be ignored when building the analysis graph or when creating views (the disabled flag is always checked and such elements filtered out). This design preserves topology information (e.g., you know a link exists but is just turned off) and allows easily enabling it later if needed.
+Nodes or links marked as disabled=True represent elements present in the design but out of service for the analysis. The base model keeps them in the collection but solver functions filter them out when selecting active nodes. This design preserves topology information (e.g., you know a link exists but is just turned off) and allows easily enabling it later if needed.
-### NetworkView
+### Filtered Analysis (Exclusions)
-To simulate failures or other what-if scenarios without modifying the base network, NetGraph uses the NetworkView class. A NetworkView is essentially a read-only filtered view of a Network.
-
-You create a NetworkView by specifying a base Network and sets of nodes and links to exclude. For example:
+To simulate failures or other what-if scenarios without modifying the base network, NetGraph uses analysis-time exclusions. Instead of creating a stateful view object, you pass sets of excluded nodes and links directly to analysis functions.
```python
-view = NetworkView.from_excluded_sets(base_network,
- excluded_nodes={"Node5"},
- excluded_links={"A|B|xyz123"})
+# Analyze with specific exclusions
+results = max_flow(
+ network,
+ source_path="A",
+ sink_path="B",
+ excluded_nodes={"Node5"},
+ excluded_links={"A|B|xyz123"}
+)
```
-This will behave like the original network except that Node5 and the link with id "A|B|xyz123" are considered "hidden".
+This approach avoids mutating the base graph when simulating failures (e.g., deleting nodes or toggling flags). It separates the static scenario (base network) from dynamic conditions (exclusions), enabling thread-safe parallel analyses and eliminating deep copies for each failure scenario.
+
+**Implementation:** For repeated analysis (Monte Carlo, FailureManager), exclusions are applied via boolean masks passed to Core algorithms. The graph is built once without exclusions, and masks disable specific elements at algorithm execution time. This enables O(|excluded|) mask updates rather than O(V+E) graph rebuilding. For one-off solver calls, exclusions may be applied during graph construction for simplicity.
+
+Multiple concurrent analyses can run on the same base network with different exclusion sets. This is important for performing parallel simulations (e.g., analyzing many failure combinations in a Monte Carlo) efficiently.
+
+### Graph Construction
+
+NetGraph builds graphs through an adapter layer that translates from the Python domain model to NetGraph-Core's C++ representation.
-The view is read-only: it does not allow adding or removing nodes or links. It delegates attribute access to the base network but filters out anything in the excluded sets or disabled in the scenario. For example, view.nodes returns a dict of Node objects excluding any hidden node.
+**Python Side (`ngraph.adapters.core`):**
-Similarly, view.links provides only links not hidden. This means algorithms run on a view automatically ignore the failed components.
+Two construction patterns are provided:
-Multiple concurrent views can be created on the same base network. This is important for performing parallel simulations (e.g., analyzing many failure combinations in a Monte Carlo) without copying the entire network each time. Each view carries its own exclusion sets.
+- `build_graph()`: Constructs a Core graph with optional exclusions applied during build. Used for one-off solver calls where caching overhead isn't justified.
+- `build_graph_cache()`: Returns a `GraphCache` with pre-built components and mappings for efficient repeated analysis. Graph is built without exclusions; per-iteration exclusions are applied via masks at algorithm call time.
-Importantly, a NetworkView can be converted into a graph just like a full Network. NetworkView.to_strict_multidigraph(add_reverse=True, compact=True) will build the directed graph of the visible portion of the network. Internally, the view uses the base network's graph-building function with the exclusion sets applied. The first time this is called for a given parameter combination, the result is cached inside the view. Subsequent calls with the same flags retrieve the cached graph instead of rebuilding. This caching avoids redundant work when running multiple algorithms on the same view (e.g., running many flow computations on the same failed topology) and is crucial for performance.
+**Graph Construction Steps:**
-The NetworkView overlay avoids mutating the base graph when simulating failures (e.g., deleting nodes or toggling flags). It separates the static scenario (base network) from dynamic conditions (the view), enabling thread-safe parallel analyses and eliminating deep copies for each failure scenario. This improves performance and keeps semantics clear.
+- Collects nodes from Network (real + optional pseudo nodes for augmentation)
+- Assigns stable node IDs (sorted by name for determinism)
+- Encodes link_id + direction as ext_edge_id (packed int64)
+- Constructs NumPy arrays (src, dst, capacity, cost, ext_edge_ids)
+- Supports augmentation edges (e.g., pseudo-source/sink for multi-source max-flow)
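+
+Illustratively, the adapter hands Core arrays shaped like the following (values are placeholders; the ext_edge_id packing scheme is internal to the adapter):
+
+```python
+import numpy as np
+
+src = np.array([0, 1], dtype=np.int32)             # edge sources (NodeId)
+dst = np.array([1, 2], dtype=np.int32)             # edge targets (NodeId)
+capacity = np.array([100.0, 40.0])                 # float64 capacities
+cost = np.array([1, 1], dtype=np.int64)            # int64 costs
+ext_edge_ids = np.array([10, 11], dtype=np.int64)  # placeholder packed IDs
+```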
-### Graph Construction (StrictMultiDiGraph)
+**GraphCache for Repeated Analysis:**
-NetGraph uses a custom graph implementation, StrictMultiDiGraph, to represent the network topology for analysis algorithms. This class is a subclass of networkx.MultiDiGraph with stricter semantics and performance tweaks.
+The `GraphCache` dataclass holds pre-built graph components:
-- Explicit node management: The graph does not auto-create nodes. If you try to add an edge with a non-existent node, it raises an error instead of silently adding the node. This ensures that any edge references a valid node from the model (catching bugs where a node might be misspelled or not added).
+- `graph_handle`, `multidigraph`: Core graph structures
+- `node_mapper`, `edge_mapper`: Name ↔ ID translation
+- `algorithms`: Core Algorithms instance
+- `disabled_node_ids`, `disabled_link_ids`: Pre-computed disabled topology
+- `link_id_to_edge_indices`: Pre-computed mapping for O(|excluded|) mask building
-- No duplicate nodes; unique edge keys: Adding an existing node raises a ValueError. Parallel edges are allowed, but each edge key must be unique. If an explicit key is provided to `add_edge` and it's already in use, an error is raised; if no key is provided, the graph generates a new unique key.
+When analyzing many failure scenarios, the graph is built once and exclusions are applied via boolean masks using `build_node_mask()` and `build_edge_mask()`. These mask builders automatically include disabled nodes/links from the cache, ensuring disabled topology is always excluded. This avoids rebuilding the graph for each iteration, providing significant speedup for Monte Carlo simulations.
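+
+A hedged sketch of the cached path (helper names are from this document; exact signatures assumed):
+
+```python
+from ngraph.adapters.core import build_graph_cache, build_node_mask, build_edge_mask
+
+# Build once; reuse across Monte Carlo iterations
+cache = build_graph_cache(network, add_reverse=True)
+
+# Per-iteration exclusions become boolean masks (disabled topology auto-included)
+node_mask = build_node_mask(cache, excluded_nodes={"Node5"})
+edge_mask = build_edge_mask(cache, excluded_links={"A|B|xyz123"})
+```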
-- Stable edge identifiers: The edge keys (IDs) are monotonically increasing integers assigned in insertion order and never reused. This provides globally unique, stable keys, simplifying flow analysis and result mapping.
+**Disabled Topology Handling:**
-- Fast deep copy: Copying large graphs in Python can be expensive. StrictMultiDiGraph.copy() by default uses a pickle-based deep copy (serializing and deserializing the graph), which in many cases outperforms the iterative copy provided by NetworkX. This is especially beneficial when duplicating graphs for separate computations.
+The `get_disabled_exclusions()` helper collects disabled nodes/links from a Network and merges them with user-provided exclusions. This is used when calling `build_graph()` directly (non-cached path). For the cached path, disabled topology is pre-computed in `GraphCache` and automatically applied by the mask builders.
-- Compatibility with NetworkX: StrictMultiDiGraph is compatible with NetworkX's MultiDiGraph API. It can be used as a drop-in replacement for MultiDiGraph in NetworkX code. All NetworkX algorithms and utilities supporting MultiDiGraph can be used with StrictMultiDiGraph.
+**C++ Side (`netgraph_core.StrictMultiDiGraph`):**
-The Network (or NetworkView) produces a StrictMultiDiGraph via to_strict_multidigraph(add_reverse=True, compact=...).
+- Immutable directed multigraph using Compressed Sparse Row (CSR) adjacency
+- Nodes identified by NodeId (int32), edges by EdgeId (int32)
+- Each edge stores capacity (float64), cost (int64), and ext_edge_id (int64)
+- Edges sorted by (cost, src, dst) for deterministic algorithm behavior
+- Zero-copy NumPy views for array access (capacities, costs, ext_edge_ids)
+- Efficient neighbor iteration via CSR structure
-If compact=True, the graph is built with minimal attributes: only each edge's capacity and cost are set, and edge keys are auto-assigned integers. Node attributes are omitted in this mode. The original link ID and any custom attributes are not carried to reduce overhead. If compact=False, the graph includes full fidelity: nodes carry their attrs, and each edge is added with the original link's id (link_id) and all its attrs from the model. In full mode, the edge key in the StrictMultiDiGraph is still a unique integer, but the original link id is stored as an attribute on the edge for traceability.
+**Edge Direction Handling:**
-If add_reverse=True (the default), for every Link in the network, a reverse edge is also added. This effectively makes the analysis graph bidirectional even though the model stores directed links. In other words, the algorithms will consider traffic flowing in both directions on each physical link unless add_reverse is turned off. The reverse edge uses the same link attributes; however, it is a distinct edge object in the graph with its own unique key.
+If `add_reverse=True` (default), the adapter creates bidirectional edges for each network link:
-The rationale for compact=True by default in analyses is performance: stripping down to just capacity and cost (which are floats) yields a lighter-weight graph, which improves algorithm speed by reducing Python overhead (fewer attributes to copy or inspect).
+- Forward edge: original link direction with ext_edge_id encoding (link_id, 'fwd')
+- Reverse edge: opposite direction with ext_edge_id encoding (link_id, 'rev')
+
+This allows algorithms to consider traffic flowing in both directions on physical links.
+The Core graph itself is always directed; bidirectionality is achieved by explicit reverse edges.
+
+**Augmentation Support:**
+
+For algorithms requiring virtual source/sink nodes (e.g., multi-source max-flow), the adapter
+adds augmentation edges with ext_edge_id = -1 (sentinel for non-network edges). These edges
+are not mapped back to scenario links in results.
### Analysis Algorithms
-NetGraph's core algorithms revolve around path-finding and flow computation on the graph. These algorithms are designed to handle the multi-graph nature (parallel edges), cost metrics, and varying selection policies. Performance is critical, so certain specialized code paths and heuristics are used.
+NetGraph's core algorithms execute in C++ via NetGraph-Core. Algorithms operate on the immutable StrictMultiDiGraph and support masking (runtime exclusions via boolean arrays) for efficient repeated analysis under different failure scenarios without graph reconstruction.
+
+All Core algorithms release the Python GIL during execution, enabling concurrent execution across multiple Python threads without GIL contention.
### Shortest-Path First (SPF) Algorithm
-NetGraph uses a Dijkstra-like algorithm with pluggable edge selection and optional multipath predecessor recording. Key features of `ngraph.algorithms.spf.spf` include:
+Implemented in C++ (`netgraph::core::shortest_paths`), using Dijkstra's algorithm
+with configurable edge selection and optional multipath predecessor recording.
-Edge Selection Policies: Rather than always choosing a single smallest-weight edge per step, the algorithm evaluates edges per neighbor. The behavior is governed by an EdgeSelect policy. For example:
+**Core Features:**
-- EdgeSelect.ALL_MIN_COST (default): for each neighbor v, include all parallel edges u->v that achieve the minimal edge cost among u->v edges.
+**Edge Selection Policies:**
-- EdgeSelect.SINGLE_MIN_COST: for each neighbor v, choose a single u->v edge with minimal edge cost (ties broken deterministically).
+The algorithm evaluates parallel edges per neighbor using `EdgeSelection` configuration:
-- EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING: for each neighbor v, consider only u->v edges with residual capacity and include all with minimal edge cost among those.
+- `multi_edge=true` (default): For each (u,v) pair, include all parallel edges that achieve the minimal cost
+- `multi_edge=false`: Select single edge per (u,v) pair using tie-breaking:
+ - `PreferHigherResidual`: Choose edge with highest residual capacity (secondary: lowest edge ID)
+ - `Deterministic`: Choose edge with lowest edge ID for reproducibility
+- `require_capacity=true`: Only consider edges with residual capacity > kMinCap (used in max-flow)
+- `require_capacity=false` (default): Consider all edges regardless of residual capacity
-Other policies include capacity-aware single-edge, load-factored single-edge, and `USER_DEFINED` via a callback.
+**Capacity-Aware Tie-Breaking:**
-Capturing equal-cost predecessors: With multipath=True, SPF stores all minimal-cost predecessors: `pred[node][predecessor] = [edge_id, ...]`. New equal-cost routes extend the predecessor set rather than overwrite.
+When multiple nodes or edges have equal cost, SPF uses residual capacity for tie-breaking to improve flow distribution:
-This predecessor DAG is essential for later flow splitting: it retains all equal-cost paths in a compact form.
+- **Node-level**: Priority queue ordered by (cost, -residual, node). Among equal-cost nodes, prefers paths with higher bottleneck capacity. This naturally guides flow toward higher-capacity routes.
+- **Edge-level**: When `multi_edge=false` and `tie_break=PreferHigherResidual`, selects the parallel edge with the most available capacity among equal-cost options.
-Early destination stop: If `dst` is provided, once `dst` is popped at minimal distance, SPF does not expand from `dst` and continues only while the heap front cost equals that minimal distance. This preserves equal-cost predecessors and terminates early.
+This tie-breaking is applied even in IP/IGP mode (`require_capacity=false`) using static capacities, improving flow distribution without altering routing topology.
-This optimization saves time when only a specific target's distance is needed.
+**Multipath Support:**
-Specialized fast path: With no exclusions and `ALL_MIN_COST` or `ALL_MIN_COST_WITH_CAP_REMAINING`, SPF uses optimized loops (`_spf_fast_*`) that inline per-neighbor scanning and skip callbacks.
+With `multipath=True`, SPF stores all minimal-cost predecessors forming a DAG:
+`pred[node] = {predecessor: [edge_ids...]}`. This DAG captures all equal-cost paths
+in a compact form, used by max-flow for flow splitting.
-Complexity: Using a binary heap, time is \(O((V+E) \\log V)\); memory is \(O(V+E)\) for costs and predecessors.
+**Early Termination:**
-### Pseudocode (for EdgeSelect.ALL_MIN_COST, no exclusions)
+If `dst` is provided, SPF stops expanding after popping `dst` from the priority queue
+(continuing only while heap front cost equals dst cost to capture equal-cost predecessors).
+This optimization reduces work when only source-to-sink distances are needed.
+
+**Masking:**
+
+Optional `node_mask` and `edge_mask` boolean arrays enable runtime exclusions without
+rebuilding the graph. Used by FailureManager for efficient Monte Carlo analysis.
+
+**Complexity:**
+
+Using binary heap with capacity-aware tie-breaking: \(O((V+E) \log V)\) time, \(O(V+E)\) space for costs, predecessors, and residual tracking.
+
+### Pseudocode (simplified, see implementation for complete details)
```text
-function SPF_AllMinCost(graph, src, dst=None, multipath=True):
+function SPF(graph, src, dst=None, multipath=True, edge_selection):
costs = { src: 0 }
- pred = { src: {} } # no predecessor for source
- pq = [(0, src)] # min-heap of (cost, node)
+ pred = { src: {} }
+ min_residual_to_node = { src: infinity } # Track bottleneck capacity for tie-breaking
+
+ # Priority queue with node-level tie-breaking by residual capacity
+ # QItem: (cost, -residual, node) - negated residual for max-heap behavior
+ pq = [(0, -infinity, src)]
best_dst_cost = None
while pq:
- (c, u) = heappop(pq)
+ (c, neg_res, u) = heappop(pq)
if c > costs[u]:
- continue # stale entry in pq
+ continue # stale entry
+
if dst is not None and u == dst and best_dst_cost is None:
- best_dst_cost = c # found shortest path to dst
-
- if dst is None or u != dst:
- # Relax edges from u
- for v, edges_map in graph._adj[u].items():
- # find minimal cost among edges u->v
- min_cost = inf
- min_edges = []
- for e_id, e_attr in edges_map.items():
- ec = e_attr["cost"]
- if ec < min_cost:
- min_cost = ec
- min_edges = [e_id]
- elif multipath and ec == min_cost:
- min_edges.append(e_id)
- if min_cost == inf:
- continue # no edges
- new_cost = c + min_cost
- if v not in costs or new_cost < costs[v]:
- costs[v] = new_cost
- pred[v] = { u: min_edges }
- heappush(pq, (new_cost, v))
- elif multipath and new_cost == costs[v]:
- pred[v][u] = min_edges
-
- if best_dst_cost is not None:
- # If next closest node is farther than dst, done
+ best_dst_cost = c
+ if dst is not None and u == dst:
if not pq or pq[0][0] > best_dst_cost:
break
+ continue
+
+ # Relax edges from u
+ for v in neighbors(u):
+ # Edge selection among parallel edges u->v
+ min_cost = inf
+ selected_edges = []
+
+ for e_id in edges_between(u, v):
+ residual_cap = residual[e_id] if has_residual else capacity[e_id]
+
+ # Skip if capacity filtering enabled and edge has no residual
+ if edge_selection.require_capacity and residual_cap < kMinCap:
+ continue
+
+ edge_cost = cost[e_id]
+
+ if edge_cost < min_cost:
+ min_cost = edge_cost
+                    selected_edges = [e_id]  # reset to the new lowest-cost edge; policy applies only at ties
+
+ elif edge_cost == min_cost:
+ if edge_selection.multi_edge:
+ selected_edges.append(e_id) # Keep all equal-cost edges
+ else:
+ # Edge-level tie-breaking for single-edge selection
+ selected_edges = tiebreak_edge(selected_edges, e_id,
+ edge_selection.tie_break, residual_cap)
+
+ if not selected_edges:
+ continue # no admissible edges to v
+
+ new_cost = c + min_cost
+
+ # Compute bottleneck capacity: min of path residual and max edge residual
+ max_edge_res = max(residual[e] for e in selected_edges)
+ path_residual = min(min_residual_to_node[u], max_edge_res)
+
+ # Relaxation: found shorter path
+            if v not in costs or new_cost < costs[v]:
+ costs[v] = new_cost
+ min_residual_to_node[v] = path_residual
+ pred[v] = { u: selected_edges }
+ pq.push((new_cost, -path_residual, v)) # Node-level tie-breaking by capacity
+
+ # Multipath: found equal-cost alternative
+ elif multipath and new_cost == costs[v]:
+ pred[v][u] = selected_edges
+ # Don't update min_residual_to_node in multipath (collecting all paths)
+
+ if best_dst_cost is not None and (not pq or pq[0][0] > best_dst_cost):
+ break
return costs, pred
+
+
+# Tie-breaking policies for edge selection when multi_edge=false:
+function tiebreak_edge(current_edges, new_edge, tie_break, new_residual):
+    current_best_residual = residual[current_edges[0]]
+    if tie_break == PreferHigherResidual:
+        # Select edge with highest residual capacity
+        if new_residual > current_best_residual + epsilon:
+            return [new_edge]
+        elif abs(new_residual - current_best_residual) <= epsilon:
+            # Secondary tie-break: deterministic by edge ID
+            return [min(new_edge, current_edges[0])]
+        else:
+            return current_edges  # keep the incumbent higher-residual edge
+    else:  # Deterministic
+        # Select edge with smallest ID for reproducibility
+        return [min(new_edge, current_edges[0])]
```
-This pseudocode corresponds to the implementation. With EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING, edges with no residual capacity are skipped when computing min_edges. When multipath=False, only a single predecessor is stored per node.
+**Key Tie-Breaking Mechanisms:**
+
+1. **Node-level tie-breaking**: When multiple nodes have equal cost in the priority queue, prefer nodes reachable via paths with higher bottleneck (residual) capacity. This naturally distributes flows across equal-cost paths based on available capacity.
+
+2. **Edge-level tie-breaking** (when `multi_edge=false`):
+ - `PreferHigherResidual`: Among parallel equal-cost edges (u,v), select the one with highest residual capacity
+ - `Deterministic`: Select edge with smallest ID for reproducible results
+
+3. **Multipath behavior**: When `multipath=true`, all equal-cost predecessors are retained without capacity-based filtering, enabling flow splitting across all equal-cost paths.
### Maximum Flow Algorithm
-NetGraph's max-flow uses iterative shortest-path augmentation, blending Edmonds-Karp (augment along shortest paths) and Dinic (push blocking flows on a level graph) with cost awareness and configurable flow splitting across equal-cost parallels. It is implemented in `ngraph.algorithms.max_flow.calc_max_flow`. The goal is to compute the maximum feasible flow between a provided source and sink under edge capacity constraints.
+Implemented in C++ (`netgraph::core::max_flow`), using successive shortest paths with
+blocking flow augmentation. The algorithm blends Edmonds-Karp (augment along shortest
+paths) and Dinic (push blocking flows on a level graph) with cost awareness and
+configurable flow splitting across equal-cost parallel edges.
+
+**Goal:** Compute maximum feasible flow between source and sink under edge capacity constraints.
+
+**Multi-source/multi-sink:** Handled by the Python adapter layer (`ngraph.solver.maxflow`)
+which creates pseudo-source and pseudo-sink nodes with large-capacity, zero-cost edges
+to/from real endpoints. The C++ algorithm operates on single source and single sink.
-Multi-source/multi-sink is handled by callers when needed (e.g., Demand Manager in `combine` mode) by introducing pseudo-source and pseudo-sink nodes with infinite-capacity, zero-cost edges to/from the real endpoints. `calc_max_flow` itself operates on a single source and single sink and does not create pseudo nodes.
+**Routing Semantics:** The algorithm's behavior is controlled by `require_capacity` and `shortest_path`:
-The residual network is implicit on the flow-aware graph. For a physical edge u->v:
+- `require_capacity=true` + `shortest_path=false` (SDN/TE): SPF filters to edges with residual capacity, routes adapt iteratively during placement
+- `require_capacity=false` + `shortest_path=true` (IP/IGP): SPF uses all edges based on cost, single-pass flow placement over fixed equal-cost paths
-- Forward residual arc u->v has capacity `capacity(u,v) - flow(u,v)`.
-- Reverse residual arc v->u (implicit, not stored) has capacity `flow(u,v)`.
-No physical reverse edge is required for residual traversal. SPF traverses only forward residual arcs u->v. Reverse residual arcs v->u are used when computing reachability for the min-cut and within blocking-flow computations; they are not considered by SPF. This is distinct from `add_reverse=True` during graph construction, which adds an actual reverse physical edge with its own capacity and cost.
+See "Routing Semantics: IP/IGP vs SDN/TE" section for detailed explanation.
-Reverse residual arcs are not stored as edges in the `StrictMultiDiGraph`. They are derived on the fly from edge attributes:
+The residual network is maintained via `FlowState`, which tracks per-edge flow and computes residual capacities on demand. For each edge u→v:
-- For reachability/min-cut, we traverse incoming edges and treat v->u as available when `flow(u,v) > eps`.
-- For blocking-flow, `calc_graph_capacity` builds a reversed adjacency from the SPF predecessor DAG and computes residual capacities from `capacity - flow` (forward) and `flow` (reverse) without mutating the graph.
+- Forward residual capacity: `capacity(u,v) - flow(u,v)`
+- Reverse residual capacity (for flow cancellation): `flow(u,v)`
+
+SPF operates over the residual graph by requesting edges with `require_capacity=true`, which filters to edges with positive residual capacity. The `FlowState` provides a residual capacity view without graph mutation.
+
+Note: Reverse residual arcs for flow cancellation are distinct from physical reverse edges added via `add_reverse=True` during graph construction. Physical reverse edges model bidirectional links with independent capacity; residual reverse arcs enable flow augmentation/cancellation.
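+
+As a tiny numeric illustration of this bookkeeping (plain Python, not the Core API):
+
+```python
+# A single edge u->v with 6 of 10 units already placed
+capacity, flow = 10.0, 6.0
+forward_residual = capacity - flow  # 4.0: can still be pushed u->v
+reverse_residual = flow             # 6.0: placed flow that can be cancelled v->u
+```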
The core loop finds augmenting paths using the cost-aware SPF described above:
-Run SPF from the source to the sink with edge selection ALL_MIN_COST_WITH_CAP_REMAINING. This computes shortest-path distances and a predecessor DAG to the sink over forward residual edges with residual capacity > 0 (no reverse arcs). The edge cost can represent distance or preference; SPF selects minimum cumulative cost.
+Run SPF from source to sink with `multi_edge=true` and `require_capacity=true` (filters to edges with positive residual capacity). This computes shortest-path distances and a predecessor DAG over forward residual edges. The edge cost can represent distance, latency, or preference; SPF selects paths minimizing cumulative cost.
If the pseudo-sink is not reached (i.e., no augmenting path exists), stop: the max flow is achieved.
Otherwise, determine how much flow can be sent along the found paths:
-Using the pred DAG from SPF, compute a blocking flow with consideration of parallel edges and the chosen splitting policy. This is done by `calc_graph_capacity` (Dinic-like): it builds a reversed residual view from the sink, assigns BFS levels, and uses DFS to push blocking flow. With parallel equal-cost edges, flow is split proportionally to residual capacity (PROPORTIONAL) or equally (EQUAL_BALANCED) until a bottleneck is reached.
+Using the predecessor DAG from SPF, `FlowState.place_on_dag` computes blocking flow considering parallel edges and the splitting policy. For PROPORTIONAL: builds reversed residual graph, assigns BFS levels, uses DFS to push flow with capacity-proportional splits. For EQUAL_BALANCED: performs topological traversal with equal splits, computes global scale factor to prevent oversubscription.
-This yields a value f (flow amount) and a per-edge flow assignment on the predecessor DAG (fractions that sum to 1 along outgoing splits).
+This yields flow amount `f` and per-edge flow assignments tracking which edges carry flow and their utilization.
-We then augment the flow: for each edge on those shortest paths, increase its flow attribute by the assigned portion of f. The algorithm updates both per-edge flow and aggregate node flow for bookkeeping, and marks which edges carried flow.
+The algorithm then augments the flow: `FlowState` increases each edge's flow by its assigned portion. Per-edge flows and residual capacities are updated for the next iteration.
Add f to the total flow counter.
-If f is below a small tolerance eps (meaning no meaningful flow could be added, perhaps due to rounding or all residual capacity being negligible), break out and treat it as saturated.
+If `f` is below tolerance `kMinFlow` (negligible flow placed due to numerical limits or exhausted capacity), terminate iteration.
Repeat to find the next augmenting path (back to step 1).
If `shortest_path=True`, the algorithm performs only one augmentation pass and returns (useful when the goal is a single cheapest augmentation rather than maximum flow).
-After the loop, if detailed results are requested, the algorithm computes a FlowSummary which includes:
+After the loop, the C++ algorithm computes a FlowSummary which includes:
- total_flow: the sum of flow from source to sink achieved
-- edge_flow: a dictionary of each edge (u,v,key) to the flow on that edge
+- edge_flows: per-edge flow assignments (optional, populated when requested)
-- residual_cap: remaining capacity on each edge = capacity - flow
+- residual_capacity: remaining capacity on each edge = capacity - flow (optional, populated when requested)
-- reachable: the set of nodes reachable from the source in the final residual network (this identifies the source side of the min-cut)
+- reachable_nodes: the set of nodes reachable from the source in the final residual network (optional, identifies the source side of the min-cut)
- min_cut: the list of edges that are saturated and go from reachable to non-reachable (these form the minimum cut)
-- cost_distribution: how much flow was sent in each augmentation step cost (e.g., X flow units were sent along paths of cost Y)
+- cost_distribution: flow volume placed at each path cost tier. Core returns parallel arrays (`costs`, `flows`); Python wrapper converts to `Dict[Cost, Flow]` mapping in `FlowSummary.cost_distribution`.
+
+This is returned along with the total flow value.
+
+### Routing Semantics: IP/IGP vs SDN/TE
+
+NetGraph models two fundamentally different routing paradigms through the `require_capacity` and `shortest_path` parameters:
+
+**IP/IGP Semantics (`require_capacity=false` + `shortest_path=true`):**
+
+Traditional IP routing with Interior Gateway Protocols (OSPF, IS-IS):
+
+- Routes computed based on link costs/metrics only, ignoring available capacity
+- Single SPF computation determines equal-cost paths; forwarding is fixed until topology/cost change
+- Traffic follows predetermined paths even as links saturate
+- Models best-effort forwarding with potential packet loss when demand exceeds capacity
+- No iterative augmentation: flow placed in single pass over fixed equal-cost DAG
+- Use case: Simulating production IP networks, validating IGP designs
+
+**SDN/TE Semantics (`require_capacity=true` + `shortest_path=false`, default):**
-This is returned along with the total flow value. If a flow-assigned graph copy is requested, that is also returned.
+Software-Defined Networking and Traffic Engineering:
+
+- Routes adapt dynamically to residual link capacities during flow placement
+- SPF recomputed after each flow placement iteration, excluding saturated links
+- Iterative augmentation continues until max-flow achieved or capacity exhausted
+- Flow placement respects capacity constraints, never oversubscribing links
+- Models centralized traffic engineering with real-time capacity awareness
+- Use case: Optimal demand placement, capacity planning, failure impact analysis
+
+This distinction is fundamental: IP networks route on cost alone with fixed forwarding tables (congestion managed via queuing/drops), while TE systems route dynamically on both cost and available capacity (congestion avoided via admission control). The `require_capacity` parameter controls whether SPF filters to available capacity; `shortest_path` controls whether routes are recomputed iteratively or fixed after initial SPF.
### Flow Placement Strategies
-NetGraph's max-flow differs from classical augmenting-path implementations by controlling how flow is split across equal-cost parallel edges in each augmentation. Governed by `FlowPlacement`:
+Beyond routing semantics, NetGraph controls how flow splits across equal-cost parallel edges through `FlowPlacement`:
+
+- **PROPORTIONAL** (default, models WCMP/Weighted ECMP):
+ - Splits flow across parallel equal-cost edges proportional to residual capacity
+ - Example: Two 100G links get 50/50 split; one 100G + one 10G get 91/9 split
+ - Maximizes utilization by preferring higher-capacity paths
+ - Used in networks with heterogeneous link speeds (common in fabrics with multi-generation hardware)
+ - Can be used iteratively (e.g., successive max-flow augmentations)
-- PROPORTIONAL (default): If multiple parallel edges have equal cost on a path segment, distribute flow among them in proportion to their remaining capacities. This mimics how weighted equal-cost multi-path (W-ECMP or WCMP) routing splits flow based on link bandwidth.
+- **EQUAL_BALANCED** (models traditional ECMP):
+ - Splits flow equally across all parallel equal-cost edges regardless of capacity
+ - Example: Two 100G links get 50/50; one 100G + one 10G still attempt 50/50 (10G saturates first)
+ - Models IP hash-based load balancing (5-tuple hashing distributes flows uniformly)
+ - Single-pass admission: computes one global scale factor to avoid oversubscription
+ - For IP ECMP simulation: use with `require_capacity=false` + `shortest_path=true`
-- EQUAL_BALANCED: Split flow equally across all equal-cost parallel edges, regardless of capacity differences (up to capacity limits). This may under-utilize a higher-capacity link if paired with a lower-capacity one, but it maintains an even load balance until one link saturates. This matches IP forwarding with equal-cost multi-path (ECMP), which splits flow based on the number of parallel paths.
+`FlowState.place_on_dag` implements single-pass placement over a fixed SPF DAG:
-`calc_graph_capacity` implements these strategies:
+- **PROPORTIONAL**: Constructs reversed residual graph from predecessor DAG. Uses Dinic-style BFS leveling and DFS push from sink to source. Within each edge group (parallel edges between node pair), splits flow proportionally to residual capacity. Distributes pushed flow back to underlying edges maintaining proportional ratios. Can be called iteratively on updated residuals.
-- PROPORTIONAL: Build a reversed residual view from the sink. Assign BFS levels and push blocking flows with DFS, summing capacities across parallel equal-cost edges in each segment. Convert reversed flows back to forward orientation.
+- **EQUAL_BALANCED**: Performs topological traversal (Kahn's algorithm) from source to sink over forward DAG. Assigns equal splits across all outgoing parallel edges from each node. Computes global scale factor as `min(edge_capacity / edge_assignment)` across all edges to prevent oversubscription. Applies scale uniformly and stops. This models single-pass ECMP admission where the forwarding DAG doesn't change mid-flow.
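+
+The split arithmetic behind the two-link examples above (illustrative Python):
+
+```python
+caps = [100.0, 10.0]                          # one 100G link, one 10G link
+proportional = [c / sum(caps) for c in caps]  # [~0.909, ~0.091] -> the 91/9 split
+equal = [1 / len(caps)] * len(caps)           # [0.5, 0.5] -> the 10G saturates first
+```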
-- EQUAL_BALANCED: Build reversed adjacency from the sink. Push a nominal unit flow from the source equally across outgoing reversed arcs, then scale by the minimum capacity-to-assignment ratio to respect capacities; normalize back to forward orientation.
+**Configuration Examples:**
-Support for these placement strategies together with control over the path selection and edge selection policies enables realistic modeling of different forwarding and traffic engineering scenarios.
+```python
+# IP/ECMP: Traditional router behavior (cost-based routing, equal splits)
+max_flow(network, source_path=src, sink_path=dst,
+ flow_placement=FlowPlacement.EQUAL_BALANCED,
+ shortest_path=True, # Single SPF tier
+ require_capacity=False) # Ignore capacity when routing
+
+# SDN/TE with WCMP: Capacity-aware routing with proportional splits
+max_flow(network, source_path=src, sink_path=dst,
+ flow_placement=FlowPlacement.PROPORTIONAL,
+ shortest_path=False, # Iterative augmentation
+ require_capacity=True) # Adapt routes to capacity
+
+# WCMP: Fixed equal-cost paths with bandwidth-weighted splits
+max_flow(network, source_path=src, sink_path=dst,
+ flow_placement=FlowPlacement.PROPORTIONAL,
+ shortest_path=True, # Single tier of equal-cost paths
+ require_capacity=False) # Fixed paths regardless of utilization
+```
+
+These configurations enable realistic modeling of diverse forwarding behaviors: from traditional IP networks with best-effort delivery to modern SDN deployments with capacity-aware traffic engineering.
+
+### Flow Policy Presets
+
+For traffic matrix placement, NetGraph provides `FlowPolicyPreset` values that bundle the routing semantics described above into convenient configurations. These presets map to real-world network behaviors:
+
+| Preset | Behavior | Use Case |
+|--------|----------|----------|
+| `SHORTEST_PATHS_ECMP` | IP/IGP with hash-based ECMP | Traditional routers (OSPF/IS-IS), equal splits across equal-cost paths |
+| `SHORTEST_PATHS_WCMP` | IP/IGP with weighted ECMP | Routers with WCMP support, proportional splits based on link capacity |
+| `TE_WCMP_UNLIM` | MPLS-TE / SDN with WCMP | Capacity-aware TE with unlimited tunnels, iterative placement |
+| `TE_ECMP_16_LSP` | MPLS-TE with 16 LSPs | Fixed 16 ECMP tunnels per demand, models RSVP-TE with LSP limits |
+| `TE_ECMP_UP_TO_256_LSP` | MPLS-TE with up to 256 LSPs | Scalable TE with tunnel limit, models SR-TE or large-scale RSVP |
+
+**Detailed Configuration Mapping:**
+
+| Preset | `require_capacity` | `max_flows` | `multi_edge` | `flow_placement` |
+|--------|--------------------|--------------|--------------|--------------------|
+| `SHORTEST_PATHS_ECMP` | `false` | 1 | `true` | `EQUAL_BALANCED` |
+| `SHORTEST_PATHS_WCMP` | `false` | 1 | `true` | `PROPORTIONAL` |
+| `TE_WCMP_UNLIM` | `true` | unlimited | `true` | `PROPORTIONAL` |
+| `TE_ECMP_16_LSP` | `true` | 16 | `false` | `EQUAL_BALANCED` |
+| `TE_ECMP_UP_TO_256_LSP` | `true` | 256 | `false` | `EQUAL_BALANCED` |
+
+**Key parameters:**
+
+- `require_capacity`: When `false`, paths are selected based on link costs alone (models IP/IGP routing). When `true`, paths adapt to residual capacity during placement (models SDN/TE). See [Routing Semantics](#routing-semantics-ipigp-vs-sdnte) for details.
+- `max_flows`: With `1`, demand is placed in a single pass over fixed paths, equivalent to `shortest_path=true` in the max-flow solver. With `> 1`, paths are recomputed as capacity is consumed (iterative placement), equivalent to `shortest_path=false`.
+- `multi_edge`: When `true`, uses all parallel equal-cost edges (hop-by-hop ECMP); when `false`, each flow uses a single path (tunnel/LSP semantics)
+- `flow_placement`: `EQUAL_BALANCED` splits equally across paths; `PROPORTIONAL` splits by residual capacity
+
+**Example: Modeling IP vs MPLS Networks**
+
+```yaml
+# IP network with traditional ECMP (e.g., data center leaf-spine)
+traffic_matrix_set:
+ dc_traffic:
+ - source_path: ^rack1/
+ sink_path: ^rack2/
+ demand: 1000.0
+ flow_policy_config: SHORTEST_PATHS_ECMP
+
+# MPLS-TE network with capacity-aware tunnel placement
+traffic_matrix_set:
+ backbone_traffic:
+ - source_path: ^metro1/
+ sink_path: ^metro2/
+ demand: 5000.0
+ flow_policy_config: TE_WCMP_UNLIM
+```
### Pseudocode (simplified max-flow loop)
```text
-function MAX_FLOW(graph, S, T, placement=PROPORTIONAL):
- initialize flow_graph with 0 flow on all edges
+function MAX_FLOW(graph, S, T, placement=PROPORTIONAL, require_capacity=True):
+ flow_state = FlowState(graph) # Tracks per-edge flow and residuals
total_flow = 0
- do:
- costs, pred = SPF(graph, src=S, dst=T, edge_select=ALL_MIN_COST_WITH_CAP_REMAINING)
- if T not reachable in pred:
+ cost_distribution = []
+
+ while True:
+ # Configure edge selection for SPF
+ edge_selection = EdgeSelection(
+ multi_edge=True,
+ require_capacity=require_capacity,
+ tie_break=Deterministic
+ )
+
+ # Find shortest augmenting paths in residual graph
+ residuals = flow_state.residual_view() if require_capacity else None
+ costs, dag = SPF(graph, S, T,
+ multipath=True,
+ edge_selection=edge_selection,
+ residual=residuals)
+
+ if T not in dag: # No augmenting path exists
break
- f, flow_dict = calc_graph_capacity(flow_graph, S, T, pred, placement)
- if f <= eps:
+
+ # Push blocking flow through predecessor DAG
+ path_cost = costs[T]
+ placed = flow_state.place_on_dag(S, T, dag, infinity, placement)
+
+ if placed < kMinFlow: # Negligible flow placed
break
- for each edge in flow_dict:
- add flow on that edge as per flow_dict
- total_flow += f
- while True
- return total_flow, (and optionally summary, flow_graph)
+
+ total_flow += placed
+ cost_distribution.append((path_cost, placed))
+
+ # Compute min-cut, reachability, cost distribution
+ min_cut = flow_state.compute_min_cut(S, node_mask, edge_mask)
+
+ return FlowSummary(
+ total_flow=total_flow,
+ cost_distribution=cost_distribution,
+ min_cut=min_cut,
+ edge_flows=..., # optional
+ residual_capacity=..., # optional
+ reachable_nodes=... # optional
+ )
```
-Here `eps` denotes a small tolerance (default 1e-10; configurable via parameter).
+The flow tolerance constant `kMinFlow` (default 1/4096 ≈ 2.4e-4) determines when flow placement is considered negligible and iteration terminates.
-In practice, each augmentation performs one SPF \(O((V+E) \\log V)\) and one blocking-flow computation over the pred DAG and residual view (typically \(O(V+E)\)). If we pushed one path at a time the worst case would be \(O(E)\) augmentations, giving \(O(E^2 \\log V)\). Because we push blocking flows, the number of augmentations is usually far smaller than \(E\). A practical upper bound is \(O(\\min\\{E, F\\} \\cdot (E \\log V))\), where \(F\) is the max-flow value.
+Each augmentation phase performs one SPF \(O((V+E) \\log V)\) and one blocking-flow computation \(O(V+E)\) over the predecessor DAG. With blocking flow augmentation, the shortest path distance (in hops) increases with each phase, bounding the number of phases by \(O(V)\). This yields an overall complexity of \(O(V \\cdot (V+E) \\log V)\), which simplifies to \(O(V^2 \\log V)\) for sparse graphs where \(E = O(V)\).
+
+Practical performance is significantly better than these worst-case bounds due to early termination once residual capacity is exhausted. For integer capacities, the bound becomes \(O(F \cdot (V+E) \log V)\), where \(F\) is the max-flow value; this is the tighter of the two bounds when \(F \ll V\).
### Managers and Workflow Orchestration
Managers handle scenario dynamics and prepare inputs for algorithmic steps.
-Demand Manager (`ngraph.demand.manager`): Expands `TrafficDemand` entries into concrete `Demand` objects and places them on a `StrictMultiDiGraph` derived from the `Network` (or a `NetworkView`).
+**Demand Expansion** (`ngraph.exec.demand.builder`): Builds traffic matrix sets from DSL definitions, expanding source/sink patterns into concrete node groups.
-- Expansion is deterministic: source/sink node lists are sorted; no randomization is used.
-- Modes: `combine` (one aggregate demand via pseudo source/sink nodes) and `pairwise` (one demand per (src, dst) pair, excluding self-pairs, with even volume split).
-- Expanded demands are sorted by ascending priority before placement.
-- Placement uses a priority-aware round-robin scheduler. `placement_rounds="auto"` performs up to 3 passes with early stop based on progress and fairness.
-- Provides summaries (per-demand placement, link usage) and does not mutate the base `Network` (operates on the built flow graph).
+- Deterministic expansion: source/sink node lists sorted alphabetically; no randomization
+- Supports `combine` mode (aggregate via pseudo nodes) and `pairwise` mode (individual (src,dst) pairs with volume split)
+- Demands sorted by ascending priority before placement (lower value = higher priority)
+- Placement handled by Core's FlowPolicy with configurable presets (ECMP, WCMP, TE modes)
+- Non-mutating: operates on Core flow graphs with exclusions; Network remains unmodified
-Failure Manager (`ngraph.failure.manager`): Applies a `FailurePolicy` to compute exclusion sets and runs analyses on `NetworkView` instances.
+**Failure Manager** (`ngraph.exec.failure.manager`): Applies a `FailurePolicy` to compute exclusion sets and runs analyses with those exclusions.
-- Supports baseline (no failures), serial or process-parallel execution, and per-worker network caching (the network is serialized once per worker).
-- Deterministic when a seed is supplied (each iteration receives `seed + iteration_index`).
-- Deduplication: iterations are grouped by a key built from sorted excluded node IDs, sorted excluded link IDs, analysis function name, and analysis parameters. Only one representative per group is executed; results are replicated to all members.
- - This reduces effective executions from I to U, where U is the number of unique failure patterns for the chosen policy and parameters (e.g., 10,000 samples over 250 unique single-link failures execute as 250 tasks, not 10,000).
-- Parameter validation: with no effective failure rules, `iterations > 1` without `baseline=True` is rejected; `baseline=True` requires `iterations >= 2`.
-- Parallelism auto-adjusts to 1 if the analysis function cannot be pickled (e.g., defined in `__main__`).
+- Parallel execution via `ThreadPoolExecutor` with zero-copy network sharing across worker threads
+- Deterministic results when seed is provided (each iteration derives `seed + iteration_index`)
+- Optional baseline execution (no failures) for comparing degraded vs. intact capacity
+- Automatic parallelism adjustment: Forces serial execution when the analysis function is defined in `__main__` (e.g., a notebook context) to avoid pickling failures
+- Thread-safe analysis: Network shared by reference; exclusion sets passed per-iteration
+- Automatic graph cache pre-building: Before parallel iterations, builds `GraphCache` to amortize graph construction cost; per-iteration exclusions applied via O(|excluded|) mask operations
-Both managers separate policy (how to expand demands or pick failures) from core algorithms. They prepare concrete inputs (expanded demands or `NetworkView`s) for each workflow iteration.
+Both the demand expansion logic and failure manager separate policy (how to expand demands or pick failures) from core algorithms. They prepare concrete inputs (expanded demands or exclusion sets) for each workflow iteration.
### Workflow Engine and Steps
-NetGraph workflows (see Workflow Reference) are essentially recipes of analysis steps to run in sequence. Each step is typically a pure function: it takes the current model (or view) and possibly prior results, performs an analysis, and stores its outputs. The workflow engine coordinates these steps, using a Results store to record data.
+NetGraph workflows (see Workflow Reference) are essentially recipes of analysis steps to run in sequence. Each step is typically a pure function: it takes the current model and possibly prior results, performs an analysis, and stores its outputs. The workflow engine coordinates these steps, using a Results store to record data.
Common built-in steps:
-- BuildGraph: builds a `StrictMultiDiGraph` from the Network (or view) and stores node-link JSON plus `{context: {add_reverse}}`. Often an initial step.
+- BuildGraph: validates network topology and stores node-link JSON representation via NetworkX `MultiDiGraph`. Stores graph structure under `data.graph` and parameters under `data.context`. Primarily for validation and export; Core graph building happens in analysis functions.
- NetworkStats: computes node/link counts, capacity statistics, cost statistics, and degree statistics. Supports optional `excluded_nodes`/`excluded_links` and `include_disabled`.
@@ -395,7 +673,7 @@ Common built-in steps:
- CostPower: aggregates platform and per-end optics capex/power by hierarchy level (0..N). Respects `include_disabled` and `aggregation_level`. Stores `data.levels` and `data.context`.
-Each step is implemented in the code (in ngraph.workflow module) and has a corresponding step_type name. Steps are generally pure in that they don't modify the Network (except perhaps to disable something if that's the nature of the step, but usually they operate on views and copies). They take inputs, often including references to prior steps' results (the workflow engine allows one step to use another step's output). For instance, a placement step might need the value of alpha* from an MSD step; the workflow definition can specify that link.
+Each step is implemented in the code (in ngraph.workflow module) and has a corresponding step_type name. Steps are pure functions that don't modify the Network. They take inputs, often including references to prior steps' results (the workflow engine allows one step to use another step's output). For instance, a placement step might need the value of alpha* from an MSD step; the workflow definition can specify that link.
### Results storage
@@ -411,15 +689,15 @@ NetGraph's design includes several features that differentiate it from tradition
- Declarative Scenario DSL: A YAML DSL with blueprints and programmatic expansion allows abstract definitions (e.g., a fully meshed Clos) to be expanded into concrete nodes and links. Strict schema validation ensures that scenarios are well-formed and rejects unknown or invalid fields.
-- NetworkView overlays vs graph copying: Read-only overlays avoid copying large structures for each scenario. The view is designed for availability toggles and caches built graphs for algorithms.
+- Runtime Exclusions vs graph copying: Analysis-time exclusions avoid copying large structures for each scenario. The design separates static topology from dynamic failure states.
-- Strict graph with stable edge IDs: Extends `MultiDiGraph` with explicit node management and monotonic edge keys, simplifying correlation of results to original links.
+- Stable edge IDs: Links have auto-generated unique IDs (`source|target|`) that remain stable throughout analysis, simplifying correlation of results to original links.
-- Flow placement strategies (proportional and equal): During augmentation, split flow across equal-cost paths and parallel links, modeling ECMP/WCMP behavior without linear programming.
+- Dual routing semantics: Models both IP/IGP (cost-only, fixed paths via `require_capacity=false` + `shortest_path=true`) and SDN/TE (capacity-aware, iterative via `require_capacity=true` + `shortest_path=false`).
-- Cost-aware augmentation: Prefer cheapest capacity first. It does not re-route previously placed flow.
+- Configurable flow placement: Proportional (WCMP-style, capacity-weighted) and Equal-Balanced (ECMP-style, uniform) splitting across parallel equal-cost edges.
-- User-defined edge selection: Custom edge selection logic is supported (EdgeSelect.USER_DEFINED with a callback), enabling custom routing heuristics.
+- Cost-aware augmentation: Prefer cheapest capacity first via successive shortest paths. Does not re-route previously placed flow.
- Deterministic simulation with seeding: Random aspects (e.g., failure sampling) are controlled by explicit seeds that propagate through steps. Runs are reproducible given the same scenario and seed.
@@ -427,19 +705,71 @@ NetGraph's design includes several features that differentiate it from tradition
### Performance Considerations
-Throughout the design, performance has been considered:
+**C++ Algorithm Implementation:**
+
+- Native C++ execution with optimized data structures (CSR adjacency, flat arrays)
+- GIL released during algorithm execution, enabling concurrent analysis across Python threads
+- Zero-copy NumPy integration for array inputs/outputs (via buffer protocol)
+- Deterministic edge ordering for reproducible results
+- Cache-friendly CSR representation for efficient neighbor traversal
+
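+As a sketch of the concurrency model (using the adapter names introduced in this change; `network` and `node_pairs` are placeholders, and the positional `max_flow` call is an assumption based on the adapter's docstring example), a thread pool can fan out Core calls because each one drops the GIL:
+
+```python
+from concurrent.futures import ThreadPoolExecutor
+
+import netgraph_core
+
+from ngraph.adapters.core import build_graph
+
+# Build once; the handle is shared by reference across threads.
+graph_handle, mdg, edge_mapper, node_mapper = build_graph(network)
+algorithms = netgraph_core.Algorithms(netgraph_core.Backend.cpu())
+
+def analyze_pair(pair):
+    src, dst = pair
+    # The GIL is released inside the C++ max-flow call, so these
+    # computations overlap across worker threads.
+    return algorithms.max_flow(graph_handle, src, dst)
+
+with ThreadPoolExecutor(max_workers=8) as pool:
+    flows = list(pool.map(analyze_pair, node_pairs))
+```
+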
+**Graph Caching:**
+
+For Monte Carlo analysis with many failure iterations, graph construction is amortized via `GraphCache`:
+
+- Graph built once before iterations begin (includes all nodes and augmentation edges)
+- Per-iteration exclusions applied via boolean masks rather than graph rebuilding
+- Mask building is O(|excluded|) using pre-computed `link_id_to_edge_indices` mapping
+- FailureManager automatically pre-builds caches before parallel execution
+
+This optimization is critical for performance: graph construction involves Python processing, NumPy array creation, and C++ object initialization. Caching eliminates this overhead from the per-iteration critical path, enabling the GIL-releasing C++ algorithms to execute with minimal Python overhead.
+
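+As a concrete sketch of this pattern, using the adapter API added in this change (`build_graph_cache`, `build_node_mask`, `build_edge_mask`); the node names and `failure_patterns` list are placeholders:
+
+```python
+from ngraph.adapters.core import (
+    build_edge_mask,
+    build_graph_cache,
+    build_node_mask,
+)
+
+cache = build_graph_cache(network)  # built once, before iterations
+src = cache.node_mapper.to_id("A")  # placeholder node names
+dst = cache.node_mapper.to_id("B")
+
+for excluded_nodes, excluded_links in failure_patterns:
+    # O(|excluded|) per iteration: flip mask bits instead of rebuilding
+    node_mask = build_node_mask(cache, excluded_nodes)
+    edge_mask = build_edge_mask(cache, excluded_links)
+    result = cache.algorithms.max_flow(
+        cache.graph_handle, src, dst,
+        node_mask=node_mask, edge_mask=edge_mask,
+    )
+```
+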
+**Monte Carlo Deduplication:**
+
+FailureManager collapses identical failure patterns into single executions. Runtime
+scales with unique patterns U rather than requested iterations I; often U << I for
+common failure policies.
+
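+A minimal sketch of the idea (illustrative names, not FailureManager's internals): group iterations by their exclusion pattern, run one representative per group with its derived seed, and replicate the result to every member:
+
+```python
+from collections import defaultdict
+
+def run_deduplicated(patterns, analyze, seed):
+    """patterns: per-iteration (excluded_nodes, excluded_links) pairs."""
+    groups = defaultdict(list)
+    for i, (nodes, links) in enumerate(patterns):
+        key = (tuple(sorted(nodes)), tuple(sorted(links)))
+        groups[key].append(i)
+
+    results = [None] * len(patterns)
+    for (nodes, links), members in groups.items():
+        rep = members[0]
+        # Per-iteration seed derivation as described above (seed + index)
+        value = analyze(set(nodes), set(links), seed=seed + rep)
+        for i in members:  # identical patterns -> identical results
+            results[i] = value
+    return results
+```
+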
+**Complexity:**
+
+- SPF: \(O((V+E) \log V)\) using binary heap
+- Max-flow: \(O(V \cdot (V+E) \log V)\) worst-case for successive shortest paths with blocking flow
+  - Practical performance bounded by \(O(F \cdot (V+E) \log V)\) for integer capacities, where \(F\) is the max-flow value
+  - Early termination once residual capacity is exhausted provides significant speedup in typical networks
+
+**Scalability:**
+
+Benchmarks on structured topologies (Clos, grid) and realistic network graphs demonstrate scalability to networks with thousands of nodes and tens of thousands of edges. C++ execution with CSR adjacency and GIL release provides order-of-magnitude speedups over pure Python graph libraries for compute-intensive analysis.
+
+## Summary
+
+NetGraph's hybrid architecture combines:
+
+**Python Layer:**
-SPF uses Python heapq and optimized loops. Internal profiling shows expected scaling for typical network sizes.
+- Declarative scenario DSL with schema validation
+- Domain model (Network, Node, Link, RiskGroup)
+- Runtime exclusions for non-destructive failure simulation
+- Workflow orchestration and result aggregation
+- Managers for demand expansion and failure enumeration
-Caching graphs in views avoids O(N+E) rebuild costs repeatedly when analyzing many failures.
+**C++ Layer:**
-Monte Carlo deduplication collapses identical failure patterns (plus analysis parameters) into single executions. Runtime scales with the number of unique patterns U rather than requested iterations I; in many policies U << I.
+- High-performance graph algorithms (SPF, K-shortest paths, max-flow)
+- Immutable StrictMultiDiGraph with CSR adjacency
+- Configurable flow placement policies (ECMP/WCMP simulation)
+- Runtime masking for efficient repeated analysis
-Pickle-based deep copy for StrictMultiDiGraph was faster than the default iterative copy for large graphs in local measurements. This reduces the cost of creating multiple independent graph instances.
+**Integration:**
-The complexity of algorithms has been kept polynomial and usually near-linear. For instance, typical network max flows (with unit capacities) can be O(VE^2), but by using shortest path (cost) and splitting, NetGraph's algorithm often uses far fewer augmentations than worst-case. Benchmarks on Clos topologies and grid graphs confirm the algorithms perform within expected growth rates and can handle networks of thousands of nodes and edges efficiently.
+- Adapter layer for seamless Python ↔ C++ translation
+- Stable node/edge ID mapping for result traceability
+- NumPy array interface for efficient data transfer
+- GIL release during computation for concurrent thread execution
-Summary: The design combines a declarative scenario model, reproducible views, a strict graph with stable IDs, cost-aware SPF and augmentation, and a structured results store. It adapts standard algorithms to network engineering use cases (flow splitting, failure simulation, traceable outputs).
+This design adapts standard algorithms to network engineering use cases (flow splitting,
+failure simulation, cost-aware routing) while achieving high performance through native
+C++ execution and ergonomic interfaces through Python APIs.
## Cross-references
diff --git a/docs/reference/dsl.md b/docs/reference/dsl.md
index a7b5e2e..5fa05b8 100644
--- a/docs/reference/dsl.md
+++ b/docs/reference/dsl.md
@@ -472,11 +472,13 @@ traffic_matrix_set:
**Flow Policies:**
-- `SHORTEST_PATHS_ECMP`: Equal-cost multi-path (ECMP) over shortest paths; equal split across paths.
-- `SHORTEST_PATHS_WCMP`: Weighted ECMP (WCMP) over equal-cost shortest paths; weighted split (proportional).
-- `TE_WCMP_UNLIM`: Traffic engineering weighted multipath (WCMP) with capacity-aware selection; unlimited LSPs.
-- `TE_ECMP_16_LSP`: Traffic engineering with 16 ECMP LSPs; equal split across LSPs.
-- `TE_ECMP_UP_TO_256_LSP`: Traffic engineering with up to 256 ECMP LSPs; equal split across LSPs.
+- `SHORTEST_PATHS_ECMP`: IP/IGP routing with hash-based ECMP; equal split across equal-cost paths
+- `SHORTEST_PATHS_WCMP`: IP/IGP routing with weighted ECMP; proportional split by link capacity
+- `TE_WCMP_UNLIM`: MPLS-TE / SDN with capacity-aware WCMP; unlimited tunnels
+- `TE_ECMP_16_LSP`: MPLS-TE with exactly 16 ECMP LSPs per demand
+- `TE_ECMP_UP_TO_256_LSP`: MPLS-TE with up to 256 ECMP LSPs per demand
+
+See [Flow Policy Presets](design.md#flow-policy-presets) for detailed configuration mapping and real-world network behavior.
## `failure_policy_set` - Failure Simulation
diff --git a/docs/reference/workflow.md b/docs/reference/workflow.md
index 179144f..a8277ee 100644
--- a/docs/reference/workflow.md
+++ b/docs/reference/workflow.md
@@ -21,40 +21,54 @@ workflow:
- step_type: MaximumSupportedDemand
name: msd_baseline
matrix_name: baseline_traffic_matrix
- - step_type: TrafficMatrixPlacement
- name: tm_placement
- matrix_name: baseline_traffic_matrix
- failure_policy: weighted_modes
- iterations: 1000
- baseline: true
+- step_type: TrafficMatrixPlacement
+ name: tm_placement
+ matrix_name: baseline_traffic_matrix
+ failure_policy: random_failures
+ iterations: 1000
+ baseline: true
```
## Execution Model
- Steps run sequentially via `WorkflowStep.execute()`, which records timing and metadata and stores outputs under `{metadata, data}` for the step.
-- Monte Carlo steps (`MaxFlow`, `TrafficMatrixPlacement`) execute iterations using the Failure Manager. Each iteration analyzes a `NetworkView` that masks failed nodes/links without mutating the base network. Workers are controlled by `parallelism: auto|int`.
+- Monte Carlo steps (`MaxFlow`, `TrafficMatrixPlacement`) execute iterations using the Failure Manager. Each iteration analyzes the network with exclusion sets applied to mask failed nodes/links without mutating the base network. Workers are controlled by `parallelism: auto|int`.
- Seeding: a scenario-level `seed` derives per-step seeds unless a step sets an explicit `seed`. Metadata includes `scenario_seed`, `step_seed`, `seed_source`, and `active_seed`.
## Core Workflow Steps
### BuildGraph
-Export the network graph to node-link JSON for external analysis. Optional for other steps.
+Validates network topology and exports node-link JSON for external analysis. Optional for other workflow steps.
```yaml
- step_type: BuildGraph
name: build_graph
+ add_reverse: true # Add reverse edges for bidirectional connectivity (default: true)
```
+Parameters:
+
+- `add_reverse`: If `true`, adds reverse edges for each link to enable bidirectional connectivity. Set to `false` for directed-only graphs. Default: `true`.
+
### NetworkStats
-Compute node, link, and degree metrics. Supports temporary exclusions.
+Compute node, link, and degree metrics. Supports temporary exclusions without modifying the base network.
```yaml
- step_type: NetworkStats
name: baseline_stats
+ include_disabled: false # Include disabled nodes/links in stats
+ excluded_nodes: [] # Optional: Temporary node exclusions
+ excluded_links: [] # Optional: Temporary link exclusions
```
+Parameters:
+
+- `include_disabled`: If `true`, include disabled nodes and links in statistics. Default: `false`.
+- `excluded_nodes`: Optional list of node names to exclude temporarily (does not modify network).
+- `excluded_links`: Optional list of link IDs to exclude temporarily (does not modify network).
+
### MaxFlow
Monte Carlo maximum flow analysis between node groups.
@@ -84,11 +98,13 @@ Monte Carlo placement of a named traffic matrix with optional alpha scaling.
- step_type: TrafficMatrixPlacement
name: tm_placement
matrix_name: default
+ failure_policy: random_failures # Optional: policy name in failure_policy_set
iterations: 100
parallelism: auto
+ placement_rounds: auto # or an integer
baseline: false
- include_flow_details: true # cost_distribution per flow
- include_used_edges: false # include per-demand used edge lists
+ include_flow_details: true # cost_distribution per flow
+ include_used_edges: false # include per-demand used edge lists
store_failure_patterns: false
# Alpha scaling – explicit or from another step
alpha: 1.0
@@ -111,16 +127,32 @@ Search for the maximum uniform traffic multiplier `alpha_star` that is fully pla
- step_type: MaximumSupportedDemand
name: msd_default
matrix_name: default
- acceptance_rule: hard
- alpha_start: 1.0
- growth_factor: 2.0
- resolution: 0.01
- max_bracket_iters: 32
- max_bisect_iters: 32
- seeds_per_alpha: 1
- placement_rounds: auto
+ acceptance_rule: hard # Currently only "hard" is supported
+ alpha_start: 1.0 # Starting alpha value for search
+ growth_factor: 2.0 # Growth factor for bracketing (must be > 1.0)
+ alpha_min: 0.000001 # Minimum alpha bound (default: 1e-6)
+ alpha_max: 1000000000.0 # Maximum alpha bound (default: 1e9)
+ resolution: 0.01 # Convergence resolution for bisection
+ max_bracket_iters: 32 # Maximum bracketing iterations
+ max_bisect_iters: 32 # Maximum bisection iterations
+ seeds_per_alpha: 1 # Number of seeds to test per alpha (majority vote)
+ placement_rounds: auto # Placement optimization rounds
```
+Parameters:
+
+- `matrix_name`: Name of the traffic matrix to analyze (default: "default").
+- `acceptance_rule`: Acceptance rule for feasibility (currently only "hard" is supported).
+- `alpha_start`: Initial alpha value to probe.
+- `growth_factor`: Multiplier for bracketing phase (must be > 1.0).
+- `alpha_min`: Minimum alpha bound for search.
+- `alpha_max`: Maximum alpha bound for search.
+- `resolution`: Convergence threshold for bisection.
+- `max_bracket_iters`: Maximum iterations for bracketing phase.
+- `max_bisect_iters`: Maximum iterations for bisection phase.
+- `seeds_per_alpha`: Number of random seeds to test per alpha (uses majority vote).
+- `placement_rounds`: Number of placement optimization rounds (`int` or `"auto"`).
+
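+The search itself is a bracket-then-bisect loop over these parameters. The following is a simplified sketch, not the actual implementation: `feasible(alpha)` stands in for a full placement run at that scaling factor, and the `seeds_per_alpha` majority vote is omitted.
+
+```python
+def find_alpha_star(feasible,
+                    alpha_start=1.0, growth_factor=2.0,
+                    alpha_min=1e-6, alpha_max=1e9,
+                    resolution=0.01,
+                    max_bracket_iters=32, max_bisect_iters=32):
+    """Bracket then bisect for the largest fully-placeable alpha."""
+    lo = hi = None
+    alpha = alpha_start
+    if feasible(alpha):
+        lo = alpha  # grow until infeasible (or the cap is reached)
+        for _ in range(max_bracket_iters):
+            alpha = min(alpha * growth_factor, alpha_max)
+            if not feasible(alpha):
+                hi = alpha
+                break
+            lo = alpha
+            if alpha >= alpha_max:
+                return alpha_max  # feasible at the upper bound
+    else:
+        hi = alpha  # shrink until feasible (or give up at alpha_min)
+        for _ in range(max_bracket_iters):
+            alpha = max(alpha / growth_factor, alpha_min)
+            if feasible(alpha):
+                lo = alpha
+                break
+            hi = alpha
+            if alpha <= alpha_min:
+                raise ValueError("no feasible alpha above alpha_min")
+    if lo is None or hi is None:
+        raise ValueError("bracketing failed within max_bracket_iters")
+    for _ in range(max_bisect_iters):
+        if hi - lo <= resolution:
+            break
+        mid = (lo + hi) / 2.0
+        if feasible(mid):
+            lo = mid
+        else:
+            hi = mid
+    return lo  # alpha_star: largest alpha known to be feasible
+```
+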
Outputs:
- data.alpha_star: maximum uniform scaling factor
diff --git a/ngraph/__init__.py b/ngraph/__init__.py
index 191c18b..ac301d4 100644
--- a/ngraph/__init__.py
+++ b/ngraph/__init__.py
@@ -7,15 +7,13 @@
from __future__ import annotations
-from . import cli, config, logging
-from .demand.matrix import TrafficMatrixSet
-from .results.artifacts import CapacityEnvelope, PlacementResultSet
+from . import cli, logging
+from .model.demand.matrix import TrafficMatrixSet
+from .results.artifacts import CapacityEnvelope
__all__ = [
"cli",
- "config",
"logging",
"CapacityEnvelope",
- "PlacementResultSet",
"TrafficMatrixSet",
]
diff --git a/ngraph/adapters/core.py b/ngraph/adapters/core.py
new file mode 100644
index 0000000..4ee51a7
--- /dev/null
+++ b/ngraph/adapters/core.py
@@ -0,0 +1,509 @@
+"""Adapter layer for NetGraph-Core integration.
+
+Provides graph building, node/edge ID mapping, and result translation between
+NetGraph's scenario-level types and NetGraph-Core's internal representations.
+
+Key components:
+- build_graph(): One-shot graph construction with exclusions
+- build_graph_cache(): Cached graph for repeated analysis with masks
+- build_node_mask() / build_edge_mask(): O(|excluded|) mask construction
+- get_disabled_exclusions(): Helper to collect disabled topology for exclusions
+
+Graph caching enables efficient repeated analysis with different exclusion sets
+by building the graph once and using lightweight masks for exclusions. Disabled
+nodes and links are automatically included in masks built from a GraphCache.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Set
+
+import netgraph_core
+import numpy as np
+
+from ngraph.types.dto import EdgeRef
+
+if TYPE_CHECKING:
+ from ngraph.model.network import Network
+
+
+def get_disabled_exclusions(
+ network: "Network",
+ excluded_nodes: Optional[Set[str]] = None,
+ excluded_links: Optional[Set[str]] = None,
+) -> tuple[Optional[Set[str]], Optional[Set[str]]]:
+ """Merge user exclusions with disabled nodes/links from the network.
+
+ Use this when calling build_graph() to ensure disabled topology is excluded.
+
+ Args:
+ network: Network instance.
+ excluded_nodes: User-provided node exclusions (or None).
+ excluded_links: User-provided link exclusions (or None).
+
+ Returns:
+ Tuple of (full_excluded_nodes, full_excluded_links) including disabled.
+ Returns None for either if empty (for efficient build_graph calls).
+ """
+ disabled_nodes = {name for name, node in network.nodes.items() if node.disabled}
+ disabled_links = {lid for lid, link in network.links.items() if link.disabled}
+
+ full_excluded_nodes: Optional[Set[str]] = None
+ if disabled_nodes or excluded_nodes:
+ full_excluded_nodes = (excluded_nodes or set()) | disabled_nodes
+
+ full_excluded_links: Optional[Set[str]] = None
+ if disabled_links or excluded_links:
+ full_excluded_links = (excluded_links or set()) | disabled_links
+
+ return full_excluded_nodes, full_excluded_links
+
+
+class AugmentationEdge(NamedTuple):
+ """Edge specification for graph augmentation.
+
+ Augmentation edges are added to the graph as-is (unidirectional).
+ Nodes referenced in augmentations that don't exist in the network
+ are automatically treated as pseudo/virtual nodes.
+
+ Attributes:
+ source: Source node name (real or pseudo)
+ target: Target node name (real or pseudo)
+ capacity: Edge capacity
+ cost: Edge cost (converted to int64 for Core)
+ """
+
+ source: str
+ target: str
+ capacity: float
+ cost: float
+
+
+class NodeMapper:
+ """Bidirectional mapping between NetGraph node names (str) and Core NodeId (int)."""
+
+ def __init__(self, node_names: list[str]):
+ """Initialize mapper from ordered list of node names.
+
+ Args:
+ node_names: Ordered list of node names; index is Core NodeId.
+ """
+ self.node_names = node_names
+ self.node_id_of = {name: idx for idx, name in enumerate(node_names)}
+
+ def to_id(self, name: str) -> int:
+ """Map node name to Core NodeId."""
+ return self.node_id_of[name]
+
+ def to_name(self, node_id: int) -> str:
+ """Map Core NodeId to node name."""
+ return self.node_names[node_id]
+
+
+class EdgeMapper:
+ """Bidirectional mapping between external edge IDs and EdgeRef (link_id + direction).
+
+ External edge ID encoding: (linkIndex << 1) | dirBit
+ - linkIndex: stable sorted index of link_id in Network.links
+ - dirBit: 0 for forward ('fwd'), 1 for reverse ('rev')
+ """
+
+ def __init__(self, link_ids: list[str]):
+ """Initialize mapper from ordered list of link IDs.
+
+ Args:
+ link_ids: Sorted list of link IDs (stable ordering for linkIndex assignment).
+ """
+ self.link_ids = link_ids
+ self.link_index_of = {lid: idx for idx, lid in enumerate(link_ids)}
+
+ def encode_ext_id(self, link_id: str, direction: str) -> int:
+ """Encode (link_id, direction) to external edge ID.
+
+ Args:
+ link_id: Scenario link identifier.
+ direction: 'fwd' or 'rev'.
+
+ Returns:
+ External edge ID as int64.
+ """
+ link_idx = self.link_index_of[link_id]
+ dir_bit = 1 if direction == "rev" else 0
+ return (link_idx << 1) | dir_bit
+
+ def decode_ext_id(self, ext_id: int) -> Optional[EdgeRef]:
+ """Decode external edge ID to EdgeRef.
+
+ Args:
+ ext_id: External edge ID from Core.
+
+ Returns:
+ EdgeRef with link_id and direction, or None if augmentation edge.
+ """
+ if ext_id == -1:
+ return None
+ link_idx = ext_id >> 1
+ dir_bit = ext_id & 1
+ link_id = self.link_ids[link_idx]
+ direction = "rev" if dir_bit else "fwd"
+ return EdgeRef(link_id=link_id, direction=direction) # type: ignore
+
+ def to_ref(
+ self, core_edge_id: int, multidigraph: netgraph_core.StrictMultiDiGraph
+ ) -> Optional[EdgeRef]:
+ """Map Core EdgeId to EdgeRef using the Core graph's ext_edge_ids.
+
+ Args:
+ core_edge_id: Core's internal EdgeId (index into edge arrays).
+ multidigraph: Core StrictMultiDiGraph instance.
+
+ Returns:
+ EdgeRef corresponding to the Core edge, or None if augmentation edge.
+ """
+ ext_edge_ids = multidigraph.ext_edge_ids_view()
+ ext_id = ext_edge_ids[core_edge_id]
+ return self.decode_ext_id(int(ext_id))
+
+ def to_name(self, ext_id: int) -> Optional[str]:
+ """Map external edge ID to link ID (name).
+
+ Args:
+ ext_id: External edge ID from Core.
+
+ Returns:
+ Link ID string, or None if it's a sentinel/augmentation edge.
+ """
+ if ext_id == -1:
+ return None
+ edge_ref = self.decode_ext_id(ext_id)
+ if edge_ref is None:
+ return None
+ return edge_ref.link_id
+
+
+@dataclass
+class GraphCache:
+ """Pre-built graph components for efficient repeated analysis.
+
+ Holds all components needed for running analysis with different exclusion
+ sets without rebuilding the graph. Use build_graph_cache() to create.
+
+ Attributes:
+ graph_handle: Core Graph handle for algorithm execution.
+ multidigraph: Core StrictMultiDiGraph with topology data.
+ edge_mapper: Mapper for link_id <-> edge_id translation.
+ node_mapper: Mapper for node_name <-> node_id translation.
+ algorithms: Core Algorithms instance for running computations.
+ disabled_node_ids: Pre-computed set of disabled node IDs.
+ disabled_link_ids: Pre-computed set of disabled link IDs.
+ link_id_to_edge_indices: Mapping from link_id to edge array indices.
+ """
+
+ graph_handle: netgraph_core.Graph
+ multidigraph: netgraph_core.StrictMultiDiGraph
+ edge_mapper: EdgeMapper
+ node_mapper: NodeMapper
+ algorithms: netgraph_core.Algorithms
+ disabled_node_ids: Set[int] = field(default_factory=set)
+ disabled_link_ids: Set[str] = field(default_factory=set)
+ link_id_to_edge_indices: Dict[str, List[int]] = field(default_factory=dict)
+
+
+def build_graph_cache(
+ network: "Network",
+ *,
+ add_reverse: bool = True,
+ augmentations: Optional[List[AugmentationEdge]] = None,
+) -> GraphCache:
+ """Build cached graph components for efficient repeated analysis.
+
+ Constructs the graph once and pre-computes mappings needed for fast
+ mask building. Use with build_node_mask() and build_edge_mask() for
+ O(|excluded|) exclusion handling instead of O(V+E).
+
+ Args:
+ network: NetGraph Network instance.
+ add_reverse: If True, add reverse edges for network links.
+ augmentations: Optional list of edges to add (for pseudo nodes, etc.).
+
+ Returns:
+ GraphCache with all pre-built components.
+
+ Example:
+ >>> cache = build_graph_cache(network)
+ >>> for excluded_nodes, excluded_links in failure_patterns:
+ ... node_mask = build_node_mask(cache, excluded_nodes)
+ ... edge_mask = build_edge_mask(cache, excluded_links)
+ ... result = cache.algorithms.max_flow(
+ ... cache.graph_handle, src, dst,
+ ... node_mask=node_mask, edge_mask=edge_mask
+ ... )
+ """
+ # Build graph without exclusions (exclusions handled via masks)
+ graph_handle, multidigraph, edge_mapper, node_mapper = build_graph(
+ network,
+ add_reverse=add_reverse,
+ augmentations=augmentations,
+ excluded_nodes=None,
+ excluded_links=None,
+ )
+
+ # Create algorithms instance
+ backend = netgraph_core.Backend.cpu()
+ algorithms = netgraph_core.Algorithms(backend)
+
+ # Pre-compute disabled node IDs
+ disabled_node_ids: Set[int] = set()
+ for node_name, node in network.nodes.items():
+ if node.disabled and node_name in node_mapper.node_id_of:
+ disabled_node_ids.add(node_mapper.node_id_of[node_name])
+
+ # Pre-compute disabled link IDs
+ disabled_link_ids: Set[str] = {
+ link_id for link_id, link in network.links.items() if link.disabled
+ }
+
+ # Pre-compute link_id -> edge indices mapping for O(|excluded|) mask building
+ ext_edge_ids = multidigraph.ext_edge_ids_view()
+ link_id_to_edge_indices: Dict[str, List[int]] = {}
+ for edge_idx in range(len(ext_edge_ids)):
+ ext_id = int(ext_edge_ids[edge_idx])
+ if ext_id == -1: # Skip augmentation edges
+ continue
+ edge_ref = edge_mapper.decode_ext_id(ext_id)
+ if edge_ref:
+ link_id_to_edge_indices.setdefault(edge_ref.link_id, []).append(edge_idx)
+
+ return GraphCache(
+ graph_handle=graph_handle,
+ multidigraph=multidigraph,
+ edge_mapper=edge_mapper,
+ node_mapper=node_mapper,
+ algorithms=algorithms,
+ disabled_node_ids=disabled_node_ids,
+ disabled_link_ids=disabled_link_ids,
+ link_id_to_edge_indices=link_id_to_edge_indices,
+ )
+
+
+def build_graph(
+ network: "Network",
+ *,
+ add_reverse: bool = True,
+ augmentations: Optional[List[AugmentationEdge]] = None,
+ excluded_nodes: Optional[Set[str]] = None,
+ excluded_links: Optional[Set[str]] = None,
+) -> tuple[
+ netgraph_core.Graph, netgraph_core.StrictMultiDiGraph, EdgeMapper, NodeMapper
+]:
+ """Build Core graph with optional augmentations and exclusions.
+
+ This is the unified graph builder for all analysis functions. It supports:
+ - Standard network topology
+ - Pseudo/virtual nodes (via augmentations)
+ - Filtered topology (via exclusions)
+
+ For repeated analysis with different exclusions, use build_graph_cache()
+ with build_node_mask()/build_edge_mask() for better performance.
+
+ Args:
+ network: NetGraph Network instance.
+ add_reverse: If True, add reverse edges for network links.
+ augmentations: Optional list of edges to add (for pseudo nodes, etc.).
+ excluded_nodes: Optional set of node names to exclude.
+ excluded_links: Optional set of link IDs to exclude.
+
+ Returns:
+ Tuple of (graph_handle, multidigraph, edge_mapper, node_mapper).
+
+ Pseudo Nodes:
+ Any node name in augmentations that doesn't exist in network.nodes
+ is automatically treated as a pseudo node and assigned a node ID.
+
+ Augmentation Edges:
+ - Added unidirectionally as specified
+ - Assigned ext_edge_id of -1 (sentinel for non-network edges)
+ - Not included in edge_mapper translation
+
+ Node ID Assignment:
+ Real nodes (sorted): IDs 0..(num_real-1)
+ Pseudo nodes (sorted): IDs num_real..(num_real+num_pseudo-1)
+ """
+ # Validate exclusions
+ if excluded_nodes:
+ invalid = excluded_nodes - set(network.nodes.keys())
+ if invalid:
+ raise ValueError(f"Excluded nodes not in network: {invalid}")
+
+ if excluded_links:
+ invalid = excluded_links - set(network.links.keys())
+ if invalid:
+ raise ValueError(f"Excluded links not in network: {invalid}")
+
+ # Step 1: Identify real nodes (after exclusions)
+ real_node_names = set(network.nodes.keys())
+ if excluded_nodes:
+ real_node_names -= excluded_nodes
+
+ # Step 2: Infer pseudo nodes from augmentation edges
+ pseudo_node_names: Set[str] = set()
+ if augmentations:
+ for aug_edge in augmentations:
+ if aug_edge.source not in real_node_names:
+ pseudo_node_names.add(aug_edge.source)
+ if aug_edge.target not in real_node_names:
+ pseudo_node_names.add(aug_edge.target)
+
+ # Step 3: Assign node IDs (real first, then pseudo)
+ all_node_names = sorted(real_node_names) + sorted(pseudo_node_names)
+ node_mapper = NodeMapper(all_node_names)
+
+ # Step 4: Build edge mapper (only for real network links)
+ link_ids = sorted(network.links.keys())
+ edge_mapper = EdgeMapper(link_ids)
+
+ # Step 5: Build edge arrays
+ src_list: List[int] = []
+ dst_list: List[int] = []
+ capacity_list: List[float] = []
+ cost_list: List[float] = []
+ ext_edge_id_list: List[int] = []
+
+ # Add real network edges (bidirectional)
+ for link_id in link_ids:
+ if excluded_links and link_id in excluded_links:
+ continue
+
+ link = network.links[link_id]
+
+ # Skip if either endpoint is excluded
+ if (
+ link.source not in node_mapper.node_id_of
+ or link.target not in node_mapper.node_id_of
+ ):
+ continue
+
+ src_id = node_mapper.to_id(link.source)
+ dst_id = node_mapper.to_id(link.target)
+
+ # Forward edge
+ src_list.append(src_id)
+ dst_list.append(dst_id)
+ capacity_list.append(link.capacity)
+ cost_list.append(link.cost)
+ ext_edge_id_list.append(edge_mapper.encode_ext_id(link_id, "fwd"))
+
+ # Reverse edge
+ if add_reverse:
+ src_list.append(dst_id)
+ dst_list.append(src_id)
+ capacity_list.append(link.capacity)
+ cost_list.append(link.cost)
+ ext_edge_id_list.append(edge_mapper.encode_ext_id(link_id, "rev"))
+
+ # Add augmentation edges (unidirectional, as specified)
+ if augmentations:
+ for aug_edge in augmentations:
+ src_id = node_mapper.to_id(aug_edge.source)
+ dst_id = node_mapper.to_id(aug_edge.target)
+ src_list.append(src_id)
+ dst_list.append(dst_id)
+ capacity_list.append(aug_edge.capacity)
+ cost_list.append(aug_edge.cost)
+ ext_edge_id_list.append(-1) # Sentinel: not a network edge
+
+ # Convert to numpy arrays
+ src_arr = np.array(src_list, dtype=np.int32)
+ dst_arr = np.array(dst_list, dtype=np.int32)
+ capacity_arr = np.array(capacity_list, dtype=np.float64)
+ cost_arr = np.array(cost_list, dtype=np.int64)
+ ext_edge_ids_arr = np.array(ext_edge_id_list, dtype=np.int64)
+
+ # Build StrictMultiDiGraph
+ multidigraph = netgraph_core.StrictMultiDiGraph.from_arrays(
+ num_nodes=len(all_node_names),
+ src=src_arr,
+ dst=dst_arr,
+ capacity=capacity_arr,
+ cost=cost_arr,
+ ext_edge_ids=ext_edge_ids_arr,
+ )
+
+ # Build Core graph handle
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+ graph_handle = algs.build_graph(multidigraph)
+
+ return graph_handle, multidigraph, edge_mapper, node_mapper
+
+
+def build_node_mask(
+ cache: GraphCache,
+ excluded_nodes: Optional[Set[str]] = None,
+) -> np.ndarray:
+ """Build a node mask array for Core algorithms.
+
+ Uses O(|excluded| + |disabled|) time complexity by only setting
+ excluded/disabled nodes to False, rather than iterating all nodes.
+
+ Core semantics: True = include, False = exclude.
+
+ Args:
+ cache: GraphCache with pre-computed disabled node IDs.
+ excluded_nodes: Optional set of node names to exclude.
+
+ Returns:
+ Boolean numpy array of shape (num_nodes,) where True means included.
+ """
+ num_nodes = len(cache.node_mapper.node_names)
+ mask = np.ones(num_nodes, dtype=bool)
+
+ # Exclude disabled nodes (pre-computed)
+ for node_id in cache.disabled_node_ids:
+ mask[node_id] = False
+
+ # Exclude requested nodes
+ if excluded_nodes:
+ for node_name in excluded_nodes:
+ if node_name in cache.node_mapper.node_id_of:
+ mask[cache.node_mapper.node_id_of[node_name]] = False
+
+ return mask
+
+
+def build_edge_mask(
+ cache: GraphCache,
+ excluded_links: Optional[Set[str]] = None,
+) -> np.ndarray:
+ """Build an edge mask array for Core algorithms.
+
+ Uses O(|excluded| + |disabled|) time complexity by using the pre-computed
+ link_id -> edge_indices mapping, rather than iterating all edges.
+
+ Core semantics: True = include, False = exclude.
+
+ Args:
+ cache: GraphCache with pre-computed edge index mapping.
+ excluded_links: Optional set of link IDs to exclude.
+
+ Returns:
+ Boolean numpy array of shape (num_edges,) where True means included.
+ """
+ num_edges = cache.multidigraph.num_edges()
+ mask = np.ones(num_edges, dtype=bool)
+
+ # Exclude disabled links (pre-computed)
+ for link_id in cache.disabled_link_ids:
+ if link_id in cache.link_id_to_edge_indices:
+ for edge_idx in cache.link_id_to_edge_indices[link_id]:
+ mask[edge_idx] = False
+
+ # Exclude requested links
+ if excluded_links:
+ for link_id in excluded_links:
+ if link_id in cache.link_id_to_edge_indices:
+ for edge_idx in cache.link_id_to_edge_indices[link_id]:
+ mask[edge_idx] = False
+
+ return mask
diff --git a/ngraph/algorithms/__init__.py b/ngraph/algorithms/__init__.py
deleted file mode 100644
index 848a1eb..0000000
--- a/ngraph/algorithms/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Network analysis algorithms and implementations."""
diff --git a/ngraph/algorithms/capacity.py b/ngraph/algorithms/capacity.py
deleted file mode 100644
index 2d607e8..0000000
--- a/ngraph/algorithms/capacity.py
+++ /dev/null
@@ -1,434 +0,0 @@
-"""Capacity calculation algorithms for network analysis.
-
-This module computes feasible flow given a predecessor DAG from a shortest-path
-routine and supports two placement strategies: proportional and equal-balanced
-in reversed orientation. Functions follow a Dinic-like blocking-flow approach
-for proportional placement.
-"""
-
-from __future__ import annotations
-
-from collections import defaultdict, deque
-from typing import Deque, Dict, List, Set, Tuple
-
-from ngraph.algorithms.base import MIN_CAP, MIN_FLOW, FlowPlacement
-from ngraph.graph.strict_multidigraph import EdgeID, NodeID, StrictMultiDiGraph
-
-
-def _init_graph_data(
- flow_graph: StrictMultiDiGraph,
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]],
- init_node: NodeID,
- flow_placement: FlowPlacement,
- capacity_attr: str,
- flow_attr: str,
-) -> Tuple[
- Dict[NodeID, Dict[NodeID, Tuple[EdgeID, ...]]],
- Dict[NodeID, int],
- Dict[NodeID, Dict[NodeID, float]],
- Dict[NodeID, Dict[NodeID, float]],
-]:
- """Build reversed residual structures used by the flow routine.
-
- Constructs reversed adjacency and residual capacity for the subset of the
- graph that can reach ``init_node`` in forward orientation.
-
- Args:
- flow_graph: The multigraph with capacity and flow attributes on edges.
- pred: Forward adjacency mapping: node -> (adjacent node -> list of EdgeIDs).
- This is a DAG typically produced by a shortest-path routine from the
- source in the forward direction.
- init_node: The node from which we perform the reversed BFS (generally the
- destination in forward flow).
- flow_placement: Strategy for distributing flow (PROPORTIONAL or EQUAL_BALANCED).
- capacity_attr: Name of the capacity attribute on edges.
- flow_attr: Name of the flow attribute on edges.
-
- Returns:
- A tuple ``(succ, levels, residual_cap, flow_dict)`` where:
- - ``succ``: Reversed adjacency mapping (u->v in reversed sense).
- - ``levels``: Node -> level mapping for Dinic; initialized to -1.
- - ``residual_cap``: Residual capacities in reversed orientation.
- - ``flow_dict``: Net flow in reversed orientation (initialized to 0).
- """
- edges = flow_graph.get_edges()
-
- # Reversed adjacency: For each forward edge u->v in pred, store v->u in succ.
- succ: Dict[NodeID, Dict[NodeID, Tuple[EdgeID, ...]]] = defaultdict(dict)
-
- # Will store BFS levels (set to -1 here, updated later in _set_levels_bfs).
- levels: Dict[NodeID, int] = {}
-
- # Residual capacities in the reversed orientation
- residual_cap: Dict[NodeID, Dict[NodeID, float]] = defaultdict(dict)
-
- # Net flow (updated during flow pushes)
- flow_dict: Dict[NodeID, Dict[NodeID, float]] = defaultdict(dict)
-
- # Standard BFS to collect only the portion of pred reachable from init_node (in reverse)
- visited: Set[NodeID] = set()
- queue: Deque[NodeID] = deque()
-
- visited.add(init_node)
- levels[init_node] = -1
- queue.append(init_node)
-
- while queue:
- node = queue.popleft()
-
- # Check each forward adjacency from node in pred, so we can form reversed edges.
- for adj_node, edge_list in pred.get(node, {}).items():
- # Build reversed adjacency once
- if node not in succ[adj_node]:
- succ[adj_node][node] = tuple(edge_list)
-
- # Calculate available capacities of the forward edges
- capacities = []
- for eid in edge_list:
- e_attrs = edges[eid][3] # Slightly faster repeated access
- cap_val = e_attrs[capacity_attr]
- flow_val = e_attrs[flow_attr]
- c = cap_val - flow_val
- if c < 0.0:
- c = 0.0
- capacities.append(c)
-
- # Set reversed and forward capacities in the residual_cap structure
- if flow_placement == FlowPlacement.PROPORTIONAL:
- # Sum capacities of parallel edges for the reversed edge
- fwd_capacity = sum(capacities)
- residual_cap[node][adj_node] = (
- fwd_capacity if fwd_capacity >= MIN_CAP else 0.0
- )
- # Reverse edge in the BFS sense starts with 0 capacity.
- # Do not overwrite if a positive capacity was already assigned
- # when processing the opposite orientation (bidirectional links).
- residual_cap[adj_node].setdefault(node, 0.0)
-
- elif flow_placement == FlowPlacement.EQUAL_BALANCED:
- # min(...) * number_of_parallel_edges
- if capacities:
- rev_cap = min(capacities) * len(capacities)
- residual_cap[adj_node][node] = (
- rev_cap if rev_cap >= MIN_CAP else 0.0
- )
- else:
- residual_cap[adj_node][node] = 0.0
- # The forward edge in reversed orientation starts at 0 capacity.
- # Do not overwrite non-zero value if already assigned earlier.
- residual_cap[node].setdefault(adj_node, 0.0)
-
- else:
- raise ValueError(f"Unsupported flow placement: {flow_placement}")
-
- # Initialize net flow for both orientations to 0
- flow_dict[node][adj_node] = 0.0
- flow_dict[adj_node][node] = 0.0
-
- # Enqueue adj_node if not visited
- if adj_node not in visited:
- visited.add(adj_node)
- levels[adj_node] = -1
- queue.append(adj_node)
-
- # Ensure every node in the entire graph has at least an empty adjacency dict in succ
- # (some nodes might be outside the reversed BFS component).
- for n in flow_graph.nodes():
- succ.setdefault(n, {})
-
- return succ, levels, residual_cap, flow_dict
-
-
-def _set_levels_bfs(
- start_node: NodeID,
- levels: Dict[NodeID, int],
- residual_cap: Dict[NodeID, Dict[NodeID, float]],
-) -> None:
- """Assign BFS levels on the reversed residual graph.
-
- Considers edges with residual capacity >= ``MIN_CAP``.
-
- Args:
- start_node: The starting node for the BFS (acts as the 'source' in reversed graph).
- levels: The dict from node -> BFS level (modified in-place).
- residual_cap: The dict of reversed residual capacities for edges.
- """
- # Reset all node levels to -1 (unvisited)
- for nd in levels:
- levels[nd] = -1
- levels[start_node] = 0
-
- queue: Deque[NodeID] = deque([start_node])
- while queue:
- u = queue.popleft()
- # Explore all neighbors of u in the reversed graph
- for v, cap_uv in residual_cap[u].items():
- # Only traverse edges with sufficient capacity and unvisited nodes
- if cap_uv >= MIN_CAP and levels[v] < 0:
- levels[v] = levels[u] + 1
- queue.append(v)
-
-
-def _push_flow_dfs(
- current: NodeID,
- sink: NodeID,
- flow_in: float,
- residual_cap: Dict[NodeID, Dict[NodeID, float]],
- flow_dict: Dict[NodeID, Dict[NodeID, float]],
- levels: Dict[NodeID, int],
-) -> float:
- """Push flow via DFS on the reversed residual graph.
-
- Only edges that respect the level structure are considered, i.e.
- ``levels[nxt] == levels[current] + 1``.
-
- Args:
- current: The current node in the DFS.
- sink: The target node in the reversed orientation.
- flow_in: The amount of flow available to push from the current node.
- residual_cap: Residual capacities of edges in the reversed graph.
- flow_dict: Tracks the net flow pushed along edges in the reversed graph.
- levels: BFS levels in the reversed graph (from `_set_levels_bfs`).
-
- Returns:
- The total amount of flow successfully pushed from `current` to `sink`.
- """
- # Base case: reached sink
- if current == sink:
- return flow_in
-
- total_pushed = 0.0
- neighbors = list(
- residual_cap[current].items()
- ) # snapshot to avoid iteration changes
-
- for nxt, capacity_uv in neighbors:
- if capacity_uv < MIN_CAP:
- continue
- if levels.get(nxt, -1) != levels[current] + 1:
- continue
-
- flow_to_push = min(flow_in, capacity_uv)
- if flow_to_push < MIN_FLOW:
- continue
-
- pushed = _push_flow_dfs(
- nxt, sink, flow_to_push, residual_cap, flow_dict, levels
- )
- if pushed >= MIN_FLOW:
- # Update residual capacities
- residual_cap[current][nxt] -= pushed
- residual_cap[nxt][current] += pushed
-
- # Update net flow (remember, we're in reversed orientation)
- flow_dict[current][nxt] += pushed
- flow_dict[nxt][current] -= pushed
-
- flow_in -= pushed
- total_pushed += pushed
-
- if flow_in < MIN_FLOW:
- break
-
- return total_pushed
-
-
-def _equal_balance_bfs(
- src_node: NodeID,
- succ: Dict[NodeID, Dict[NodeID, Tuple[EdgeID, ...]]],
- flow_dict: Dict[NodeID, Dict[NodeID, float]],
-) -> None:
- """Distribute nominal unit flow equally over reversed adjacency.
-
- Performs a BFS-like pass from ``src_node`` over ``succ`` to assign fractional
- flows, later scaled to respect capacities.
-
- Args:
- src_node: The node from which a nominal flow of 1.0 is injected (in reversed orientation).
- succ: The reversed adjacency dict, where succ[u][v] is a tuple of edges (u->v in reversed sense).
- flow_dict: The net flow dictionary to be updated with the BFS distribution.
- """
- # Count total parallel edges leaving each node
- node_split: Dict[NodeID, int] = {}
- for node, neighbors in succ.items():
- node_split[node] = sum(len(edge_tuple) for edge_tuple in neighbors.values())
-
- queue: Deque[Tuple[NodeID, float]] = deque([(src_node, 1.0)])
- visited: Set[NodeID] = set()
-
- while queue:
- node, incoming_flow = queue.popleft()
- visited.add(node)
-
- # If no edges or negligible incoming flow, skip
- split_count = node_split.get(node, 0)
- if split_count <= 0 or incoming_flow < MIN_FLOW:
- continue
-
- # Distribute the incoming_flow among outgoing edges, proportional to the count of parallel edges
- for nxt, edge_tuple in succ[node].items():
- if not edge_tuple:
- continue
- push_flow = (incoming_flow * len(edge_tuple)) / float(split_count)
- if push_flow < MIN_FLOW:
- continue
-
- flow_dict[node][nxt] += push_flow
- flow_dict[nxt][node] -= push_flow
-
- if nxt not in visited:
- # Note: we queue each node only once in this scheme.
- # If a node can be reached from multiple parents before being popped,
- # the BFS will handle the first discovered flow.
- # This behavior matches the existing tests and usage expectations.
- queue.append((nxt, push_flow))
-
-
-def calc_graph_capacity(
- flow_graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]],
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- capacity_attr: str = "capacity",
- flow_attr: str = "flow",
-) -> Tuple[float, Dict[NodeID, Dict[NodeID, float]]]:
- """Calculate feasible flow and flow fractions between two nodes.
-
- In PROPORTIONAL mode (similar to Dinic in reversed orientation):
- 1. Build the reversed residual graph from dst_node (via `_init_graph_data`).
- 2. Use BFS (in `_set_levels_bfs`) to build a level graph and DFS (`_push_flow_dfs`)
- to push blocking flows, repeating until no more flow can be pushed.
- 3. The net flow found is stored in reversed orientation. Convert final flows
- to forward orientation by negating and normalizing by the total.
-
- In EQUAL_BALANCED mode:
- 1. Build reversed adjacency from dst_node (also via `_init_graph_data`),
- ignoring capacity checks in that BFS.
- 2. Perform a BFS pass from src_node (`_equal_balance_bfs`) to distribute a
- nominal flow of 1.0 equally among parallel edges.
- 3. Determine the scaling ratio so that no edge capacity is exceeded.
- Scale the flow assignments accordingly, then normalize to the forward sense.
-
- Args:
- flow_graph: The multigraph with capacity and flow attributes.
- src_node: The source node in the forward graph.
- dst_node: The destination node in the forward graph.
- pred: Forward adjacency mapping (node -> (adjacent node -> list of EdgeIDs)),
- typically produced by `spf(..., multipath=True)`. Must be a DAG.
- flow_placement: The flow distribution strategy (PROPORTIONAL or EQUAL_BALANCED).
- capacity_attr: Name of the capacity attribute on edges.
- flow_attr: Name of the flow attribute on edges.
-
- Returns:
- tuple[float, dict[NodeID, dict[NodeID, float]]]:
- - Total feasible flow from ``src_node`` to ``dst_node``.
- - Normalized flow fractions in forward orientation (``[u][v]`` >= 0).
-
- Raises:
- ValueError: If src_node or dst_node is not in the graph, or the flow_placement
- is unsupported.
- """
- if src_node not in flow_graph or dst_node not in flow_graph:
- raise ValueError(
- f"Source node {src_node} or destination node {dst_node} not found in the graph."
- )
-
- # Handle self-loop case: when source equals destination, max flow is always 0
- # Degenerate case (s == t):
- # Flow value |f| is the net surplus at the vertex.
- # Conservation forces that surplus to zero, so the
- # only feasible (and thus maximum) flow value is 0.
- if src_node == dst_node:
- return 0.0, defaultdict(dict)
-
- # Build reversed data structures from dst_node
- succ, levels, residual_cap, flow_dict = _init_graph_data(
- flow_graph=flow_graph,
- pred=pred,
- init_node=dst_node,
- flow_placement=flow_placement,
- capacity_attr=capacity_attr,
- flow_attr=flow_attr,
- )
-
- total_flow = 0.0
-
- if flow_placement == FlowPlacement.PROPORTIONAL:
- # Repeatedly build the level graph and push blocking flows
- while True:
- _set_levels_bfs(dst_node, levels, residual_cap)
- # If src_node is unreachable (level <= 0), no more flow
- if levels.get(src_node, -1) <= 0:
- break
-
- pushed = _push_flow_dfs(
- current=dst_node,
- sink=src_node,
- flow_in=float("inf"),
- residual_cap=residual_cap,
- flow_dict=flow_dict,
- levels=levels,
- )
- if pushed < MIN_FLOW:
- break
- total_flow += pushed
-
- # If no flow found, reset flows to zero
- if total_flow < MIN_FLOW:
- total_flow = 0.0
- for u in flow_dict:
- for v in flow_dict[u]:
- flow_dict[u][v] = 0.0
- else:
- # Convert reversed flows to forward sense
- for u in flow_dict:
- for v in flow_dict[u]:
- # Negative and normalized
- flow_dict[u][v] = -(flow_dict[u][v] / total_flow)
-
- elif flow_placement == FlowPlacement.EQUAL_BALANCED:
- # 1. Distribute nominal flow of 1.0 from src_node
- _equal_balance_bfs(src_node, succ, flow_dict)
-
- # 2. Determine the scaling ratio so that no edge in reversed orientation exceeds capacity
- min_ratio = float("inf")
- for u, neighbors in succ.items():
- for v in neighbors:
- # Use safe lookup: some edges may not receive any nominal flow (e.g. due to very high branching)
- assigned_flow = flow_dict.get(u, {}).get(v, 0.0)
- if assigned_flow >= MIN_FLOW:
- cap_uv = residual_cap.get(u, {}).get(v, 0.0)
- # Only consider positive assignments for scaling
- if assigned_flow > 0.0:
- ratio = cap_uv / assigned_flow if assigned_flow != 0.0 else 0.0
- if ratio < min_ratio:
- min_ratio = ratio
-
- if min_ratio == float("inf") or min_ratio < MIN_FLOW:
- # No feasible flow
- total_flow = 0.0
- else:
- total_flow = min_ratio
- # Scale flows to fit capacities
- for u in flow_dict:
- for v in flow_dict[u]:
- val = flow_dict[u][v] * total_flow
- flow_dict[u][v] = val if abs(val) >= MIN_FLOW else 0.0
-
- # Normalize flows to forward direction
- for u in flow_dict:
- for v in flow_dict[u]:
- if abs(flow_dict[u][v]) > 0.0:
- flow_dict[u][v] /= total_flow
-
- else:
- raise ValueError(f"Unsupported flow placement: {flow_placement}")
-
- # Clamp small flows to zero
- for u in flow_dict:
- for v in flow_dict[u]:
- if abs(flow_dict[u][v]) < MIN_FLOW:
- flow_dict[u][v] = 0.0
-
- return total_flow, flow_dict
diff --git a/ngraph/algorithms/edge_select.py b/ngraph/algorithms/edge_select.py
deleted file mode 100644
index 6fc92a7..0000000
--- a/ngraph/algorithms/edge_select.py
+++ /dev/null
@@ -1,294 +0,0 @@
-"""Edge selection algorithms for routing.
-
-Provides selection routines used by SPF to choose candidate edges between
-neighbors according to cost and capacity constraints.
-"""
-
-from math import isclose
-from typing import Any, Callable, Dict, List, Optional, Set, Tuple
-
-from ngraph.algorithms.base import MIN_CAP, Cost, EdgeSelect
-from ngraph.graph.strict_multidigraph import (
- AttrDict,
- EdgeID,
- NodeID,
- StrictMultiDiGraph,
-)
-
-
-def edge_select_fabric(
- edge_select: EdgeSelect,
- select_value: Optional[Any] = None,
- edge_select_func: Optional[
- Callable[
- [
- StrictMultiDiGraph,
- NodeID,
- NodeID,
- Dict[EdgeID, AttrDict],
- Optional[Set[EdgeID]],
- Optional[Set[NodeID]],
- ],
- Tuple[Cost, List[EdgeID]],
- ]
- ] = None,
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- cost_attr: str = "cost",
- capacity_attr: str = "capacity",
- flow_attr: str = "flow",
-) -> Callable[
- [
- StrictMultiDiGraph,
- NodeID,
- NodeID,
- Dict[EdgeID, AttrDict],
- Optional[Set[EdgeID]],
- Optional[Set[NodeID]],
- ],
- Tuple[Cost, List[EdgeID]],
-]:
- """Create an edge-selection callable for SPF.
-
- Args:
- edge_select: An EdgeSelect enum specifying the selection strategy.
- select_value: An optional numeric threshold or scaling factor for capacity checks.
- edge_select_func: A user-supplied function if edge_select=USER_DEFINED.
- excluded_edges: A set of edges to ignore entirely.
- excluded_nodes: A set of nodes to skip (if the destination node is in this set).
- cost_attr: The edge attribute name representing cost.
- capacity_attr: The edge attribute name representing capacity.
- flow_attr: The edge attribute name representing current flow.
-
- Returns:
- Callable: Function with signature
- ``(graph, src, dst, edges_dict, excluded_edges, excluded_nodes) ->
- (selected_cost, [edge_ids])``.
- """
-
- def get_all_min_cost_edges(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- edges_map: Dict[EdgeID, AttrDict],
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- ) -> Tuple[Cost, List[EdgeID]]:
- """Return all edges whose cost is the minimum among available edges.
- If the destination node is excluded, returns (inf, []).
- """
- if excluded_nodes and dst_node in excluded_nodes:
- return float("inf"), []
-
- edge_list: List[EdgeID] = []
- min_cost = float("inf")
-
- for edge_id, attr in edges_map.items():
- if excluded_edges and edge_id in excluded_edges:
- continue
-
- cost_val = attr[cost_attr]
- if cost_val < min_cost:
- min_cost = cost_val
- edge_list = [edge_id]
- elif isclose(cost_val, min_cost, abs_tol=1e-12):
- edge_list.append(edge_id)
-
- return min_cost, edge_list
-
- def get_single_min_cost_edge(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- edges_map: Dict[EdgeID, AttrDict],
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- ) -> Tuple[Cost, List[EdgeID]]:
- """Return exactly one edge: the single lowest-cost edge.
- If the destination node is excluded, returns (inf, []).
- """
- if excluded_nodes and dst_node in excluded_nodes:
- return float("inf"), []
-
- chosen_edge: List[EdgeID] = []
- min_cost = float("inf")
-
- for edge_id, attr in edges_map.items():
- if excluded_edges and edge_id in excluded_edges:
- continue
-
- cost_val = attr[cost_attr]
- if cost_val < min_cost:
- min_cost = cost_val
- chosen_edge = [edge_id]
-
- return min_cost, chosen_edge
-
- def get_all_edges_with_cap_remaining(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- edges_map: Dict[EdgeID, AttrDict],
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- ) -> Tuple[Cost, List[EdgeID]]:
-        """Return all edges that have remaining capacity >= min_cap,
-        ignoring cost differences (the minimal cost found is still returned).
- """
- if excluded_nodes and dst_node in excluded_nodes:
- return float("inf"), []
-
- edge_list: List[EdgeID] = []
- min_cost = float("inf")
- min_cap = select_value if select_value is not None else MIN_CAP
-
- for edge_id, attr in edges_map.items():
- if excluded_edges and edge_id in excluded_edges:
- continue
-
- capacity_val = attr[capacity_attr]
- flow_val = attr[flow_attr]
- remaining_cap = capacity_val - flow_val
-
- if remaining_cap >= min_cap:
- cost_val = attr[cost_attr]
- if cost_val < min_cost:
- min_cost = cost_val
- edge_list.append(edge_id)
-
- return min_cost, edge_list
-
- def get_all_min_cost_edges_with_cap_remaining(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- edges_map: Dict[EdgeID, AttrDict],
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- ) -> Tuple[Cost, List[EdgeID]]:
- """Return all edges that have remaining capacity >= min_cap
- among those with the minimum cost.
- """
- if excluded_nodes and dst_node in excluded_nodes:
- return float("inf"), []
-
- edge_list: List[EdgeID] = []
- min_cost = float("inf")
- min_cap = select_value if select_value is not None else MIN_CAP
-
- for edge_id, attr in edges_map.items():
- if excluded_edges and edge_id in excluded_edges:
- continue
-
- capacity_val = attr[capacity_attr]
- flow_val = attr[flow_attr]
- remaining_cap = capacity_val - flow_val
-
- if remaining_cap >= min_cap:
- cost_val = attr[cost_attr]
- if cost_val < min_cost:
- min_cost = cost_val
- edge_list = [edge_id]
- elif isclose(cost_val, min_cost, abs_tol=1e-12):
- edge_list.append(edge_id)
-
- return min_cost, edge_list
-
- def get_single_min_cost_edge_with_cap_remaining(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- edges_map: Dict[EdgeID, AttrDict],
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- ) -> Tuple[Cost, List[EdgeID]]:
- """Return exactly one edge with the minimal cost among those
- that have remaining capacity >= min_cap.
- """
- if excluded_nodes and dst_node in excluded_nodes:
- return float("inf"), []
-
- chosen_edge: List[EdgeID] = []
- min_cost = float("inf")
- min_cap = select_value if select_value is not None else MIN_CAP
-
- for edge_id, attr in edges_map.items():
- if excluded_edges and edge_id in excluded_edges:
- continue
-
- capacity_val = attr[capacity_attr]
- flow_val = attr[flow_attr]
- remaining_cap = capacity_val - flow_val
-
- if remaining_cap >= min_cap:
- cost_val = attr[cost_attr]
- if cost_val < min_cost:
- min_cost = cost_val
- chosen_edge = [edge_id]
-
- return min_cost, chosen_edge
-
- def get_single_min_cost_edge_with_cap_remaining_load_factored(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- edges_map: Dict[EdgeID, AttrDict],
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- ) -> Tuple[Cost, List[EdgeID]]:
- """Return exactly one edge, factoring both cost and load level
- into a combined cost: cost_factor = (cost * 100) + round((flow/capacity)*10).
- Only edges with remaining capacity >= min_cap are considered.
- """
- if excluded_nodes and dst_node in excluded_nodes:
- return float("inf"), []
-
- chosen_edge: List[EdgeID] = []
- min_cost_factor = float("inf")
- min_cap = select_value if select_value is not None else MIN_CAP
-
- for edge_id, attr in edges_map.items():
- if excluded_edges and edge_id in excluded_edges:
- continue
-
- capacity_val = attr[capacity_attr]
- flow_val = attr[flow_attr]
- remaining_cap = capacity_val - flow_val
-
- if remaining_cap >= min_cap:
- base_cost = attr[cost_attr] * 100
- # Avoid division by zero if capacity_val == 0
- load_factor = (
- round((flow_val / capacity_val) * 10) if capacity_val else 999999
- )
- cost_val = base_cost + load_factor
-
- if cost_val < min_cost_factor:
- min_cost_factor = cost_val
- chosen_edge = [edge_id]
-
- return float(min_cost_factor), chosen_edge
-
- # --------------------------------------------------------------------------
- # Map the EdgeSelect enum to the appropriate inner function.
- # --------------------------------------------------------------------------
- if edge_select == EdgeSelect.ALL_MIN_COST:
- return get_all_min_cost_edges
- elif edge_select == EdgeSelect.SINGLE_MIN_COST:
- return get_single_min_cost_edge
- elif edge_select == EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING:
- return get_all_min_cost_edges_with_cap_remaining
- elif edge_select == EdgeSelect.ALL_ANY_COST_WITH_CAP_REMAINING:
- return get_all_edges_with_cap_remaining
- elif edge_select == EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING:
- return get_single_min_cost_edge_with_cap_remaining
- elif edge_select == EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING_LOAD_FACTORED:
- return get_single_min_cost_edge_with_cap_remaining_load_factored
- elif edge_select == EdgeSelect.USER_DEFINED:
- if edge_select_func is None:
- raise ValueError(
- "edge_select=USER_DEFINED requires 'edge_select_func' to be provided."
- )
- return edge_select_func
- else:
- raise ValueError(f"Unknown edge_select value {edge_select}")
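
A small usage sketch of the factory above. The returned strategies only perform attribute lookups on the adjacency map, so a hand-built dict with hypothetical edge IDs e1..e3 stands in for the graph's real edge map, and the unused graph argument is passed as None.

from ngraph.algorithms.base import EdgeSelect
from ngraph.algorithms.edge_select import edge_select_fabric

select = edge_select_fabric(EdgeSelect.ALL_MIN_COST)

edges_map = {
    "e1": {"cost": 1, "capacity": 10.0, "flow": 0.0},
    "e2": {"cost": 1, "capacity": 5.0, "flow": 0.0},
    "e3": {"cost": 2, "capacity": 20.0, "flow": 0.0},
}
cost, edge_ids = select(None, "A", "B", edges_map, None, None)
# cost == 1, edge_ids == ["e1", "e2"]: all minimum-cost parallel edges.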
diff --git a/ngraph/algorithms/flow_init.py b/ngraph/algorithms/flow_init.py
deleted file mode 100644
index 27a5db4..0000000
--- a/ngraph/algorithms/flow_init.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""Flow graph initialization utilities.
-
-Ensures nodes and edges carry aggregate (``flow_attr``) and per-flow
-(``flows_attr``) attributes, optionally resetting existing values.
-"""
-
-from __future__ import annotations
-
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-def init_flow_graph(
- flow_graph: StrictMultiDiGraph,
- flow_attr: str = "flow",
- flows_attr: str = "flows",
- reset_flow_graph: bool = True,
-) -> StrictMultiDiGraph:
- """Ensure that nodes and edges expose flow-related attributes.
-
- For each node and edge:
-
- - The attribute named `flow_attr` (default: "flow") is set to 0.
- - The attribute named `flows_attr` (default: "flows") is set to an empty dict.
-
- If `reset_flow_graph` is True, any existing flow values in these attributes
- are overwritten; otherwise they are only created if missing.
-
- Args:
- flow_graph: The StrictMultiDiGraph whose nodes and edges should be
- prepared for flow assignment.
- flow_attr: The attribute name to track a numeric flow value per node/edge.
- flows_attr: The attribute name to track multiple flow identifiers (and flows).
- reset_flow_graph: If True, reset existing flows (set to 0). If False, do not overwrite.
-
- Returns:
- StrictMultiDiGraph: The same graph instance after attribute checks.
- """
- # Initialize or reset edge attributes
- for edge_data in flow_graph.get_edges().values():
- attr_dict = edge_data[3] # The fourth element is the edge attribute dict
- attr_dict.setdefault(flow_attr, 0)
- attr_dict.setdefault(flows_attr, {})
- if reset_flow_graph:
- attr_dict[flow_attr] = 0
- attr_dict[flows_attr] = {}
-
- # Initialize or reset node attributes
- for node_data in flow_graph.get_nodes().values():
- node_data.setdefault(flow_attr, 0)
- node_data.setdefault(flows_attr, {})
- if reset_flow_graph:
- node_data[flow_attr] = 0
- node_data[flows_attr] = {}
-
- return flow_graph
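
A short sketch of the initialization contract, assuming edges may be created without flow attributes (adding them is exactly what init_flow_graph is for):

from ngraph.algorithms.flow_init import init_flow_graph
from ngraph.graph.strict_multidigraph import StrictMultiDiGraph

g = StrictMultiDiGraph()
g.add_node("A")
g.add_node("B")
g.add_edge("A", "B", capacity=1.0, cost=1)

g = init_flow_graph(g)  # every node/edge now has flow=0 and flows={}
g = init_flow_graph(g, reset_flow_graph=False)  # preserves existing values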
diff --git a/ngraph/algorithms/max_flow.py b/ngraph/algorithms/max_flow.py
deleted file mode 100644
index 4f3d8ba..0000000
--- a/ngraph/algorithms/max_flow.py
+++ /dev/null
@@ -1,561 +0,0 @@
-"""Maximum-flow computation via iterative shortest-path augmentation.
-
-Implements a practical Edmonds-Karp-like procedure using SPF with capacity
-constraints and configurable flow-splitting across equal-cost parallel edges.
-Provides helpers for saturated-edge detection and simple sensitivity analysis.
-"""
-
-from typing import Dict, Literal, Union, overload
-
-from ngraph.algorithms.base import EdgeSelect, FlowPlacement
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.algorithms.placement import place_flow_on_graph
-from ngraph.algorithms.spf import Cost, spf
-from ngraph.algorithms.types import FlowSummary
-from ngraph.graph.strict_multidigraph import NodeID, StrictMultiDiGraph
-
-
-# Use @overload to provide precise static type safety for conditional return types.
-# The function returns different types based on boolean flags: float, tuple[float, FlowSummary],
-# tuple[float, StrictMultiDiGraph], or tuple[float, FlowSummary, StrictMultiDiGraph].
-@overload
-def calc_max_flow(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- *,
- return_summary: Literal[False] = False,
- return_graph: Literal[False] = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- shortest_path: bool = False,
- reset_flow_graph: bool = False,
- capacity_attr: str = "capacity",
- flow_attr: str = "flow",
- flows_attr: str = "flows",
- copy_graph: bool = True,
- tolerance: float = 1e-10,
-) -> float: ...
-
-
-@overload
-def calc_max_flow(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- *,
- return_summary: Literal[True],
- return_graph: Literal[False] = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- shortest_path: bool = False,
- reset_flow_graph: bool = False,
- capacity_attr: str = "capacity",
- flow_attr: str = "flow",
- flows_attr: str = "flows",
- copy_graph: bool = True,
- tolerance: float = 1e-10,
-) -> tuple[float, FlowSummary]: ...
-
-
-@overload
-def calc_max_flow(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- *,
- return_summary: Literal[False] = False,
- return_graph: Literal[True],
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- shortest_path: bool = False,
- reset_flow_graph: bool = False,
- capacity_attr: str = "capacity",
- flow_attr: str = "flow",
- flows_attr: str = "flows",
- copy_graph: bool = True,
- tolerance: float = 1e-10,
-) -> tuple[float, StrictMultiDiGraph]: ...
-
-
-@overload
-def calc_max_flow(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- *,
- return_summary: Literal[True],
- return_graph: Literal[True],
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- shortest_path: bool = False,
- reset_flow_graph: bool = False,
- capacity_attr: str = "capacity",
- flow_attr: str = "flow",
- flows_attr: str = "flows",
- copy_graph: bool = True,
- tolerance: float = 1e-10,
-) -> tuple[float, FlowSummary, StrictMultiDiGraph]: ...
-
-
-def calc_max_flow(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- *,
- return_summary: bool = False,
- return_graph: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- shortest_path: bool = False,
- reset_flow_graph: bool = False,
- capacity_attr: str = "capacity",
- flow_attr: str = "flow",
- flows_attr: str = "flows",
- copy_graph: bool = True,
- tolerance: float = 1e-10,
-) -> Union[float, tuple]:
- """Compute max flow between two nodes in a directed multi-graph.
-
- Uses iterative shortest-path augmentation with capacity-aware SPF and
- configurable flow placement.
-
- By default, this function:
- 1. Creates or re-initializes a flow-aware copy of the graph (via ``init_flow_graph``).
- 2. Repeatedly finds a path from ``src_node`` to ``dst_node`` using ``spf`` with
- capacity constraints (``EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING``).
- 3. Places flow along that path (via ``place_flow_on_graph``) until no augmenting path
- remains or the capacities are exhausted.
-
- If ``shortest_path=True``, the function performs only one iteration (single augmentation)
- and returns the flow placed along that single path (not the true max flow).
-
- Args:
- graph (StrictMultiDiGraph):
- The original graph containing capacity/flow attributes on each edge.
- src_node (NodeID):
- The source node for flow.
- dst_node (NodeID):
- The destination node for flow.
- return_summary (bool):
- If True, return a FlowSummary with detailed flow analytics.
- Defaults to False.
- return_graph (bool):
- If True, return the mutated flow graph along with other results.
- Defaults to False.
- flow_placement (FlowPlacement):
- Determines how flow is split among parallel edges of equal cost.
- Defaults to ``FlowPlacement.PROPORTIONAL``.
- shortest_path (bool):
- If True, place flow only once along the first shortest path found and return
- immediately, rather than iterating for the true max flow.
- reset_flow_graph (bool):
- If True, reset any existing flow data (e.g., ``flow_attr``, ``flows_attr``).
- Defaults to False.
- capacity_attr (str):
- The name of the capacity attribute on edges. Defaults to "capacity".
- flow_attr (str):
- The name of the aggregated flow attribute on edges. Defaults to "flow".
- flows_attr (str):
- The name of the per-flow dictionary attribute on edges. Defaults to "flows".
- copy_graph (bool):
- If True, work on a copy of the original graph so it remains unmodified.
- Defaults to True.
- tolerance (float):
- Tolerance for floating-point comparisons when determining saturated edges
- and residual capacity. Defaults to 1e-10.
-
-    Returns:
-        Union[float, tuple]:
-            - If neither flag is set: ``float`` total flow.
-            - If return_summary only: ``tuple[float, FlowSummary]``.
-            - If return_graph only: ``tuple[float, StrictMultiDiGraph]``.
-            - If both flags: ``tuple[float, FlowSummary, StrictMultiDiGraph]``.
-
-    Notes:
-        - The return value is a tuple whenever return_summary or return_graph is set.
-
- Examples:
- >>> g = StrictMultiDiGraph()
- >>> g.add_node('A')
- >>> g.add_node('B')
- >>> g.add_node('C')
- >>> g.add_edge('A', 'B', capacity=10.0, flow=0.0, flows={}, cost=1)
- >>> g.add_edge('B', 'C', capacity=5.0, flow=0.0, flows={}, cost=1)
- >>>
- >>> # Basic usage (scalar return)
- >>> max_flow_value = calc_max_flow(g, 'A', 'C')
- >>> print(max_flow_value)
- 5.0
- >>>
- >>> # With flow summary analytics
- >>> flow, summary = calc_max_flow(g, 'A', 'C', return_summary=True)
- >>> print(f"Min-cut edges: {summary.min_cut}")
- >>>
- >>> # With both summary and mutated graph
- >>> flow, summary, flow_graph = calc_max_flow(
- ... g, 'A', 'C', return_summary=True, return_graph=True
- ... )
- >>> # flow_graph contains the flow assignments
- """
-    # Degenerate case (s == t): the flow value |f| is the net surplus at the
-    # vertex, and conservation forces that surplus to zero, so the only
-    # feasible (and thus maximum) flow value is 0.
- if src_node == dst_node:
- if return_summary or return_graph:
- # For consistency, we need to create a minimal flow graph for summary/graph returns
- flow_graph = init_flow_graph(
- graph.copy() if copy_graph else graph,
- flow_attr,
- flows_attr,
- reset_flow_graph,
- )
- return _build_return_value(
- 0.0,
- flow_graph,
- src_node,
- return_summary,
- return_graph,
- capacity_attr,
- flow_attr,
- tolerance,
- {}, # Empty cost distribution for self-loop case
- )
- else:
- return 0.0
-
- # Initialize a flow-aware graph (copy or in-place).
- flow_graph = init_flow_graph(
- graph.copy() if copy_graph else graph,
- flow_attr,
- flows_attr,
- reset_flow_graph,
- )
-
- # Initialize cost distribution tracking
- cost_distribution: Dict[Cost, float] = {}
-
- # First path-finding iteration.
- costs, pred = spf(
- flow_graph,
- src_node,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- dst_node=dst_node,
- )
- flow_meta = place_flow_on_graph(
- flow_graph,
- src_node,
- dst_node,
- pred,
- flow_placement=flow_placement,
- capacity_attr=capacity_attr,
- flow_attr=flow_attr,
- flows_attr=flows_attr,
- )
- max_flow = flow_meta.placed_flow
-
- # Track cost distribution for first iteration
- if dst_node in costs and flow_meta.placed_flow > 0:
- path_cost = costs[dst_node]
- cost_distribution[path_cost] = (
- cost_distribution.get(path_cost, 0.0) + flow_meta.placed_flow
- )
-
- # If only one path (single augmentation) is desired, return early.
- if shortest_path:
- return _build_return_value(
- max_flow,
- flow_graph,
- src_node,
- return_summary,
- return_graph,
- capacity_attr,
- flow_attr,
- tolerance,
- cost_distribution,
- )
-
- # Otherwise, repeatedly find augmenting paths until no new flow can be placed.
- while True:
- costs, pred = spf(
- flow_graph,
- src_node,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- dst_node=dst_node,
- )
- if dst_node not in pred:
- # No path found; we've reached max flow.
- break
-
- flow_meta = place_flow_on_graph(
- flow_graph,
- src_node,
- dst_node,
- pred,
- flow_placement=flow_placement,
- capacity_attr=capacity_attr,
- flow_attr=flow_attr,
- flows_attr=flows_attr,
- )
- if flow_meta.placed_flow <= tolerance:
- # No significant additional flow could be placed; at capacity.
- break
-
- max_flow += flow_meta.placed_flow
-
- # Track cost distribution for this iteration
- if dst_node in costs and flow_meta.placed_flow > 0:
- path_cost = costs[dst_node]
- cost_distribution[path_cost] = (
- cost_distribution.get(path_cost, 0.0) + flow_meta.placed_flow
- )
-
- return _build_return_value(
- max_flow,
- flow_graph,
- src_node,
- return_summary,
- return_graph,
- capacity_attr,
- flow_attr,
- tolerance,
- cost_distribution,
- )
-
-
-def _build_return_value(
- max_flow: float,
- flow_graph: StrictMultiDiGraph,
- src_node: NodeID,
- return_summary: bool,
- return_graph: bool,
- capacity_attr: str,
- flow_attr: str,
- tolerance: float,
- cost_distribution: Dict[Cost, float],
-) -> Union[float, tuple]:
- """Build the appropriate return value based on the requested flags."""
- if not (return_summary or return_graph):
- return max_flow
-
- summary = None
- if return_summary:
- summary = _build_flow_summary(
- max_flow,
- flow_graph,
- src_node,
- capacity_attr,
- flow_attr,
- tolerance,
- cost_distribution,
- )
-
- ret: list = [max_flow]
- if return_summary:
- ret.append(summary)
- if return_graph:
- ret.append(flow_graph)
-
- return tuple(ret) if len(ret) > 1 else ret[0]
-
-
-def _build_flow_summary(
- total_flow: float,
- flow_graph: StrictMultiDiGraph,
- src_node: NodeID,
- capacity_attr: str,
- flow_attr: str,
- tolerance: float,
- cost_distribution: Dict[Cost, float],
-) -> FlowSummary:
- """Construct a ``FlowSummary`` from the flow-graph state."""
- edge_flow = {}
- residual_cap = {}
-
- # Extract flow and residual capacity for each edge
- for u, v, k, d in flow_graph.edges(data=True, keys=True):
- edge = (u, v, k)
- f = d.get(flow_attr, 0.0)
- edge_flow[edge] = f
- residual_cap[edge] = d[capacity_attr] - f
-
- # BFS in residual graph to find reachable nodes from source.
- # Residual graph has:
- # - Forward residual capacity: capacity - flow
- # - Reverse residual capacity: flow
- # We must traverse both to correctly identify the s-side of the min-cut.
- reachable = set()
- stack = [src_node]
- while stack:
- n = stack.pop()
- if n in reachable:
- continue
- reachable.add(n)
-
- # Forward residual arcs: u -> v when residual > tolerance
- for _, nbr, _, d in flow_graph.out_edges(n, data=True, keys=True):
- if (
- d[capacity_attr] - d.get(flow_attr, 0.0) > tolerance
- and nbr not in reachable
- ):
- stack.append(nbr)
-
- # Reverse residual arcs: v -> u when flow > tolerance on edge (u->v)
- for pred, _, _, d in flow_graph.in_edges(n, data=True, keys=True):
- if d.get(flow_attr, 0.0) > tolerance and pred not in reachable:
- stack.append(pred)
-
- # Find min-cut edges (saturated edges crossing the cut)
- min_cut = [
- (u, v, k)
- for u, v, k, d in flow_graph.edges(data=True, keys=True)
- if u in reachable
- and v not in reachable
- and d[capacity_attr] - d.get(flow_attr, 0.0) <= tolerance
- ]
-
- return FlowSummary(
- total_flow=total_flow,
- edge_flow=edge_flow,
- residual_cap=residual_cap,
- reachable=reachable,
- min_cut=min_cut,
- cost_distribution=cost_distribution,
- )
-
-
-def saturated_edges(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- *,
- capacity_attr: str = "capacity",
- flow_attr: str = "flow",
- tolerance: float = 1e-10,
- **kwargs,
-) -> list[tuple]:
- """Identify saturated edges in the max-flow solution.
-
- Args:
- graph: The graph to analyze
- src_node: Source node
- dst_node: Destination node
- capacity_attr: Name of capacity attribute
- flow_attr: Name of flow attribute
- tolerance: Tolerance for considering an edge saturated
- **kwargs: Additional arguments passed to calc_max_flow
-
- Returns:
- list[tuple]: Edges ``(u, v, k)`` with residual capacity <= ``tolerance``.
- """
- result = calc_max_flow(
- graph,
- src_node,
- dst_node,
- return_summary=True,
- capacity_attr=capacity_attr,
- flow_attr=flow_attr,
- **kwargs,
- )
- # Ensure we have a tuple to unpack
- if isinstance(result, tuple) and len(result) >= 2:
- # Handle tuple unpacking - could be 2 or 3 elements
- if len(result) == 2:
- _, summary = result
- else:
- _, summary, _ = result
- else:
- raise ValueError(
- "Expected tuple return from calc_max_flow with return_summary=True"
- )
-
- return [
- edge for edge, residual in summary.residual_cap.items() if residual <= tolerance
- ]
-
-
-def run_sensitivity(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- *,
- capacity_attr: str = "capacity",
- flow_attr: str = "flow",
- change_amount: float = 1.0,
- **kwargs,
-) -> dict[tuple, float]:
- """Simple sensitivity analysis for per-edge capacity changes.
-
- Tests changing each saturated edge capacity by change_amount and measures
- the resulting change in total flow. Positive values increase capacity,
- negative values decrease capacity (with validation to prevent negative capacities).
-
- Args:
- graph: The graph to analyze
- src_node: Source node
- dst_node: Destination node
- capacity_attr: Name of capacity attribute
- flow_attr: Name of flow attribute
- change_amount: Amount to change capacity for testing (positive=increase, negative=decrease)
- **kwargs: Additional arguments passed to calc_max_flow
-
- Returns:
- dict[tuple, float]: Flow delta per modified edge.
- """
- # Get baseline flow and identify saturated edges - ensure scalar return
- baseline_flow = calc_max_flow(
- graph,
- src_node,
- dst_node,
- return_summary=False,
- return_graph=False,
- capacity_attr=capacity_attr,
- flow_attr=flow_attr,
- **kwargs,
- )
- assert isinstance(baseline_flow, (int, float))
-
- saturated = saturated_edges(
- graph,
- src_node,
- dst_node,
- capacity_attr=capacity_attr,
- flow_attr=flow_attr,
- **kwargs,
- )
-
- sensitivity = {}
-
- for edge in saturated:
- u, v, k = edge
-
- # Create modified graph with changed edge capacity
- test_graph = graph.copy()
- edge_data = test_graph.get_edge_data(u, v, k)
- if edge_data is not None:
- # Create a mutable copy of the edge data
- edge_data = dict(edge_data)
- original_capacity = edge_data[capacity_attr]
- new_capacity = original_capacity + change_amount
-
- # If the change would result in negative capacity, set to 0
- if new_capacity < 0:
- new_capacity = 0
-
- edge_data[capacity_attr] = new_capacity
- test_graph.remove_edge(u, v, k)
- test_graph.add_edge(u, v, k, **edge_data)
-
- # Calculate new max flow - ensure scalar return
- new_flow = calc_max_flow(
- test_graph,
- src_node,
- dst_node,
- return_summary=False,
- return_graph=False,
- capacity_attr=capacity_attr,
- flow_attr=flow_attr,
- **kwargs,
- )
- assert isinstance(new_flow, (int, float))
-
- # Record flow change
- sensitivity[edge] = new_flow - baseline_flow
-
- return sensitivity
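
The docstring example above covers the scalar case; the sketch below exercises the summary and sensitivity helpers on the same illustrative A-B-C chain.

from ngraph.algorithms.max_flow import calc_max_flow, run_sensitivity, saturated_edges
from ngraph.graph.strict_multidigraph import StrictMultiDiGraph

g = StrictMultiDiGraph()
for n in ("A", "B", "C"):
    g.add_node(n)
g.add_edge("A", "B", capacity=10.0, flow=0.0, flows={}, cost=1)
g.add_edge("B", "C", capacity=5.0, flow=0.0, flows={}, cost=1)

flow, summary = calc_max_flow(g, "A", "C", return_summary=True)
# flow == 5.0; summary.min_cut holds the saturated B->C edge and
# summary.cost_distribution maps path cost 2 -> 5.0 units of flow.

print(saturated_edges(g, "A", "C"))  # only the B->C edge (residual 0)
print(run_sensitivity(g, "A", "C", change_amount=1.0))
# {("B", "C", <key>): 1.0} - raising the bottleneck by 1 adds 1 unit of flow.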
diff --git a/ngraph/algorithms/paths.py b/ngraph/algorithms/paths.py
deleted file mode 100644
index f179c2b..0000000
--- a/ngraph/algorithms/paths.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""Path manipulation utilities.
-
-Provides helpers to enumerate realized paths from a predecessor map produced by
-SPF/KSP, with optional expansion of parallel edges into distinct paths.
-"""
-
-from __future__ import annotations
-
-from itertools import product
-from typing import Any, Dict, Iterator, List
-
-from ngraph.algorithms.base import PathTuple
-from ngraph.graph.strict_multidigraph import EdgeID, NodeID
-
-
-def resolve_to_paths(
- src_node: NodeID,
- dst_node: NodeID,
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]],
- split_parallel_edges: bool = False,
-) -> Iterator[PathTuple]:
- """Enumerate all paths from a predecessor map.
-
- Args:
- src_node: Source node ID.
- dst_node: Destination node ID.
- pred: Predecessor map from SPF or KSP.
- split_parallel_edges: If True, expand parallel edges into distinct paths.
-
- Yields:
- PathTuple: Sequence of ``(node_id, (edge_ids,))`` pairs from source to dest.
- """
- # If dst_node not in pred, no paths exist
- if dst_node not in pred:
- return
-
- seen = {dst_node}
- # Each stack entry: [(current_node, tuple_of_edgeIDs), predecessor_index]
- stack: List[List[Any]] = [[(dst_node, ()), 0]]
- top = 0
-
- while top >= 0:
- node_edges, nbr_idx = stack[top]
- current_node, _ = node_edges
-
- if current_node == src_node:
- # Rebuild the path by slicing stack up to top, then reversing
- full_path_reversed = [frame[0] for frame in stack[: top + 1]]
- path_tuple = tuple(reversed(full_path_reversed))
-
- if not split_parallel_edges:
- yield path_tuple
- else:
- # Expand parallel edges for each segment except the final destination
- ranges = [range(len(seg[1])) for seg in path_tuple[:-1]]
- for combo in product(*ranges):
- expanded = []
- for i, seg in enumerate(path_tuple):
- if i < len(combo):
- # pick a single edge from seg[1]
- chosen_edge = (seg[1][combo[i]],)
- expanded.append((seg[0], chosen_edge))
- else:
- # last node has an empty edges tuple
- expanded.append((seg[0], ()))
- yield tuple(expanded)
-
- # Try next predecessor of current_node
- current_pred_map = pred[current_node]
- keys = list(current_pred_map.keys())
- if nbr_idx < len(keys):
- stack[top][1] = nbr_idx + 1
- next_pred = keys[nbr_idx]
- edge_list = current_pred_map[next_pred]
-
- if next_pred in seen:
- # cycle detected, skip
- continue
- seen.add(next_pred)
-
- top += 1
- next_node_edges = (next_pred, tuple(edge_list))
- if top == len(stack):
- stack.append([next_node_edges, 0])
- else:
- stack[top] = [next_node_edges, 0]
- else:
- # backtrack
- seen.discard(current_node)
- top -= 1
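
A worked example of the enumeration, using a hand-built predecessor map (hypothetical edge IDs) in the same {node: {predecessor: [edge_ids]}} shape SPF produces:

from ngraph.algorithms.paths import resolve_to_paths

pred = {
    "A": {},
    "B": {"A": ["e1", "e2"]},  # two parallel A->B edges
    "C": {"B": ["e3"]},
}

print(list(resolve_to_paths("A", "C", pred)))
# [(('A', ('e1', 'e2')), ('B', ('e3',)), ('C', ()))]

print(list(resolve_to_paths("A", "C", pred, split_parallel_edges=True)))
# [(('A', ('e1',)), ('B', ('e3',)), ('C', ())),
#  (('A', ('e2',)), ('B', ('e3',)), ('C', ()))]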
diff --git a/ngraph/algorithms/placement.py b/ngraph/algorithms/placement.py
deleted file mode 100644
index ed05ce6..0000000
--- a/ngraph/algorithms/placement.py
+++ /dev/null
@@ -1,176 +0,0 @@
-"""Flow placement for routing over equal-cost predecessor DAGs.
-
-Places feasible flow on a graph given predecessor relations and a placement
-strategy, updating aggregate and per-flow attributes.
-"""
-
-from __future__ import annotations
-
-from dataclasses import dataclass, field
-from typing import Dict, Hashable, List, Optional, Set
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.algorithms.capacity import calc_graph_capacity
-from ngraph.graph.strict_multidigraph import EdgeID, NodeID, StrictMultiDiGraph
-
-
-@dataclass
-class FlowPlacementMeta:
- """Metadata describing how flow was placed on the graph.
-
- Attributes:
- placed_flow: The amount of flow actually placed.
- remaining_flow: The portion of flow that could not be placed due to capacity limits.
- nodes: Set of node IDs that participated in the flow.
- edges: Set of edge IDs that carried some portion of this flow.
- """
-
- placed_flow: float
- remaining_flow: float
- nodes: Set[NodeID] = field(default_factory=set)
- edges: Set[EdgeID] = field(default_factory=set)
-
-
-def place_flow_on_graph(
- flow_graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]],
- flow: float = float("inf"),
- flow_index: Optional[Hashable] = None,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- capacity_attr: str = "capacity",
- flow_attr: str = "flow",
- flows_attr: str = "flows",
-) -> FlowPlacementMeta:
- """Place flow from ``src_node`` to ``dst_node`` on ``flow_graph``.
-    Uses a precomputed `flow_dict` from `calc_graph_capacity` to determine how
-    much flow can be placed, then updates the graph's edges and nodes accordingly.
- much flow can be placed. Updates the graph's edges and nodes with the placed flow.
-
- Args:
- flow_graph: The graph on which flow will be placed.
- src_node: The source node.
- dst_node: The destination node.
- pred: A dictionary of node->(adj_node->list_of_edge_IDs) giving path adjacency.
- flow: Requested flow amount; can be infinite.
- flow_index: Identifier for this flow (used to track multiple flows).
- flow_placement: Strategy for distributing flow among parallel equal cost paths.
- capacity_attr: Attribute name on edges for capacity.
- flow_attr: Attribute name on edges/nodes for aggregated flow.
- flows_attr: Attribute name on edges/nodes for per-flow tracking.
-
- Returns:
- FlowPlacementMeta: Amount placed, remaining amount, and touched nodes/edges.
- """
-    # Degenerate case (s == t): the flow value |f| is the net surplus at the
-    # vertex, and conservation forces that surplus to zero, so the only
-    # feasible (and thus maximum) flow value is 0.
- if src_node == dst_node:
- return FlowPlacementMeta(0.0, flow)
-
- # 1) Determine the maximum feasible flow via calc_graph_capacity.
- rem_cap, flow_dict = calc_graph_capacity(
- flow_graph, src_node, dst_node, pred, flow_placement, capacity_attr, flow_attr
- )
-
- # 2) Decide how much flow we can place, given the request and the remaining capacity.
- placed_flow = min(rem_cap, flow)
- remaining_flow = max(flow - rem_cap if flow != float("inf") else float("inf"), 0.0)
- if placed_flow <= 0:
- # If no flow can be placed, return early with zero placement.
- return FlowPlacementMeta(0.0, flow)
-
- # Track the placement metadata.
- flow_placement_meta = FlowPlacementMeta(placed_flow, remaining_flow)
-
- # For convenience, get direct references to edges and nodes structures.
- edges = flow_graph.get_edges()
- nodes = flow_graph.get_nodes()
-
- # Ensure we capture source and destination in the metadata.
- flow_placement_meta.nodes.add(src_node)
- flow_placement_meta.nodes.add(dst_node)
-
- # 3) Distribute the feasible flow across the nodes/edges according to flow_dict.
- for node_a, to_dict in flow_dict.items():
- for node_b, flow_fraction in to_dict.items():
- if flow_fraction > 0.0:
- # Mark these nodes as active in the flow.
- flow_placement_meta.nodes.add(node_a)
- flow_placement_meta.nodes.add(node_b)
-
- # Update node flow attributes.
- node_a_attr = nodes[node_a]
- node_a_attr[flow_attr] += flow_fraction * placed_flow
- node_a_attr[flows_attr].setdefault(flow_index, 0.0)
- node_a_attr[flows_attr][flow_index] += flow_fraction * placed_flow
-
- # The edges from node_b->node_a in `pred` carry the flow in forward direction.
- edge_list = pred[node_b][node_a]
-
- if flow_placement == FlowPlacement.PROPORTIONAL:
- # Distribute proportionally to each edge's unused capacity.
- total_rem_cap = sum(
- edges[eid][3][capacity_attr] - edges[eid][3][flow_attr]
- for eid in edge_list
- )
- if total_rem_cap > 0.0:
- for eid in edge_list:
- edge_cap = edges[eid][3][capacity_attr]
- edge_flow = edges[eid][3][flow_attr]
- unused = edge_cap - edge_flow
- if unused > 0:
- edge_subflow = (
- flow_fraction * placed_flow / total_rem_cap * unused
- )
- if edge_subflow > 0.0:
- flow_placement_meta.edges.add(eid)
- edges[eid][3][flow_attr] += edge_subflow
- edges[eid][3][flows_attr].setdefault(
- flow_index, 0.0
- )
- edges[eid][3][flows_attr][flow_index] += (
- edge_subflow
- )
-
- elif flow_placement == FlowPlacement.EQUAL_BALANCED:
- # Split equally across all parallel edges in edge_list.
- if len(edge_list) > 0:
- edge_subflow = (flow_fraction * placed_flow) / len(edge_list)
- for eid in edge_list:
- flow_placement_meta.edges.add(eid)
- edges[eid][3][flow_attr] += edge_subflow
- edges[eid][3][flows_attr].setdefault(flow_index, 0.0)
- edges[eid][3][flows_attr][flow_index] += edge_subflow
-
- return flow_placement_meta
-
-
-def remove_flow_from_graph(
- flow_graph: StrictMultiDiGraph,
- flow_index: Optional[Hashable] = None,
- flow_attr: str = "flow",
- flows_attr: str = "flows",
-) -> None:
- """Remove one or all flows from the graph.
-
- Args:
- flow_graph: Graph whose edge flow attributes will be modified.
- flow_index: If provided, remove only the specified flow; otherwise remove all.
- flow_attr: Aggregate flow attribute name on edges.
- flows_attr: Per-flow attribute name on edges.
- """
- edges = flow_graph.get_edges()
- for _edge_id, (_, _, _, edge_attr) in edges.items():
- if flow_index is not None and flow_index in edge_attr[flows_attr]:
- # Subtract only the specified flow
- removed = edge_attr[flows_attr][flow_index]
- edge_attr[flow_attr] -= removed
- del edge_attr[flows_attr][flow_index]
- elif flow_index is None:
- # Remove all flows
- edge_attr[flow_attr] = 0.0
- edge_attr[flows_attr] = {}
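
A sketch tying the pieces together: SPF produces pred, place_flow_on_graph places a bounded volume, and remove_flow_from_graph undoes it by flow_index. Topology and values are illustrative.

from ngraph.algorithms.base import EdgeSelect, FlowPlacement
from ngraph.algorithms.flow_init import init_flow_graph
from ngraph.algorithms.placement import place_flow_on_graph, remove_flow_from_graph
from ngraph.algorithms.spf import spf
from ngraph.graph.strict_multidigraph import StrictMultiDiGraph

g = StrictMultiDiGraph()
for n in ("A", "B", "C"):
    g.add_node(n)
g.add_edge("A", "B", capacity=10.0, cost=1)
g.add_edge("B", "C", capacity=5.0, cost=1)
g = init_flow_graph(g)

_costs, pred = spf(
    g, "A", edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING, dst_node="C"
)
meta = place_flow_on_graph(
    g, "A", "C", pred, flow=3.0, flow_index="d1",
    flow_placement=FlowPlacement.PROPORTIONAL,
)
# meta.placed_flow == 3.0, meta.remaining_flow == 0.0 (capacity allows 5.0)
remove_flow_from_graph(g, flow_index="d1")  # subtracts only this flow again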
diff --git a/ngraph/algorithms/spf.py b/ngraph/algorithms/spf.py
deleted file mode 100644
index 91c4bd3..0000000
--- a/ngraph/algorithms/spf.py
+++ /dev/null
@@ -1,518 +0,0 @@
-"""Shortest-path-first (SPF) algorithms.
-
-Implements Dijkstra-like SPF with pluggable edge-selection policies and a
-Yen-like KSP generator. Specialized fast paths exist for common selection
-strategies without exclusions.
-
-Notes:
- When a destination node is known, SPF supports an optimized mode that
- terminates once the destination's minimal distance is settled. In this mode:
- - The destination node is not expanded (no neighbor relaxation from ``dst``).
- - The algorithm continues processing any nodes with equal distance to capture
- equal-cost predecessors (needed by proportional flow placement).
-"""
-
-from heapq import heappop, heappush
-from typing import (
- Callable,
- Dict,
- Iterator,
- List,
- Optional,
- Set,
- Tuple,
-)
-
-from ngraph.algorithms.base import (
- MIN_CAP,
- Cost,
- EdgeSelect,
-)
-from ngraph.algorithms.edge_select import edge_select_fabric
-from ngraph.algorithms.paths import resolve_to_paths
-from ngraph.graph.strict_multidigraph import (
- AttrDict,
- EdgeID,
- NodeID,
- StrictMultiDiGraph,
-)
-
-
-def _spf_fast_all_min_cost_dijkstra(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- multipath: bool,
- dst_node: Optional[NodeID] = None,
-) -> Tuple[Dict[NodeID, Cost], Dict[NodeID, Dict[NodeID, List[EdgeID]]]]:
- """Specialized Dijkstra SPF for:
- - EdgeSelect.ALL_MIN_COST
- - No excluded edges/nodes.
-
- Finds all edges with the same minimal cost between two nodes if multipath=True.
- If multipath=False, new minimal-cost paths overwrite old ones, though edges
- are still collected together for immediate neighbor expansion.
-
- Args:
- graph: Directed graph (StrictMultiDiGraph).
- src_node: Source node for SPF.
- multipath: Whether to record multiple equal-cost paths.
- dst_node: Optional destination node. If provided, Dijkstra terminates
- once ``dst_node`` is popped at minimal distance. The destination is
- not expanded, but equal-cost predecessors up to that distance are
- retained.
-
- Returns:
- A tuple of (costs, pred):
- - costs: Maps each reachable node to the minimal cost from src_node.
- - pred: For each reachable node, a dict of predecessor -> list of edges
- from the predecessor to that node. If multipath=True, there may be
- multiple predecessors for the same node.
- """
- outgoing_adjacencies = graph._adj # type: ignore[attr-defined]
- if src_node not in outgoing_adjacencies:
- raise KeyError(f"Source node '{src_node}' is not in the graph.")
-
- costs: Dict[NodeID, Cost] = {src_node: 0.0}
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {src_node: {}}
- min_pq: List[Tuple[Cost, NodeID]] = [(0.0, src_node)]
-
- best_dst_cost: Optional[Cost] = None
-
- while min_pq:
- current_cost, node_id = heappop(min_pq)
- if current_cost > costs[node_id]:
- continue
-
- # Early exit: once we have popped the destination at minimal cost,
- # we only need to process nodes with cost <= best_dst_cost to collect
- # equal-cost predecessor alternatives. When the next node to pop has
- # cost greater than best_dst_cost, we can stop.
- if dst_node is not None and node_id == dst_node and best_dst_cost is None:
- best_dst_cost = current_cost
-
- # Explore neighbors (skip expanding from destination itself)
- if not (dst_node is not None and node_id == dst_node):
- for neighbor_id, edges_map in outgoing_adjacencies[node_id].items():
- min_edge_cost: Optional[Cost] = None
- selected_edges: List[EdgeID] = []
-
- # Gather the minimal cost edge(s)
- for e_id, e_attr in edges_map.items():
- edge_cost = e_attr["cost"]
- if min_edge_cost is None or edge_cost < min_edge_cost:
- min_edge_cost = edge_cost
- selected_edges = [e_id]
- elif multipath and edge_cost == min_edge_cost:
- selected_edges.append(e_id)
-
- if min_edge_cost is None:
- continue
-
- new_cost = current_cost + min_edge_cost
- if (neighbor_id not in costs) or (new_cost < costs[neighbor_id]):
- costs[neighbor_id] = new_cost
- pred[neighbor_id] = {node_id: selected_edges}
- heappush(min_pq, (new_cost, neighbor_id))
- elif multipath and new_cost == costs[neighbor_id]:
- pred[neighbor_id][node_id] = selected_edges
-
- if best_dst_cost is not None:
- # If the next candidate in the heap has a strictly higher cost,
- # all remaining nodes are farther than the destination's minimal cost.
- # Stop the search.
- if not min_pq or min_pq[0][0] > best_dst_cost:
- break
-
- return costs, pred
-
-
-def _spf_fast_all_min_cost_with_cap_remaining_dijkstra(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- multipath: bool,
- dst_node: Optional[NodeID] = None,
-) -> Tuple[Dict[NodeID, Cost], Dict[NodeID, Dict[NodeID, List[EdgeID]]]]:
- """Specialized Dijkstra SPF for:
- - EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING
- - No excluded edges/nodes
-
- Only considers edges whose (capacity - flow) >= MIN_CAP. Among those edges,
- finds all edges with the same minimal cost if multipath=True.
-
- Args:
- graph: Directed graph (StrictMultiDiGraph).
- src_node: Source node for SPF.
- multipath: Whether to record multiple equal-cost paths.
- dst_node: Optional destination node. If provided, Dijkstra terminates
- once ``dst_node`` is popped at minimal distance. The destination is
- not expanded, but equal-cost predecessors up to that distance are
- retained.
-
- Returns:
- A tuple of (costs, pred):
- - costs: Maps each reachable node to the minimal cost from src_node.
- - pred: For each reachable node, a dict of predecessor -> list of edges
- from the predecessor to that node.
- """
- outgoing_adjacencies = graph._adj # type: ignore[attr-defined]
- if src_node not in outgoing_adjacencies:
- raise KeyError(f"Source node '{src_node}' is not in the graph.")
-
- costs: Dict[NodeID, Cost] = {src_node: 0.0}
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {src_node: {}}
- min_pq: List[Tuple[Cost, NodeID]] = [(0.0, src_node)]
-
- best_dst_cost: Optional[Cost] = None
-
- while min_pq:
- current_cost, node_id = heappop(min_pq)
- if current_cost > costs[node_id]:
- continue
-
- if dst_node is not None and node_id == dst_node and best_dst_cost is None:
- best_dst_cost = current_cost
-
- # Explore neighbors; skip edges without enough remaining capacity
- # and do not expand from destination itself
- if not (dst_node is not None and node_id == dst_node):
- for neighbor_id, edges_map in outgoing_adjacencies[node_id].items():
- min_edge_cost: Optional[Cost] = None
- selected_edges: List[EdgeID] = []
-
- for e_id, e_attr in edges_map.items():
- if (e_attr["capacity"] - e_attr["flow"]) >= MIN_CAP:
- edge_cost = e_attr["cost"]
- if min_edge_cost is None or edge_cost < min_edge_cost:
- min_edge_cost = edge_cost
- selected_edges = [e_id]
- elif multipath and edge_cost == min_edge_cost:
- selected_edges.append(e_id)
-
- if min_edge_cost is None:
- continue
-
- new_cost = current_cost + min_edge_cost
- if (neighbor_id not in costs) or (new_cost < costs[neighbor_id]):
- costs[neighbor_id] = new_cost
- pred[neighbor_id] = {node_id: selected_edges}
- heappush(min_pq, (new_cost, neighbor_id))
- elif multipath and new_cost == costs[neighbor_id]:
- pred[neighbor_id][node_id] = selected_edges
-
- if best_dst_cost is not None:
- if not min_pq or min_pq[0][0] > best_dst_cost:
- break
-
- return costs, pred
-
-
-def spf(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- edge_select: EdgeSelect = EdgeSelect.ALL_MIN_COST,
- edge_select_func: Optional[
- Callable[
- [
- StrictMultiDiGraph,
- NodeID,
- NodeID,
- Dict[EdgeID, AttrDict],
- Set[EdgeID],
- Set[NodeID],
- ],
- Tuple[Cost, List[EdgeID]],
- ]
- ] = None,
- multipath: bool = True,
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- dst_node: Optional[NodeID] = None,
-) -> Tuple[Dict[NodeID, Cost], Dict[NodeID, Dict[NodeID, List[EdgeID]]]]:
- """Compute shortest paths from a source node.
-
- By default, uses EdgeSelect.ALL_MIN_COST. If multipath=True, multiple equal-cost
- paths to the same node will be recorded in the predecessor structure. If no
- excluded edges/nodes are given and edge_select is one of the specialized
- (ALL_MIN_COST or ALL_MIN_COST_WITH_CAP_REMAINING), it uses a fast specialized
- routine.
-
- Args:
- graph: The directed graph (StrictMultiDiGraph).
- src_node: The source node from which to compute shortest paths.
- edge_select: The edge selection strategy. Defaults to ALL_MIN_COST.
- edge_select_func: If provided, overrides the default edge selection function.
- Must return (cost, list_of_edges) for the given node->neighbor adjacency.
- multipath: Whether to record multiple same-cost paths.
- excluded_edges: A set of edge IDs to ignore in the graph.
- excluded_nodes: A set of node IDs to ignore in the graph.
- dst_node: Optional destination node. If provided, SPF avoids expanding
- from the destination and performs early termination once the next
- candidate in the heap would exceed the settled distance for
- ``dst_node``. This preserves equal-cost predecessors while avoiding
- unnecessary relaxations beyond the destination.
-
- Returns:
- tuple[dict[NodeID, Cost], dict[NodeID, dict[NodeID, list[EdgeID]]]]:
- Costs and predecessor mapping.
-
- Raises:
- KeyError: If src_node does not exist in graph.
- """
- if excluded_edges is None:
- excluded_edges = set()
- if excluded_nodes is None:
- excluded_nodes = set()
-
- # Use specialized fast code if applicable
- if edge_select_func is None:
- if not excluded_edges and not excluded_nodes:
- if edge_select == EdgeSelect.ALL_MIN_COST:
- return _spf_fast_all_min_cost_dijkstra(
- graph, src_node, multipath, dst_node
- )
- elif edge_select == EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING:
- return _spf_fast_all_min_cost_with_cap_remaining_dijkstra(
- graph, src_node, multipath, dst_node
- )
- else:
- edge_select_func = edge_select_fabric(edge_select)
-
- # Ensure edge_select_func is set at this point
- if edge_select_func is None:
- edge_select_func = edge_select_fabric(edge_select)
-
- outgoing_adjacencies = graph._adj # type: ignore[attr-defined]
- if src_node not in outgoing_adjacencies:
- raise KeyError(f"Source node '{src_node}' is not in the graph.")
-
- costs: Dict[NodeID, Cost] = {src_node: 0.0}
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {src_node: {}}
- min_pq: List[Tuple[Cost, NodeID]] = [(0.0, src_node)]
-
- best_dst_cost: Optional[Cost] = None
-
- while min_pq:
- current_cost, node_id = heappop(min_pq)
- if current_cost > costs[node_id]:
- continue
- if node_id in excluded_nodes:
- continue
-
- if dst_node is not None and node_id == dst_node and best_dst_cost is None:
- best_dst_cost = current_cost
-
- # Evaluate each neighbor using the provided edge_select_func.
- if not (dst_node is not None and node_id == dst_node):
- for neighbor_id, edges_dict in outgoing_adjacencies[node_id].items():
- if neighbor_id in excluded_nodes:
- continue
-
- edge_cost, selected_edges = edge_select_func(
- graph,
- node_id,
- neighbor_id,
- edges_dict,
- excluded_edges,
- excluded_nodes,
- )
- if not selected_edges:
- continue
-
- new_cost = current_cost + edge_cost
- if (neighbor_id not in costs) or (new_cost < costs[neighbor_id]):
- costs[neighbor_id] = new_cost
- pred[neighbor_id] = {node_id: selected_edges}
- heappush(min_pq, (new_cost, neighbor_id))
- elif multipath and new_cost == costs[neighbor_id]:
- pred[neighbor_id][node_id] = selected_edges
-
- if best_dst_cost is not None:
- if not min_pq or min_pq[0][0] > best_dst_cost:
- break
-
- return costs, pred
-
-
-def ksp(
- graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- edge_select: EdgeSelect = EdgeSelect.ALL_MIN_COST,
- edge_select_func: Optional[
- Callable[
- [
- StrictMultiDiGraph,
- NodeID,
- NodeID,
- Dict[EdgeID, AttrDict],
- Set[EdgeID],
- Set[NodeID],
- ],
- Tuple[Cost, List[EdgeID]],
- ]
- ] = None,
- max_k: Optional[int] = None,
- max_path_cost: Cost = float("inf"),
- max_path_cost_factor: Optional[float] = None,
- multipath: bool = True,
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
-) -> Iterator[Tuple[Dict[NodeID, Cost], Dict[NodeID, Dict[NodeID, List[EdgeID]]]]]:
- """Yield up to k shortest paths using a Yen-like algorithm.
-
- The initial SPF (shortest path) is computed; subsequent paths are found by systematically
- excluding edges/nodes used by previously generated paths. Each iteration yields a
- (costs, pred) describing one path. Stops if there are no more valid paths or if max_k
- is reached.
-
- Args:
- graph: The directed graph (StrictMultiDiGraph).
- src_node: The source node.
- dst_node: The destination node.
- edge_select: The edge selection strategy. Defaults to ALL_MIN_COST.
- edge_select_func: Optional override of the default edge selection function.
- max_k: If set, yields at most k distinct paths.
- max_path_cost: If set, do not yield any path whose total cost > max_path_cost.
- max_path_cost_factor: If set, updates max_path_cost to:
- min(max_path_cost, best_path_cost * max_path_cost_factor).
- multipath: Whether to consider multiple same-cost expansions in SPF.
- excluded_edges: Set of edge IDs to exclude globally.
- excluded_nodes: Set of node IDs to exclude globally.
-
- Yields:
- Tuple of ``(costs, pred)`` per discovered path in ascending cost order.
- """
- if edge_select_func is None:
- edge_select_func = edge_select_fabric(edge_select)
-
- excluded_edges = excluded_edges or set()
- excluded_nodes = excluded_nodes or set()
-
- shortest_paths = [] # Stores paths found so far: (costs, pred, excl_e, excl_n)
- candidates: List[
- Tuple[
- Cost,
- int,
- Dict[NodeID, Cost],
- Dict[NodeID, Dict[NodeID, List[EdgeID]]],
- Set[EdgeID],
- Set[NodeID],
- ]
- ] = []
- visited = set() # Tracks path signatures to avoid duplicates
-
- # 1) Compute the initial shortest path. Pass dst_node to enable SPF early-exit.
- costs_init, pred_init = spf(
- graph,
- src_node,
- edge_select,
- edge_select_func,
- multipath,
- excluded_edges,
- excluded_nodes,
- dst_node=dst_node,
- )
- if dst_node not in pred_init:
- return # No path exists from src_node to dst_node
-
- best_path_cost = costs_init[dst_node]
- if max_path_cost_factor:
- max_path_cost = min(max_path_cost, best_path_cost * max_path_cost_factor)
-
- if best_path_cost > max_path_cost:
- return
-
- shortest_paths.append(
- (costs_init, pred_init, excluded_edges.copy(), excluded_nodes.copy())
- )
- yield costs_init, pred_init
-
- candidate_id = 0
-
- while True:
- if max_k and len(shortest_paths) >= max_k:
- break
-
- root_costs, root_pred, root_excl_e, root_excl_n = shortest_paths[-1]
- # For each realized path from src->dst in the last SPF
- for path in resolve_to_paths(src_node, dst_node, root_pred):
- # Spur node iteration
- for idx, (spur_node, _edges_list) in enumerate(path[:-1]):
- # The path up to but not including spur_node
- root_path = path[:idx]
-
- # Copy the excluded sets
- excl_e = root_excl_e.copy()
- excl_n = root_excl_n.copy()
-
- # Remove edges (and possibly nodes) used in previous shortest paths that
- # share the same root_path
- for _sp_costs, sp_pred, sp_ex_e, sp_ex_n in shortest_paths:
- for p in resolve_to_paths(src_node, dst_node, sp_pred):
- if p[:idx] == root_path:
- excl_e.update(sp_ex_e)
- # Exclude the next edge in that path to force a different route
- excl_e.update(p[idx][1])
- excl_n.update(sp_ex_n)
- excl_n.update(n_e[0] for n_e in p[:idx])
-
- # Spur SPF with early-exit toward dst_node
- spur_costs, spur_pred = spf(
- graph,
- spur_node,
- edge_select,
- edge_select_func,
- multipath,
- excl_e,
- excl_n,
- dst_node=dst_node,
- )
- if dst_node not in spur_pred:
- continue
-
- # Shift all spur_costs relative to the cost from src->spur_node
- spur_base_cost = root_costs[spur_node]
- for node_key, node_val in spur_costs.items():
- spur_costs[node_key] = node_val + spur_base_cost
-
- # Combine root + spur costs and preds
- total_costs = dict(root_costs)
- total_costs.update(spur_costs)
-
- total_pred = dict(root_pred)
- for node_key, node_pred in spur_pred.items():
- # Replace spur_node's chain, but keep root_path info
- if node_key != spur_node:
- total_pred[node_key] = node_pred
-
- path_edge_ids = tuple(
- sorted(
- edge_id
- for nbrs in total_pred.values()
- for edge_list_ids in nbrs.values()
- for edge_id in edge_list_ids
- )
- )
- if path_edge_ids not in visited:
- if total_costs[dst_node] <= max_path_cost:
- heappush(
- candidates,
- (
- total_costs[dst_node],
- candidate_id,
- total_costs,
- total_pred,
- excl_e,
- excl_n,
- ),
- )
- visited.add(path_edge_ids)
- candidate_id += 1
-
- if not candidates:
- break
-
- # Pop the best candidate path from the min-heap
- _, _, costs_cand, pred_cand, excl_e_cand, excl_n_cand = heappop(candidates)
- shortest_paths.append((costs_cand, pred_cand, excl_e_cand, excl_n_cand))
- yield costs_cand, pred_cand
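
A compact sketch of spf and ksp on a triangle where the two-hop route is cheaper than the direct edge; topology and costs are illustrative.

from ngraph.algorithms.paths import resolve_to_paths
from ngraph.algorithms.spf import ksp, spf
from ngraph.graph.strict_multidigraph import StrictMultiDiGraph

g = StrictMultiDiGraph()
for n in ("A", "B", "C"):
    g.add_node(n)
g.add_edge("A", "B", capacity=1.0, flow=0.0, flows={}, cost=1)
g.add_edge("B", "C", capacity=1.0, flow=0.0, flows={}, cost=1)
g.add_edge("A", "C", capacity=1.0, flow=0.0, flows={}, cost=3)

costs, pred = spf(g, "A", dst_node="C")  # specialized fast path, early exit
# costs["C"] == 2 via A->B->C, beating the direct cost-3 edge

for k_costs, k_pred in ksp(g, "A", "C", max_k=2):
    for path in resolve_to_paths("A", "C", k_pred):
        print(k_costs["C"], path)
# First yield: cost 2 via B; second yield: cost 3 on the direct edge.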
diff --git a/ngraph/algorithms/types.py b/ngraph/algorithms/types.py
deleted file mode 100644
index 742036a..0000000
--- a/ngraph/algorithms/types.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""Types and data structures for algorithm analytics.
-
-Defines immutable summary containers and aliases for algorithm outputs.
-"""
-
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Dict, Hashable, List, Set, Tuple
-
-from ngraph.algorithms.base import Cost
-
-# Edge identifier tuple: (source_node, destination_node, edge_key)
-# The edge key type aligns with StrictMultiDiGraph, which uses hashable keys
-# (monotonically increasing integers by default, or explicit keys when provided).
-Edge = Tuple[str, str, Hashable]
-
-
-@dataclass(frozen=True)
-class FlowSummary:
- """Summary of max-flow computation results.
-
- Captures edge flows, residual capacities, reachable set, and min-cut.
-
- Attributes:
- total_flow: Maximum flow value achieved.
- edge_flow: Flow amount per edge, indexed by ``(src, dst, key)``.
- residual_cap: Remaining capacity per edge after placement.
- reachable: Nodes reachable from source in residual graph.
- min_cut: Saturated edges crossing the s-t cut.
- cost_distribution: Mapping of path cost to flow volume placed at that cost.
- """
-
- total_flow: float
- edge_flow: Dict[Edge, float]
- residual_cap: Dict[Edge, float]
- reachable: Set[str]
- min_cut: List[Edge]
- cost_distribution: Dict[Cost, float]
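
FlowSummary is a plain read-only container; a brief consumption sketch, reusing the A-B-C graph g built in the max-flow sketch earlier:

from ngraph.algorithms.max_flow import calc_max_flow

flow, summary = calc_max_flow(g, "A", "C", return_summary=True)
for (u, v, k), f in summary.edge_flow.items():
    print(f"{u}->{v}[{k}]: flow={f}, residual={summary.residual_cap[(u, v, k)]}")
for cost, volume in sorted(summary.cost_distribution.items()):
    print(f"{volume} units placed on paths of total cost {cost}")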
diff --git a/ngraph/cli.py b/ngraph/cli.py
index 55ce3fd..51060f3 100644
--- a/ngraph/cli.py
+++ b/ngraph/cli.py
@@ -14,7 +14,7 @@
from ngraph.explorer import NetworkExplorer
from ngraph.logging import get_logger, set_global_log_level
-from ngraph.profiling import PerformanceProfiler, PerformanceReporter
+from ngraph.profiling.profiler import PerformanceProfiler, PerformanceReporter
from ngraph.scenario import Scenario
from ngraph.utils.output_paths import (
ensure_parent_dir,
diff --git a/ngraph/config.py b/ngraph/config.py
deleted file mode 100644
index 27f25cf..0000000
--- a/ngraph/config.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""Configuration classes for NetGraph components."""
-
-from dataclasses import dataclass
-
-
-@dataclass
-class TrafficManagerConfig:
- """Configuration for traffic demand placement estimation."""
-
- # Default number of placement rounds when no data is available
- default_rounds: int = 5
-
- # Minimum number of placement rounds
- min_rounds: int = 5
-
- # Maximum number of placement rounds
- max_rounds: int = 100
-
-    # Base and multiplier for ratio-based round estimation
- ratio_base: int = 5
- ratio_multiplier: int = 5
-
- def estimate_rounds(self, demand_capacity_ratio: float) -> int:
- """Calculate placement rounds based on demand to capacity ratio."""
- estimated = int(self.ratio_base + self.ratio_multiplier * demand_capacity_ratio)
- return max(self.min_rounds, min(estimated, self.max_rounds))
-
-
-# Global configuration instance
-TRAFFIC_CONFIG = TrafficManagerConfig()
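
The round estimate is linear in the demand-to-capacity ratio and clamped; a few worked values under the defaults above:

from ngraph.config import TRAFFIC_CONFIG

# estimated = int(ratio_base + ratio_multiplier * ratio) = int(5 + 5 * ratio),
# then clamped to [min_rounds, max_rounds] = [5, 100]:
assert TRAFFIC_CONFIG.estimate_rounds(0.1) == 5     # int(5.5) = 5
assert TRAFFIC_CONFIG.estimate_rounds(2.0) == 15    # int(15.0) = 15
assert TRAFFIC_CONFIG.estimate_rounds(30.0) == 100  # int(155.0) clamped to 100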
diff --git a/ngraph/demand/__init__.py b/ngraph/demand/__init__.py
deleted file mode 100644
index 76b5526..0000000
--- a/ngraph/demand/__init__.py
+++ /dev/null
@@ -1,141 +0,0 @@
-"""Demand primitives for traffic placement.
-
-Defines the `Demand` dataclass, which represents traffic volume between two
-nodes. A `Demand` delegates flow realization and placement to a single
-`FlowPolicy` instance.
-"""
-
-from __future__ import annotations
-
-import math
-from dataclasses import dataclass, field
-from typing import Optional, Tuple
-
-from ngraph.algorithms.base import MIN_FLOW
-from ngraph.flows.policy import FlowPolicy
-from ngraph.graph.strict_multidigraph import NodeID, StrictMultiDiGraph
-
-
-@dataclass
-class Demand:
- """Network demand between two nodes.
-
- A demand is realized via one or more flows created by a single
- `FlowPolicy`.
-
- Attributes:
- src_node: Source node identifier.
- dst_node: Destination node identifier.
- volume: Total demand volume to place.
- demand_class: Priority class; lower value indicates higher priority.
- flow_policy: Policy used to create and place flows for this demand.
- placed_demand: Volume successfully placed so far.
- """
-
- src_node: NodeID
- dst_node: NodeID
- volume: float
- demand_class: int = 0
- flow_policy: Optional[FlowPolicy] = None
- placed_demand: float = field(default=0.0, init=False)
-
- @staticmethod
- def _round_float(value: float) -> float:
- """Round ``value`` to avoid tiny floating point drift."""
- if math.isfinite(value):
- rounded = round(value, 12)
- if abs(rounded) < MIN_FLOW:
- return 0.0
- return rounded
- return value
-
- def __lt__(self, other: Demand) -> bool:
- """Return True if this demand should sort before ``other``.
-
- Demands sort by ``demand_class`` ascending (lower value = higher priority).
-
- Args:
- other: Demand to compare against.
-
- Returns:
- True if this instance has a lower ``demand_class`` than ``other``.
- """
- return self.demand_class < other.demand_class
-
- def __str__(self) -> str:
- """Return a concise representation with src, dst, volume, priority, placed."""
- return (
- f"Demand(src_node={self.src_node}, dst_node={self.dst_node}, "
- f"volume={self.volume}, demand_class={self.demand_class}, "
- f"placed_demand={self.placed_demand})"
- )
-
- def place(
- self,
- flow_graph: StrictMultiDiGraph,
- max_fraction: float = 1.0,
- max_placement: Optional[float] = None,
- ) -> Tuple[float, float]:
- """Places demand volume onto the network via self.flow_policy.
-
- Args:
- flow_graph: Graph to place flows onto.
- max_fraction: Fraction of the remaining demand to place now.
- max_placement: Absolute upper bound on the volume to place now.
-
- Returns:
- A tuple ``(placed_now, remaining)`` where:
- - ``placed_now`` is the volume placed in this call.
- - ``remaining`` is the volume that could not be placed in this call.
-
- Raises:
- RuntimeError: If no FlowPolicy is set on this Demand.
- ValueError: If max_fraction is outside [0, 1].
- """
- if self.flow_policy is None:
- raise RuntimeError("No FlowPolicy set on this Demand.")
-
- if not (0 <= max_fraction <= 1):
- raise ValueError("max_fraction must be in the range [0, 1].")
-
- to_place = self.volume - self.placed_demand
- if max_placement is not None:
- to_place = min(to_place, max_placement)
-
- if max_fraction > 0:
- to_place = min(to_place, self.volume * max_fraction)
- else:
- # If max_fraction <= 0, do not place any new volume (unless volume is infinite).
- to_place = self.volume if self.volume == float("inf") else 0.0
-
- # Ensure we request at least MIN_FLOW when there is meaningful leftover
- if 0.0 < to_place < MIN_FLOW:
- to_place = min(self.volume - self.placed_demand, MIN_FLOW)
-
- # Delegate flow placement (do not force min_flow threshold here; policy handles it)
- # Use a demand-unique flow_class to avoid collisions across different
- # Demand instances that share the same numerical demand_class.
- demand_unique_flow_class = (
- self.demand_class,
- self.src_node,
- self.dst_node,
- id(self),
- )
-
- self.flow_policy.place_demand(
- flow_graph,
- self.src_node,
- self.dst_node,
- demand_unique_flow_class,
- to_place,
- )
-
- # placed_now is the difference from the old placed_demand
- placed_now = self.flow_policy.placed_demand - self.placed_demand
- self.placed_demand = self._round_float(self.flow_policy.placed_demand)
- remaining = to_place - placed_now
-
- placed_now = self._round_float(placed_now)
- remaining = self._round_float(remaining)
-
- return placed_now, remaining
diff --git a/ngraph/demand/manager/expand.py b/ngraph/demand/manager/expand.py
deleted file mode 100644
index 98f01d6..0000000
--- a/ngraph/demand/manager/expand.py
+++ /dev/null
@@ -1,214 +0,0 @@
-"""Expansion helpers for traffic demand specifications.
-
-Public functions here convert user-facing `TrafficDemand` specifications into
-concrete `Demand` objects that can be placed on a `StrictMultiDiGraph`.
-
-This module provides the pure expansion logic that was previously embedded in
-`TrafficManager`.
-"""
-
-from __future__ import annotations
-
-from typing import Dict, List, Tuple, Union
-
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.demand import Demand
-from ngraph.demand.spec import TrafficDemand
-from ngraph.flows.policy import FlowPolicyConfig, get_flow_policy
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.model.network import Network, Node
-
-try:
- # Avoid importing at runtime if not needed while keeping type hints precise
- from typing import TYPE_CHECKING
-
- if TYPE_CHECKING: # pragma: no cover - typing only
- from ngraph.model.view import NetworkView
-except Exception: # pragma: no cover - defensive for environments without extras
- TYPE_CHECKING = False
-
-
-def expand_demands(
- network: Union[Network, "NetworkView"],
- graph: StrictMultiDiGraph | None,
- traffic_demands: List[TrafficDemand],
- default_flow_policy_config: FlowPolicyConfig,
-) -> Tuple[List[Demand], Dict[str, List[Demand]]]:
- """Expand traffic demands into concrete `Demand` objects.
-
- The result is a flat list of `Demand` plus a mapping from
- ``TrafficDemand.id`` to the list of expanded demands for that entry.
-
- Args:
- network: Network or NetworkView used for node group selection.
- graph: Flow graph to operate on. If ``None``, expansion that requires
- graph mutation (pseudo nodes/edges) is skipped.
- traffic_demands: List of high-level traffic demand specifications.
- default_flow_policy_config: Default policy to apply when a demand does
- not specify an explicit `flow_policy`.
-
- Returns:
- A tuple ``(expanded, td_map)`` where:
- - ``expanded`` is the flattened, sorted list of all expanded demands
- (sorted by ascending ``demand_class``).
- - ``td_map`` maps ``TrafficDemand.id`` to its expanded demands.
- """
- td_to_demands: Dict[str, List[Demand]] = {}
- expanded: List[Demand] = []
-
- for td in traffic_demands:
- # Gather node groups for source and sink
- src_groups = network.select_node_groups_by_path(td.source_path)
- snk_groups = network.select_node_groups_by_path(td.sink_path)
-
- if not src_groups or not snk_groups:
- td_to_demands[td.id] = []
- continue
-
- demands_of_td: List[Demand] = []
- if td.mode == "combine":
- _expand_combine(
- demands_of_td,
- td,
- src_groups,
- snk_groups,
- graph,
- default_flow_policy_config,
- )
- elif td.mode == "pairwise":
- _expand_pairwise(
- demands_of_td,
- td,
- src_groups,
- snk_groups,
- default_flow_policy_config,
- )
- else:
- raise ValueError(f"Unknown mode: {td.mode}")
-
- expanded.extend(demands_of_td)
- td_to_demands[td.id] = demands_of_td
-
- # Sort final demands by ascending demand_class (i.e., priority)
- expanded.sort(key=lambda d: d.demand_class)
- return expanded, td_to_demands
-
-
-def _expand_combine(
- expanded: List[Demand],
- td: TrafficDemand,
- src_groups: Dict[str, List[Node]],
- snk_groups: Dict[str, List[Node]],
- graph: StrictMultiDiGraph | None,
- default_flow_policy_config: FlowPolicyConfig,
-) -> None:
- """Expand a single demand using the ``combine`` mode.
-
- Adds pseudo-source and pseudo-sink nodes, connects them to real nodes
- with infinite-capacity, zero-cost edges, and creates one aggregate
- `Demand` from pseudo-source to pseudo-sink with the full volume.
- """
- # Flatten and sort source and sink node lists for deterministic order
- src_nodes = sorted(
- (node for group_nodes in src_groups.values() for node in group_nodes),
- key=lambda n: n.name,
- )
- dst_nodes = sorted(
- (node for group_nodes in snk_groups.values() for node in group_nodes),
- key=lambda n: n.name,
- )
-
- if not src_nodes or not dst_nodes or graph is None:
- return
-
- # Create pseudo-source / pseudo-sink names
- pseudo_source_name = f"combine_src::{td.id}"
- pseudo_sink_name = f"combine_snk::{td.id}"
-
- # Add pseudo nodes to the graph only if missing (idempotent)
- if pseudo_source_name not in graph:
- graph.add_node(pseudo_source_name)
- if pseudo_sink_name not in graph:
- graph.add_node(pseudo_sink_name)
-
- # Link pseudo-source to real sources, and real sinks to pseudo-sink (idempotent)
- for s_node in src_nodes:
- if not graph.edges_between(pseudo_source_name, s_node.name):
- graph.add_edge(
- pseudo_source_name, s_node.name, capacity=float("inf"), cost=0
- )
- for t_node in dst_nodes:
- if not graph.edges_between(t_node.name, pseudo_sink_name):
- graph.add_edge(t_node.name, pseudo_sink_name, capacity=float("inf"), cost=0)
-
- # Initialize flow-related attributes without resetting existing usage
- init_flow_graph(graph, reset_flow_graph=False)
-
- # Create a single Demand with the full volume
- if td.flow_policy:
- flow_policy = td.flow_policy.deep_copy()
- else:
- fp_config = td.flow_policy_config or default_flow_policy_config
- flow_policy = get_flow_policy(fp_config)
-
- expanded.append(
- Demand(
- src_node=pseudo_source_name,
- dst_node=pseudo_sink_name,
- volume=td.demand,
- demand_class=td.priority,
- flow_policy=flow_policy,
- )
- )
-
-
-def _expand_pairwise(
- expanded: List[Demand],
- td: TrafficDemand,
- src_groups: Dict[str, List[Node]],
- snk_groups: Dict[str, List[Node]],
- default_flow_policy_config: FlowPolicyConfig,
-) -> None:
- """Expand a single demand using the ``pairwise`` mode.
-
- Creates one `Demand` for each valid source-destination pair (excluding
- self-pairs) and splits total volume evenly across pairs.
- """
- # Flatten and sort source and sink node lists for deterministic order
- src_nodes = sorted(
- (node for group_nodes in src_groups.values() for node in group_nodes),
- key=lambda n: n.name,
- )
- dst_nodes = sorted(
- (node for group_nodes in snk_groups.values() for node in group_nodes),
- key=lambda n: n.name,
- )
-
- # Generate all valid (src, dst) pairs in deterministic lexicographic order
- valid_pairs = []
- for s_node in src_nodes:
- for t_node in dst_nodes:
- if s_node.name != t_node.name:
- valid_pairs.append((s_node, t_node))
- pair_count = len(valid_pairs)
- if pair_count == 0:
- return
-
- demand_per_pair = td.demand / float(pair_count)
-
- for s_node, t_node in valid_pairs:
- if td.flow_policy:
- flow_policy = td.flow_policy.deep_copy()
- else:
- fp_config = td.flow_policy_config or default_flow_policy_config
- flow_policy = get_flow_policy(fp_config)
-
- expanded.append(
- Demand(
- src_node=s_node.name,
- dst_node=t_node.name,
- volume=demand_per_pair,
- demand_class=td.priority,
- flow_policy=flow_policy,
- )
- )
diff --git a/ngraph/demand/manager/manager.py b/ngraph/demand/manager/manager.py
deleted file mode 100644
index 39158d0..0000000
--- a/ngraph/demand/manager/manager.py
+++ /dev/null
@@ -1,408 +0,0 @@
-"""Traffic demand management and placement.
-
-`TrafficManager` expands `TrafficDemand` specs into concrete `Demand` objects,
-builds a working `StrictMultiDiGraph` from a `Network`, and places flows via
-per-demand `FlowPolicy` instances.
-"""
-
-from __future__ import annotations
-
-import statistics
-from dataclasses import dataclass, field
-from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Tuple, Union
-
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.demand import Demand
-from ngraph.demand.manager.expand import expand_demands as expand_demands_helper
-from ngraph.demand.manager.schedule import place_demands_round_robin
-from ngraph.demand.spec import TrafficDemand
-from ngraph.flows.policy import FlowPolicyConfig
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.model.network import Network
-
-if TYPE_CHECKING: # pragma: no cover - typing-only imports
- from ngraph.demand.matrix import TrafficMatrixSet # pragma: no cover
- from ngraph.model.view import NetworkView # pragma: no cover
-
-
-def _new_td_map() -> Dict[str, List[Demand]]:
- """Return a new mapping from TrafficDemand id to expanded demands.
-
- Returns:
- Empty mapping with the correct type for ``_td_to_demands``.
- """
- return {}
-
-
-class TrafficResult(NamedTuple):
- """Traffic demand result entry.
-
- Attributes:
- priority: Demand priority class (lower value is more critical).
- total_volume: Total traffic volume for this entry.
- placed_volume: Volume actually placed in the flow graph.
- unplaced_volume: Volume not placed (``total_volume - placed_volume``).
- src: Source node or path.
- dst: Destination node or path.
- """
-
- priority: int
- total_volume: float
- placed_volume: float
- unplaced_volume: float
- src: str
- dst: str
-
-
-@dataclass
-class TrafficManager:
- """Manage expansion and placement of traffic demands on a `Network`.
-
- This class:
-
- 1) Builds (or rebuilds) a StrictMultiDiGraph from the given Network.
- 2) Expands each TrafficDemand into one or more Demand objects based
- on a configurable 'mode' ("combine" or "pairwise").
- 3) Each Demand is associated with a FlowPolicy, which handles how flows
- are placed (split across paths, balancing, etc.).
- 4) Provides methods to place all demands incrementally with optional
- re-optimization, reset usage, and retrieve flow/usage summaries.
-
- Auto rounds semantics:
- - placement_rounds="auto" performs up to a small number of fairness passes
- (at most 3), with early stop when diminishing returns are detected. Each
- pass asks the scheduler to place full leftovers without step splitting.
-
- In particular:
- - 'combine' mode:
- * Combine all matched sources into a single pseudo-source node, and all
- matched sinks into a single pseudo-sink node (named using the traffic
- demand's `source_path` and `sink_path`). A single Demand is created
- from the pseudo-source to the pseudo-sink, with the full volume.
-
- - 'pairwise' mode:
- * All matched sources form one group, all matched sinks form another group.
- A separate Demand is created for each (src_node, dst_node) pair,
- skipping self-pairs. The total volume is split evenly across the pairs.
-
- The sum of volumes of all expanded Demands for a given TrafficDemand matches
- that TrafficDemand's `demand` value (unless no valid node pairs exist, in which
- case no demands are created).
-
- Attributes:
- network (Union[Network, NetworkView]): The underlying network or view object.
- traffic_matrix_set (TrafficMatrixSet): Traffic matrices containing demands.
- matrix_name (Optional[str]): Name of specific matrix to use, or None for default.
- default_flow_policy_config (FlowPolicyConfig): Default FlowPolicy if
- a TrafficDemand does not specify one.
- graph (StrictMultiDiGraph): Active graph built from the network.
- demands (List[Demand]): All expanded demands from the active matrix.
- _td_to_demands (Dict[str, List[Demand]]): Internal mapping from
- TrafficDemand.id to its expanded Demand objects.
- """
-
- network: Union[Network, "NetworkView"]
- traffic_matrix_set: "TrafficMatrixSet"
- matrix_name: Optional[str] = None
- default_flow_policy_config: FlowPolicyConfig = FlowPolicyConfig.SHORTEST_PATHS_ECMP
-
- graph: Optional[StrictMultiDiGraph] = None
- demands: List[Demand] = field(default_factory=list)
- _td_to_demands: Dict[str, List[Demand]] = field(default_factory=_new_td_map)
-
- def _get_traffic_demands(self) -> List[TrafficDemand]:
- """Return traffic demands from the matrix set.
-
- Returns:
- Traffic demands from the specified matrix or the default matrix.
- """
- if self.matrix_name:
- return self.traffic_matrix_set.get_matrix(self.matrix_name)
- else:
- return self.traffic_matrix_set.get_default_matrix()
-
- def build_graph(self, add_reverse: bool = True) -> None:
- """Build or rebuild the internal `StrictMultiDiGraph` from ``network``.
-
- Also initializes flow-related edge attributes (for example, ``flow=0``).
-
- Args:
- add_reverse: If True, for every link A->B, add a mirrored link B->A
- with the same capacity and cost.
- """
- self.graph = self.network.to_strict_multidigraph(
- add_reverse=add_reverse, compact=True
- )
- init_flow_graph(self.graph) # Initialize flow-related attributes
-
- def expand_demands(self) -> None:
- """Expand each `TrafficDemand` into one or more `Demand` objects.
-
- The expanded demands are stored in ``demands``, sorted by ascending
- ``demand_class`` (priority). Also populates ``_td_to_demands[td.id]``.
-
- Raises:
- ValueError: If an unknown mode is encountered.
- """
- expanded, td_map = expand_demands_helper(
- network=self.network,
- graph=self.graph,
- traffic_demands=self._get_traffic_demands(),
- default_flow_policy_config=self.default_flow_policy_config,
- )
- self._td_to_demands = td_map
- self.demands = expanded
-
- def place_all_demands(
- self,
- placement_rounds: Union[int, str] = "auto",
- reoptimize_after_each_round: bool = False,
- ) -> float:
- """Place all expanded demands in ascending priority order.
-
- Uses multiple incremental rounds per priority class. Optionally
- re-optimizes after each round.
-
- Args:
- placement_rounds: Number of passes per priority class. If ``"auto"``,
- choose using a heuristic based on total demand and capacity.
- reoptimize_after_each_round: Remove and re-place each demand after
- the round to better share capacity.
-
- Returns:
- Total volume successfully placed across all demands.
-
- Raises:
- RuntimeError: If the graph has not been built yet.
- """
- if self.graph is None:
- raise RuntimeError("Graph not built yet. Call build_graph() first.")
-
- if isinstance(placement_rounds, str) and placement_rounds.lower() == "auto":
- # Simple, reliable auto: up to 3 passes with early stop.
- from ngraph.algorithms.base import MIN_FLOW
-
- total_placed = 0.0
- max_auto_rounds = 3
- for _ in range(max_auto_rounds):
- placed_now = place_demands_round_robin(
- graph=self.graph,
- demands=self.demands,
- placement_rounds=1,
- reoptimize_after_each_round=False,
- )
- total_placed += placed_now
-
- # Early stops: no progress or negligible leftover
- if placed_now < MIN_FLOW:
- break
-
- leftover_total = sum(
- max(0.0, d.volume - d.placed_demand) for d in self.demands
- )
- if leftover_total < 0.05 * placed_now:
- break
-
- # Fairness check: if served ratios are already close, stop
- served = [
- (d.placed_demand / d.volume)
- for d in self.demands
- if d.volume > 0.0 and (d.volume - d.placed_demand) >= MIN_FLOW
- ]
- if served:
- s_min, s_max = min(served), max(served)
- if s_max <= 0.0 or s_min >= 0.8 * s_max:
- break
- else:
- # Ensure placement_rounds is an int for range() and arithmetic operations
- placement_rounds_int = (
- int(placement_rounds)
- if isinstance(placement_rounds, str)
- else placement_rounds
- )
-
- if placement_rounds_int <= 0:
- raise ValueError("placement_rounds must be positive")
-
- total_placed = place_demands_round_robin(
- graph=self.graph,
- demands=self.demands,
- placement_rounds=placement_rounds_int,
- reoptimize_after_each_round=reoptimize_after_each_round,
- )
-
- # Update each TrafficDemand's placed volume
- for td in self._get_traffic_demands():
- dlist = self._td_to_demands.get(td.id, [])
- td.demand_placed = sum(d.placed_demand for d in dlist)
-
- return total_placed
-
- def reset_all_flow_usages(self) -> None:
- """Remove flow usage for each demand and reset placements to 0.
-
- Also sets ``TrafficDemand.demand_placed`` to 0 for each top-level demand.
- """
- if self.graph is None:
- return
-
- # First, remove flows for currently tracked demands to reset internal
- # policy state (placed_flow counters) without destroying flow objects.
- for dmd in self.demands:
- if dmd.flow_policy:
- dmd.flow_policy.remove_demand(self.graph)
- dmd.placed_demand = 0.0
-
- # Then, ensure the graph itself is clean of any stray flows that may have
- # been created by previously expanded demands (no longer referenced).
- # This guarantees a full graph-level reset regardless of demand set churn.
- from ngraph.algorithms.flow_init import init_flow_graph
-
- init_flow_graph(self.graph, reset_flow_graph=True)
-
- for td in self._get_traffic_demands():
- td.demand_placed = 0.0
-
- def get_flow_details(self) -> Dict[Tuple[int, int], Dict[str, object]]:
- """Summarize flows from each demand's policy.
-
- Returns:
- Mapping keyed by ``(demand_index, flow_index)``; each value includes
- ``placed_flow``, ``src_node``, ``dst_node``, and ``edges``.
- """
- details: Dict[Tuple[int, int], Dict[str, object]] = {}
- for i, dmd in enumerate(self.demands):
- if not dmd.flow_policy:
- continue
- for j, (_f_idx, flow_obj) in enumerate(dmd.flow_policy.flows.items()):
- details[(i, j)] = {
- "placed_flow": flow_obj.placed_flow,
- "src_node": flow_obj.src_node,
- "dst_node": flow_obj.dst_node,
- "edges": list(flow_obj.path_bundle.edges),
- }
- return details
-
- def summarize_link_usage(self) -> Dict[str, float]:
- """Return total flow usage per edge in the graph.
-
- Returns:
- Mapping from ``edge_key`` to current flow on that edge.
- """
- usage: Dict[str, float] = {}
- if self.graph is None:
- return usage
-
- for edge_key, edge_tuple in self.graph.get_edges().items():
- attr_dict = edge_tuple[3]
- usage[str(edge_key)] = attr_dict.get("flow", 0.0)
-
- return usage
-
- def get_traffic_results(self, detailed: bool = False) -> List[TrafficResult]:
- """Return traffic demand summaries.
-
- If ``detailed`` is False, return one entry per top-level `TrafficDemand`.
- If True, return one entry per expanded `Demand`.
-
- Args:
- detailed: Whether to return per-expanded-demand data instead of
- top-level aggregated data.
-
- Returns:
- List of ``TrafficResult`` entries.
- """
- results: List[TrafficResult] = []
-
- if not detailed:
- # Summaries for top-level TrafficDemands
- for td in self._get_traffic_demands():
- total_volume = td.demand
- placed_volume = td.demand_placed
- unplaced_volume = total_volume - placed_volume
-
- # For aggregated results, we return the original src/dst "paths."
- results.append(
- TrafficResult(
- priority=td.priority,
- total_volume=total_volume,
- placed_volume=placed_volume,
- unplaced_volume=unplaced_volume,
- src=td.source_path,
- dst=td.sink_path,
- )
- )
- else:
- # Summaries for each expanded Demand
- for dmd in self.demands:
- total_volume = dmd.volume
- placed_volume = dmd.placed_demand
- unplaced_volume = total_volume - placed_volume
-
- results.append(
- TrafficResult(
- priority=dmd.demand_class,
- total_volume=total_volume,
- placed_volume=placed_volume,
- unplaced_volume=unplaced_volume,
- src=str(dmd.src_node),
- dst=str(dmd.dst_node),
- )
- )
-
- return results
-
- def _reoptimize_priority_demands(self, demands_in_prio: List[Demand]) -> None:
- # Retained for backward-compat within this class; internal only. The
- # scheduling module provides its own implementation.
- if self.graph is None:
- return
- for dmd in demands_in_prio:
- if not dmd.flow_policy:
- continue
- placed_volume = dmd.placed_demand
- dmd.flow_policy.remove_demand(self.graph)
- dmd.flow_policy.place_demand(
- self.graph,
- dmd.src_node,
- dmd.dst_node,
- dmd.demand_class,
- placed_volume,
- )
- dmd.placed_demand = dmd.flow_policy.placed_demand
-
- def _estimate_rounds(self) -> int:
- """Estimate a suitable number of placement rounds.
-
- Compares median demand volume with median edge capacity. Falls back to
- a default when data is insufficient.
-
- Returns:
- Estimated number of rounds to use for traffic placement.
- """
- from ngraph.config import TRAFFIC_CONFIG
-
- if not self.demands:
- return TRAFFIC_CONFIG.default_rounds
-
- demand_volumes = [demand.volume for demand in self.demands if demand.volume > 0]
- if not demand_volumes:
- return TRAFFIC_CONFIG.default_rounds
-
- median_demand = statistics.median(demand_volumes)
-
- if not self.graph:
- return TRAFFIC_CONFIG.default_rounds
-
- edges = self.graph.get_edges().values()
- capacities = [
- edge_data[3].get("capacity", 0)
- for edge_data in edges
- if edge_data[3].get("capacity", 0) > 0
- ]
- if not capacities:
- return TRAFFIC_CONFIG.default_rounds
-
- median_capacity = statistics.median(capacities)
- ratio = median_demand / median_capacity
- return TRAFFIC_CONFIG.estimate_rounds(ratio)
diff --git a/ngraph/demand/manager/schedule.py b/ngraph/demand/manager/schedule.py
deleted file mode 100644
index 4064dd6..0000000
--- a/ngraph/demand/manager/schedule.py
+++ /dev/null
@@ -1,198 +0,0 @@
-"""Scheduling utilities for demand placement rounds.
-
-Provides the simple priority-aware round-robin scheduler that was previously
-implemented in `TrafficManager`.
-"""
-
-from __future__ import annotations
-
-import logging as _logging
-from collections import defaultdict
-from typing import Dict, List
-
-from ngraph.algorithms import base
-from ngraph.demand import Demand
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.logging import get_logger
-
-_logger = get_logger(__name__)
-
-
-def place_demands_round_robin(
- graph: StrictMultiDiGraph,
- demands: List[Demand],
- placement_rounds: int,
- reoptimize_after_each_round: bool = False,
-) -> float:
- """Place demands using priority buckets and round-robin within each bucket.
-
- Args:
- graph: Active flow graph.
- demands: Expanded demands to place.
- placement_rounds: Number of passes per priority class.
- reoptimize_after_each_round: Whether to re-run placement for each demand
- after a round to better share capacity.
-
- Returns:
- Total volume successfully placed across all demands.
- """
- # Group demands by priority class
- prio_map: Dict[int, List[Demand]] = defaultdict(list)
- for dmd in demands:
- prio_map[dmd.demand_class].append(dmd)
-
- total_placed = 0.0
- sorted_priorities = sorted(prio_map.keys())
-
- if _logger.isEnabledFor(_logging.DEBUG):
- _logger.debug(
- "rr:start placement_rounds=%s total_demands=%d priorities=%s",
- str(placement_rounds),
- len(demands),
- ",".join(str(p) for p in sorted_priorities),
- )
-
- for priority_class in sorted_priorities:
- demands_in_class = prio_map[priority_class]
- placed_before_class = sum(d.placed_demand for d in demands_in_class)
-
- if _logger.isEnabledFor(_logging.DEBUG):
- _logger.debug(
- "rr:prio begin prio=%d demands=%d placed_before=%.6g",
- priority_class,
- len(demands_in_class),
- placed_before_class,
- )
-
- # Unified fairness loop: attempt to place full leftover per demand each round.
- # For rounds > 0, reorder by least-served ratio to improve fairness.
- reopt_attempted = False
- for round_idx in range(placement_rounds):
- placed_in_this_round = 0.0
-
- if round_idx == 0:
- iteration_order = list(demands_in_class)
- else:
- iteration_order = sorted(
- demands_in_class,
- key=lambda d: (
- (d.placed_demand / d.volume) if d.volume > 0 else 1.0,
- d.placed_demand,
- ),
- )
-
- for demand in iteration_order:
- leftover = demand.volume - demand.placed_demand
- if leftover < base.MIN_FLOW:
- continue
-
- if _logger.isEnabledFor(_logging.DEBUG):
- fp0 = getattr(demand, "flow_policy", None)
- flows_count = (
- len(getattr(fp0, "flows", {})) if fp0 is not None else 0
- )
- _logger.debug(
- "rr:place prio=%d src=%s dst=%s request=%.6g flows=%d",
- priority_class,
- str(getattr(demand, "src_node", "")),
- str(getattr(demand, "dst_node", "")),
- float(leftover),
- flows_count,
- )
-
- placed_now, _remain = demand.place(flow_graph=graph)
- placed_in_this_round += placed_now
-
- if _logger.isEnabledFor(_logging.DEBUG):
- after_leftover = demand.volume - demand.placed_demand
- fp1 = getattr(demand, "flow_policy", None)
- flows_count_after = (
- len(getattr(fp1, "flows", {})) if fp1 is not None else 0
- )
- # Extract FlowPolicy per-call metrics for verification
- fp = getattr(demand, "flow_policy", None)
- last = getattr(fp, "last_metrics", {}) if fp else {}
- _logger.debug(
- (
- "rr:placed prio=%d src=%s dst=%s placed_now=%.6g "
- "left_after=%.6g flows=%d iters=%.0f spf_calls=%.0f flows_created=%.0f"
- ),
- priority_class,
- str(getattr(demand, "src_node", "")),
- str(getattr(demand, "dst_node", "")),
- float(placed_now),
- float(after_leftover),
- flows_count_after,
- float(last.get("iterations", 0.0)),
- float(last.get("spf_calls", 0.0)),
- float(last.get("flows_created", 0.0)),
- )
-
- if reoptimize_after_each_round and placed_in_this_round > 0.0:
- _reoptimize_priority_demands(graph, demands_in_class)
-
- if placed_in_this_round < base.MIN_FLOW:
- any_leftover = any(
- (d.volume - d.placed_demand) >= base.MIN_FLOW
- for d in demands_in_class
- )
- if not any_leftover:
- break
- if not reopt_attempted:
- _reoptimize_priority_demands(graph, demands_in_class)
- reopt_attempted = True
- continue
- break
-
- if _logger.isEnabledFor(_logging.DEBUG):
- served = sum(d.placed_demand for d in demands_in_class)
- _logger.debug(
- "rr:round end prio=%d round=%d placed_in_round=%.6g placed_total=%.6g",
- priority_class,
- round_idx,
- placed_in_this_round,
- served,
- )
-
- # Add only the net increase for this class to avoid double counting
- placed_after_class = sum(d.placed_demand for d in demands_in_class)
- total_placed += max(0.0, placed_after_class - placed_before_class)
-
- if _logger.isEnabledFor(_logging.DEBUG):
- _logger.debug(
- "rr:prio end prio=%d placed_delta=%.6g placed_after=%.6g",
- priority_class,
- max(0.0, placed_after_class - placed_before_class),
- placed_after_class,
- )
-
- return total_placed
-
-
-def _reoptimize_priority_demands(
- graph: StrictMultiDiGraph, demands_in_prio: List[Demand]
-) -> None:
- """Re-run placement for each demand in the same priority class.
-
- Allows the policy to adjust to capacity changes due to other demands.
- """
- for dmd in demands_in_prio:
- if not dmd.flow_policy:
- continue
- placed_volume = dmd.placed_demand
- dmd.flow_policy.remove_demand(graph)
- # Use a demand-unique flow_class key to keep policy flows consistent
- flow_class_key = (
- dmd.demand_class,
- dmd.src_node,
- dmd.dst_node,
- id(dmd),
- )
- dmd.flow_policy.place_demand(
- graph,
- dmd.src_node,
- dmd.dst_node,
- flow_class_key,
- placed_volume,
- )
- dmd.placed_demand = dmd.flow_policy.placed_demand
diff --git a/ngraph/dsl/blueprints/expand.py b/ngraph/dsl/blueprints/expand.py
index 9ae53a7..da24d66 100644
--- a/ngraph/dsl/blueprints/expand.py
+++ b/ngraph/dsl/blueprints/expand.py
@@ -7,9 +7,9 @@
from itertools import product, zip_longest
from typing import Any, Dict, List, Set
-from ngraph.dsl.blueprints import parse as _bp_parse
-from ngraph.failure.conditions import FailureCondition as _Cond
-from ngraph.failure.conditions import evaluate_conditions as _eval_conditions
+from ngraph.dsl.blueprints import parser as _bp_parse
+from ngraph.model.failure.conditions import FailureCondition as _Cond
+from ngraph.model.failure.conditions import evaluate_conditions as _eval_conditions
from ngraph.model.network import Link, Network, Node
diff --git a/ngraph/dsl/blueprints/parse.py b/ngraph/dsl/blueprints/parser.py
similarity index 100%
rename from ngraph/dsl/blueprints/parse.py
rename to ngraph/dsl/blueprints/parser.py
diff --git a/ngraph/dsl/loader.py b/ngraph/dsl/loader.py
new file mode 100644
index 0000000..a473f6c
--- /dev/null
+++ b/ngraph/dsl/loader.py
@@ -0,0 +1,115 @@
+"""YAML loader + schema validation for Scenario DSL.
+
+Provides a single entrypoint to parse a YAML string, normalize keys where
+needed, validate against the packaged JSON schema, and return a canonical
+dictionary suitable for downstream expansion/parsing.
+"""
+
+from __future__ import annotations
+
+import json
+from importlib import resources
+from typing import Any, Dict
+
+import yaml
+
+from ngraph.utils.yaml_utils import normalize_yaml_dict_keys
+
+
+def load_scenario_yaml(yaml_str: str) -> Dict[str, Any]:
+ """Load, normalize, and validate a Scenario YAML string.
+
+ Returns a canonical dictionary representation that downstream parsers can
+ consume without worrying about YAML-specific quirks (e.g., boolean-like
+ keys) and with schema shape already enforced.
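+
+    Example (illustrative sketch; assumes the packaged schema accepts a
+    minimal network section)::
+
+        data = load_scenario_yaml("network:\n  nodes: {}\n  links: []\n")
+        assert isinstance(data, dict)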
+ """
+ data = yaml.safe_load(yaml_str)
+ if data is None:
+ data = {}
+ if not isinstance(data, dict):
+        raise ValueError("The provided YAML must map to a dictionary at the top level.")
+
+ # Normalize known sections that suffer from YAML key ambiguities
+ if isinstance(data.get("traffic_matrix_set"), dict):
+ data["traffic_matrix_set"] = normalize_yaml_dict_keys(
+ data["traffic_matrix_set"] # type: ignore[arg-type]
+ )
+
+    # Early shape checks give clearer error messages before schema validation
+ network_section = data.get("network")
+ if isinstance(network_section, dict):
+ if "nodes" in network_section and not isinstance(
+ network_section["nodes"], dict
+ ):
+ raise ValueError("'nodes' must be a mapping")
+ if "links" in network_section and not isinstance(
+ network_section["links"], list
+ ):
+ raise ValueError("'links' must be a list")
+ if isinstance(network_section.get("links"), list):
+ for entry in network_section["links"]:
+ if not isinstance(entry, dict):
+ raise ValueError(
+ "Each link definition must be a mapping with 'source' and 'target'"
+ )
+ if "source" not in entry or "target" not in entry:
+ raise ValueError(
+ "Each link definition must include 'source' and 'target'"
+ )
+ if isinstance(network_section.get("nodes"), dict):
+ for _node_name, node_def in network_section["nodes"].items():
+ if isinstance(node_def, dict):
+ allowed = {"attrs", "disabled", "risk_groups"}
+ for k in node_def.keys():
+ if k not in allowed:
+ raise ValueError(
+ f"Unrecognized key '{k}' in node '{_node_name}'"
+ )
+
+ if isinstance(data.get("risk_groups"), list):
+ for rg in data["risk_groups"]:
+ if not isinstance(rg, dict) or "name" not in rg:
+ raise ValueError("RiskGroup entry missing 'name' field")
+
+ # JSON Schema validation
+ try:
+ import jsonschema # type: ignore
+ except Exception as exc: # pragma: no cover
+ raise RuntimeError(
+ "jsonschema is required for scenario validation. Install dev extras or add 'jsonschema' to dependencies."
+ ) from exc
+
+ try:
+ with (
+ resources.files("ngraph.schemas")
+ .joinpath("scenario.json")
+ .open("r", encoding="utf-8")
+ ) as f: # type: ignore[attr-defined]
+ schema_data = json.load(f)
+ except Exception as exc: # pragma: no cover
+ raise RuntimeError(
+ "Failed to locate packaged NetGraph scenario schema 'ngraph/schemas/scenario.json'."
+ ) from exc
+
+ jsonschema.validate(data, schema_data) # type: ignore[arg-type]
+
+ # Enforce allowed top-level keys
+ recognized_keys = {
+ "vars",
+ "blueprints",
+ "network",
+ "failure_policy_set",
+ "traffic_matrix_set",
+ "workflow",
+ "components",
+ "risk_groups",
+ "seed",
+ }
+ extra = set(data.keys()) - recognized_keys
+ if extra:
+ raise ValueError(
+ f"Unrecognized top-level key(s) in scenario: {', '.join(sorted(extra))}. "
+ f"Allowed keys are {sorted(recognized_keys)}"
+ )
+
+ return data
diff --git a/ngraph/exec/analysis/__init__.py b/ngraph/exec/analysis/__init__.py
new file mode 100644
index 0000000..403527b
--- /dev/null
+++ b/ngraph/exec/analysis/__init__.py
@@ -0,0 +1,19 @@
+"""Analysis functions for network evaluation.
+
+Provides domain-specific analysis functions designed for use with FailureManager.
+Functions follow the AnalysisFunction protocol: they accept a network with exclusion
+sets and return structured results. All functions use only hashable parameters to
+support multiprocessing and caching.
+"""
+
+from .flow import (
+ demand_placement_analysis,
+ max_flow_analysis,
+ sensitivity_analysis,
+)
+
+__all__ = [
+ "max_flow_analysis",
+ "demand_placement_analysis",
+ "sensitivity_analysis",
+]
diff --git a/ngraph/exec/analysis/flow.py b/ngraph/exec/analysis/flow.py
new file mode 100644
index 0000000..f0ed74e
--- /dev/null
+++ b/ngraph/exec/analysis/flow.py
@@ -0,0 +1,450 @@
+"""Flow analysis functions for network evaluation.
+
+These functions are designed for use with FailureManager and follow the
+AnalysisFunction protocol: analysis_func(network: Network, excluded_nodes: Set[str],
+excluded_links: Set[str], **kwargs) -> Any.
+
+All functions accept only simple, hashable parameters to ensure compatibility
+with FailureManager's caching and multiprocessing systems.
+
+Graph caching enables efficient repeated analysis with different exclusion
+sets by building the graph once and using O(|excluded|) masks for exclusions.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Optional, Set
+
+import netgraph_core
+
+from ngraph.adapters.core import (
+ GraphCache,
+ build_edge_mask,
+ build_graph_cache,
+ build_node_mask,
+)
+from ngraph.exec.demand.expand import expand_demands
+from ngraph.model.demand.spec import TrafficDemand
+from ngraph.model.flow.policy_config import FlowPolicyPreset, create_flow_policy
+from ngraph.results.flow import FlowEntry, FlowIterationResult, FlowSummary
+from ngraph.solver.maxflow import (
+ MaxFlowGraphCache,
+ build_maxflow_cache,
+ max_flow,
+ max_flow_with_details,
+)
+from ngraph.solver.maxflow import (
+ sensitivity_analysis as solver_sensitivity_analysis,
+)
+from ngraph.types.base import FlowPlacement
+
+if TYPE_CHECKING:
+ from ngraph.model.network import Network
+
+
+def max_flow_analysis(
+ network: "Network",
+ excluded_nodes: Set[str],
+ excluded_links: Set[str],
+ source_regex: str,
+ sink_regex: str,
+ mode: str = "combine",
+ shortest_path: bool = False,
+ flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
+ include_flow_details: bool = False,
+ include_min_cut: bool = False,
+ _graph_cache: Optional[MaxFlowGraphCache] = None,
+ **kwargs,
+) -> FlowIterationResult:
+ """Analyze maximum flow capacity between node groups.
+
+ When `_graph_cache` is provided, uses O(|excluded|) mask building instead
+ of O(V+E) graph reconstruction for efficient repeated analysis.
+
+ Args:
+ network: Network instance.
+ excluded_nodes: Set of node names to exclude temporarily.
+ excluded_links: Set of link IDs to exclude temporarily.
+ source_regex: Regex pattern for source node groups.
+ sink_regex: Regex pattern for sink node groups.
+ mode: Flow analysis mode ("combine" or "pairwise").
+ shortest_path: Whether to use shortest paths only.
+ flow_placement: Flow placement strategy.
+ include_flow_details: Whether to collect cost distribution and similar details.
+ include_min_cut: Whether to include min-cut edge list in entry data.
+ _graph_cache: Pre-built cache for efficient repeated analysis.
+ **kwargs: Ignored. Accepted for interface compatibility.
+
+ Returns:
+ FlowIterationResult describing this iteration.
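+
+    Example (illustrative; the regexes and network object are assumptions)::
+
+        result = max_flow_analysis(
+            network,
+            excluded_nodes=set(),
+            excluded_links=set(),
+            source_regex="^dc1/.*",
+            sink_regex="^dc2/.*",
+        )
+        print(result.summary.total_placed)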
+ """
+ flow_entries: list[FlowEntry] = []
+ total_demand = 0.0
+ total_placed = 0.0
+
+ if include_flow_details or include_min_cut:
+ flows = max_flow_with_details(
+ network,
+ source_regex,
+ sink_regex,
+ mode=mode,
+ shortest_path=shortest_path,
+ flow_placement=flow_placement,
+ excluded_nodes=excluded_nodes,
+ excluded_links=excluded_links,
+ _cache=_graph_cache,
+ )
+ for (src, dst), summary in flows.items():
+ value = float(summary.total_flow)
+ cost_dist = getattr(summary, "cost_distribution", {}) or {}
+ min_cut = getattr(summary, "saturated_edges", []) or []
+ entry = FlowEntry(
+ source=str(src),
+ destination=str(dst),
+ priority=0,
+ demand=value,
+ placed=value,
+ dropped=0.0,
+ cost_distribution=(
+ {float(k): float(v) for k, v in cost_dist.items()}
+ if include_flow_details
+ else {}
+ ),
+ data=(
+ {"edges": [str(e) for e in min_cut], "edges_kind": "min_cut"}
+ if include_min_cut and min_cut
+ else {}
+ ),
+ )
+ flow_entries.append(entry)
+ total_demand += value
+ total_placed += value
+ else:
+ flows = max_flow(
+ network,
+ source_regex,
+ sink_regex,
+ mode=mode,
+ shortest_path=shortest_path,
+ flow_placement=flow_placement,
+ excluded_nodes=excluded_nodes,
+ excluded_links=excluded_links,
+ _cache=_graph_cache,
+ )
+ for (src, dst), val in flows.items():
+ value = float(val)
+ entry = FlowEntry(
+ source=str(src),
+ destination=str(dst),
+ priority=0,
+ demand=value,
+ placed=value,
+ dropped=0.0,
+ )
+ flow_entries.append(entry)
+ total_demand += value
+ total_placed += value
+
+ overall_ratio = (total_placed / total_demand) if total_demand > 0 else 1.0
+ dropped_flows = sum(1 for e in flow_entries if e.dropped > 0.0)
+ summary = FlowSummary(
+ total_demand=total_demand,
+ total_placed=total_placed,
+ overall_ratio=overall_ratio,
+ dropped_flows=dropped_flows,
+ num_flows=len(flow_entries),
+ )
+ return FlowIterationResult(flows=flow_entries, summary=summary)
+
+
+def demand_placement_analysis(
+ network: "Network",
+ excluded_nodes: Set[str],
+ excluded_links: Set[str],
+ demands_config: list[dict[str, Any]],
+ placement_rounds: int | str = "auto",
+ include_flow_details: bool = False,
+ include_used_edges: bool = False,
+ _graph_cache: Optional[GraphCache] = None,
+ **kwargs,
+) -> FlowIterationResult:
+ """Analyze traffic demand placement success rates using Core directly.
+
+ This function:
+ 1. Builds Core infrastructure (graph, algorithms, flow_graph) or uses cached
+ 2. Expands demands into concrete (src, dst, volume) tuples
+ 3. Places each demand using Core's FlowPolicy with exclusion masks
+ 4. Aggregates results into FlowIterationResult
+
+ Args:
+ network: Network instance.
+ excluded_nodes: Set of node names to exclude temporarily.
+ excluded_links: Set of link IDs to exclude temporarily.
+ demands_config: List of demand configurations (serializable dicts).
+        placement_rounds: Number of placement optimization rounds (unused; Core handles rounds internally).
+ include_flow_details: When True, include cost_distribution per flow.
+ include_used_edges: When True, include set of used edges per demand in entry data.
+ _graph_cache: Pre-built graph cache for fast repeated analysis.
+ **kwargs: Ignored. Accepted for interface compatibility.
+
+ Returns:
+ FlowIterationResult describing this iteration.
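+
+    Example demands_config entry (illustrative values)::
+
+        {"source_path": "^a$", "sink_path": "^b$", "demand": 10.0,
+         "mode": "pairwise", "priority": 0}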
+ """
+ # Reconstruct TrafficDemand objects from config
+ traffic_demands = []
+ for config in demands_config:
+ demand = TrafficDemand(
+ source_path=config["source_path"],
+ sink_path=config["sink_path"],
+ demand=config["demand"],
+ mode=config.get("mode", "pairwise"),
+ flow_policy_config=config.get("flow_policy_config"),
+ priority=config.get("priority", 0),
+ )
+ traffic_demands.append(demand)
+
+ # Phase 1: Expand demands (pure logic, returns names + augmentations)
+ expansion = expand_demands(
+ network,
+ traffic_demands,
+ default_policy_preset=FlowPolicyPreset.SHORTEST_PATHS_ECMP,
+ )
+
+ # Phase 2: Use cached graph infrastructure or build fresh
+ if _graph_cache is not None:
+ cache = _graph_cache
+ else:
+        # Build fresh cache (slower path for direct calls without a pre-built cache)
+ cache = build_graph_cache(network, augmentations=expansion.augmentations)
+
+ graph_handle = cache.graph_handle
+ multidigraph = cache.multidigraph
+ edge_mapper = cache.edge_mapper
+ node_mapper = cache.node_mapper
+ algorithms = cache.algorithms
+
+ # Build masks for exclusions (consistent behavior for both paths)
+ node_mask = None
+ edge_mask = None
+ if excluded_nodes or excluded_links:
+ node_mask = build_node_mask(cache, excluded_nodes)
+ edge_mask = build_edge_mask(cache, excluded_links)
+
+ flow_graph = netgraph_core.FlowGraph(multidigraph)
+
+ # Phase 3: Place demands using Core FlowPolicy
+ flow_entries: list[FlowEntry] = []
+ total_demand = 0.0
+ total_placed = 0.0
+
+ for demand in expansion.demands:
+ # Resolve node names to IDs (includes pseudo nodes from augmentations)
+ src_id = node_mapper.to_id(demand.src_name)
+ dst_id = node_mapper.to_id(demand.dst_name)
+
+ # Create FlowPolicy for this demand with masks
+ policy = create_flow_policy(
+ algorithms,
+ graph_handle,
+ demand.policy_preset,
+ node_mask=node_mask,
+ edge_mask=edge_mask,
+ )
+
+ # Place demand using Core
+ placed, flow_count = policy.place_demand(
+ flow_graph,
+ src_id,
+ dst_id,
+ demand.priority, # flowClass
+ demand.volume,
+ )
+
+ # Collect flow details if requested
+ cost_distribution: dict[float, float] = {}
+ used_edges: set[str] = set()
+
+ if include_flow_details or include_used_edges:
+ # Get flows from policy
+ flows_dict = policy.flows
+ for flow_key, flow_data in flows_dict.items():
+ # flow_key is (src, dst, flowClass, flowId)
+ # flow_data is (src, dst, cost, placed_flow)
+ if include_flow_details:
+ cost = float(flow_data[2])
+ flow_vol = float(flow_data[3])
+ if flow_vol > 0:
+ cost_distribution[cost] = (
+ cost_distribution.get(cost, 0.0) + flow_vol
+ )
+
+ if include_used_edges:
+ # Get edges for this flow
+ flow_idx = netgraph_core.FlowIndex(
+ flow_key[0], flow_key[1], flow_key[2], flow_key[3]
+ )
+ edges = flow_graph.get_flow_edges(flow_idx)
+ for edge_id, _ in edges:
+ edge_ref = edge_mapper.to_ref(edge_id, multidigraph)
+ if edge_ref is not None:
+ used_edges.add(f"{edge_ref.link_id}:{edge_ref.direction}")
+
+ # Build entry data
+ entry_data: dict[str, Any] = {}
+ if include_used_edges and used_edges:
+ entry_data["edges"] = sorted(used_edges)
+ entry_data["edges_kind"] = "used"
+
+ # Create flow entry
+ entry = FlowEntry(
+ source=demand.src_name,
+ destination=demand.dst_name,
+ priority=demand.priority,
+ demand=demand.volume,
+ placed=placed,
+ dropped=demand.volume - placed,
+ cost_distribution=cost_distribution if include_flow_details else {},
+ data=entry_data,
+ )
+ flow_entries.append(entry)
+ total_demand += demand.volume
+ total_placed += placed
+
+ # Build summary
+ overall_ratio = (total_placed / total_demand) if total_demand > 0 else 1.0
+ dropped_flows = sum(1 for e in flow_entries if e.dropped > 0.0)
+ summary = FlowSummary(
+ total_demand=total_demand,
+ total_placed=total_placed,
+ overall_ratio=overall_ratio,
+ dropped_flows=dropped_flows,
+ num_flows=len(flow_entries),
+ )
+
+ return FlowIterationResult(
+ flows=flow_entries,
+ summary=summary,
+ data={},
+ )
+
+
+def sensitivity_analysis(
+ network: "Network",
+ excluded_nodes: Set[str],
+ excluded_links: Set[str],
+ source_regex: str,
+ sink_regex: str,
+ mode: str = "combine",
+ shortest_path: bool = False,
+ flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
+ _graph_cache: Optional[MaxFlowGraphCache] = None,
+ **kwargs,
+) -> dict[str, dict[str, float]]:
+ """Analyze component sensitivity to failures.
+
+ Identifies critical edges (saturated edges) and computes the flow reduction
+ caused by removing each one.
+
+ When `_graph_cache` is provided, uses O(|excluded|) mask building instead
+ of O(V+E) graph reconstruction for efficient repeated analysis.
+
+ Args:
+ network: Network instance.
+ excluded_nodes: Set of node names to exclude temporarily.
+ excluded_links: Set of link IDs to exclude temporarily.
+ source_regex: Regex pattern for source node groups.
+ sink_regex: Regex pattern for sink node groups.
+ mode: Flow analysis mode ("combine" or "pairwise").
+ shortest_path: If True, use single-tier shortest-path flow (IP/IGP mode).
+ Reports only edges used under ECMP routing. If False (default), use
+ full iterative max-flow (SDN/TE mode) and report all saturated edges.
+ flow_placement: Flow placement strategy.
+ _graph_cache: Pre-built cache for efficient repeated analysis.
+ **kwargs: Ignored. Accepted for interface compatibility.
+
+ Returns:
+ Dictionary mapping flow keys ("src->dst") to dictionaries of component
+ identifiers mapped to sensitivity scores.
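+
+    Example return shape (illustrative; keys and scores are hypothetical)::
+
+        {"dc1/leaf->dc2/leaf": {"edge-42": 3.5}}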
+ """
+ results = solver_sensitivity_analysis(
+ network,
+ source_regex,
+ sink_regex,
+ mode=mode,
+ shortest_path=shortest_path,
+ flow_placement=flow_placement,
+ excluded_nodes=excluded_nodes,
+ excluded_links=excluded_links,
+ _cache=_graph_cache,
+ )
+
+ # Remap keys from tuple (src, dst) to string "src->dst"
+ out = {}
+ for (src, dst), components in results.items():
+ key = f"{src}->{dst}"
+ out[key] = components
+
+ return out
+
+
+def build_demand_graph_cache(
+ network: "Network",
+ demands_config: list[dict[str, Any]],
+) -> GraphCache:
+ """Build a graph cache for repeated demand placement analysis.
+
+ Pre-computes the graph with augmentations (pseudo source/sink nodes) for
+ efficient repeated analysis with different exclusion sets.
+
+ Args:
+ network: Network instance.
+ demands_config: List of demand configurations (same format as demand_placement_analysis).
+
+ Returns:
+ GraphCache ready for use with demand_placement_analysis.
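+
+    Example (illustrative)::
+
+        cache = build_demand_graph_cache(network, demands_config)
+        result = demand_placement_analysis(
+            network, set(), set(), demands_config, _graph_cache=cache
+        )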
+ """
+ # Reconstruct TrafficDemand objects
+ traffic_demands = []
+ for config in demands_config:
+ demand = TrafficDemand(
+ source_path=config["source_path"],
+ sink_path=config["sink_path"],
+ demand=config["demand"],
+ mode=config.get("mode", "pairwise"),
+ flow_policy_config=config.get("flow_policy_config"),
+ priority=config.get("priority", 0),
+ )
+ traffic_demands.append(demand)
+
+ # Expand demands to get augmentations
+ expansion = expand_demands(
+ network,
+ traffic_demands,
+ default_policy_preset=FlowPolicyPreset.SHORTEST_PATHS_ECMP,
+ )
+
+ # Build cache with augmentations
+ return build_graph_cache(network, augmentations=expansion.augmentations)
+
+
+def build_maxflow_graph_cache(
+ network: "Network",
+ source_regex: str,
+ sink_regex: str,
+ mode: str = "combine",
+) -> MaxFlowGraphCache:
+ """Build a graph cache for repeated max-flow analysis.
+
+ Pre-computes the graph with pseudo source/sink nodes for all source/sink
+ pairs, enabling O(|excluded|) mask building per iteration.
+
+ Args:
+ network: Network instance.
+ source_regex: Regex pattern for source node groups.
+ sink_regex: Regex pattern for sink node groups.
+ mode: Flow analysis mode ("combine" or "pairwise").
+
+ Returns:
+ MaxFlowGraphCache ready for use with max_flow_analysis or sensitivity_analysis.
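+
+    Example (illustrative; regexes are assumptions)::
+
+        cache = build_maxflow_graph_cache(network, "^a/.*", "^b/.*")
+        result = max_flow_analysis(
+            network, set(), set(), "^a/.*", "^b/.*", _graph_cache=cache
+        )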
+ """
+ return build_maxflow_cache(network, source_regex, sink_regex, mode=mode)
diff --git a/ngraph/monte_carlo/types.py b/ngraph/exec/analysis/types.py
similarity index 92%
rename from ngraph/monte_carlo/types.py
rename to ngraph/exec/analysis/types.py
index b2ec04d..2e5b9a0 100644
--- a/ngraph/monte_carlo/types.py
+++ b/ngraph/exec/analysis/types.py
@@ -1,6 +1,7 @@
-"""Typed protocols for Monte Carlo analysis IPC payloads.
+"""Typed protocols for analysis IPC payloads.
-Defines lightweight, serializable structures used across worker boundaries.
+Defines lightweight, serializable structures used across worker boundaries
+during parallel analysis execution.
"""
from __future__ import annotations
diff --git a/ngraph/demand/manager/__init__.py b/ngraph/exec/demand/__init__.py
similarity index 100%
rename from ngraph/demand/manager/__init__.py
rename to ngraph/exec/demand/__init__.py
diff --git a/ngraph/demand/manager/builder.py b/ngraph/exec/demand/builder.py
similarity index 83%
rename from ngraph/demand/manager/builder.py
rename to ngraph/exec/demand/builder.py
index d68ccb8..65f738e 100644
--- a/ngraph/demand/manager/builder.py
+++ b/ngraph/exec/demand/builder.py
@@ -8,10 +8,10 @@
from typing import Any, Dict, List, Optional
-from ngraph.demand.matrix import TrafficMatrixSet
-from ngraph.demand.spec import TrafficDemand
-from ngraph.flows.policy import FlowPolicyConfig
-from ngraph.yaml_utils import normalize_yaml_dict_keys
+from ngraph.model.demand.matrix import TrafficMatrixSet
+from ngraph.model.demand.spec import TrafficDemand
+from ngraph.model.flow.policy_config import FlowPolicyPreset
+from ngraph.utils.yaml_utils import normalize_yaml_dict_keys
def build_traffic_matrix_set(raw: Dict[str, List[dict]]) -> TrafficMatrixSet:
@@ -46,7 +46,7 @@ def build_traffic_matrix_set(raw: Dict[str, List[dict]]) -> TrafficMatrixSet:
f"Entries in matrix '{name}' must be dicts, got {type(d).__name__}"
)
- # Coerce flow_policy_config into FlowPolicyConfig enum when provided
+ # Coerce flow_policy_config into FlowPolicyPreset enum when provided
if "flow_policy_config" in d:
d = dict(d) # shallow copy to avoid mutating caller data
d["flow_policy_config"] = _coerce_flow_policy_config(
@@ -60,12 +60,12 @@ def build_traffic_matrix_set(raw: Dict[str, List[dict]]) -> TrafficMatrixSet:
return tms
-def _coerce_flow_policy_config(value: Any) -> Optional[FlowPolicyConfig]:
- """Return a FlowPolicyConfig from various user-friendly forms.
+def _coerce_flow_policy_config(value: Any) -> Optional[FlowPolicyPreset]:
+ """Return a FlowPolicyPreset from various user-friendly forms.
Accepts:
- None: returns None
- - FlowPolicyConfig: returned as-is
+ - FlowPolicyPreset: returned as-is
- int: mapped by value (e.g., 1 -> SHORTEST_PATHS_ECMP)
- str: name of enum (case-insensitive); numeric strings are allowed
@@ -74,11 +74,11 @@ def _coerce_flow_policy_config(value: Any) -> Optional[FlowPolicyConfig]:
"""
if value is None:
return None
- if isinstance(value, FlowPolicyConfig):
+ if isinstance(value, FlowPolicyPreset):
return value
if isinstance(value, int):
try:
- return FlowPolicyConfig(value)
+ return FlowPolicyPreset(value)
except Exception as exc: # pragma: no cover - validated by enum
raise ValueError(f"Unknown flow policy config value: {value}") from exc
if isinstance(value, str):
@@ -88,12 +88,12 @@ def _coerce_flow_policy_config(value: Any) -> Optional[FlowPolicyConfig]:
# Allow numeric strings
if s.isdigit():
try:
- return FlowPolicyConfig(int(s))
+ return FlowPolicyPreset(int(s))
except Exception as exc:
raise ValueError(f"Unknown flow policy config value: {s}") from exc
# Enum name lookup (case-insensitive)
try:
- return FlowPolicyConfig[s.upper()]
+ return FlowPolicyPreset[s.upper()]
except KeyError as exc:
raise ValueError(f"Unknown flow policy config: {value}") from exc
diff --git a/ngraph/exec/demand/expand.py b/ngraph/exec/demand/expand.py
new file mode 100644
index 0000000..9bffe43
--- /dev/null
+++ b/ngraph/exec/demand/expand.py
@@ -0,0 +1,212 @@
+"""Demand expansion: converts TrafficDemand specs into concrete placement demands.
+
+Supports pairwise mode directly and combine mode via augmentation-based pseudo nodes.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import List
+
+from ngraph.adapters.core import AugmentationEdge
+from ngraph.model.demand.spec import TrafficDemand
+from ngraph.model.flow.policy_config import FlowPolicyPreset
+from ngraph.model.network import Network
+
+# Pseudo edge capacity (avoid float('inf') due to Core int64 limitation)
+PSEUDO_EDGE_CAPACITY = 1e15
+
+
+@dataclass
+class ExpandedDemand:
+ """Concrete demand ready for placement.
+
+ Uses node names (not IDs) so expansion happens before graph building.
+ Node IDs are resolved after the graph is built with pseudo nodes.
+
+ Attributes:
+ src_name: Source node name (real or pseudo).
+ dst_name: Destination node name (real or pseudo).
+ volume: Traffic volume to place.
+ priority: Priority class (lower is higher priority).
+ policy_preset: FlowPolicy configuration preset.
+ demand_id: Parent TrafficDemand ID (for tracking).
+ """
+
+ src_name: str
+ dst_name: str
+ volume: float
+ priority: int
+ policy_preset: FlowPolicyPreset
+ demand_id: str
+
+
+@dataclass
+class DemandExpansion:
+ """Demand expansion result.
+
+ Attributes:
+ demands: Concrete demands ready for placement (sorted by priority).
+ augmentations: Augmentation edges for pseudo nodes (empty for pairwise).
+ """
+
+ demands: List[ExpandedDemand]
+ augmentations: List[AugmentationEdge]
+
+
+def _collect_active_node_names(groups) -> list[str]:
+ """Extract active (non-disabled) node names from selection groups."""
+ return [n.name for nodes in groups.values() for n in nodes if not n.disabled]
+
+
+def _collect_active_nodes(groups) -> list:
+ """Extract active (non-disabled) nodes from selection groups."""
+ return [n for nodes in groups.values() for n in nodes if not n.disabled]
+
+
+def _expand_combine(
+ td: TrafficDemand,
+ src_groups,
+ dst_groups,
+ policy_preset: FlowPolicyPreset,
+) -> tuple[list[ExpandedDemand], list[AugmentationEdge]]:
+ """Expand combine mode: aggregate sources/sinks through pseudo nodes."""
+ pseudo_src = f"_src_{td.id}"
+ pseudo_snk = f"_snk_{td.id}"
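+    # e.g., td.id "abc123" yields pseudo nodes "_src_abc123" and "_snk_abc123"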
+
+ src_names = _collect_active_node_names(src_groups)
+ dst_names = _collect_active_node_names(dst_groups)
+
+ if not src_names or not dst_names:
+ return [], []
+
+ augmentations = []
+
+ # Pseudo-source → real sources (unidirectional OUT)
+ for src_name in src_names:
+ augmentations.append(
+ AugmentationEdge(pseudo_src, src_name, PSEUDO_EDGE_CAPACITY, 0)
+ )
+
+ # Real sinks → pseudo-sink (unidirectional IN)
+ for dst_name in dst_names:
+ augmentations.append(
+ AugmentationEdge(dst_name, pseudo_snk, PSEUDO_EDGE_CAPACITY, 0)
+ )
+
+ # Single aggregated demand
+ demand = ExpandedDemand(
+ src_name=pseudo_src,
+ dst_name=pseudo_snk,
+ volume=td.demand,
+ priority=td.priority,
+ policy_preset=policy_preset,
+ demand_id=td.id,
+ )
+
+ return [demand], augmentations
+
+
+def _expand_pairwise(
+ td: TrafficDemand,
+ src_groups,
+ dst_groups,
+ policy_preset: FlowPolicyPreset,
+) -> tuple[list[ExpandedDemand], list[AugmentationEdge]]:
+ """Expand pairwise mode: create demand for each (src, dst) pair."""
+ src_nodes = _collect_active_nodes(src_groups)
+ dst_nodes = _collect_active_nodes(dst_groups)
+
+ # Filter self-pairs
+ pairs = [
+ (src, dst) for src in src_nodes for dst in dst_nodes if src.name != dst.name
+ ]
+
+ if not pairs:
+ return [], []
+
+ # Distribute volume evenly
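+    # e.g., demand=9.0 across 3 pairs places 3.0 per pair (numbers hypothetical)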
+ volume_per_pair = td.demand / len(pairs)
+
+ demands = [
+ ExpandedDemand(
+ src_name=src.name,
+ dst_name=dst.name,
+ volume=volume_per_pair,
+ priority=td.priority,
+ policy_preset=policy_preset,
+ demand_id=td.id,
+ )
+ for src, dst in pairs
+ ]
+
+ return demands, [] # No augmentations for pairwise
+
+
+def expand_demands(
+ network: Network,
+ traffic_demands: List[TrafficDemand],
+ default_policy_preset: FlowPolicyPreset = FlowPolicyPreset.SHORTEST_PATHS_ECMP,
+) -> DemandExpansion:
+ """Expand TrafficDemand specifications into concrete demands with augmentations.
+
+ Pure function that:
+ 1. Selects node groups using Network's selection API
+ 2. Distributes volume based on mode (combine/pairwise)
+ 3. Generates augmentation edges for combine mode (pseudo nodes)
+ 4. Returns demands (node names) + augmentations
+
+    Node names are used (not IDs) so expansion happens before graph building.
+    IDs are resolved after the graph is built with augmentations.
+
+ Args:
+ network: Network for node selection.
+ traffic_demands: High-level demand specifications.
+ default_policy_preset: Default policy if demand doesn't specify one.
+
+ Returns:
+ DemandExpansion with demands and augmentations.
+
+ Raises:
+ ValueError: If no demands could be expanded or an unknown mode is given.
+ """
+ all_demands: List[ExpandedDemand] = []
+ all_augmentations: List[AugmentationEdge] = []
+
+ for td in traffic_demands:
+ # Select node groups
+ src_groups = network.select_node_groups_by_path(td.source_path)
+ dst_groups = network.select_node_groups_by_path(td.sink_path)
+
+ if not src_groups or not dst_groups:
+ continue
+
+ policy_preset = td.flow_policy_config or default_policy_preset
+
+ # Expand based on mode
+ if td.mode == "combine":
+ demands, augmentations = _expand_combine(
+ td, src_groups, dst_groups, policy_preset
+ )
+ elif td.mode == "pairwise":
+ demands, augmentations = _expand_pairwise(
+ td, src_groups, dst_groups, policy_preset
+ )
+ else:
+ raise ValueError(f"Unknown demand mode: {td.mode}")
+
+ all_demands.extend(demands)
+ all_augmentations.extend(augmentations)
+
+ if not all_demands:
+ raise ValueError(
+ "No demands could be expanded. Possible causes:\n"
+ " - Source/sink paths don't match any nodes\n"
+ " - All matching nodes are disabled\n"
+ " - Source and sink are identical (self-loops not allowed)"
+ )
+
+ # Sort by priority (lower = higher priority)
+ sorted_demands = sorted(all_demands, key=lambda d: d.priority)
+
+ return DemandExpansion(demands=sorted_demands, augmentations=all_augmentations)
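+
+
+# Usage sketch (assumes a populated Network; names and values are illustrative):
+#
+#     td = TrafficDemand(source_path="^dc1/", sink_path="^dc2/",
+#                        demand=10.0, mode="combine")
+#     expansion = expand_demands(network, [td])
+#     # -> one aggregate demand _src_<td.id> -> _snk_<td.id> at volume 10.0,
+#     #    plus one augmentation edge per active source and sink node. The
+#     #    augmentations must be applied to the graph before node IDs resolve.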
diff --git a/ngraph/failure/manager/__init__.py b/ngraph/exec/failure/__init__.py
similarity index 100%
rename from ngraph/failure/manager/__init__.py
rename to ngraph/exec/failure/__init__.py
diff --git a/ngraph/failure/manager/manager.py b/ngraph/exec/failure/manager.py
similarity index 79%
rename from ngraph/failure/manager/manager.py
rename to ngraph/exec/failure/manager.py
index dece5a3..22a083b 100644
--- a/ngraph/failure/manager/manager.py
+++ b/ngraph/exec/failure/manager.py
@@ -1,46 +1,43 @@
"""FailureManager for Monte Carlo failure analysis.
Provides the failure analysis engine for NetGraph. Supports parallel
-processing, per-worker caching, and failure policy handling for workflow steps
+processing, graph caching, and failure policy handling for workflow steps
and direct programmatic use.
Performance characteristics:
-Time complexity: O(I × A / P), where I is iteration count, A is analysis cost,
-and P is parallelism. Worker-local caching reduces repeated work when exclusion
-sets repeat across iterations. Network serialization happens once per worker,
-not per iteration.
-
-Space complexity: O(V + E + I × R + C), where V and E are node and link counts,
-R is result size per iteration, and C is cache size. The per-worker cache is
-bounded and evicts in FIFO order after 1000 unique patterns.
-
-Parallelism: For small iteration counts, serial execution avoids IPC overhead.
-For larger workloads, parallel execution benefits from worker caching and CPU
-utilization. Optimal parallelism is the number of CPU cores for analysis-bound
-workloads.
+Time complexity: O(S + I × A / P), where S is one-time graph setup cost,
+I is iteration count, A is per-iteration analysis cost, and P is parallelism.
+Graph caching amortizes expensive graph construction across all iterations,
+and O(|excluded|) mask building replaces O(V+E) iteration.
+
+Space complexity: O(V + E + I × R), where V and E are node and link counts,
+and R is result size per iteration. The pre-built graph is shared across
+all iterations.
+
+Parallelism: The C++ Core backend releases the GIL during computation,
+enabling true parallelism with Python threads. With graph caching, most
+per-iteration work happens in GIL-free C++ code, achieving near-linear
+scaling with thread count.
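+
+Example with illustrative numbers: with S = 2 s of setup, I = 1000 iterations,
+A = 10 ms, and P = 8 threads, expected runtime is roughly
+2 + (1000 × 0.01) / 8 ≈ 3.25 s, versus 2 + 1000 × 0.01 = 12 s serially.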
"""
from __future__ import annotations
import hashlib
-import logging
import os
-import pickle
import time
-from concurrent.futures import ProcessPoolExecutor
+from concurrent.futures import ThreadPoolExecutor
from typing import TYPE_CHECKING, Any, Dict, Protocol, Set, TypeVar
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.failure.policy_set import FailurePolicySet
from ngraph.logging import get_logger
-from ngraph.model.view import NetworkView
+from ngraph.model.failure.policy_set import FailurePolicySet
+from ngraph.types.base import FlowPlacement
if TYPE_CHECKING:
import cProfile
from ngraph.model.network import Network
-from ngraph.failure.policy import FailurePolicy
+from ngraph.model.failure.policy import FailurePolicy
logger = get_logger(__name__)
@@ -95,6 +92,46 @@ def _create_cache_key(
return base_key + (tuple(hashable_kwargs),)
+def _shallow_copy_result(value: Any) -> Any:
+ """Create a shallow copy of a result object for deduplication expansion.
+
+ For FlowIterationResult-like objects, creates a new instance that shares
+ the expensive flows list and summary but has its own identity fields
+ (failure_id, failure_state) that can be set independently.
+
+ This avoids the overhead of deepcopy while preventing aliasing issues
+ when we later mutate failure_id and failure_state per iteration.
+
+ Args:
+ value: Result object to copy (typically FlowIterationResult).
+
+ Returns:
+ A shallow copy suitable for independent mutation of identity fields.
+ """
+ # Import here to avoid circular imports
+ from ngraph.results.flow import FlowIterationResult
+
+ if isinstance(value, FlowIterationResult):
+ # Create new instance sharing flows and summary (read-only after creation)
+ # but with fresh identity fields for per-iteration mutation
+ return FlowIterationResult(
+ failure_id=value.failure_id,
+ failure_state=value.failure_state,
+ flows=value.flows, # Share reference - never mutated after creation
+ summary=value.summary, # Share reference - never mutated after creation
+ data=dict(value.data) if value.data else {}, # Shallow copy of data dict
+ )
+
+ # For dict-like objects with known structure, shallow copy
+ if isinstance(value, dict):
+ return dict(value)
+
+ # Fallback: use copy.copy for shallow copy (faster than deepcopy)
+ from copy import copy
+
+ return copy(value)
+
+
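+# Why a shallow copy suffices (sketch): deduplicated iterations share the
+# expensive payload but get independent identity fields.
+#
+#     a = _shallow_copy_result(base_result)
+#     b = _shallow_copy_result(base_result)
+#     a.failure_id, b.failure_id = "iter_0", "iter_1"  # mutate independently
+#     assert a.flows is b.flows  # flows list is shared, never copied
+
+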
def _auto_adjust_parallelism(parallelism: int, analysis_func: Any) -> int:
"""Adjust parallelism based on function characteristics.
@@ -118,7 +155,7 @@ def _auto_adjust_parallelism(parallelism: int, analysis_func: Any) -> int:
return parallelism
-# Global shared state for worker processes
+# Global shared state for worker threads
_shared_network: "Network | None" = None
T = TypeVar("T")
@@ -127,41 +164,40 @@ def _auto_adjust_parallelism(parallelism: int, analysis_func: Any) -> int:
class AnalysisFunction(Protocol):
"""Protocol for analysis functions used with FailureManager.
- Analysis functions should take a NetworkView and any additional
+ Analysis functions should take a Network, exclusion sets, and any additional
keyword arguments, returning analysis results of any type.
"""
- def __call__(self, network_view: NetworkView, **kwargs) -> Any:
- """Execute analysis on network view with optional parameters."""
+ def __call__(
+ self,
+ network: "Network",
+ excluded_nodes: Set[str],
+ excluded_links: Set[str],
+ **kwargs,
+ ) -> Any:
+ """Execute analysis on network with exclusions and optional parameters."""
...
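+
+
+# A conforming analysis function in miniature (hypothetical helper, not part
+# of the library):
+#
+#     def count_surviving_links(network, excluded_nodes, excluded_links, **kwargs):
+#         return sum(1 for link_id in network.links if link_id not in excluded_links)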
-def _worker_init(network_pickle: bytes) -> None:
- """Initialize worker process with shared network and clear cache.
+def _worker_init(network: "Network") -> None:
+ """Initialize worker thread with shared network reference.
- Called once per worker process lifetime via ProcessPoolExecutor's
- initializer mechanism. Network is deserialized once per worker (not per task)
- to avoid repeated serialization overhead. Process boundaries provide
- isolation so no cross-contamination is possible.
+ Called once per worker thread lifetime via ThreadPoolExecutor's
+ initializer mechanism. Network is shared by reference (zero-copy)
+ across all threads, which is safe since the network is read-only
+ during analysis.
Args:
- network_pickle: Serialized Network object to deserialize and share.
+ network: Network object to share by reference across threads.
"""
global _shared_network
- # Each worker process has its own copy of globals (process isolation)
- _shared_network = pickle.loads(network_pickle)
-
- # Respect parent-requested log level if provided
- env_level = os.getenv("NGRAPH_LOG_LEVEL")
- if env_level:
- level_value = getattr(logging, env_level.upper(), logging.INFO)
- from ngraph.logging import set_global_log_level
-
- set_global_log_level(level_value)
+ # Module globals are shared across threads; assigning here makes the
+ # network reference visible to every worker thread.
+ _shared_network = network
worker_logger = get_logger(f"{__name__}.worker")
- worker_logger.debug(f"Worker {os.getpid()} initialized with network")
+ worker_logger.debug("Worker thread initialized with network reference")
def _generic_worker(args: tuple[Any, ...]) -> tuple[Any, int, bool, set[str], set[str]]:
@@ -206,32 +242,33 @@ def _generic_worker(args: tuple[Any, ...]) -> tuple[Any, int, bool, set[str], se
import cProfile
profiler = cProfile.Profile()
- profiler.enable()
+ try:
+ profiler.enable()
+ except ValueError:
+ # Another profiler is already active (e.g., pytest-cov in threading mode)
+ profiler = None
+ collect_profile = False
- worker_pid = os.getpid()
+ import threading
+
+ worker_id = threading.current_thread().name
worker_logger.debug(
- f"Worker {worker_pid} starting: iteration={iteration_index}, "
+ f"Worker {worker_id} starting: iteration={iteration_index}, "
f"excluded_nodes={len(excluded_nodes)}, excluded_links={len(excluded_links)}"
)
- # Use NetworkView for exclusion without copying network
- worker_logger.debug(f"Worker {worker_pid} computing analysis")
- network_view = NetworkView.from_excluded_sets(
- _shared_network,
- excluded_nodes=excluded_nodes,
- excluded_links=excluded_links,
+ # Execute analysis function with network and exclusion sets
+ worker_logger.debug(f"Worker {worker_id} executing {analysis_name}")
+ result = analysis_func(
+ _shared_network, excluded_nodes, excluded_links, **analysis_kwargs
)
- worker_logger.debug(f"Worker {worker_pid} created NetworkView")
-
- # Execute analysis function
- worker_logger.debug(f"Worker {worker_pid} executing {analysis_name}")
- result = analysis_func(network_view, **analysis_kwargs)
- worker_logger.debug(f"Worker {worker_pid} completed analysis")
+ worker_logger.debug(f"Worker {worker_id} completed analysis")
# Dump profile if enabled (for performance analysis)
if profiler is not None:
profiler.disable()
import pstats
import uuid
from pathlib import Path
@@ -239,8 +276,9 @@ def _generic_worker(args: tuple[Any, ...]) -> tuple[Any, int, bool, set[str], se
if profile_dir is not None:
profile_dir.mkdir(parents=True, exist_ok=True)
unique_id = uuid.uuid4().hex[:8]
+ thread_id = threading.current_thread().ident
profile_path = (
- profile_dir / f"{analysis_name}_worker_{worker_pid}_{unique_id}.pstats"
+ profile_dir / f"{analysis_name}_thread_{thread_id}_{unique_id}.pstats"
)
pstats.Stats(profiler).dump_stats(profile_path)
worker_logger.debug("Saved worker profile to %s", profile_path.name)
@@ -255,9 +293,9 @@ class FailureManager:
- Provides parallel processing, worker caching, and failure
+ Provides parallel processing, graph caching, and failure
policy handling for workflow steps and direct notebook usage.
- The FailureManager can execute any analysis function that takes a NetworkView
- and returns results, making it generic for different types of
- failure analysis (capacity, traffic, connectivity, etc.).
+ The FailureManager can execute any analysis function that takes a Network
+ with exclusion sets and returns results, making it generic for different
+ types of failure analysis (capacity, traffic, connectivity, etc.).
Attributes:
network: The underlying network (not modified during analysis).
@@ -281,6 +319,8 @@ def __init__(
self.network = network
self.failure_policy_set = failure_policy_set
self.policy_name = policy_name
+ self._merged_node_attrs: dict[str, dict[str, Any]] | None = None
+ self._merged_link_attrs: dict[str, dict[str, Any]] | None = None
def get_failure_policy(self) -> "FailurePolicy | None":
"""Get failure policy for analysis.
@@ -332,6 +372,9 @@ def compute_exclusions(
# policy matching and risk-group expansion. This ensures attributes like
# 'risk_groups' are available to the policy engine.
def _merge_node_attrs() -> dict[str, dict[str, Any]]:
+ if self._merged_node_attrs is not None:
+ return self._merged_node_attrs
+
merged: dict[str, dict[str, Any]] = {}
for node_name, node in self.network.nodes.items():
attrs: dict[str, Any] = {
@@ -342,9 +385,14 @@ def _merge_node_attrs() -> dict[str, dict[str, Any]]:
# Top-level fields take precedence over attrs on conflict
attrs.update({k: v for k, v in node.attrs.items() if k not in attrs})
merged[node_name] = attrs
+
+ self._merged_node_attrs = merged
return merged
def _merge_link_attrs() -> dict[str, dict[str, Any]]:
+ if self._merged_link_attrs is not None:
+ return self._merged_link_attrs
+
merged: dict[str, dict[str, Any]] = {}
for link_id, link in self.network.links.items():
attrs: dict[str, Any] = {
@@ -358,6 +406,8 @@ def _merge_link_attrs() -> dict[str, dict[str, Any]]:
}
attrs.update({k: v for k, v in link.attrs.items() if k not in attrs})
merged[link_id] = attrs
+
+ self._merged_link_attrs = merged
return merged
node_map = _merge_node_attrs()
@@ -368,7 +418,7 @@ def _merge_link_attrs() -> dict[str, dict[str, Any]]:
node_map, link_map, self.network.risk_groups, seed=seed_offset
)
- # Separate entity types for NetworkView creation
+ # Separate entity types for exclusion sets
for f_id in failed_ids:
if f_id in self.network.nodes:
excluded_nodes.add(f_id)
@@ -392,34 +442,6 @@ def _merge_link_attrs() -> dict[str, dict[str, Any]]:
return excluded_nodes, excluded_links
- def create_network_view(
- self,
- excluded_nodes: set[str] | None = None,
- excluded_links: set[str] | None = None,
- ) -> NetworkView:
- """Create NetworkView with specified exclusions.
-
- Args:
- excluded_nodes: Set of node IDs to exclude. Empty set if None.
- excluded_links: Set of link IDs to exclude. Empty set if None.
-
- Returns:
- NetworkView with exclusions applied, or original network if no exclusions.
- """
- if not excluded_nodes and not excluded_links:
- # Return NetworkView with no exclusions instead of raw Network
- return NetworkView.from_excluded_sets(
- self.network,
- excluded_nodes=set(),
- excluded_links=set(),
- )
-
- return NetworkView.from_excluded_sets(
- self.network,
- excluded_nodes=excluded_nodes or set(),
- excluded_links=excluded_links or set(),
- )
-
def run_monte_carlo_analysis(
self,
analysis_func: AnalysisFunction,
@@ -437,8 +459,8 @@ def run_monte_carlo_analysis(
application, while allowing flexibility in the analysis function.
Args:
- analysis_func: Function that takes (network_view, **kwargs) and returns results.
- Must be serializable for parallel execution.
+ analysis_func: Function that takes (network, excluded_nodes, excluded_links, **kwargs)
+ and returns results. Must be thread-safe; workers share the network by reference.
iterations: Number of Monte Carlo iterations to run.
- parallelism: Number of parallel worker processes to use.
+ parallelism: Number of parallel worker threads to use.
baseline: If True, first iteration runs without failures as baseline.
@@ -485,6 +507,35 @@ def run_monte_carlo_analysis(
logger.info(f"Running {mc_iters} Monte-Carlo iterations")
+ # Pre-build graph cache for analysis functions
+ # This amortizes expensive graph construction across all iterations
+ if "_graph_cache" not in analysis_kwargs:
+ analysis_kwargs = dict(analysis_kwargs) # Don't mutate caller's dict
+ cache_start = time.time()
+
+ if "demands_config" in analysis_kwargs:
+ # Demand placement analysis
+ from ngraph.exec.analysis.flow import build_demand_graph_cache
+
+ logger.debug("Pre-building graph cache for demand placement analysis")
+ analysis_kwargs["_graph_cache"] = build_demand_graph_cache(
+ self.network, analysis_kwargs["demands_config"]
+ )
+ logger.debug(f"Graph cache built in {time.time() - cache_start:.3f}s")
+
+ elif "source_regex" in analysis_kwargs and "sink_regex" in analysis_kwargs:
+ # Max-flow analysis or sensitivity analysis
+ from ngraph.exec.analysis.flow import build_maxflow_graph_cache
+
+ logger.debug("Pre-building graph cache for max-flow analysis")
+ analysis_kwargs["_graph_cache"] = build_maxflow_graph_cache(
+ self.network,
+ analysis_kwargs["source_regex"],
+ analysis_kwargs["sink_regex"],
+ mode=analysis_kwargs.get("mode", "combine"),
+ )
+ logger.debug(f"Graph cache built in {time.time() - cache_start:.3f}s")
+
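+ # Callers may also pre-build the cache and pass it in, bypassing this block
+ # (sketch; keyword arguments flow through to the analysis function):
+ #
+ #     cache = build_maxflow_graph_cache(net, "^dc", "^edge", mode="combine")
+ #     fm.run_monte_carlo_analysis(max_flow_analysis, iterations=100,
+ #                                 source_regex="^dc", sink_regex="^edge",
+ #                                 _graph_cache=cache)
+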
# Get function name safely (Protocol doesn't guarantee __name__)
func_name = getattr(analysis_func, "__name__", "analysis_function")
logger.debug(
@@ -570,10 +621,10 @@ def run_monte_carlo_analysis(
):
key_to_result[dedup_key] = value
- # Build full results list in original order. Clone value per member to avoid aliasing
- # when the same unique task maps to multiple iterations.
- from copy import deepcopy
-
+ # Build full results list in original order. Create shallow copies that share
+ # the expensive flows/summary data but have their own mutable identity fields.
+ # This avoids deepcopy overhead while preventing aliasing issues when we later
+ # set failure_id and failure_state per iteration.
results: list[Any] = [None] * mc_iters # type: ignore[var-annotated]
for key, members in key_to_members.items():
if key not in key_to_result:
@@ -581,7 +632,7 @@ def run_monte_carlo_analysis(
continue
value = key_to_result[key]
for idx in members:
- results[idx] = deepcopy(value)
+ results[idx] = _shallow_copy_result(value)
# Reconstruct failure patterns per original iteration if requested
failure_patterns: list[dict[str, Any]] = []
@@ -678,16 +729,16 @@ def _run_parallel(
) -> tuple[list[Any], list[dict[str, Any]]]:
"""Run analysis in parallel using shared network approach.
- Network is serialized once in the main process and deserialized once per
- worker via the initializer, avoiding repeated serialization overhead.
- Each worker receives only small exclusion sets instead of modified network
- copies, reducing IPC overhead.
+ Network is shared by reference across all threads (zero-copy), which is
+ safe since the network is read-only during analysis. Each worker receives
+ only small exclusion sets, and the C++ Core backend releases the GIL
+ during computation to enable true parallelism.
Args:
worker_args: Pre-computed worker arguments for all iterations.
- mc_iters: Number of iterations to run.
+ total_tasks: Number of tasks to run.
store_failure_patterns: Whether to collect failure pattern details.
- parallelism: Number of parallel worker processes to use.
+ parallelism: Number of parallel worker threads to use.
Returns:
Tuple of (results_list, failure_patterns_list).
@@ -697,11 +748,10 @@ def _run_parallel(
f"Running parallel analysis with {workers} workers for {total_tasks} iterations"
)
- # Serialize network once for all workers
- network_pickle = pickle.dumps(self.network)
- logger.debug(f"Serialized network once: {len(network_pickle)} bytes")
+ # Network is shared by reference (zero-copy) across threads
+ logger.debug(f"Sharing network by reference across {workers} threads")
- # Calculate optimal chunksize to minimize IPC overhead
+ # Calculate optimal chunksize to minimize overhead
chunksize = max(1, total_tasks // (workers * 4))
logger.debug(f"Using chunksize={chunksize} for parallel execution")
@@ -710,17 +760,13 @@ def _run_parallel(
results = []
failure_patterns = []
- # Propagate logging level to workers via environment
- parent_level = logging.getLogger("ngraph").getEffectiveLevel()
- os.environ["NGRAPH_LOG_LEVEL"] = logging.getLevelName(parent_level)
-
- with ProcessPoolExecutor(
+ with ThreadPoolExecutor(
max_workers=workers,
initializer=_worker_init,
- initargs=(network_pickle,),
+ initargs=(self.network,),
) as pool:
logger.debug(
- f"ProcessPoolExecutor created with {workers} workers and shared network"
+ f"ThreadPoolExecutor created with {workers} workers and shared network"
)
logger.info(f"Starting parallel execution of {total_tasks} iterations")
@@ -871,7 +917,8 @@ def run_single_failure_scenario(
run_monte_carlo_analysis().
Args:
- analysis_func: Function that takes (network_view, **kwargs) and returns results.
+ analysis_func: Function that takes (network, excluded_nodes, excluded_links, **kwargs)
+ and returns results.
**kwargs: Additional arguments passed to analysis_func.
Returns:
@@ -919,9 +966,12 @@ def run_max_flow_monte_carlo(
include_flow_summary: Whether to collect detailed flow summary data.
Returns:
- CapacityEnvelopeResults object with envelope statistics and analysis methods.
+ Dictionary with keys:
+ - 'results': list[FlowIterationResult] for each iteration
+ - 'failure_patterns': list of failure pattern dicts (if store_failure_patterns=True)
+ - 'metadata': execution metadata (iterations, timing, etc.)
"""
- from ngraph.monte_carlo.functions import max_flow_analysis
+ from ngraph.exec.analysis.flow import max_flow_analysis
# Convert string flow_placement to enum if needed
if isinstance(flow_placement, str):
@@ -993,50 +1043,6 @@ def _process_sensitivity_results(
)
return processed_scores
- def _build_sensitivity_failure_patterns(
- self,
- failure_patterns: list[dict[str, Any]],
- results: list[dict[str, dict[str, float]]],
- ) -> dict[str, Any]:
- """Build failure pattern results for sensitivity analysis.
-
- Args:
- failure_patterns: List of failure pattern details from FailureManager.
- results: List of sensitivity results for building pattern analysis.
-
- Returns:
- Dictionary mapping pattern keys to sensitivity pattern results.
- """
- import json
-
- pattern_map = {}
-
- for i, pattern in enumerate(failure_patterns):
- # Create pattern key from exclusions
- key = json.dumps(
- {
- "excluded_nodes": pattern["excluded_nodes"],
- "excluded_links": pattern["excluded_links"],
- },
- sort_keys=True,
- )
-
- if key not in pattern_map:
- # Get sensitivity result for this pattern
- sensitivity_result = results[i] if i < len(results) else {}
-
- pattern_map[key] = {
- "excluded_nodes": pattern["excluded_nodes"],
- "excluded_links": pattern["excluded_links"],
- "sensitivity_result": sensitivity_result,
- "count": 0,
- "is_baseline": pattern["is_baseline"],
- }
-
- pattern_map[key]["count"] += 1
-
- return pattern_map
-
def run_demand_placement_monte_carlo(
self,
demands_config: list[dict[str, Any]]
@@ -1066,9 +1072,12 @@ def run_demand_placement_monte_carlo(
store_failure_patterns: Whether to store failure patterns in results.
Returns:
- DemandPlacementResults object with SLA and placement metrics.
+ Dictionary with keys:
+ - 'results': list[FlowIterationResult] for each iteration
+ - 'failure_patterns': list of failure pattern dicts (if store_failure_patterns=True)
+ - 'metadata': execution metadata (iterations, timing, etc.)
"""
- from ngraph.monte_carlo.functions import demand_placement_analysis
+ from ngraph.exec.analysis.flow import demand_placement_analysis
# If caller passed a sequence of TrafficDemand objects, convert to dicts
if not isinstance(demands_config, list):
@@ -1125,11 +1134,12 @@ def run_sensitivity_monte_carlo(
seed: int | None = None,
store_failure_patterns: bool = False,
**kwargs,
- ) -> Any: # Will be SensitivityResults when imports are enabled
+ ) -> dict[str, Any]:
"""Analyze component criticality for flow capacity under failures.
- Ranks network components by their impact on flow capacity when
- they fail, across Monte Carlo failure scenarios.
+ Identifies critical network components by measuring their impact on flow
+ capacity across Monte Carlo failure scenarios. Returns aggregated sensitivity
+ scores showing which components have the greatest effect on network capacity.
Args:
source_path: Regex pattern for source node groups.
@@ -1144,10 +1154,13 @@ def run_sensitivity_monte_carlo(
store_failure_patterns: Whether to store failure patterns in results.
Returns:
- SensitivityResults object with component criticality rankings.
+ Dictionary with keys:
+ - 'results': list of per-iteration sensitivity dicts mapping flow keys to component scores
+ - 'component_scores': aggregated statistics (mean, max, min, count) per component per flow
+ - 'failure_patterns': list of failure pattern dicts (if store_failure_patterns=True)
+ - 'metadata': execution metadata (iterations, timing, source/sink patterns, etc.)
"""
- from ngraph.monte_carlo.functions import sensitivity_analysis
- from ngraph.monte_carlo.results import SensitivityResults
+ from ngraph.exec.analysis.flow import sensitivity_analysis
# Convert string flow_placement to enum if needed
if isinstance(flow_placement, str):
@@ -1175,30 +1188,14 @@ def run_sensitivity_monte_carlo(
**kwargs,
)
- # Process sensitivity results to aggregate component scores
- component_scores = self._process_sensitivity_results(raw_results["results"])
-
- # Process failure patterns if requested
- failure_patterns = {}
- if store_failure_patterns and raw_results["failure_patterns"]:
- failure_patterns = self._build_sensitivity_failure_patterns(
- raw_results["failure_patterns"], raw_results["results"]
- )
+ # Aggregate component scores across iterations for statistical analysis
+ raw_results["component_scores"] = self._process_sensitivity_results(
+ raw_results["results"]
+ )
- # Extract baseline if present
- baseline_result = None
- if baseline and raw_results["results"]:
- # Baseline is the first result when baseline=True
- baseline_result = raw_results["results"][0]
+ # Augment metadata with analysis-specific context
+ raw_results["metadata"]["source_pattern"] = source_path
+ raw_results["metadata"]["sink_pattern"] = sink_path
+ raw_results["metadata"]["mode"] = mode
- return SensitivityResults(
- raw_results=raw_results,
- iterations=iterations,
- baseline=baseline_result,
- component_scores=component_scores,
- failure_patterns=failure_patterns,
- source_pattern=source_path,
- sink_pattern=sink_path,
- mode=mode,
- metadata=raw_results["metadata"],
- )
+ return raw_results
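+
+ # Reading the aggregated scores (sketch; statistic keys follow the docstring
+ # above):
+ #
+ #     out = fm.run_sensitivity_monte_carlo("^dc", "^edge", iterations=50)
+ #     for flow_key, components in out["component_scores"].items():
+ #         ranked = sorted(components.items(),
+ #                         key=lambda kv: kv[1]["mean"], reverse=True)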
diff --git a/ngraph/explorer.py b/ngraph/explorer.py
index 8725f58..6eba8c9 100644
--- a/ngraph/explorer.py
+++ b/ngraph/explorer.py
@@ -5,18 +5,28 @@
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Set
-from ngraph.components import (
+from ngraph.logging import get_logger
+from ngraph.model.components import (
ComponentsLibrary,
resolve_link_end_components,
resolve_node_hardware,
totals_with_multiplier,
)
-from ngraph.logging import get_logger
-from ngraph.model.network import Network, Node
+from ngraph.model.network import Link, Network, Node
logger = get_logger(__name__)
+def _node_is_disabled(node: Node) -> bool:
+ """Return True if the node should be treated as disabled."""
+ return bool(node.disabled)
+
+
+def _link_is_disabled(link: Link) -> bool:
+ """Return True if the link should be treated as disabled."""
+ return bool(link.disabled)
+
+
@dataclass
class ExternalLinkBreakdown:
"""Holds stats for external links to a particular other subtree.
@@ -257,13 +267,13 @@ def _compute_subtree_sets_all(self, node: TreeNode) -> Set[str]:
def _compute_subtree_sets_active(self, node: TreeNode) -> Set[str]:
"""Recursively collect enabled node names into active_subtree_nodes.
- A node is considered enabled if nd.attrs.get("disabled") is not truthy.
+ A node is considered enabled when the disabled flag is False.
"""
collected = set()
for child in node.children.values():
collected |= self._compute_subtree_sets_active(child)
for nd in node.raw_nodes:
- if not nd.attrs.get("disabled"):
+ if not _node_is_disabled(nd):
collected.add(nd.name)
node.active_subtree_nodes = collected
return collected
@@ -373,7 +383,7 @@ def set_node_counts(n: TreeNode):
)
# "Active" excludes disabled
- if not nd.attrs.get("disabled"):
+ if not _node_is_disabled(nd):
for an in self._get_ancestors(tree_node):
an.active_stats.total_capex += cost_val
an.active_stats.total_power += power_val
@@ -386,7 +396,7 @@ def set_node_counts(n: TreeNode):
if (
comp is not None
and node_comp_capacity > 0.0
- and not nd.attrs.get("disabled")
+ and not _node_is_disabled(nd)
):
# Sum capacities of all enabled links attached to this node
attached_capacity = 0.0
@@ -394,14 +404,13 @@ def set_node_counts(n: TreeNode):
used_optics_equiv = 0.0
used_ports = 0.0
for lk in self.network.links.values():
- if lk.attrs.get("disabled"):
+ if _link_is_disabled(lk):
continue
if lk.source == nd.name or lk.target == nd.name:
# If the opposite endpoint is disabled, skip in active view
other = lk.target if lk.source == nd.name else lk.source
- if self.network.nodes.get(other, Node(name=other)).attrs.get(
- "disabled"
- ):
+ other_node = self.network.nodes.get(other, Node(name=other))
+ if _node_is_disabled(other_node):
continue
attached_capacity += float(lk.capacity)
@@ -454,7 +463,7 @@ def set_node_counts(n: TreeNode):
ports_utilization=float(ports_utilization),
capacity_violation=bool(capacity_violation),
ports_violation=bool(ports_violation),
- disabled=bool(nd.attrs.get("disabled")),
+ disabled=_node_is_disabled(nd),
)
# Enforce strict behavior after recording
@@ -617,11 +626,11 @@ def set_node_counts(n: TreeNode):
# ----- "ACTIVE" stats and validations -----
# If link or either endpoint is disabled, skip
- if link.attrs.get("disabled"):
+ if _link_is_disabled(link):
continue
- if self.network.nodes[src].attrs.get("disabled"):
+ if _node_is_disabled(self.network.nodes[src]):
continue
- if self.network.nodes[dst].attrs.get("disabled"):
+ if _node_is_disabled(self.network.nodes[dst]):
continue
# Validation: if both ends provide capacity, enforce min-end capacity
diff --git a/ngraph/failure/manager/aggregate.py b/ngraph/failure/manager/aggregate.py
deleted file mode 100644
index 3794839..0000000
--- a/ngraph/failure/manager/aggregate.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""Aggregation helpers for failure analysis results.
-
-Utilities in this module group and summarize outputs produced by
-`FailureManager` runs. Functions are factored here to keep `manager.py`
-focused on orchestration. This module intentionally avoids importing heavy
-dependencies to keep import cost low in the common path.
-"""
-
-from __future__ import annotations
diff --git a/ngraph/failure/manager/enumerate.py b/ngraph/failure/manager/enumerate.py
deleted file mode 100644
index 6f79af6..0000000
--- a/ngraph/failure/manager/enumerate.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""Failure pattern enumeration helpers.
-
-Hosts utilities for generating or iterating over failure patterns for testing
-and analysis workflows. These helpers are separate from the Monte Carlo engine
-to keep the main manager small and focused.
-"""
-
-from __future__ import annotations
diff --git a/ngraph/failure/manager/simulate.py b/ngraph/failure/manager/simulate.py
deleted file mode 100644
index 7b631d7..0000000
--- a/ngraph/failure/manager/simulate.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Simulation helpers for failure analyses.
-
-Contains small helpers used to drive simulations in tests and examples. The
-main orchestration lives in `manager.py`.
-"""
-
-from __future__ import annotations
diff --git a/ngraph/flows/__init__.py b/ngraph/flows/__init__.py
deleted file mode 100644
index 5af775a..0000000
--- a/ngraph/flows/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-"""Flow primitives and routing policies.
-
-This subpackage defines the building blocks for demand routing:
-
-- FlowIndex: Immutable identifier for a flow.
-- Flow: Routed demand portion bound to a `PathBundle`.
-- FlowPolicy: Creates, places, rebalances, and removes flows on a
- `StrictMultiDiGraph`.
-- FlowPolicyConfig and get_flow_policy(): Factory for common policy presets.
-
-Components here interact with `ngraph.algorithms` for path selection and
-placement, and with `ngraph.paths` for path bundle representation.
-"""
diff --git a/ngraph/flows/flow.py b/ngraph/flows/flow.py
deleted file mode 100644
index e83e558..0000000
--- a/ngraph/flows/flow.py
+++ /dev/null
@@ -1,111 +0,0 @@
-"""Flow and FlowIndex classes for traffic flow representation."""
-
-from __future__ import annotations
-
-from typing import Hashable, NamedTuple, Optional, Set, Tuple
-
-from ngraph.algorithms.base import MIN_FLOW
-from ngraph.algorithms.placement import (
- FlowPlacement,
- place_flow_on_graph,
- remove_flow_from_graph,
-)
-from ngraph.graph.strict_multidigraph import EdgeID, NodeID, StrictMultiDiGraph
-from ngraph.paths.bundle import PathBundle
-
-
-class FlowIndex(NamedTuple):
- """Unique identifier for a flow.
-
- Attributes:
- src_node: Source node.
- dst_node: Destination node.
- flow_class: Flow class label (hashable).
- flow_id: Monotonic integer id for this flow.
- """
-
- src_node: NodeID
- dst_node: NodeID
- flow_class: Hashable
- flow_id: int
-
-
-class Flow:
- """Represents a fraction of demand routed along a given PathBundle.
-
- In traffic-engineering scenarios, a `Flow` object can model:
- - MPLS LSPs/tunnels with explicit paths,
- - IP forwarding behavior (with ECMP or WCMP),
- - Or anything that follows a specific set of paths.
- """
-
- def __init__(
- self,
- path_bundle: PathBundle,
- flow_index: FlowIndex,
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- ) -> None:
- """Initialize a flow.
-
- Args:
- path_bundle: Paths this flow uses.
- flow_index: Identifier for this flow.
- excluded_edges: Edges to exclude from usage.
- excluded_nodes: Nodes to exclude from usage.
- """
- self.path_bundle: PathBundle = path_bundle
- self.flow_index: FlowIndex = flow_index
- self.excluded_edges: Set[EdgeID] = excluded_edges or set()
- self.excluded_nodes: Set[NodeID] = excluded_nodes or set()
-
- # Convenience references for flow endpoints
- self.src_node: NodeID = path_bundle.src_node
- self.dst_node: NodeID = path_bundle.dst_node
-
- # Track how much flow has been successfully placed
- self.placed_flow: float = 0.0
-
- def __str__(self) -> str:
- """Return a concise string for this flow."""
- return f"Flow(flow_index={self.flow_index}, placed_flow={self.placed_flow})"
-
- def place_flow(
- self,
- flow_graph: StrictMultiDiGraph,
- to_place: float,
- flow_placement: FlowPlacement,
- ) -> Tuple[float, float]:
- """Place or update this flow on the graph.
-
- Args:
- flow_graph: Graph tracking capacities and usage.
- to_place: Amount of flow requested to be placed.
- flow_placement: Strategy for distributing flow among equal-cost paths.
-
- Returns:
- tuple[float, float]: (placed_flow, remaining_flow).
- """
- placed_flow = 0.0
-
- # Only place flow if above the minimum threshold
- if to_place >= MIN_FLOW:
- flow_placement_meta = place_flow_on_graph(
- flow_graph=flow_graph,
- src_node=self.src_node,
- dst_node=self.dst_node,
- pred=self.path_bundle.pred,
- flow=to_place,
- flow_index=self.flow_index,
- flow_placement=flow_placement,
- )
- placed_flow = flow_placement_meta.placed_flow
- to_place = flow_placement_meta.remaining_flow
- self.placed_flow += placed_flow
-
- return placed_flow, to_place
-
- def remove_flow(self, flow_graph: StrictMultiDiGraph) -> None:
- """Remove this flow from the graph."""
- remove_flow_from_graph(flow_graph, flow_index=self.flow_index)
- self.placed_flow = 0.0
diff --git a/ngraph/flows/policy.py b/ngraph/flows/policy.py
deleted file mode 100644
index d840cbb..0000000
--- a/ngraph/flows/policy.py
+++ /dev/null
@@ -1,852 +0,0 @@
-"""FlowPolicy and FlowPolicyConfig classes for traffic routing algorithms."""
-
-from __future__ import annotations
-
-import copy
-from collections import deque
-from enum import IntEnum
-from typing import Any, Callable, Dict, Hashable, List, Optional, Set, Tuple
-
-from ngraph.algorithms import base, edge_select, spf
-from ngraph.algorithms.placement import FlowPlacement
-from ngraph.flows.flow import Flow, FlowIndex
-from ngraph.graph.strict_multidigraph import (
- AttrDict,
- EdgeID,
- NodeID,
- StrictMultiDiGraph,
-)
-from ngraph.logging import get_logger
-from ngraph.paths.bundle import PathBundle
-
-
-class FlowPolicyConfig(IntEnum):
- """Enumerates supported flow policy configurations."""
-
- SHORTEST_PATHS_ECMP = 1
- SHORTEST_PATHS_WCMP = 2
- TE_WCMP_UNLIM = 3
- TE_ECMP_UP_TO_256_LSP = 4
- TE_ECMP_16_LSP = 5
-
-
-class FlowPolicy:
- """Create, place, rebalance, and remove flows on a network graph.
-
- Converts a demand into one or more `Flow` objects subject to capacity
- constraints and configuration: path selection, edge selection, and flow
- placement method.
- """
-
- def __init__(
- self,
- path_alg: base.PathAlg,
- flow_placement: FlowPlacement,
- edge_select: base.EdgeSelect,
- multipath: bool,
- min_flow_count: int = 1,
- max_flow_count: Optional[int] = None,
- max_path_cost: Optional[base.Cost] = None,
- max_path_cost_factor: Optional[float] = None,
- static_paths: Optional[List[PathBundle]] = None,
- edge_select_func: Optional[
- Callable[
- [
- StrictMultiDiGraph,
- NodeID,
- NodeID,
- Dict[EdgeID, AttrDict],
- Optional[Set[EdgeID]],
- Optional[Set[NodeID]],
- ],
- Tuple[base.Cost, List[EdgeID]],
- ]
- ] = None,
- edge_select_value: Optional[Any] = None,
- reoptimize_flows_on_each_placement: bool = False,
- max_no_progress_iterations: int = 100,
- max_total_iterations: int = 10000,
- # Diminishing-returns cutoff configuration
- diminishing_returns_enabled: bool = True,
- diminishing_returns_window: int = 8,
- diminishing_returns_epsilon_frac: float = 1e-3,
- ) -> None:
- """Initialize a policy instance.
-
- Args:
- path_alg: Path algorithm (e.g., SPF).
- flow_placement: Flow placement method (e.g., EQUAL_BALANCED, PROPORTIONAL).
- edge_select: Edge selection mode (e.g., ALL_MIN_COST).
- multipath: Whether to allow multiple parallel paths at the SPF stage.
- min_flow_count: Minimum number of flows to create for a demand.
- max_flow_count: Maximum number of flows allowable for a demand.
- max_path_cost: Absolute cost limit for allowable paths.
- max_path_cost_factor: Relative cost factor limit (multiplying the best path cost).
- static_paths: Predefined paths to force flows onto, if provided.
- edge_select_func: Custom function for edge selection.
- edge_select_value: Additional parameter for certain edge selection strategies.
- reoptimize_flows_on_each_placement: Re-run path optimization after every placement.
- max_no_progress_iterations: Max consecutive iterations with no progress before loop detection.
- max_total_iterations: Absolute max iterations regardless of progress.
-
- Raises:
- ValueError: If static_paths length does not match max_flow_count,
- or if EQUAL_BALANCED placement is used without a
- specified max_flow_count.
- """
- # Module logger
- self._logger = get_logger(__name__)
- self.path_alg: base.PathAlg = path_alg
- self.flow_placement: FlowPlacement = flow_placement
- self.edge_select: base.EdgeSelect = edge_select
- self.multipath: bool = multipath
- self.min_flow_count: int = min_flow_count
- self.max_flow_count: Optional[int] = max_flow_count
- self.max_path_cost: Optional[base.Cost] = max_path_cost
- self.max_path_cost_factor: Optional[float] = max_path_cost_factor
- self.static_paths: Optional[List[PathBundle]] = static_paths
- self.edge_select_func = edge_select_func
- self.edge_select_value: Optional[Any] = edge_select_value
- self.reoptimize_flows_on_each_placement: bool = (
- reoptimize_flows_on_each_placement
- )
-
- # Termination parameters for place_demand algorithm
- self.max_no_progress_iterations: int = max_no_progress_iterations
- self.max_total_iterations: int = max_total_iterations
-
- # Diminishing-returns cutoff parameters
- self.diminishing_returns_enabled: bool = diminishing_returns_enabled
- self.diminishing_returns_window: int = diminishing_returns_window
- self.diminishing_returns_epsilon_frac: float = diminishing_returns_epsilon_frac
-
- # Dictionary to track all flows by their FlowIndex.
- self.flows: Dict[Tuple, Flow] = {}
-
- # Track the best path cost found to enforce maximum path cost constraints.
- self.best_path_cost: Optional[base.Cost] = None
-
- # Internal flow ID counter.
- self._next_flow_id: int = 0
-
- # Basic placement metrics (cumulative totals over lifetime of this policy)
- self._metrics_totals: Dict[str, float] = {
- "spf_calls_total": 0.0,
- "flows_created_total": 0.0,
- "reopt_calls_total": 0.0,
- "place_iterations_total": 0.0,
- }
- # Snapshot of last place_demand call
- self.last_metrics: Dict[str, float] = {}
-
- # Cache for edge selectors to avoid rebuilding fabric callables
- # Keyed by (edge_select, effective_select_value)
- self._edge_selector_cache: Dict[Tuple[base.EdgeSelect, Any], Callable] = {}
-
- # Validate static_paths versus max_flow_count constraints.
- if static_paths:
- if max_flow_count is not None and len(static_paths) != max_flow_count:
- raise ValueError(
- "If set, max_flow_count must be equal to the number of static paths."
- )
- self.max_flow_count = len(static_paths)
- if (
- flow_placement == FlowPlacement.EQUAL_BALANCED
- and self.max_flow_count is None
- ):
- raise ValueError("max_flow_count must be set for EQUAL_BALANCED placement.")
-
- def deep_copy(self) -> FlowPolicy:
- """Return a deep copy of this policy including flows."""
- return copy.deepcopy(self)
-
- @property
- def flow_count(self) -> int:
- """Number of flows currently tracked by the policy."""
- return len(self.flows)
-
- @property
- def placed_demand(self) -> float:
- """Sum of all placed flow volumes across flows."""
- return sum(flow.placed_flow for flow in self.flows.values())
-
- def _get_next_flow_id(self) -> int:
- """Retrieve and increment the internal flow id counter.
-
- Returns:
- int: Next available flow id.
- """
- next_flow_id = self._next_flow_id
- self._next_flow_id += 1
- return next_flow_id
-
- def _build_flow_index(
- self,
- src_node: NodeID,
- dst_node: NodeID,
- flow_class: Hashable,
- flow_id: int,
- ) -> FlowIndex:
- """Construct a `FlowIndex` to track flows.
-
- Args:
- src_node: The source node identifier.
- dst_node: The destination node identifier.
- flow_class: The flow class or type identifier.
- flow_id: Unique identifier for this flow.
-
- Returns:
- FlowIndex: Identifier for the flow.
- """
- return FlowIndex(src_node, dst_node, flow_class, flow_id)
-
- def _get_path_bundle(
- self,
- flow_graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- min_flow: Optional[float] = None,
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- ) -> Optional[PathBundle]:
- """Find a path bundle from src_node to dst_node.
-
- Optionally exclude certain edges or nodes.
-
- Args:
- flow_graph: The network graph.
- src_node: The source node identifier.
- dst_node: The destination node identifier.
- min_flow: Minimum flow threshold for selection.
- excluded_edges: Set of edges to exclude.
- excluded_nodes: Set of nodes to exclude.
-
- Returns:
- PathBundle | None: Bundle if found and cost-constrained; otherwise None.
-
- Raises:
- ValueError: If the selected path algorithm is not supported.
- """
- effective_select_value = (
- min_flow if min_flow is not None else self.edge_select_value
- )
- # Determine whether we can use SPF's internal fast path.
- # Fast path is available when:
- # - no custom edge selector is provided
- # - no custom select value is required (uses MIN_CAP internally)
- # In that case, we pass only the EdgeSelect enum to spf.spf and avoid
- # constructing an edge_select_func, which unlocks specialized inner loops.
- use_spf_fast_path = (
- self.edge_select_func is None and effective_select_value is None
- )
-
- edge_select_func = None
- if not use_spf_fast_path:
- # Build (and cache) a selector when fast path is not applicable
- if self.edge_select_func is None:
- cache_key = (self.edge_select, effective_select_value)
- edge_select_func = self._edge_selector_cache.get(cache_key)
- if edge_select_func is None:
- edge_select_func = edge_select.edge_select_fabric(
- edge_select=self.edge_select,
- select_value=effective_select_value,
- excluded_edges=None,
- excluded_nodes=None,
- edge_select_func=None,
- )
- self._edge_selector_cache[cache_key] = edge_select_func
- else:
- # Respect a user-provided selector (do not cache)
- edge_select_func = edge_select.edge_select_fabric(
- edge_select=self.edge_select,
- select_value=effective_select_value,
- excluded_edges=None,
- excluded_nodes=None,
- edge_select_func=self.edge_select_func,
- )
-
- if self.path_alg == base.PathAlg.SPF:
- path_func = spf.spf
- else:
- raise ValueError(f"Unsupported path algorithm {self.path_alg}")
-
- # Count SPF invocations for metrics
- self._metrics_totals["spf_calls_total"] += 1.0
-
- if use_spf_fast_path:
- cost, pred = path_func(
- flow_graph,
- src_node=src_node,
- edge_select=self.edge_select,
- edge_select_func=None,
- multipath=self.multipath,
- excluded_edges=excluded_edges,
- excluded_nodes=excluded_nodes,
- dst_node=dst_node,
- )
- else:
- cost, pred = path_func(
- flow_graph,
- src_node=src_node,
- edge_select=self.edge_select,
- edge_select_func=edge_select_func,
- multipath=self.multipath,
- excluded_edges=excluded_edges,
- excluded_nodes=excluded_nodes,
- dst_node=dst_node,
- )
-
- if dst_node in pred:
- dst_cost = cost[dst_node]
- # Update best_path_cost if we found a cheaper path.
- if self.best_path_cost is None or dst_cost < self.best_path_cost:
- self.best_path_cost = dst_cost
-
- # Enforce maximum path cost constraints, if specified.
- if self.max_path_cost or self.max_path_cost_factor:
- max_path_cost_factor = self.max_path_cost_factor or 1.0
- max_path_cost = self.max_path_cost or float("inf")
- if dst_cost > min(
- max_path_cost, self.best_path_cost * max_path_cost_factor
- ):
- return None
-
- return PathBundle(src_node, dst_node, pred, dst_cost)
-
- return None
-
- def _create_flow(
- self,
- flow_graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- flow_class: Hashable,
- min_flow: Optional[float] = None,
- path_bundle: Optional[PathBundle] = None,
- excluded_edges: Optional[Set[EdgeID]] = None,
- excluded_nodes: Optional[Set[NodeID]] = None,
- ) -> Optional[Flow]:
- """Create a new flow and register it within the policy.
-
- Args:
- flow_graph: The network graph.
- src_node: The source node identifier.
- dst_node: The destination node identifier.
- flow_class: The flow class or type identifier.
- min_flow: Minimum flow threshold for path selection.
- path_bundle: Optionally, a precomputed path bundle.
- excluded_edges: Edges to exclude during path-finding.
- excluded_nodes: Nodes to exclude during path-finding.
-
- Returns:
- Flow | None: Newly created flow, or None if no valid path bundle is found.
- """
- # Try last path bundle reuse for this (src,dst) if available and still valid
- if path_bundle is None:
- last_pb: Optional[PathBundle] = getattr(self, "_last_path_bundle", None)
- if (
- last_pb is not None
- and last_pb.src_node == src_node
- and last_pb.dst_node == dst_node
- ):
- # Attempt to reuse by checking that all edges exist and have remaining capacity >= min_flow
- can_reuse = True
- # Require at least MIN_FLOW to be deliverable to consider reuse
- min_required = (
- float(min_flow) if min_flow is not None else float(base.MIN_FLOW)
- )
- edges = flow_graph.get_edges()
- # Respect exclusions if provided
- if excluded_edges and any(e in excluded_edges for e in last_pb.edges):
- can_reuse = False
- if excluded_nodes and any(
- n in excluded_nodes for n in getattr(last_pb, "nodes", set())
- ):
- can_reuse = False
- for e_id in last_pb.edges:
- if e_id not in edges:
- can_reuse = False
- break
- cap = edges[e_id][3].get("capacity", 0.0)
- flow = edges[e_id][3].get("flow", 0.0)
- if (cap - flow) < min_required:
- can_reuse = False
- break
- if can_reuse:
- path_bundle = last_pb
-
- path_bundle = path_bundle or self._get_path_bundle(
- flow_graph, src_node, dst_node, min_flow, excluded_edges, excluded_nodes
- )
- if not path_bundle:
- return None
-
- flow_index = self._build_flow_index(
- src_node, dst_node, flow_class, self._get_next_flow_id()
- )
- flow = Flow(path_bundle, flow_index)
- self.flows[flow_index] = flow
- self._metrics_totals["flows_created_total"] += 1.0
- # Cache last path bundle for potential reuse within this demand's placement session
- self._last_path_bundle = path_bundle
- return flow
-
- def _create_flows(
- self,
- flow_graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- flow_class: Hashable,
- min_flow: Optional[float] = None,
- ) -> None:
- """Create the initial set of flows for a new demand.
-
- If static paths are defined, use them directly; otherwise, create flows via
- path-finding.
-
- Args:
- flow_graph: The network graph.
- src_node: The source node identifier.
- dst_node: The destination node identifier.
- flow_class: The flow class or type identifier.
- min_flow: Minimum flow threshold for path selection.
-
- Raises:
- ValueError: If the static paths do not match the demand's source/destination.
- """
- if self.static_paths:
- for path_bundle in self.static_paths:
- if (
- path_bundle.src_node == src_node
- and path_bundle.dst_node == dst_node
- ):
- self._create_flow(
- flow_graph,
- src_node,
- dst_node,
- flow_class,
- min_flow,
- path_bundle,
- )
- else:
- raise ValueError(
- "Source and destination nodes of static paths do not match demand."
- )
- else:
- for _ in range(self.min_flow_count):
- self._create_flow(flow_graph, src_node, dst_node, flow_class, min_flow)
-
- def _delete_flow(
- self, flow_graph: StrictMultiDiGraph, flow_index: FlowIndex
- ) -> None:
- """Delete a flow from the policy and remove it from the graph.
-
- Args:
- flow_graph: The network graph.
- flow_index: The key identifying the flow to delete.
-
- Raises:
- KeyError: If the specified flow_index does not exist.
- """
- flow = self.flows.pop(flow_index)
- flow.remove_flow(flow_graph)
-
- def _reoptimize_flow(
- self,
- flow_graph: StrictMultiDiGraph,
- flow_index: FlowIndex,
- headroom: float = 0.0,
- ) -> Optional[Flow]:
- """Re-optimize a flow by finding a new path that can accommodate headroom.
-
- If no better path is found, restore the original path.
-
- Args:
- flow_graph: The network graph.
- flow_index: The key identifying the flow to update.
- headroom: Additional volume to accommodate on the new path.
-
- Returns:
- Flow | None: Updated flow if successful; otherwise None.
- """
- flow = self.flows[flow_index]
- current_flow_volume = flow.placed_flow
- new_min_volume = current_flow_volume + headroom
- flow.remove_flow(flow_graph)
-
- path_bundle = self._get_path_bundle(
- flow_graph,
- flow.path_bundle.src_node,
- flow.path_bundle.dst_node,
- new_min_volume,
- flow.excluded_edges,
- flow.excluded_nodes,
- )
- # If no suitable alternative path is found or the new path is the same set of edges,
- # revert to the original path.
- if not path_bundle or path_bundle.edges == flow.path_bundle.edges:
- flow.place_flow(flow_graph, current_flow_volume, self.flow_placement)
- return None
-
- new_flow = Flow(
- path_bundle, flow_index, flow.excluded_edges, flow.excluded_nodes
- )
- new_flow.place_flow(flow_graph, current_flow_volume, self.flow_placement)
- self.flows[flow_index] = new_flow
- try:
- self._metrics_totals["reopt_calls_total"] += 1.0
- except Exception:
- pass
- return new_flow
-
- def place_demand(
- self,
- flow_graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- flow_class: Hashable,
- volume: float,
- target_flow_volume: Optional[float] = None,
- min_flow: Optional[float] = None,
- ) -> Tuple[float, float]:
- """Place demand volume on the graph by splitting or creating flows as needed.
-
- Optionally re-optimize flows based on the policy configuration.
-
- Args:
- flow_graph: The network graph.
- src_node: The source node identifier.
- dst_node: The destination node identifier.
- flow_class: The flow class or type identifier.
- volume: The demand volume to place.
- target_flow_volume: The target volume to aim for on each flow.
- min_flow: Minimum flow threshold for path selection.
-
- Returns:
- tuple[float, float]: (placed_flow, remaining_volume).
-
- Raises:
- RuntimeError: If an infinite loop is detected due to misconfigured flow policy
- parameters, or if maximum iteration limit is exceeded.
- """
- # If flows exist but reference edges that no longer exist (e.g., after
- # a graph rebuild), prune them so that placement can recreate valid flows.
- if self.flows:
- edges = flow_graph.get_edges()
- invalid = [
- flow_index
- for flow_index, flow in list(self.flows.items())
- if any(
- eid not in edges
- for eid in getattr(flow.path_bundle, "edges", set())
- )
- ]
- for flow_index in invalid:
- # Remove from internal registry; nothing to remove from graph for stale ids
- self.flows.pop(flow_index, None)
-
- if not self.flows:
- self._create_flows(flow_graph, src_node, dst_node, flow_class, min_flow)
-
- flow_queue = deque(self.flows.values())
- target_flow_volume = target_flow_volume or volume
-
- # Metrics snapshot at entry
- totals_before = dict(self._metrics_totals)
- initial_request = volume
-
- total_placed_flow = 0.0
- consecutive_no_progress = 0
- total_iterations = 0
-
- # Track diminishing returns over a sliding window
- recent_placements = deque(maxlen=self.diminishing_returns_window)
- cutoff_triggered = False
-
- while volume >= base.MIN_FLOW and flow_queue:
- flow = flow_queue.popleft()
- placed_flow, _ = flow.place_flow(
- flow_graph, min(target_flow_volume, volume), self.flow_placement
- )
- volume -= placed_flow
- total_placed_flow += placed_flow
- total_iterations += 1
- recent_placements.append(placed_flow)
- self._metrics_totals["place_iterations_total"] += 1.0
-
- # Track progress to detect infinite loops in flow creation/optimization
- if placed_flow < base.MIN_FLOW:
- consecutive_no_progress += 1
- # Occasional debug to aid troubleshooting of misconfigured policies
- if consecutive_no_progress == 1 or (consecutive_no_progress % 25 == 0):
- import logging as _logging
-
- if self._logger.isEnabledFor(_logging.DEBUG):
- self._logger.debug(
- "place_demand no-progress: src=%s dst=%s vol_left=%.6g target=%.6g "
- "flows=%d queue=%d iters=%d last_cost=%s edge_sel=%s placement=%s multipath=%s",
- str(getattr(flow, "src_node", "")),
- str(getattr(flow, "dst_node", "")),
- float(volume),
- float(target_flow_volume),
- len(self.flows),
- len(flow_queue),
- total_iterations,
- str(
- getattr(
- getattr(flow, "path_bundle", None), "cost", None
- )
- ),
- self.edge_select.name,
- self.flow_placement.name,
- str(self.multipath),
- )
- if consecutive_no_progress >= self.max_no_progress_iterations:
- # This indicates an infinite loop where flows keep being created
- # but can't place any meaningful volume
- raise RuntimeError(
- f"Infinite loop detected in place_demand: "
- f"{consecutive_no_progress} consecutive iterations with no progress. "
- f"This typically indicates misconfigured flow policy parameters "
- f"(e.g., non-capacity-aware edge selection with high max_flow_count)."
- )
- else:
- consecutive_no_progress = 0 # Reset counter on progress
-
- # Safety net for pathological cases
- if total_iterations > self.max_total_iterations:
- raise RuntimeError(
- f"Maximum iteration limit ({self.max_total_iterations}) exceeded in place_demand."
- )
-
- # Diminishing-returns cutoff: if the recent placements collectively fall
- # below a meaningful threshold, stop iterating to avoid chasing dust.
- if (
- self.diminishing_returns_enabled
- and len(recent_placements) == self.diminishing_returns_window
- ):
- recent_sum = sum(recent_placements)
- threshold = max(
- base.MIN_FLOW,
- self.diminishing_returns_epsilon_frac * float(initial_request),
- )
- if recent_sum < threshold:
- # Gracefully stop iterating for this demand; leave remaining volume.
- import logging as _logging
-
- if self._logger.isEnabledFor(_logging.DEBUG):
- self._logger.debug(
- "place_demand cutoff: src=%s dst=%s recent_sum=%.6g threshold=%.6g "
- "remaining=%.6g flows=%d iters=%d edge_sel=%s placement=%s multipath=%s",
- str(src_node),
- str(dst_node),
- float(recent_sum),
- float(threshold),
- float(volume),
- len(self.flows),
- total_iterations,
- self.edge_select.name,
- self.flow_placement.name,
- str(self.multipath),
- )
- cutoff_triggered = True
- break
-
- # If the flow can accept more volume, attempt to create or update.
- if (
- target_flow_volume - flow.placed_flow >= base.MIN_FLOW
- and not self.static_paths
- ):
- if not self.max_flow_count or len(self.flows) < self.max_flow_count:
- # Avoid unbounded flow creation under non-capacity-aware selection
- # with PROPORTIONAL placement when no progress was made.
- non_cap_selects = {
- base.EdgeSelect.ALL_MIN_COST,
- base.EdgeSelect.SINGLE_MIN_COST,
- }
- if (
- placed_flow < base.MIN_FLOW
- and self.flow_placement == FlowPlacement.PROPORTIONAL
- and self.edge_select in non_cap_selects
- ):
- new_flow = None
- else:
- new_flow = self._create_flow(
- flow_graph, src_node, dst_node, flow_class
- )
- else:
- new_flow = self._reoptimize_flow(
- flow_graph, flow.flow_index, headroom=base.MIN_FLOW
- )
- if new_flow:
- flow_queue.append(new_flow)
- import logging as _logging
-
- if self._logger.isEnabledFor(_logging.DEBUG):
- self._logger.debug(
- "place_demand appended flow: total_flows=%d new_cost=%s",
- len(self.flows),
- str(getattr(new_flow.path_bundle, "cost", None)),
- )
-
- # For EQUAL_BALANCED placement, rebalance flows to maintain equal volumes.
- if self.flow_placement == FlowPlacement.EQUAL_BALANCED and len(self.flows) > 0:
- target_flow_volume_eq = self.placed_demand / float(len(self.flows))
- # If flows are not already near balanced, rebalance them.
- if any(
- abs(target_flow_volume_eq - f.placed_flow) >= base.MIN_FLOW
- for f in self.flows.values()
- ):
- # Perform a single rebalance pass; do not recurse into rebalancing again
- prev_reopt = self.reoptimize_flows_on_each_placement
- self.reoptimize_flows_on_each_placement = False
- try:
- total_placed_flow, excess_flow = self.rebalance_demand(
- flow_graph,
- src_node,
- dst_node,
- flow_class,
- target_flow_volume_eq,
- )
- volume += excess_flow
- finally:
- self.reoptimize_flows_on_each_placement = prev_reopt
-
- # Optionally re-run optimization for all flows after placement.
- if self.reoptimize_flows_on_each_placement:
- for flow in self.flows.values():
- self._reoptimize_flow(flow_graph, flow.flow_index)
-
- # Update last_metrics snapshot
-
- totals_after = self._metrics_totals
- self.last_metrics = {
- "placed": float(total_placed_flow),
- "remaining": float(volume),
- "iterations": float(total_iterations),
- "flows_created": float(
- totals_after["flows_created_total"]
- - totals_before["flows_created_total"]
- ),
- "spf_calls": float(
- totals_after["spf_calls_total"] - totals_before["spf_calls_total"]
- ),
- "reopt_calls": float(
- totals_after["reopt_calls_total"] - totals_before["reopt_calls_total"]
- ),
- "cutoff_triggered": float(1.0 if cutoff_triggered else 0.0),
- "initial_request": float(initial_request),
- }
-
- return total_placed_flow, volume
-
- def get_metrics(self) -> Dict[str, float]:
- """Return cumulative placement metrics for this policy instance.
-
- Returns:
- dict[str, float]: Totals including 'spf_calls_total', 'flows_created_total',
- 'reopt_calls_total', and 'place_iterations_total'.
- """
- return dict(self._metrics_totals)
-
- def rebalance_demand(
- self,
- flow_graph: StrictMultiDiGraph,
- src_node: NodeID,
- dst_node: NodeID,
- flow_class: Hashable,
- target_flow_volume: float,
- ) -> Tuple[float, float]:
- """Rebalance demand across existing flows towards the target volume per flow.
-
- Achieved by removing all flows from the graph and re-placing them.
-
- Args:
- flow_graph: The network graph.
- src_node: The source node identifier.
- dst_node: The destination node identifier.
- flow_class: The flow class or type identifier.
- target_flow_volume: The desired volume per flow.
-
- Returns:
- tuple[float, float]: Same semantics as `place_demand`.
- """
- volume = self.placed_demand
- self.remove_demand(flow_graph)
- return self.place_demand(
- flow_graph, src_node, dst_node, flow_class, volume, target_flow_volume
- )
-
- def remove_demand(self, flow_graph: StrictMultiDiGraph) -> None:
-        """Remove all flows from the network graph without clearing internal state.
-
-        This allows subsequent re-optimization.
-
- Args:
- flow_graph: The network graph.
- """
- for flow in list(self.flows.values()):
- flow.remove_flow(flow_graph)
-
-
-def get_flow_policy(flow_policy_config: FlowPolicyConfig) -> FlowPolicy:
- """Create a policy instance from a configuration preset.
-
- Args:
- flow_policy_config: A FlowPolicyConfig enum value specifying the desired policy.
-
- Returns:
- FlowPolicy: Pre-configured policy instance.
-
- Raises:
- ValueError: If an unknown FlowPolicyConfig value is provided.
- """
- if flow_policy_config == FlowPolicyConfig.SHORTEST_PATHS_ECMP:
- # Hop-by-hop equal-cost balanced routing (similar to IP forwarding with ECMP).
- return FlowPolicy(
- path_alg=base.PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=base.EdgeSelect.ALL_MIN_COST,
- multipath=True,
- max_flow_count=1, # Single flow from the perspective of the flow object,
- # but multipath can create parallel SPF paths.
- )
- elif flow_policy_config == FlowPolicyConfig.SHORTEST_PATHS_WCMP:
- # Hop-by-hop weighted ECMP (WCMP) over equal-cost paths (proportional split).
- return FlowPolicy(
- path_alg=base.PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=base.EdgeSelect.ALL_MIN_COST,
- multipath=True,
- max_flow_count=1,
- )
- elif flow_policy_config == FlowPolicyConfig.TE_WCMP_UNLIM:
- # Traffic engineering with WCMP (proportional split) and capacity-aware selection.
- return FlowPolicy(
- path_alg=base.PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=base.EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
- elif flow_policy_config == FlowPolicyConfig.TE_ECMP_UP_TO_256_LSP:
- # TE with up to 256 LSPs using ECMP flow placement.
- return FlowPolicy(
- path_alg=base.PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=base.EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING_LOAD_FACTORED,
- multipath=False,
- max_flow_count=256,
- reoptimize_flows_on_each_placement=True,
- )
- elif flow_policy_config == FlowPolicyConfig.TE_ECMP_16_LSP:
- # TE with 16 LSPs using ECMP flow placement.
- return FlowPolicy(
- path_alg=base.PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=base.EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING_LOAD_FACTORED,
- multipath=False,
- min_flow_count=16,
- max_flow_count=16,
- reoptimize_flows_on_each_placement=True,
- )
- else:
- raise ValueError(f"Unknown flow policy config: {flow_policy_config}")
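
For reference, the diminishing-returns cutoff removed above follows a simple sliding-window pattern. A minimal, self-contained sketch of that pattern (illustrative names only, not a NetGraph API):

    from collections import deque

    MIN_FLOW = 2 ** -12  # stand-in for base.MIN_FLOW

    def place_with_cutoff(initial_request, placements, window=8, epsilon_frac=1e-3):
        # Stop once a full window of recent placements sums below a threshold
        # proportional to the initially requested volume.
        recent = deque(maxlen=window)
        placed = 0.0
        for amount in placements:
            placed += amount
            recent.append(amount)
            threshold = max(MIN_FLOW, epsilon_frac * initial_request)
            if len(recent) == window and sum(recent) < threshold:
                break  # chasing dust; leave the remainder unplaced
        return placed

    # Geometrically shrinking placements trigger the cutoff well before 64 iterations.
    print(place_with_cutoff(100.0, (50.0 * 0.5**i for i in range(64))))
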
diff --git a/ngraph/graph/__init__.py b/ngraph/graph/__init__.py
deleted file mode 100644
index 2c4fdf7..0000000
--- a/ngraph/graph/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Graph primitives and helpers.
-
-This package provides the strict multi-directed graph type `StrictMultiDiGraph`
-and helper modules for conversion (`convert`) and serialization (`io`).
-"""
diff --git a/ngraph/graph/convert.py b/ngraph/graph/convert.py
deleted file mode 100644
index 08658da..0000000
--- a/ngraph/graph/convert.py
+++ /dev/null
@@ -1,144 +0,0 @@
-"""Graph conversion utilities between StrictMultiDiGraph and NetworkX graphs.
-
-Functions in this module consolidate or expand multi-edges and can preserve
-original edge data for reversion through a special ``_uv_edges`` attribute.
-"""
-
-from typing import Callable, Optional
-
-import networkx as nx
-
-from ngraph.graph.strict_multidigraph import NodeID, StrictMultiDiGraph
-
-
-def to_digraph(
- graph: StrictMultiDiGraph,
- edge_func: Optional[
- Callable[[StrictMultiDiGraph, NodeID, NodeID, dict], dict]
- ] = None,
- revertible: bool = True,
-) -> nx.DiGraph:
- """Convert a StrictMultiDiGraph to a NetworkX DiGraph.
-
- This function consolidates multi-edges between nodes into a single edge.
- Optionally, a custom edge function can be provided to compute edge attributes.
- If `revertible` is True, the original multi-edge data is stored in the '_uv_edges'
- attribute of each consolidated edge, allowing for later reversion.
-
- Args:
- graph: The StrictMultiDiGraph to convert.
- edge_func: Optional function to compute consolidated edge attributes.
- The callable receives ``(graph, u, v, edges)`` and returns a dict.
- revertible: If True, store the original multi-edge data for reversion.
-
- Returns:
- A NetworkX DiGraph representing the input graph.
- """
- nx_graph = nx.DiGraph()
- nx_graph.add_nodes_from(graph.get_nodes())
-
- # Iterate over nodes and their neighbors using the adjacency method.
- for u, neighbors in graph.adjacency():
- for v, edges in neighbors.items():
- # Convert edges to the expected dict format
- typed_edges: dict = dict(edges)
- if edge_func:
- edge_data = edge_func(graph, u, v, typed_edges)
- nx_graph.add_edge(u, v, **edge_data)
- else:
- nx_graph.add_edge(u, v)
-
- if revertible:
- # Store the original multi-edge data in the '_uv_edges' attribute.
- edge_attr = nx_graph.edges[u, v]
- edge_attr.setdefault("_uv_edges", [])
- edge_attr["_uv_edges"].append((u, v, typed_edges))
- return nx_graph
-
-
-def from_digraph(nx_graph: nx.DiGraph) -> StrictMultiDiGraph:
- """Convert a revertible NetworkX DiGraph to a StrictMultiDiGraph.
-
- This function reconstructs the original StrictMultiDiGraph by restoring
- multi-edge information from the '_uv_edges' attribute of each edge.
-
- Args:
- nx_graph: A revertible NetworkX DiGraph with ``_uv_edges`` attributes.
-
- Returns:
- A StrictMultiDiGraph reconstructed from the input DiGraph.
- """
- graph = StrictMultiDiGraph()
- graph.add_nodes_from(nx_graph.nodes)
-
- # Restore original multi-edges from the consolidated edge attribute.
- for _u, _v, data in nx_graph.edges(data=True):
- uv_edges = data.get("_uv_edges", [])
- for orig_u, orig_v, edges in uv_edges:
- for edge_id, edge_data in edges.items():
- graph.add_edge(orig_u, orig_v, edge_id, **edge_data)
- return graph
-
-
-def to_graph(
- graph: StrictMultiDiGraph,
- edge_func: Optional[
- Callable[[StrictMultiDiGraph, NodeID, NodeID, dict], dict]
- ] = None,
- revertible: bool = True,
-) -> nx.Graph:
- """Convert a StrictMultiDiGraph to a NetworkX Graph.
-
- This function works similarly to `to_digraph` but returns an undirected graph.
-
- Args:
- graph: The StrictMultiDiGraph to convert.
- edge_func: Optional function to compute consolidated edge attributes.
- revertible: If True, store the original multi-edge data for reversion.
-
- Returns:
- A NetworkX Graph representing the input graph.
- """
- nx_graph = nx.Graph()
- nx_graph.add_nodes_from(graph.get_nodes())
-
- # Iterate over the adjacency to consolidate edges.
- for u, neighbors in graph.adjacency():
- for v, edges in neighbors.items():
- # Convert edges to the expected dict format
- typed_edges: dict = dict(edges)
- if edge_func:
- edge_data = edge_func(graph, u, v, typed_edges)
- nx_graph.add_edge(u, v, **edge_data)
- else:
- nx_graph.add_edge(u, v)
-
- if revertible:
- edge_attr = nx_graph.edges[u, v]
- edge_attr.setdefault("_uv_edges", [])
- edge_attr["_uv_edges"].append((u, v, typed_edges))
- return nx_graph
-
-
-def from_graph(nx_graph: nx.Graph) -> StrictMultiDiGraph:
- """Convert a revertible NetworkX Graph to a StrictMultiDiGraph.
-
- Restores the original multi-edge structure from the '_uv_edges' attribute stored
- in each consolidated edge.
-
- Args:
- nx_graph: A revertible NetworkX Graph with ``_uv_edges`` attributes.
-
- Returns:
- A StrictMultiDiGraph reconstructed from the input Graph.
- """
- graph = StrictMultiDiGraph()
- graph.add_nodes_from(nx_graph.nodes)
-
- # Restore multi-edge data from each edge's '_uv_edges' attribute.
- for _u, _v, data in nx_graph.edges(data=True):
- uv_edges = data.get("_uv_edges", [])
- for orig_u, orig_v, edges in uv_edges:
- for edge_id, edge_data in edges.items():
- graph.add_edge(orig_u, orig_v, edge_id, **edge_data)
- return graph
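
The deleted conversion helpers round-trip parallel edges through the `_uv_edges` attribute. A short sketch against the pre-removal `ngraph.graph` API:

    from ngraph.graph.convert import from_digraph, to_digraph
    from ngraph.graph.strict_multidigraph import StrictMultiDiGraph

    g = StrictMultiDiGraph()
    g.add_node("a")
    g.add_node("b")
    g.add_edge("a", "b", capacity=10, cost=1)
    g.add_edge("a", "b", capacity=5, cost=2)  # parallel edge

    # Parallel edges collapse into one DiGraph edge; the originals are kept
    # under '_uv_edges' because revertible=True by default.
    dg = to_digraph(g)
    assert dg.number_of_edges() == 1

    # from_digraph() restores both edges with their original keys and attrs.
    g2 = from_digraph(dg)
    assert len(g2.get_edges()) == 2
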
diff --git a/ngraph/graph/io.py b/ngraph/graph/io.py
deleted file mode 100644
index 94329f2..0000000
--- a/ngraph/graph/io.py
+++ /dev/null
@@ -1,233 +0,0 @@
-"""Graph serialization functions for node-link and edge-list formats."""
-
-from __future__ import annotations
-
-from typing import Any, Dict, Iterable, List, Optional
-
-from ngraph.graph.strict_multidigraph import NodeID, StrictMultiDiGraph
-
-
-def graph_to_node_link(graph: StrictMultiDiGraph) -> Dict[str, Any]:
- """Convert a StrictMultiDiGraph into a node-link dict representation.
-
-    This representation is suitable for JSON serialization (e.g., for D3.js or
-    NetworkX node-link formats).
-
- The returned dict has the following structure:
- {
- "graph": { ... top-level graph attributes ... },
- "nodes": [
- {"id": node_id, "attr": { ... node attributes ... }},
- ...
- ],
- "links": [
- {
-                "source": <node_index>,
-                "target": <node_index>,
-                "key": <edge_key>,
- "attr": { ... edge attributes ... }
- },
- ...
- ]
- }
-
- Args:
- graph: The StrictMultiDiGraph to convert.
-
- Returns:
- A dict containing the 'graph' attributes, list of 'nodes', and list of 'links'.
- """
- # Get nodes with their attributes and enforce a stable ordering.
- node_dict = graph.get_nodes()
- node_list = list(node_dict.keys())
- node_map = {node_id: i for i, node_id in enumerate(node_list)}
-
- return {
- "graph": dict(graph.graph),
- "nodes": [
- {"id": node_id, "attr": dict(node_dict[node_id])} for node_id in node_list
- ],
- "links": [
- {
- "source": node_map[src],
- "target": node_map[dst],
- "key": edge_id,
- "attr": dict(edge_attrs),
- }
- for edge_id, (src, dst, _, edge_attrs) in graph.get_edges().items()
- ],
- }
-
-
-def node_link_to_graph(data: Dict[str, Any]) -> StrictMultiDiGraph:
- """Reconstruct a StrictMultiDiGraph from its node-link dict representation.
-
- Expected input format:
- {
- "graph": { ... graph attributes ... },
- "nodes": [
-            {"id": <node_id>, "attr": { ... node attributes ... }},
- ...
- ],
- "links": [
- {
-                "source": <node_index>,
-                "target": <node_index>,
-                "key": <edge_key>,
- "attr": { ... edge attributes ... }
- },
- ...
- ]
- }
-
- Args:
- data: A dict representing the node-link structure.
-
- Returns:
- A StrictMultiDiGraph reconstructed from the provided data.
-
- Raises:
- KeyError: If required keys (e.g., "id" or "attr" on nodes) are missing.
- """
- # Create the graph with the top-level attributes.
- graph_attrs = data.get("graph", {})
- graph = StrictMultiDiGraph(**graph_attrs)
-
- # Build a mapping from integer indices to original node IDs.
- node_map: Dict[int, NodeID] = {}
- for idx, node_obj in enumerate(data.get("nodes", [])):
- node_id = node_obj["id"]
- graph.add_node(node_id, **node_obj["attr"])
- node_map[idx] = node_id
-
- # Add edges using the index mapping.
- for edge_obj in data.get("links", []):
- src_id = node_map[edge_obj["source"]]
- dst_id = node_map[edge_obj["target"]]
- edge_key = edge_obj.get("key", None)
- edge_attr = edge_obj.get("attr", {})
- graph.add_edge(src_id, dst_id, key=edge_key, **edge_attr)
-
- return graph
-
-
-def edgelist_to_graph(
- lines: Iterable[str],
- columns: List[str],
- separator: str = " ",
- graph: Optional[StrictMultiDiGraph] = None,
- source: str = "src",
- target: str = "dst",
- key: str = "key",
-) -> StrictMultiDiGraph:
- """Build or update a StrictMultiDiGraph from an edge list.
-
- Each line in the input is split by the specified separator into tokens. These tokens
- are mapped to column names provided in `columns`. The tokens corresponding to `source`
- and `target` become the node IDs. If a `key` column exists, its token is used as the edge
- ID; remaining tokens are added as edge attributes.
-
- Args:
- lines: An iterable of strings, each representing one edge.
- columns: A list of column names, e.g. ["src", "dst", "cost"].
- separator: The separator used to split each line (default is a space).
- graph: An existing StrictMultiDiGraph to update; if None, a new graph is created.
- source: The column name for the source node ID.
- target: The column name for the target node ID.
- key: The column name for a custom edge ID (if present).
-
- Returns:
- The updated (or newly created) StrictMultiDiGraph.
-
- Raises:
- RuntimeError: If a line does not match the expected number of columns.
- """
- if graph is None:
- graph = StrictMultiDiGraph()
-
- for line in lines:
- # Remove only newline characters.
- line = line.rstrip("\r\n")
- tokens = line.split(separator)
- if len(tokens) != len(columns):
- raise RuntimeError(
- f"Line '{line}' does not match expected columns {columns} (token count mismatch)."
- )
-
- line_dict = dict(zip(columns, tokens, strict=False))
- src_id = line_dict[source]
- dst_id = line_dict[target]
- edge_key = line_dict.get(key, None)
-
- # All tokens not corresponding to source, target, or key become edge attributes.
- attr_dict = {
- k: v for k, v in line_dict.items() if k not in (source, target, key)
- }
-
- # Ensure nodes exist since StrictMultiDiGraph does not auto-create nodes.
- if src_id not in graph:
- graph.add_node(src_id)
- if dst_id not in graph:
- graph.add_node(dst_id)
-
- graph.add_edge(src_id, dst_id, key=edge_key, **attr_dict)
-
- return graph
-
-
-def graph_to_edgelist(
- graph: StrictMultiDiGraph,
- columns: Optional[List[str]] = None,
- separator: str = " ",
- source_col: str = "src",
- target_col: str = "dst",
- key_col: str = "key",
-) -> List[str]:
- """Convert a StrictMultiDiGraph into an edge-list text representation.
-
- Each line in the output represents one edge with tokens joined by the given separator.
- By default, the output columns are:
- [source_col, target_col, key_col] + sorted(edge_attribute_names)
-
- If an explicit list of columns is provided, those columns (in that order) are used,
- and any missing values are output as an empty string.
-
- Args:
- graph: The StrictMultiDiGraph to export.
- columns: Optional list of column names. If None, they are auto-generated.
- separator: The string used to join tokens (default is a space).
- source_col: The column name for the source node (default "src").
- target_col: The column name for the target node (default "dst").
- key_col: The column name for the edge key (default "key").
-
- Returns:
- A list of strings, each representing one edge in the specified column format.
- """
- edge_dicts: List[Dict[str, str]] = []
- all_attr_keys = set()
-
- # Build a list of dicts for each edge.
- for edge_id, (src, dst, _, edge_attrs) in graph.get_edges().items():
- # Use "is not None" to correctly handle edge keys such as 0.
- key_val = str(edge_id) if edge_id is not None else ""
- row = {
- source_col: str(src),
- target_col: str(dst),
- key_col: key_val,
- }
- for attr_key, attr_val in edge_attrs.items():
- row[attr_key] = str(attr_val)
- all_attr_keys.add(attr_key)
- edge_dicts.append(row)
-
- # Auto-generate columns if not provided.
- if columns is None:
- sorted_attr_keys = sorted(all_attr_keys)
- columns = [source_col, target_col, key_col] + sorted_attr_keys
-
- lines: List[str] = []
- for row_dict in edge_dicts:
- # For each specified column, output the corresponding value or an empty string if absent.
- tokens = [row_dict.get(col, "") for col in columns]
- lines.append(separator.join(tokens))
-
- return lines
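
These two helpers are inverses for simple cases. A round-trip sketch against the pre-removal API:

    from ngraph.graph.io import edgelist_to_graph, graph_to_edgelist

    lines = ["a b e1 10", "b c e2 20"]
    g = edgelist_to_graph(lines, columns=["src", "dst", "key", "cost"])
    assert g.has_edge_by_id("e1")
    assert g.get_edge_attr("e2") == {"cost": "20"}  # tokens stay strings

    # Output columns default to [src, dst, key] + sorted attribute names.
    assert graph_to_edgelist(g) == ["a b e1 10", "b c e2 20"]
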
diff --git a/ngraph/graph/strict_multidigraph.py b/ngraph/graph/strict_multidigraph.py
deleted file mode 100644
index ccd2d20..0000000
--- a/ngraph/graph/strict_multidigraph.py
+++ /dev/null
@@ -1,329 +0,0 @@
-"""Strict multi-directed graph with validation and convenience APIs.
-
-`StrictMultiDiGraph` extends `networkx.MultiDiGraph` to enforce explicit node
-management, unique edge identifiers, and predictable error handling. It exposes
-helpers to access nodes/edges as dictionaries and to serialize in node-link
-format via `to_dict()`.
-"""
-
-from __future__ import annotations
-
-from pickle import dumps, loads
-from typing import Any, Dict, Hashable, List, Optional, Tuple
-
-import networkx as nx
-
-NodeID = Hashable
-EdgeID = Hashable
-AttrDict = Dict[str, Any]
-EdgeTuple = Tuple[NodeID, NodeID, EdgeID, AttrDict]
-
-
-class StrictMultiDiGraph(nx.MultiDiGraph):
- """A custom multi-directed graph with strict rules and unique edge IDs.
-
- This class enforces:
- - No automatic creation of missing nodes when adding an edge.
- - No duplicate nodes (raises ValueError on duplicates).
- - No duplicate edges by key (raises ValueError on duplicates).
- - Removing non-existent nodes or edges raises ValueError.
- - Each edge key must be unique; by default, a Base64-UUID is generated
- if none is provided.
- - ``copy()`` can perform a pickle-based deep copy that may be faster
- than the NetworkX default.
-
- Inherits from:
- networkx.MultiDiGraph
- """
-
- def __init__(self, *args, **kwargs) -> None:
- """Initialize a StrictMultiDiGraph.
-
- Args:
- *args: Positional arguments forwarded to the MultiDiGraph constructor.
- **kwargs: Keyword arguments forwarded to the MultiDiGraph constructor.
-
- Attributes:
- _edges: Map edge key to ``(source_node, target_node, edge_key, attribute_dict)``.
- """
- super().__init__(*args, **kwargs)
- self._edges: Dict[EdgeID, EdgeTuple] = {}
- # Monotonically increasing integer for auto-assigned edge IDs.
- # This counter only advances; removed edges do not reuse IDs.
- self._next_edge_id: int = 0
-
- def new_edge_key(self, u: NodeID, v: NodeID, key: Optional[int] = None) -> int: # type: ignore[override]
- """Return a new unique integer edge ID.
-
- Signature matches NetworkX's ``new_edge_key(self, u, v, key=None)``.
-
- Args:
- u: Source node identifier (unused here).
- v: Destination node identifier (unused here).
- key: Optional suggestion (ignored); maintained for API compatibility.
-
- Returns:
- int: A new unique integer edge id.
- """
- next_edge_id = self._next_edge_id
- self._next_edge_id += 1
- return int(next_edge_id)
-
- def copy(self, as_view: bool = False, pickle: bool = True) -> StrictMultiDiGraph:
- """Create a copy of this graph.
-
- By default, use pickle-based deep copying. If ``pickle=False``,
- call the parent class's copy, which supports views.
-
- Args:
- as_view: If True, return a view instead of a full copy; only used
- if ``pickle=False``.
- pickle: If True, perform a pickle-based deep copy.
-
- Returns:
- StrictMultiDiGraph: A new instance (or view) of the graph.
- """
- if not pickle:
- return super().copy(as_view=as_view) # type: ignore[return-value]
- return loads(dumps(self))
-
- #
- # Node management
- #
- def add_node(self, node_for_adding: NodeID, **attr: Any) -> None:
- """Add a single node, disallowing duplicates.
-
- Args:
- node_for_adding: The node to add.
- **attr: Arbitrary attributes for this node.
-
- Raises:
- ValueError: If the node already exists in the graph.
- """
- if node_for_adding in self:
- raise ValueError(f"Node '{node_for_adding}' already exists in this graph.")
- super().add_node(node_for_adding, **attr)
-
- def remove_node(self, n: NodeID) -> None:
- """Remove a single node and all incident edges.
-
- Args:
- n: The node to remove.
-
- Raises:
- ValueError: If the node does not exist in the graph.
- """
- if n not in self:
- raise ValueError(f"Node '{n}' does not exist.")
- # Remove any edges that reference this node
- to_delete = [
- e_id for e_id, (s, t, _, _) in self._edges.items() if s == n or t == n
- ]
- for e_id in to_delete:
- del self._edges[e_id]
-
- super().remove_node(n)
-
- #
- # Edge management
- #
- def add_edge( # pyright: ignore[reportIncompatibleMethodOverride]
- self,
- u_for_edge: NodeID,
- v_for_edge: NodeID,
- key: Optional[EdgeID] = None,
- **attr: Any,
- ) -> EdgeID:
- """Add a directed edge from u_for_edge to v_for_edge.
-
- If no key is provided, a unique monotonically increasing integer key is
- assigned via ``new_edge_key``. This method does not create nodes
- automatically; both u_for_edge and v_for_edge must already exist in the
- graph. When an explicit integer key is provided, the internal counter is
- advanced to avoid collisions with future auto-assigned keys.
-
- Args:
- u_for_edge: The source node. Must exist in the graph.
- v_for_edge: The target node. Must exist in the graph.
- key: The unique edge key. If None, a new key is generated. Must not
- already be in use if provided.
- **attr: Arbitrary edge attributes.
-
- Returns:
- EdgeID: The key associated with this new edge.
-
- Raises:
- ValueError: If either node does not exist, or if the key is already in use.
- """
- if u_for_edge not in self:
- raise ValueError(f"Source node '{u_for_edge}' does not exist.")
- if v_for_edge not in self:
- raise ValueError(f"Target node '{v_for_edge}' does not exist.")
-
- if key is None:
- key = self.new_edge_key(u_for_edge, v_for_edge)
- else:
- if key in self._edges:
- raise ValueError(f"Edge with id '{key}' already exists.")
- # Keep the auto counter ahead of any explicit integer keys to avoid collisions
- if isinstance(key, int) and key >= self._next_edge_id:
- self._next_edge_id = key + 1
-
- super().add_edge(u_for_edge, v_for_edge, key=key, **attr)
- # At this point, key is guaranteed to be non-None (either provided or generated)
- assert key is not None
- self._edges[key] = (
- u_for_edge,
- v_for_edge,
- key,
- self[u_for_edge][v_for_edge][key], # pyright: ignore[reportArgumentType]
- )
- return key
-
- def remove_edge(
- self,
- u: NodeID,
- v: NodeID,
- key: Optional[EdgeID] = None,
- ) -> None:
- """Remove an edge (or edges) between nodes u and v.
-
- If key is provided, remove only that edge. Otherwise, remove all edges
- from u to v.
-
- Args:
- u: The source node of the edge(s). Must exist in the graph.
- v: The target node of the edge(s). Must exist in the graph.
- key: If provided, remove the edge with this key. Otherwise, remove
- all edges from u to v.
-
- Raises:
- ValueError: If the nodes do not exist, or if the specified edge key
- does not exist, or if no edges are found from u to v.
- """
- if u not in self:
- raise ValueError(f"Source node '{u}' does not exist.")
- if v not in self:
- raise ValueError(f"Target node '{v}' does not exist.")
-
- if key is not None:
- if key not in self._edges:
- raise ValueError(f"No edge with id='{key}' found from {u} to {v}.")
- src_node, dst_node, _, _ = self._edges[key]
- if src_node != u or dst_node != v:
- raise ValueError(
- f"Edge with id='{key}' is actually from {src_node} to {dst_node}, "
- f"not from {u} to {v}."
- )
- self.remove_edge_by_id(key)
- else:
- if v not in self.succ[u]:
- raise ValueError(f"No edges from '{u}' to '{v}' to remove.")
- edge_ids = tuple(self.succ[u][v])
- if not edge_ids:
- raise ValueError(f"No edges from '{u}' to '{v}' to remove.")
- for e_id in edge_ids:
- self.remove_edge_by_id(e_id)
-
- def remove_edge_by_id(self, key: EdgeID) -> None:
- """Remove a directed edge by its unique key.
-
- Args:
- key: The key identifying the edge to remove.
-
- Raises:
- ValueError: If no edge with this key exists in the graph.
- """
- if key not in self._edges:
- raise ValueError(f"Edge with id='{key}' not found.")
- src_node, dst_node, _, _ = self._edges.pop(key)
- super().remove_edge(src_node, dst_node, key=key)
-
- #
- # Convenience methods
- #
- def get_nodes(self) -> Dict[NodeID, AttrDict]:
- """Retrieve all nodes and their attributes as a dictionary.
-
- Returns:
- Dict[NodeID, AttrDict]: A mapping of node ID to its attributes.
- """
- return dict(self.nodes(data=True))
-
- def get_edges(self) -> Dict[EdgeID, EdgeTuple]:
- """Retrieve a dictionary of all edges by their keys.
-
- Returns:
- Dict[EdgeID, EdgeTuple]: A mapping of edge key to
- ``(source_node, target_node, edge_key, edge_attributes)``.
- """
- return self._edges
-
- def get_edge_attr(self, key: EdgeID) -> AttrDict:
- """Retrieve the attribute dictionary of a specific edge.
-
- Args:
- key: The unique edge key.
-
- Returns:
- AttrDict: The attribute dictionary for the edge.
-
- Raises:
- ValueError: If no edge with this key is found.
- """
- if key not in self._edges:
- raise ValueError(f"Edge with id='{key}' not found.")
- return self._edges[key][3]
-
- def has_edge_by_id(self, key: EdgeID) -> bool:
- """Check whether an edge with the given key exists.
-
- Args:
- key: The unique edge key to check.
-
- Returns:
- bool: True if the edge key exists, otherwise False.
- """
- return key in self._edges
-
- def edges_between(self, u: NodeID, v: NodeID) -> List[EdgeID]:
- """List all edge keys from node u to node v.
-
- Args:
- u: The source node.
- v: The target node.
-
- Returns:
- List[EdgeID]: List of edge keys from u to v, or an empty list if none exist.
- """
- if u not in self.succ or v not in self.succ[u]:
- return []
- return list(self.succ[u][v].keys())
-
- def update_edge_attr(self, key: EdgeID, **attr: Any) -> None:
- """Update attributes on an existing edge by key.
-
- Args:
- key: The unique edge key to update.
- **attr: Arbitrary edge attributes to add or modify.
-
- Raises:
- ValueError: If the edge with the given key does not exist.
- """
- if key not in self._edges:
- raise ValueError(f"Edge with id='{key}' not found.")
- self._edges[key][3].update(attr)
-
- def to_dict(self) -> Dict[str, Any]:
- """Convert the graph to a dictionary representation suitable for JSON serialization.
-
- Return a node-link format dictionary with graph attributes, nodes, and edges.
- The format is compatible with visualization libraries like D3.js.
-
- Returns:
- Dict[str, Any]: Dictionary containing 'graph', 'nodes', and 'links' keys.
- """
- # Import here to avoid circular import
- from ngraph.graph.io import graph_to_node_link
-
- return graph_to_node_link(self)
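
The strictness rules listed in the class docstring are easy to demonstrate. A sketch against the pre-removal API:

    from ngraph.graph.strict_multidigraph import StrictMultiDiGraph

    g = StrictMultiDiGraph()
    g.add_node("a")
    g.add_node("b")

    # add_edge() returns the unique key (an auto-assigned int here).
    k = g.add_edge("a", "b", capacity=1.0)
    assert g.has_edge_by_id(k)

    try:
        g.add_edge("a", "missing")  # nodes are never auto-created
    except ValueError:
        pass

    try:
        g.add_node("a")  # duplicate nodes are rejected, not merged
    except ValueError:
        pass
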
diff --git a/ngraph/model/__init__.py b/ngraph/model/__init__.py
index db9b886..6576271 100644
--- a/ngraph/model/__init__.py
+++ b/ngraph/model/__init__.py
@@ -1,8 +1,8 @@
"""Network model package.
This package defines the core network data model used across NetGraph, including
-nodes, links, risk groups, the mutable scenario-level `Network`, and the
-read-only `NetworkView` overlay for analysis with temporary exclusions.
+nodes, links, risk groups, and the scenario-level `Network`. Temporary exclusions
+for analysis are handled via node_mask and edge_mask parameters in NetGraph-Core algorithms.
"""
__all__ = [
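
Since exclusions are now boolean masks rather than a separate overlay object, the intended call shape looks roughly like this (a sketch; mask semantics follow the `create_flow_policy` docstring later in this diff, where True means include):

    import numpy as np

    num_nodes, num_edges = 4, 6
    node_mask = np.ones(num_nodes, dtype=bool)
    edge_mask = np.ones(num_edges, dtype=bool)
    node_mask[2] = False  # temporarily exclude node index 2 for one analysis run
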
diff --git a/ngraph/components.py b/ngraph/model/components.py
similarity index 99%
rename from ngraph/components.py
rename to ngraph/model/components.py
index bd8bd36..ba3839b 100644
--- a/ngraph/components.py
+++ b/ngraph/model/components.py
@@ -8,7 +8,7 @@
import yaml
-from ngraph.yaml_utils import normalize_yaml_dict_keys
+from ngraph.utils.yaml_utils import normalize_yaml_dict_keys
@dataclass
diff --git a/ngraph/demand/matrix.py b/ngraph/model/demand/matrix.py
similarity index 98%
rename from ngraph/demand/matrix.py
rename to ngraph/model/demand/matrix.py
index a8ccc5f..025db0a 100644
--- a/ngraph/demand/matrix.py
+++ b/ngraph/model/demand/matrix.py
@@ -10,7 +10,7 @@
from dataclasses import dataclass, field
from typing import Any
-from ngraph.demand.spec import TrafficDemand
+from ngraph.model.demand.spec import TrafficDemand
@dataclass
diff --git a/ngraph/demand/spec.py b/ngraph/model/demand/spec.py
similarity index 70%
rename from ngraph/demand/spec.py
rename to ngraph/model/demand/spec.py
index a027e3a..c3c3805 100644
--- a/ngraph/demand/spec.py
+++ b/ngraph/model/demand/spec.py
@@ -2,15 +2,22 @@
Defines `TrafficDemand`, a user-facing specification used by demand expansion
and placement. It can carry either a concrete `FlowPolicy` instance or a
-`FlowPolicyConfig` enum to construct one.
+`FlowPolicyPreset` enum to construct one.
"""
from dataclasses import dataclass, field
-from typing import Any, Dict, Optional
+from typing import TYPE_CHECKING, Any, Dict, Optional
-from ngraph.flows.policy import FlowPolicy, FlowPolicyConfig
+from ngraph.model.flow.policy_config import FlowPolicyPreset
from ngraph.utils.ids import new_base64_uuid
+if TYPE_CHECKING:
+ import netgraph_core
+
+ FlowPolicy = netgraph_core.FlowPolicy
+else:
+ FlowPolicy = None # type: ignore
+
@dataclass
class TrafficDemand:
@@ -22,8 +29,8 @@ class TrafficDemand:
priority: Priority class for this demand (lower value = higher priority).
demand: Total demand volume.
demand_placed: Portion of this demand placed so far.
- flow_policy_config: Policy configuration used to build a `FlowPolicy` if
- ``flow_policy`` is not provided.
+ flow_policy_config: Policy preset (FlowPolicyPreset enum) used to build
+ a `FlowPolicy` if ``flow_policy`` is not provided.
flow_policy: Concrete policy instance. If set, it overrides
``flow_policy_config``.
mode: Expansion mode, ``"combine"`` or ``"pairwise"``.
@@ -36,8 +43,8 @@ class TrafficDemand:
priority: int = 0
demand: float = 0.0
demand_placed: float = 0.0
- flow_policy_config: Optional[FlowPolicyConfig] = None
- flow_policy: Optional[FlowPolicy] = None
+ flow_policy_config: Optional[FlowPolicyPreset] = None
+ flow_policy: Optional["FlowPolicy"] = None # type: ignore[valid-type]
mode: str = "combine"
attrs: Dict[str, Any] = field(default_factory=dict)
id: str = field(init=False)
diff --git a/ngraph/failure/__init__.py b/ngraph/model/failure/__init__.py
similarity index 85%
rename from ngraph/failure/__init__.py
rename to ngraph/model/failure/__init__.py
index 5de3a09..0ae54e5 100644
--- a/ngraph/failure/__init__.py
+++ b/ngraph/model/failure/__init__.py
@@ -3,7 +3,7 @@
Provides primitives to define failure selection rules and to run Monte Carlo
failure analyses. The `policy` module defines data classes for expressing
selection logic over nodes, links, and risk groups. The `manager` subpackage
-contains the engine that applies those policies to a `NetworkView` and runs
+contains the engine that applies those policies to a `Network` and runs
iterative analyses.
Public entry points:
diff --git a/ngraph/failure/conditions.py b/ngraph/model/failure/conditions.py
similarity index 100%
rename from ngraph/failure/conditions.py
rename to ngraph/model/failure/conditions.py
diff --git a/ngraph/model/failure/parser.py b/ngraph/model/failure/parser.py
new file mode 100644
index 0000000..6d76cfb
--- /dev/null
+++ b/ngraph/model/failure/parser.py
@@ -0,0 +1,140 @@
+"""Parsers for FailurePolicySet and related failure modeling structures."""
+
+from __future__ import annotations
+
+from typing import Any, Callable, Dict, List, Optional
+
+from ngraph.logging import get_logger
+from ngraph.model.failure.policy import (
+ FailureCondition,
+ FailureMode,
+ FailurePolicy,
+ FailureRule,
+)
+from ngraph.model.failure.policy_set import FailurePolicySet
+from ngraph.model.network import RiskGroup
+from ngraph.utils.yaml_utils import normalize_yaml_dict_keys
+
+_logger = get_logger(__name__)
+
+
+def build_risk_groups(rg_data: List[Dict[str, Any]]) -> List[RiskGroup]:
+    """Build RiskGroup objects (including nested children) from raw config data."""
+
+    def build_one(d: Dict[str, Any]) -> RiskGroup:
+ name = d.get("name")
+ if not name:
+ raise ValueError("RiskGroup entry missing 'name' field.")
+ disabled = d.get("disabled", False)
+ children_list = d.get("children", [])
+ child_objs = [build_one(cd) for cd in children_list]
+ attrs = normalize_yaml_dict_keys(d.get("attrs", {}))
+ return RiskGroup(name=name, disabled=disabled, children=child_objs, attrs=attrs)
+
+ return [build_one(entry) for entry in rg_data]
+
+
+def build_failure_policy(
+ fp_data: Dict[str, Any],
+ *,
+ policy_name: str,
+ derive_seed: Callable[[str], Optional[int]],
+) -> FailurePolicy:
+    """Build a single FailurePolicy from a raw definition dict."""
+
+ def build_rules(rule_dicts: List[Dict[str, Any]]) -> List[FailureRule]:
+ out: List[FailureRule] = []
+ for rule_dict in rule_dicts:
+ entity_scope = rule_dict.get("entity_scope", "node")
+ conditions_data = rule_dict.get("conditions", [])
+ if not isinstance(conditions_data, list):
+ raise ValueError("Each rule's 'conditions' must be a list if present.")
+ conditions: List[FailureCondition] = []
+ for cond_dict in conditions_data:
+ conditions.append(
+ FailureCondition(
+ attr=cond_dict["attr"],
+ operator=cond_dict["operator"],
+ value=cond_dict["value"],
+ )
+ )
+ out.append(
+ FailureRule(
+ entity_scope=entity_scope,
+ conditions=conditions,
+ logic=rule_dict.get("logic", "or"),
+ rule_type=rule_dict.get("rule_type", "all"),
+ probability=rule_dict.get("probability", 1.0),
+ count=rule_dict.get("count", 1),
+ weight_by=rule_dict.get("weight_by"),
+ )
+ )
+ return out
+
+ fail_srg = fp_data.get("fail_risk_groups", False)
+ fail_rg_children = fp_data.get("fail_risk_group_children", False)
+ attrs = normalize_yaml_dict_keys(fp_data.get("attrs", {}))
+
+ modes: List[FailureMode] = []
+ modes_data = fp_data.get("modes", [])
+ if not isinstance(modes_data, list) or not modes_data:
+ raise ValueError("failure_policy requires non-empty 'modes' list.")
+ for m in modes_data:
+ if not isinstance(m, dict):
+ raise ValueError("Each mode must be a mapping.")
+ weight = float(m.get("weight", 0.0))
+ mode_rules_data = m.get("rules", [])
+ if not isinstance(mode_rules_data, list):
+ raise ValueError("Each mode 'rules' must be a list.")
+ mode_rules = build_rules(mode_rules_data)
+ mode_attrs = normalize_yaml_dict_keys(m.get("attrs", {}))
+ modes.append(FailureMode(weight=weight, rules=mode_rules, attrs=mode_attrs))
+
+ policy_seed = derive_seed(policy_name)
+
+ return FailurePolicy(
+ attrs=attrs,
+ fail_risk_groups=fail_srg,
+ fail_risk_group_children=fail_rg_children,
+ seed=policy_seed,
+ modes=modes,
+ )
+
+
+def build_failure_policy_set(
+ raw: Dict[str, Any],
+ *,
+ derive_seed: Callable[[str], Optional[int]],
+) -> FailurePolicySet:
+ """Build a FailurePolicySet from raw config data.
+
+ Args:
+ raw: Mapping of policy name -> policy definition dict.
+ derive_seed: Callable to derive deterministic seeds from component names.
+
+ Returns:
+ Configured FailurePolicySet.
+
+ Raises:
+ ValueError: If raw is not a dict or contains invalid policy definitions.
+ """
+ if not isinstance(raw, dict):
+ raise ValueError(
+ "'failure_policy_set' must be a mapping of name -> FailurePolicy definition"
+ )
+
+ normalized_fps = normalize_yaml_dict_keys(raw)
+ fps = FailurePolicySet()
+
+ # Capture derive_seed in a closure with a different name to avoid confusion
+ # when passing to build_failure_policy (which also has a derive_seed parameter)
+ outer_derive_seed = derive_seed
+
+ for name, fp_data in normalized_fps.items():
+ if not isinstance(fp_data, dict):
+ raise ValueError(
+ f"Failure policy '{name}' must map to a FailurePolicy definition dict"
+ )
+ policy = build_failure_policy(
+ fp_data,
+ policy_name=name,
+ derive_seed=lambda n, _fn=outer_derive_seed: _fn(f"failure_policy:{n}"),
+ )
+ fps.add(name, policy)
+ return fps
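
A minimal usage sketch for the new parser. The raw mapping mirrors a YAML `failure_policy_set` section, and the `derive_seed` stand-in disables deterministic seeding:

    from ngraph.model.failure.parser import build_failure_policy_set

    raw = {
        "all_links": {
            # One mode (weight 1.0) with a single rule that targets every
            # link, relying on the parser defaults (logic="or", rule_type="all").
            "modes": [{"weight": 1.0, "rules": [{"entity_scope": "link"}]}],
        }
    }

    fps = build_failure_policy_set(raw, derive_seed=lambda name: None)
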
diff --git a/ngraph/failure/policy.py b/ngraph/model/failure/policy.py
similarity index 100%
rename from ngraph/failure/policy.py
rename to ngraph/model/failure/policy.py
diff --git a/ngraph/failure/policy_set.py b/ngraph/model/failure/policy_set.py
similarity index 97%
rename from ngraph/failure/policy_set.py
rename to ngraph/model/failure/policy_set.py
index 508d94c..efe65e9 100644
--- a/ngraph/failure/policy_set.py
+++ b/ngraph/model/failure/policy_set.py
@@ -10,7 +10,7 @@
from dataclasses import dataclass, field
from typing import Any
-from ngraph.failure.policy import FailurePolicy
+from ngraph.model.failure.policy import FailurePolicy
@dataclass
diff --git a/ngraph/model/flow/policy_config.py b/ngraph/model/flow/policy_config.py
new file mode 100644
index 0000000..5b66614
--- /dev/null
+++ b/ngraph/model/flow/policy_config.py
@@ -0,0 +1,189 @@
+"""Flow policy preset configurations for NetGraph.
+
+Provides convenient factory functions to create common FlowPolicy configurations
+using NetGraph-Core's FlowPolicy and FlowPolicyConfig.
+"""
+
+from __future__ import annotations
+
+from enum import IntEnum
+
+try:
+ import netgraph_core
+except ImportError as e:
+ raise ImportError(
+ "netgraph_core module not found. Ensure NetGraph-Core is installed."
+ ) from e
+
+
+class FlowPolicyPreset(IntEnum):
+ """Enumerates common flow policy presets for traffic routing.
+
+ These presets map to specific combinations of path algorithms, flow placement
+ strategies, and edge selection modes provided by NetGraph-Core.
+ """
+
+ SHORTEST_PATHS_ECMP = 1
+ """Hop-by-hop equal-cost multi-path routing (ECMP).
+
+ Single flow with equal-cost path splitting, similar to IP forwarding with ECMP.
+ """
+
+ SHORTEST_PATHS_WCMP = 2
+ """Hop-by-hop weighted cost multi-path routing (WCMP).
+
+ Single flow with proportional splitting over equal-cost paths.
+ """
+
+ TE_WCMP_UNLIM = 3
+ """Traffic engineering with unlimited WCMP flows.
+
+ Capacity-aware path selection with proportional flow placement.
+ """
+
+ TE_ECMP_UP_TO_256_LSP = 4
+ """Traffic engineering with up to 256 label-switched paths (LSPs) using ECMP.
+
+ Capacity-aware path selection with equal-balanced placement and reoptimization.
+
+ Each LSP is a distinct tunnel using a single path (MPLS LSP semantics). Multiple LSPs
+ can share the same path. With N LSPs and M paths where N > M, LSPs are distributed
+ across paths (~N/M LSPs per path). ECMP constraint ensures all LSPs carry equal volume.
+
+ Configuration: multipath=False ensures tunnel-based ECMP (not hash-based ECMP).
+ """
+
+ TE_ECMP_16_LSP = 5
+ """Traffic engineering with exactly 16 LSPs using ECMP.
+
+ Fixed 16 flows with capacity-aware selection, equal-balanced placement, and reoptimization.
+
+ Each LSP is a distinct tunnel using a single path (MPLS LSP semantics). With 16 LSPs
+ and M paths: if M ≥ 16, one LSP per path; if M < 16, some paths carry multiple LSPs.
+ ECMP constraint ensures all LSPs carry equal volume.
+
+ Example: 15 parallel paths (capacity 1.0 each) with 16 LSPs:
+ - 15 paths carry 1 LSP, 1 path carries 2 LSPs
+ - ECMP constraint limits all LSPs to 0.5 units (bottleneck path: 1.0 / 2 = 0.5)
+ - Total: 16 × 0.5 = 8.0 units
+
+ Configuration: multipath=False ensures tunnel-based ECMP (not hash-based ECMP).
+ """
+
+
+def create_flow_policy(
+ algorithms: netgraph_core.Algorithms,
+ graph: netgraph_core.Graph,
+ preset: FlowPolicyPreset,
+ node_mask=None, # Will be supported after C++ bindings are rebuilt
+ edge_mask=None, # Will be supported after C++ bindings are rebuilt
+) -> netgraph_core.FlowPolicy:
+ """Create a FlowPolicy instance from a preset configuration.
+
+ Args:
+ algorithms: NetGraph-Core Algorithms instance.
+ graph: NetGraph-Core Graph handle.
+ preset: FlowPolicyPreset enum value specifying the desired policy.
+ node_mask: Optional numpy bool array for node exclusions (True = include).
+ edge_mask: Optional numpy bool array for edge exclusions (True = include).
+
+ Returns:
+ netgraph_core.FlowPolicy: Configured policy instance.
+
+ Raises:
+ ValueError: If an unknown FlowPolicyPreset value is provided.
+
+ Example:
+ >>> backend = netgraph_core.Backend.cpu()
+ >>> algs = netgraph_core.Algorithms(backend)
+ >>> graph = algs.build_graph(strict_multidigraph)
+ >>> policy = create_flow_policy(algs, graph, FlowPolicyPreset.SHORTEST_PATHS_ECMP)
+ """
+ if preset == FlowPolicyPreset.SHORTEST_PATHS_ECMP:
+ # Hop-by-hop equal-cost balanced routing (similar to IP forwarding with ECMP)
+ config = netgraph_core.FlowPolicyConfig()
+ config.path_alg = netgraph_core.PathAlg.SPF
+ config.flow_placement = netgraph_core.FlowPlacement.EQUAL_BALANCED
+ config.selection = netgraph_core.EdgeSelection(
+ multi_edge=True,
+ require_capacity=False,
+ tie_break=netgraph_core.EdgeTieBreak.DETERMINISTIC,
+ )
+ config.min_flow_count = 1
+ config.max_flow_count = 1
+ return netgraph_core.FlowPolicy(
+ algorithms, graph, config, node_mask=node_mask, edge_mask=edge_mask
+ )
+
+ elif preset == FlowPolicyPreset.SHORTEST_PATHS_WCMP:
+ # Hop-by-hop weighted ECMP (WCMP) over equal-cost paths (proportional split)
+ config = netgraph_core.FlowPolicyConfig()
+ config.path_alg = netgraph_core.PathAlg.SPF
+ config.flow_placement = netgraph_core.FlowPlacement.PROPORTIONAL
+ config.selection = netgraph_core.EdgeSelection(
+ multi_edge=True,
+ require_capacity=False,
+ tie_break=netgraph_core.EdgeTieBreak.DETERMINISTIC,
+ )
+ config.min_flow_count = 1
+ config.max_flow_count = 1
+ return netgraph_core.FlowPolicy(
+ algorithms, graph, config, node_mask=node_mask, edge_mask=edge_mask
+ )
+
+ elif preset == FlowPolicyPreset.TE_WCMP_UNLIM:
+ # Traffic engineering with WCMP (proportional split) and capacity-aware selection
+ config = netgraph_core.FlowPolicyConfig()
+ config.path_alg = netgraph_core.PathAlg.SPF
+ config.flow_placement = netgraph_core.FlowPlacement.PROPORTIONAL
+ config.selection = netgraph_core.EdgeSelection(
+ multi_edge=True,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config.min_flow_count = 1
+ # max_flow_count defaults to None (unlimited)
+ return netgraph_core.FlowPolicy(
+ algorithms, graph, config, node_mask=node_mask, edge_mask=edge_mask
+ )
+
+ elif preset == FlowPolicyPreset.TE_ECMP_UP_TO_256_LSP:
+ # TE with up to 256 LSPs using ECMP flow placement
+ # multipath=False ensures each LSP is a single path (MPLS tunnel semantics)
+ config = netgraph_core.FlowPolicyConfig()
+ config.path_alg = netgraph_core.PathAlg.SPF
+ config.flow_placement = netgraph_core.FlowPlacement.EQUAL_BALANCED
+ config.selection = netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config.multipath = False # Each LSP uses a single path (tunnel-based ECMP)
+ config.min_flow_count = 1
+ config.max_flow_count = 256
+ config.reoptimize_flows_on_each_placement = True
+ return netgraph_core.FlowPolicy(
+ algorithms, graph, config, node_mask=node_mask, edge_mask=edge_mask
+ )
+
+ elif preset == FlowPolicyPreset.TE_ECMP_16_LSP:
+ # TE with exactly 16 LSPs using ECMP flow placement
+ # multipath=False ensures each LSP is a single path (MPLS tunnel semantics)
+ config = netgraph_core.FlowPolicyConfig()
+ config.path_alg = netgraph_core.PathAlg.SPF
+ config.flow_placement = netgraph_core.FlowPlacement.EQUAL_BALANCED
+ config.selection = netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config.multipath = False # Each LSP uses a single path (tunnel-based ECMP)
+ config.min_flow_count = 16
+ config.max_flow_count = 16
+ config.reoptimize_flows_on_each_placement = True
+ return netgraph_core.FlowPolicy(
+ algorithms, graph, config, node_mask=node_mask, edge_mask=edge_mask
+ )
+
+ else:
+ raise ValueError(f"Unknown flow policy preset: {preset}")
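
A quick arithmetic check of the ECMP constraint described in the `TE_ECMP_16_LSP` docstring above (16 equal-volume LSPs over 15 parallel unit-capacity paths):

    lsps, paths, capacity = 16, 15, 1.0
    lsps_on_busiest_path = -(-lsps // paths)      # ceil(16 / 15) = 2
    per_lsp = capacity / lsps_on_busiest_path     # bottleneck caps every LSP at 0.5
    total = lsps * per_lsp                        # 16 * 0.5 = 8.0
    assert total == 8.0
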
diff --git a/ngraph/model/network.py b/ngraph/model/network.py
index 8357187..d2704c8 100644
--- a/ngraph/model/network.py
+++ b/ngraph/model/network.py
@@ -1,44 +1,17 @@
-"""Network topology modeling with Node, Link, RiskGroup, and Network classes."""
+"""Network topology modeling with Node, Link, RiskGroup, and Network classes.
+
+This module provides the core network model classes (Node, Link, RiskGroup, Network)
+that can be used independently. The build_core_graph() method requires netgraph_core
+to be installed and will raise ImportError with a clear message if missing.
+"""
from __future__ import annotations
import re
-from collections.abc import Set as AbstractSet
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Set, Tuple
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.algorithms.types import FlowSummary
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
from ngraph.logging import get_logger
-from ngraph.paths.path import Path as _NGPath
-from ngraph.solver.maxflow import (
- max_flow as _solver_max_flow,
-)
-from ngraph.solver.maxflow import (
- max_flow_detailed as _solver_max_flow_detailed,
-)
-from ngraph.solver.maxflow import (
- max_flow_with_graph as _solver_max_flow_with_graph,
-)
-from ngraph.solver.maxflow import (
- max_flow_with_summary as _solver_max_flow_with_summary,
-)
-from ngraph.solver.maxflow import (
- saturated_edges as _solver_saturated_edges,
-)
-from ngraph.solver.maxflow import (
- sensitivity_analysis as _solver_sensitivity_analysis,
-)
-from ngraph.solver.paths import (
- k_shortest_paths as _solver_k_shortest_paths,
-)
-from ngraph.solver.paths import (
- shortest_path_costs as _solver_shortest_path_costs,
-)
-from ngraph.solver.paths import (
- shortest_paths as _solver_shortest_paths,
-)
from ngraph.utils.ids import new_base64_uuid
LOGGER = get_logger(__name__)
@@ -121,8 +94,8 @@ class Network:
Network represents the scenario-level topology with persistent state (nodes/links
that are disabled in the scenario configuration). For temporary exclusion of
- nodes/links during analysis (e.g., failure simulation), use NetworkView instead
- of modifying the Network's disabled states.
+ nodes/links during analysis (e.g., failure simulation), use node_mask and edge_mask
+ parameters when calling NetGraph-Core algorithms.
Attributes:
nodes (Dict[str, Node]): Mapping from node name -> Node object.
@@ -135,6 +108,9 @@ class Network:
links: Dict[str, Link] = field(default_factory=dict)
risk_groups: Dict[str, RiskGroup] = field(default_factory=dict)
attrs: Dict[str, Any] = field(default_factory=dict)
+ _selection_cache: Dict[str, Dict[str, List[Node]]] = field(
+ default_factory=dict, init=False, repr=False
+ )
def add_node(self, node: Node) -> None:
"""Add a node to the network (keyed by node.name).
@@ -148,6 +124,7 @@ def add_node(self, node: Node) -> None:
if node.name in self.nodes:
raise ValueError(f"Node '{node.name}' already exists in the network.")
self.nodes[node.name] = node
+ self._selection_cache.clear() # Invalidate cache on modification
def add_link(self, link: Link) -> None:
"""Add a link to the network (keyed by the link's auto-generated ID).
@@ -165,115 +142,6 @@ def add_link(self, link: Link) -> None:
self.links[link.id] = link
- def to_strict_multidigraph(
- self,
- add_reverse: bool = True,
- *,
- compact: bool = False,
- ) -> StrictMultiDiGraph:
- """Create a StrictMultiDiGraph representation of this Network.
-
- Only includes nodes and links that are not disabled in the scenario.
- Adds reverse edges by default so links behave bidirectionally in analysis.
-
- When ``compact=True``, edges receive monotonically increasing integer keys and
- only essential attributes (``capacity`` and ``cost``) are set. When
- ``compact=False``, original network link IDs are used as edge keys and also
- stored on edges as ``link_id`` for traceability, along with any custom
- link attrs.
-
- Args:
- add_reverse: If True, add a reverse edge for each link.
- compact: If True, omit non-essential attributes and use integer keys.
-
- Returns:
- StrictMultiDiGraph: Directed multigraph representation of the network.
- """
- return self._build_graph(add_reverse=add_reverse, compact=compact)
-
- def _build_graph(
- self,
- add_reverse: bool = True,
- excluded_nodes: Optional[AbstractSet[str]] = None,
- excluded_links: Optional[AbstractSet[str]] = None,
- *,
- compact: bool = False,
- ) -> StrictMultiDiGraph:
- """Create a StrictMultiDiGraph with optional exclusions.
-
- Args:
- add_reverse: If True, add reverse edges for each link.
- excluded_nodes: Additional nodes to exclude beyond disabled ones.
- excluded_links: Additional links to exclude beyond disabled ones.
-
- Returns:
- StrictMultiDiGraph with specified exclusions applied.
- """
- if excluded_nodes is None:
- excluded_nodes = set()
- if excluded_links is None:
- excluded_links = set()
-
- graph = StrictMultiDiGraph()
-
- # Collect all nodes to exclude (scenario-disabled + analysis exclusions)
- all_excluded_nodes = excluded_nodes | {
- name for name, nd in self.nodes.items() if nd.disabled
- }
-
- # Add enabled nodes
- for node_name, node in self.nodes.items():
- if node_name not in all_excluded_nodes:
- if compact:
- graph.add_node(node_name)
- else:
- graph.add_node(node_name, **node.attrs)
-
- # Add enabled links
- for link_id, link in self.links.items():
- if (
- link_id not in excluded_links
- and not link.disabled
- and link.source not in all_excluded_nodes
- and link.target not in all_excluded_nodes
- ):
- if compact:
- # Forward edge with minimal attributes
- graph.add_edge(
- link.source,
- link.target,
- capacity=link.capacity,
- cost=link.cost,
- )
- if add_reverse:
- graph.add_edge(
- link.target,
- link.source,
- capacity=link.capacity,
- cost=link.cost,
- )
- else:
- # Preserve original link id as attribute; edge key is assigned by graph
- graph.add_edge(
- link.source,
- link.target,
- capacity=link.capacity,
- cost=link.cost,
- link_id=link_id,
- **link.attrs,
- )
- if add_reverse:
- graph.add_edge(
- link.target,
- link.source,
- capacity=link.capacity,
- cost=link.cost,
- link_id=link_id,
- **link.attrs,
- )
-
- return graph
-
def select_node_groups_by_path(self, path: str) -> Dict[str, List[Node]]:
r"""Select and group nodes by regex on name or by attribute directive.
@@ -295,6 +163,10 @@ def select_node_groups_by_path(self, path: str) -> Dict[str, List[Node]]:
Returns:
Mapping from group label to list of nodes.
"""
+ # Check cache first
+ if path in self._selection_cache:
+ return self._selection_cache[path]
+
# Strict attribute directive detection: attr:
attr_match = re.fullmatch(r"attr:([A-Za-z_]\w*)", path)
if attr_match:
@@ -310,6 +182,7 @@ def select_node_groups_by_path(self, path: str) -> Dict[str, List[Node]]:
"Attribute directive '%s' matched no nodes (attribute missing)",
path,
)
+ self._selection_cache[path] = groups_by_attr
return groups_by_attr
# Fallback: regex over node.name
@@ -326,38 +199,9 @@ def select_node_groups_by_path(self, path: str) -> Dict[str, List[Node]]:
label = path
groups_map.setdefault(label, []).append(node)
+ self._selection_cache[path] = groups_map
return groups_map
- def max_flow(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- ) -> Dict[Tuple[str, str], float]:
- """Compute maximum flow between node groups in this network.
-
- Args:
- source_path: Regex for selecting source nodes or ``attr:``.
- sink_path: Regex for selecting sink nodes or ``attr:``.
- mode: ``"combine"`` to merge all matching sources and sinks into
- one group each; ``"pairwise"`` to compute per-group pairs.
- shortest_path: If True, restrict flows to shortest paths.
- flow_placement: Strategy for splitting flow among equal-cost paths.
-
- Returns:
- Mapping from ``(source_label, sink_label)`` to flow values.
- """
- return _solver_max_flow(
- self,
- source_path,
- sink_path,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=flow_placement,
- )
-
def disable_node(self, node_name: str) -> None:
"""Mark a node as disabled.
@@ -543,238 +387,46 @@ def enable_risk_group(self, name: str, recursive: bool = True) -> None:
if link_obj.risk_groups & to_enable:
self.enable_link(link_id)
- def saturated_edges(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- tolerance: float = 1e-10,
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- ) -> Dict[Tuple[str, str], List[Tuple[str, str, str]]]:
- """Identify saturated edges in max flow solutions.
-
- Args:
- source_path: Regex for selecting source nodes or ``attr:``.
- sink_path: Regex for selecting sink nodes or ``attr:``.
- mode: ``"combine"`` or ``"pairwise"``.
- tolerance: Threshold for considering an edge saturated.
- shortest_path: If True, restrict flows to shortest paths.
- flow_placement: Strategy for splitting among equal-cost paths.
-
- Returns:
- Mapping from ``(source_label, sink_label)`` to lists of saturated
- edge tuples ``(u, v, key)``.
- """
- return _solver_saturated_edges(
- self,
- source_path,
- sink_path,
- mode=mode,
- tolerance=tolerance,
- shortest_path=shortest_path,
- flow_placement=flow_placement,
- )
-
- def sensitivity_analysis(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- change_amount: float = 1.0,
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- ) -> Dict[Tuple[str, str], Dict[Tuple[str, str, str], float]]:
- """Perform sensitivity analysis for capacity changes.
-
- Args:
- source_path: Regex for selecting source nodes or ``attr:``.
- sink_path: Regex for selecting sink nodes or ``attr:``.
- mode: ``"combine"`` or ``"pairwise"``.
- change_amount: Capacity change applied during analysis.
- shortest_path: If True, restrict flows to shortest paths.
- flow_placement: Strategy for splitting among equal-cost paths.
-
- Returns:
- Mapping from ``(source_label, sink_label)`` to per-edge sensitivity
- values, keyed by edge ``(u, v, key)``.
- """
- return _solver_sensitivity_analysis(
- self,
- source_path,
- sink_path,
- mode=mode,
- change_amount=change_amount,
- shortest_path=shortest_path,
- flow_placement=flow_placement,
- )
-
- def max_flow_with_summary(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- ) -> Dict[Tuple[str, str], Tuple[float, FlowSummary]]:
- """Compute maximum flow and return per-pair analytics summary.
-
- Args:
- source_path: Regex for selecting source nodes or ``attr:``.
- sink_path: Regex for selecting sink nodes or ``attr:``.
- mode: ``"combine"`` or ``"pairwise"``.
- shortest_path: If True, restrict flows to shortest paths.
- flow_placement: Strategy for splitting among equal-cost paths.
-
- Returns:
- Mapping from ``(source_label, sink_label)`` to
- ``(flow_value, summary)``.
- """
- return _solver_max_flow_with_summary(
- self,
- source_path,
- sink_path,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=flow_placement,
- )
-
- def max_flow_with_graph(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- ) -> Dict[Tuple[str, str], Tuple[float, StrictMultiDiGraph]]:
- """Compute maximum flow and return flow-assigned graphs.
-
- Args:
- source_path: Regex for selecting source nodes or ``attr:``.
- sink_path: Regex for selecting sink nodes or ``attr:``.
- mode: ``"combine"`` or ``"pairwise"``.
- shortest_path: If True, restrict flows to shortest paths.
- flow_placement: Strategy for splitting among equal-cost paths.
-
- Returns:
- Mapping from ``(source_label, sink_label)`` to ``(flow_value, graph)``.
- """
- return _solver_max_flow_with_graph(
- self,
- source_path,
- sink_path,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=flow_placement,
- )
-
- def max_flow_detailed(
+ def build_core_graph(
self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- ) -> Dict[Tuple[str, str], Tuple[float, FlowSummary, StrictMultiDiGraph]]:
- """Compute maximum flow with both analytics summary and graph.
-
- Args:
- source_path: Regex for selecting source nodes or ``attr:``.
- sink_path: Regex for selecting sink nodes or ``attr:``.
- mode: ``"combine"`` or ``"pairwise"``.
- shortest_path: If True, restrict flows to shortest paths.
- flow_placement: Strategy for splitting among equal-cost paths.
-
- Returns:
- Mapping from ``(source_label, sink_label)`` to
- ``(flow_value, summary, graph)``.
- """
- return _solver_max_flow_detailed(
- self,
- source_path,
- sink_path,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=flow_placement,
- )
-
- def shortest_path_costs(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- ) -> Dict[Tuple[str, str], float]:
- """Return minimal path costs between node groups in this network.
-
- Args:
- source_path: Regex or ``attr:`` for source selection.
- sink_path: Regex or ``attr:`` for sink selection.
- mode: "combine" or "pairwise".
-
- Returns:
- Mapping from (source_label, sink_label) to minimal cost; ``inf`` if unreachable.
- """
- return _solver_shortest_path_costs(self, source_path, sink_path, mode=mode)
+ add_reverse: bool = True,
+ augmentations: Optional[List] = None,
+ excluded_nodes: Optional[Set[str]] = None,
+ excluded_links: Optional[Set[str]] = None,
+ ) -> Tuple[Any, Any, Any, Any]:
+        """Build a NetGraph-Core graph representation.
- def shortest_paths(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- *,
- split_parallel_edges: bool = False,
- ) -> Dict[Tuple[str, str], List[_NGPath]]:
- """Return concrete shortest path(s) between selected node groups.
+ Convenience method that delegates to build_graph() from ngraph.adapters.core.
+ Supports augmentations (for pseudo nodes) and exclusions (for filtered topology).
Args:
- source_path: Regex or ``attr:`` for source selection.
- sink_path: Regex or ``attr:`` for sink selection.
- mode: "combine" or "pairwise".
- split_parallel_edges: Expand parallel edges into distinct paths when True.
+ add_reverse: If True, add reverse edges for bidirectional links.
+ augmentations: Optional list of AugmentationEdge for pseudo nodes.
+ excluded_nodes: Optional set of node names to exclude.
+ excluded_links: Optional set of link IDs to exclude.
Returns:
- Mapping from (source_label, sink_label) to lists of Path.
- """
- return _solver_shortest_paths(
- self,
- source_path,
- sink_path,
- mode=mode,
- split_parallel_edges=split_parallel_edges,
- )
+ Tuple of (graph_handle, multidigraph, edge_mapper, node_mapper):
+ - graph_handle: netgraph_core.Graph (opaque handle, not picklable)
+ - multidigraph: netgraph_core.StrictMultiDiGraph (picklable)
+ - edge_mapper: EdgeMapper for link_id <-> ext_edge_id translation
+ - node_mapper: NodeMapper for name <-> ID translation
- def k_shortest_paths(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "pairwise",
- *,
- max_k: int = 3,
- max_path_cost: float = float("inf"),
- max_path_cost_factor: Optional[float] = None,
- split_parallel_edges: bool = False,
- ) -> Dict[Tuple[str, str], List[_NGPath]]:
- """Return up to K shortest paths per group pair.
-
- Args:
- source_path: Regex or ``attr:`` for source selection.
- sink_path: Regex or ``attr:`` for sink selection.
- mode: "pairwise" (default) or "combine".
- max_k: Max number of paths per pair.
- max_path_cost: Absolute cost threshold.
- max_path_cost_factor: Relative threshold versus best path.
- split_parallel_edges: Expand parallel edges into distinct paths when True.
-
- Returns:
- Mapping from (source_label, sink_label) to lists of Path.
+ Raises:
+ ImportError: If netgraph_core is not installed.
"""
- return _solver_k_shortest_paths(
+ try:
+ from ngraph.adapters.core import build_graph
+ except ImportError as e:
+ raise ImportError(
+ "netgraph_core module not found. Ensure NetGraph-Core is installed. "
+ "See: https://github.com/networmix/NetGraph-Core"
+ ) from e
+
+ return build_graph(
self,
- source_path,
- sink_path,
- mode=mode,
- max_k=max_k,
- max_path_cost=max_path_cost,
- max_path_cost_factor=max_path_cost_factor,
- split_parallel_edges=split_parallel_edges,
+ add_reverse=add_reverse,
+ augmentations=augmentations,
+ excluded_nodes=excluded_nodes,
+ excluded_links=excluded_links,
)
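
A minimal usage sketch of the new `build_core_graph` method, following its documented return contract; the network instance name and the excluded node/link identifiers below are illustrative only:

```python
# Sketch: unpack the documented 4-tuple from build_core_graph.
# Assumes `net` is an ngraph Network and NetGraph-Core is installed.
graph_handle, multidigraph, edge_mapper, node_mapper = net.build_core_graph(
    add_reverse=True,
    excluded_nodes={"spine1"},   # hypothetical node name
    excluded_links={"link-42"},  # hypothetical link ID
)

# Per the docstring: graph_handle (netgraph_core.Graph) is an opaque,
# non-picklable handle; use the picklable multidigraph when crossing
# process boundaries, and the mappers to translate IDs back to model names.
```
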
diff --git a/ngraph/paths/path.py b/ngraph/model/path.py
similarity index 53%
rename from ngraph/paths/path.py
rename to ngraph/model/path.py
index 5610dab..03e7859 100644
--- a/ngraph/paths/path.py
+++ b/ngraph/model/path.py
@@ -4,87 +4,93 @@
cost. Cached properties expose derived sequences for nodes and edges, and
helpers provide equality, ordering by cost, and sub-path extraction with cost
recalculation.
+
+Breaking change from v1.x: Edge references now use EdgeRef (link_id + direction)
+instead of integer edge keys for stable scenario-level edge identification.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from functools import cached_property
-from typing import Any, Iterator, Set, Tuple
+from typing import TYPE_CHECKING, Any, Iterator, Set, Tuple
+
+from ngraph.types.base import Cost
+from ngraph.types.dto import EdgeRef
-from ngraph.algorithms.base import Cost, PathTuple
-from ngraph.graph.strict_multidigraph import EdgeID, NodeID, StrictMultiDiGraph
+if TYPE_CHECKING:
+ from ngraph.graph.strict_multidigraph import ( # pyright: ignore[reportMissingImports]
+ StrictMultiDiGraph,
+ )
@dataclass
class Path:
"""Represents a single path in the network.
+ Breaking change from v1.x: path field now uses EdgeRef (link_id + direction)
+ instead of integer edge keys for stable scenario-level edge identification.
+
Attributes:
- path_tuple (PathTuple):
- A sequence of path elements. Each element is a tuple of the form
- (node_id, (edge_id_1, edge_id_2, ...)), where the final element typically has an empty tuple.
- cost (Cost):
- The total numeric cost (e.g., distance or metric) of the path.
- edges (Set[EdgeID]):
- A set of all edge IDs encountered in the path.
- nodes (Set[NodeID]):
- A set of all node IDs encountered in the path.
- edge_tuples (Set[Tuple[EdgeID, ...]]):
- A set of all tuples of parallel edges from each path element (including the final empty tuple).
+ path: Sequence of (node_name, (edge_refs...)) tuples representing the path.
+ The final element typically has an empty tuple of edge refs.
+ cost: Total numeric cost (e.g., distance or metric) of the path.
+ edges: Set of all EdgeRefs encountered in the path.
+ nodes: Set of all node names encountered in the path.
+ edge_tuples: Set of all tuples of parallel EdgeRefs from each path element.
"""
- path_tuple: PathTuple
+ path: Tuple[Tuple[str, Tuple[EdgeRef, ...]], ...]
cost: Cost
- edges: Set[EdgeID] = field(init=False, default_factory=set, repr=False)
- nodes: Set[NodeID] = field(init=False, default_factory=set, repr=False)
- edge_tuples: Set[Tuple[EdgeID, ...]] = field(
+ edges: Set[EdgeRef] = field(init=False, default_factory=set, repr=False)
+ nodes: Set[str] = field(init=False, default_factory=set, repr=False)
+ edge_tuples: Set[Tuple[EdgeRef, ...]] = field(
init=False, default_factory=set, repr=False
)
def __post_init__(self) -> None:
- """Populate `edges`, `nodes`, and `edge_tuples` from `path_tuple`."""
- for node, parallel_edges in self.path_tuple:
+ """Populate `edges`, `nodes`, and `edge_tuples` from `path`."""
+ for node, parallel_edges in self.path:
self.nodes.add(node)
self.edges.update(parallel_edges)
self.edge_tuples.add(parallel_edges)
- def __getitem__(self, idx: int) -> Tuple[NodeID, Tuple[EdgeID, ...]]:
+ def __getitem__(self, idx: int) -> Tuple[str, Tuple[EdgeRef, ...]]:
"""Return the (node, parallel_edges) tuple at the specified index.
Args:
idx: The index of the desired path element.
Returns:
- A tuple containing the node ID and its associated parallel edges.
+ A tuple containing the node name and its associated parallel edge refs.
"""
- return self.path_tuple[idx]
+ return self.path[idx]
- def __iter__(self) -> Iterator[Tuple[NodeID, Tuple[EdgeID, ...]]]:
+ def __iter__(self) -> Iterator[Tuple[str, Tuple[EdgeRef, ...]]]:
"""Iterate over each (node, parallel_edges) element in the path.
Yields:
- Each element from `path_tuple` in order.
+ Each element from `path` in order.
"""
- return iter(self.path_tuple)
+ return iter(self.path)
def __len__(self) -> int:
"""Return the number of elements in the path.
Returns:
- The length of `path_tuple`.
+ The length of `path`.
"""
- return len(self.path_tuple)
+ return len(self.path)
@property
- def src_node(self) -> NodeID:
+ def src_node(self) -> str:
"""Return the first node in the path (the source node)."""
- return self.path_tuple[0][0]
+ return self.path[0][0]
@property
- def dst_node(self) -> NodeID:
+ def dst_node(self) -> str:
"""Return the last node in the path (the destination node)."""
- return self.path_tuple[-1][0]
+ return self.path[-1][0]
def __lt__(self, other: Any) -> bool:
"""Compare two paths based on their cost.
@@ -107,20 +113,20 @@ def __eq__(self, other: Any) -> bool:
other: Another Path instance.
Returns:
- True if both the `path_tuple` and `cost` are equal; otherwise, False.
+ True if both the `path` and `cost` are equal; otherwise, False.
Returns NotImplemented if `other` is not a Path.
"""
if not isinstance(other, Path):
return NotImplemented
- return (self.path_tuple == other.path_tuple) and (self.cost == other.cost)
+ return (self.path == other.path) and (self.cost == other.cost)
def __hash__(self) -> int:
- """Compute a hash based on the (path_tuple, cost) tuple.
+ """Compute a hash based on the (path, cost) tuple.
Returns:
The hash value of this Path.
"""
- return hash((self.path_tuple, self.cost))
+ return hash((self.path, self.cost))
def __repr__(self) -> str:
"""Return a string representation of the path including its tuple and cost.
@@ -128,58 +134,63 @@ def __repr__(self) -> str:
Returns:
A debug-friendly string representation.
"""
- return f"Path({self.path_tuple}, cost={self.cost})"
+ return f"Path({self.path}, cost={self.cost})"
@cached_property
- def edges_seq(self) -> Tuple[Tuple[EdgeID, ...], ...]:
+ def edges_seq(self) -> Tuple[Tuple[EdgeRef, ...], ...]:
"""Return a tuple containing the sequence of parallel-edge tuples for each path element except the last.
Returns:
A tuple of parallel-edge tuples; returns an empty tuple if the path has 1 or fewer elements.
"""
- if len(self.path_tuple) <= 1:
+ if len(self.path) <= 1:
return ()
- return tuple(parallel_edges for _, parallel_edges in self.path_tuple[:-1])
+ return tuple(parallel_edges for _, parallel_edges in self.path[:-1])
@cached_property
- def nodes_seq(self) -> Tuple[NodeID, ...]:
- """Return a tuple of node IDs in order along the path.
+ def nodes_seq(self) -> Tuple[str, ...]:
+ """Return a tuple of node names in order along the path.
Returns:
A tuple containing the ordered sequence of nodes from source to destination.
"""
- return tuple(node for node, _ in self.path_tuple)
+ return tuple(node for node, _ in self.path)
def get_sub_path(
self,
- dst_node: NodeID,
- graph: StrictMultiDiGraph,
+ dst_node: str,
+ graph: StrictMultiDiGraph | None = None,
cost_attr: str = "cost",
) -> Path:
- """Create a sub-path ending at the specified destination node, recalculating the cost.
+ """Create a sub-path ending at the specified destination node.
The sub-path is formed by truncating the original path at the first occurrence
of `dst_node` and ensuring that the final element has an empty tuple of edges.
- The cost is recalculated as the sum of the minimum cost (based on `cost_attr`)
- among parallel edges for each step leading up to (but not including) the target.
+
+ Note: With EdgeRef-based paths, cost recalculation requires graph lookup.
+        The graph parameter is reserved for future implementation. Currently, cost
+        is set to infinity to explicitly indicate it needs recalculation; use
+        `math.isinf(sub_path.cost)` to detect this placeholder before using the cost.
Args:
dst_node: The node at which to truncate the path.
- graph: The graph containing edge attributes.
- cost_attr: The edge attribute name to use for cost (default is "cost").
+ graph: Reserved for future cost recalculation (currently unused).
+ cost_attr: Reserved for future cost recalculation (currently unused).
Returns:
- A new Path instance representing the sub-path from the original source to `dst_node`.
+ A new Path instance representing the sub-path from the original source
+            to `dst_node`. Cost is set to infinity to indicate recalculation is needed.
Raises:
ValueError: If `dst_node` is not found in the current path.
"""
- edges_map = graph.get_edges()
+ # Suppress unused parameter warnings - reserved for future cost recalculation
+ _ = graph, cost_attr
+
new_elements = []
- new_cost = 0.0
found = False
- for node, parallel_edges in self.path_tuple:
+ for node, parallel_edges in self.path:
if node == dst_node:
found = True
# Append the target node with an empty edge tuple.
@@ -187,13 +198,10 @@ def get_sub_path(
break
new_elements.append((node, parallel_edges))
- if parallel_edges:
- # Accumulate cost using the minimum cost among parallel edges.
- new_cost += min(
- edges_map[e_id][3][cost_attr] for e_id in parallel_edges
- )
if not found:
raise ValueError(f"Node '{dst_node}' not found in path.")
- return Path(tuple(new_elements), new_cost)
+ # Cost set to infinity to explicitly signal recalculation is needed.
+ # EdgeRef-based cost calculation requires mapping back to graph edges.
+ return Path(tuple(new_elements), float("inf"))
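
Since `Path.path_tuple` becomes `Path.path` and `get_sub_path` now returns an infinity-cost placeholder instead of recalculating, a small sketch of the new shape may help; the `EdgeRef` constructor call is an assumption (the diff documents EdgeRef only as link_id plus direction):

```python
import math

from ngraph.model.path import Path
from ngraph.types.dto import EdgeRef

# Hypothetical EdgeRef construction: link ID plus direction.
e1 = EdgeRef("linkA", "forward")
e2 = EdgeRef("linkB", "forward")

# Two-hop path S -> M -> D; the final element carries an empty edge tuple.
p = Path((("S", (e1,)), ("M", (e2,)), ("D", ())), cost=2.0)

sub = p.get_sub_path("M")
# Cost is now a placeholder that signals "recalculate me".
assert math.isinf(sub.cost)
```
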
diff --git a/ngraph/model/view.py b/ngraph/model/view.py
deleted file mode 100644
index 5a84abb..0000000
--- a/ngraph/model/view.py
+++ /dev/null
@@ -1,503 +0,0 @@
-"""Read-only view of a ``Network`` with temporary exclusions.
-
-This module defines a view over ``Network`` objects that can exclude nodes and
-links for analysis without mutating the base network. It supports what-if
-analysis, including failure simulations.
-"""
-
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple
-
-from ngraph.solver.maxflow import (
- max_flow as _solver_max_flow,
-)
-from ngraph.solver.maxflow import (
- max_flow_detailed as _solver_max_flow_detailed,
-)
-from ngraph.solver.maxflow import (
- max_flow_with_graph as _solver_max_flow_with_graph,
-)
-from ngraph.solver.maxflow import (
- max_flow_with_summary as _solver_max_flow_with_summary,
-)
-from ngraph.solver.maxflow import (
- saturated_edges as _solver_saturated_edges,
-)
-from ngraph.solver.maxflow import (
- sensitivity_analysis as _solver_sensitivity_analysis,
-)
-from ngraph.solver.paths import (
- k_shortest_paths as _solver_k_shortest_paths,
-)
-from ngraph.solver.paths import (
- shortest_path_costs as _solver_shortest_path_costs,
-)
-from ngraph.solver.paths import (
- shortest_paths as _solver_shortest_paths,
-)
-
-if TYPE_CHECKING:
- from ngraph.algorithms.base import FlowPlacement
- from ngraph.algorithms.types import FlowSummary
- from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
- from ngraph.model.network import Link, Network, Node, RiskGroup
- from ngraph.paths.path import Path as _NGPath
-
-__all__ = ["NetworkView"]
-
-
-@dataclass(frozen=True)
-class NetworkView:
- """Read-only overlay that hides selected nodes/links from a base Network.
-
- NetworkView provides filtered access to a Network where both scenario-disabled
- elements (Node.disabled, Link.disabled) and analysis-excluded elements are
- hidden from algorithms. This enables failure simulation and what-if analysis
- without mutating the base Network.
-
- Multiple NetworkView instances can safely operate on the same base Network
- concurrently, each with different exclusion sets.
-
- Example:
- ```python
- # Create view excluding specific nodes for failure analysis
- view = NetworkView.from_excluded_sets(
- base_network,
- excluded_nodes=["node1", "node2"],
- excluded_links=["link1"]
- )
-
- # Run analysis on filtered topology
- flows = view.max_flow("source.*", "sink.*")
- ```
-
- Attributes:
- _base: The underlying Network object.
- _excluded_nodes: Frozen set of node names to exclude from analysis.
- _excluded_links: Frozen set of link IDs to exclude from analysis.
- """
-
- _base: "Network"
- _excluded_nodes: frozenset[str] = frozenset()
- _excluded_links: frozenset[str] = frozenset()
-
- def is_node_hidden(self, name: str) -> bool:
- """Check if a node is hidden in this view.
-
- Args:
- name: Name of the node to check.
-
- Returns:
- True if the node is hidden (disabled or excluded), False otherwise.
- """
- node = self._base.nodes.get(name)
- if node is None:
- return True # Node doesn't exist, treat as hidden
- return node.disabled or name in self._excluded_nodes
-
- def is_link_hidden(self, link_id: str) -> bool:
- """Check if a link is hidden in this view.
-
- Args:
- link_id: ID of the link to check.
-
- Returns:
- True if the link is hidden (disabled or excluded), False otherwise.
- """
- link = self._base.links.get(link_id)
- if link is None:
- return True # Link doesn't exist, treat as hidden
- return (
- link.disabled
- or link_id in self._excluded_links
- or self.is_node_hidden(link.source)
- or self.is_node_hidden(link.target)
- )
-
- @property
- def nodes(self) -> Dict[str, "Node"]:
- """Get visible nodes in this view.
-
- Returns:
- Dictionary mapping node names to Node objects for all visible nodes.
- """
- return {
- name: node
- for name, node in self._base.nodes.items()
- if not self.is_node_hidden(name)
- }
-
- @property
- def links(self) -> Dict[str, "Link"]:
- """Get visible links in this view.
-
- Returns:
- Dictionary mapping link IDs to Link objects for all visible links.
- """
- return {
- link_id: link
- for link_id, link in self._base.links.items()
- if not self.is_link_hidden(link_id)
- }
-
- @property
- def risk_groups(self) -> Dict[str, "RiskGroup"]:
- """Get all risk groups from the base network.
-
- Returns:
- Dictionary mapping risk group names to RiskGroup objects.
- """
- return self._base.risk_groups
-
- @property
- def attrs(self) -> Dict[str, Any]:
- """Get network attributes from the base network.
-
- Returns:
- Dictionary of network attributes.
- """
- return self._base.attrs
-
- def to_strict_multidigraph(
- self, add_reverse: bool = True, *, compact: bool = False
- ) -> "StrictMultiDiGraph":
- """Create a StrictMultiDiGraph representation of this view.
-
- Creates a filtered graph excluding disabled nodes/links and analysis exclusions.
- Results are cached for performance when multiple flow operations are called.
-
- Args:
- add_reverse: If True, add reverse edges for each link.
-
- Returns:
- StrictMultiDiGraph with scenario-disabled and analysis-excluded
- elements filtered out.
- """
- # Get or initialize cache (handle frozen dataclass)
- cache = getattr(self, "_graph_cache", None)
- if cache is None:
- cache = {}
- object.__setattr__(self, "_graph_cache", cache)
-
- # Use simple cache based on (add_reverse, compact)
- cache_key = (bool(add_reverse), bool(compact))
- if cache_key not in cache:
- cache[cache_key] = self._base._build_graph(
- add_reverse=add_reverse,
- excluded_nodes=self._excluded_nodes,
- excluded_links=self._excluded_links,
- compact=compact,
- )
- return cache[cache_key]
-
- def select_node_groups_by_path(self, path: str) -> Dict[str, List["Node"]]:
- """Select and group visible nodes by regex or attribute directive.
-
- Follows the same semantics as ``Network.select_node_groups_by_path`` but
- filters out nodes hidden in this view (disabled or excluded).
-
- Args:
- path: Regex pattern on node.name, or strict attribute directive ``attr:``.
-
- Returns:
- Dictionary mapping group labels to lists of matching visible nodes.
- """
- # Get groups from base network, then filter to visible nodes
- base_groups = self._base.select_node_groups_by_path(path)
- filtered_groups = {}
-
- for label, nodes in base_groups.items():
- visible_nodes = [
- node for node in nodes if not self.is_node_hidden(node.name)
- ]
- if visible_nodes: # Only include groups with visible nodes
- filtered_groups[label] = visible_nodes
-
- return filtered_groups
-
- def max_flow(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: Optional["FlowPlacement"] = None,
- ) -> Dict[Tuple[str, str], float]:
- """Compute maximum flow between node groups in this view.
-
- Args:
- source_path: Regex pattern for selecting source nodes.
- sink_path: Regex pattern for selecting sink nodes.
- mode: Either "combine" or "pairwise".
- shortest_path: If True, flows are constrained to shortest paths.
- flow_placement: Flow placement strategy.
-
- Returns:
- Dictionary mapping (source_label, sink_label) to flow values.
- """
- return _solver_max_flow(
- self,
- source_path,
- sink_path,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=(
- flow_placement
- or __import__(
- "ngraph.algorithms.base", fromlist=["FlowPlacement"]
- ).FlowPlacement.PROPORTIONAL
- ),
- )
-
- def max_flow_with_summary(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: Optional["FlowPlacement"] = None,
- ) -> Dict[Tuple[str, str], Tuple[float, "FlowSummary"]]:
- """Compute maximum flow with detailed analytics summary.
-
- Args:
- source_path: Regex pattern for selecting source nodes.
- sink_path: Regex pattern for selecting sink nodes.
- mode: Either "combine" or "pairwise".
- shortest_path: If True, flows are constrained to shortest paths.
- flow_placement: Flow placement strategy.
-
- Returns:
- Dictionary mapping (source_label, sink_label) to (flow_value, summary) tuples.
- """
- return _solver_max_flow_with_summary(
- self,
- source_path,
- sink_path,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=(
- flow_placement
- or __import__(
- "ngraph.algorithms.base", fromlist=["FlowPlacement"]
- ).FlowPlacement.PROPORTIONAL
- ),
- )
-
- def max_flow_with_graph(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: Optional["FlowPlacement"] = None,
- ) -> Dict[Tuple[str, str], Tuple[float, "StrictMultiDiGraph"]]:
- """Compute maximum flow and return flow-assigned graph.
-
- Args:
- source_path: Regex pattern for selecting source nodes.
- sink_path: Regex pattern for selecting sink nodes.
- mode: Either "combine" or "pairwise".
- shortest_path: If True, flows are constrained to shortest paths.
- flow_placement: Flow placement strategy.
-
- Returns:
- Dictionary mapping (source_label, sink_label) to (flow_value, flow_graph) tuples.
- """
- return _solver_max_flow_with_graph(
- self,
- source_path,
- sink_path,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=(
- flow_placement
- or __import__(
- "ngraph.algorithms.base", fromlist=["FlowPlacement"]
- ).FlowPlacement.PROPORTIONAL
- ),
- )
-
- def max_flow_detailed(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: Optional["FlowPlacement"] = None,
- ) -> Dict[Tuple[str, str], Tuple[float, "FlowSummary", "StrictMultiDiGraph"]]:
- """Compute maximum flow with complete analytics and graph.
-
- Args:
- source_path: Regex pattern for selecting source nodes.
- sink_path: Regex pattern for selecting sink nodes.
- mode: Either "combine" or "pairwise".
- shortest_path: If True, flows are constrained to shortest paths.
- flow_placement: Flow placement strategy.
-
- Returns:
- Dictionary mapping (source_label, sink_label) to
- (flow_value, summary, flow_graph) tuples.
- """
- return _solver_max_flow_detailed(
- self,
- source_path,
- sink_path,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=(
- flow_placement
- or __import__(
- "ngraph.algorithms.base", fromlist=["FlowPlacement"]
- ).FlowPlacement.PROPORTIONAL
- ),
- )
-
- def saturated_edges(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- tolerance: float = 1e-10,
- shortest_path: bool = False,
- flow_placement: Optional["FlowPlacement"] = None,
- ) -> Dict[Tuple[str, str], List[Tuple[str, str, str]]]:
- """Identify saturated edges in max flow solutions.
-
- Args:
- source_path: Regex pattern for selecting source nodes.
- sink_path: Regex pattern for selecting sink nodes.
- mode: Either "combine" or "pairwise".
- tolerance: Tolerance for considering an edge saturated.
- shortest_path: If True, flows are constrained to shortest paths.
- flow_placement: Flow placement strategy.
-
- Returns:
- Dictionary mapping (source_label, sink_label) to lists of
- saturated edge tuples (u, v, key).
- """
- return _solver_saturated_edges(
- self,
- source_path,
- sink_path,
- mode=mode,
- tolerance=tolerance,
- shortest_path=shortest_path,
- flow_placement=(
- flow_placement
- or __import__(
- "ngraph.algorithms.base", fromlist=["FlowPlacement"]
- ).FlowPlacement.PROPORTIONAL
- ),
- )
-
- def sensitivity_analysis(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- change_amount: float = 1.0,
- shortest_path: bool = False,
- flow_placement: Optional["FlowPlacement"] = None,
- ) -> Dict[Tuple[str, str], Dict[Tuple[str, str, str], float]]:
- """Perform sensitivity analysis on capacity changes.
-
- Args:
- source_path: Regex pattern for selecting source nodes.
- sink_path: Regex pattern for selecting sink nodes.
- mode: Either "combine" or "pairwise".
- change_amount: Amount to change capacity for testing.
- shortest_path: If True, flows are constrained to shortest paths.
- flow_placement: Flow placement strategy.
-
- Returns:
- Dictionary mapping (source_label, sink_label) to dictionaries
- of edge sensitivity values.
- """
- return _solver_sensitivity_analysis(
- self,
- source_path,
- sink_path,
- mode=mode,
- change_amount=change_amount,
- shortest_path=shortest_path,
- flow_placement=(
- flow_placement
- or __import__(
- "ngraph.algorithms.base", fromlist=["FlowPlacement"]
- ).FlowPlacement.PROPORTIONAL
- ),
- )
-
- def shortest_path_costs(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- ) -> Dict[Tuple[str, str], float]:
- """Return minimal path costs between node groups in this view."""
- return _solver_shortest_path_costs(self, source_path, sink_path, mode=mode)
-
- def shortest_paths(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "combine",
- *,
- split_parallel_edges: bool = False,
- ) -> Dict[Tuple[str, str], List[_NGPath]]:
- """Return concrete shortest path(s) between selected node groups."""
- return _solver_shortest_paths(
- self,
- source_path,
- sink_path,
- mode=mode,
- split_parallel_edges=split_parallel_edges,
- )
-
- def k_shortest_paths(
- self,
- source_path: str,
- sink_path: str,
- mode: str = "pairwise",
- *,
- max_k: int = 3,
- max_path_cost: float = float("inf"),
- max_path_cost_factor: Optional[float] = None,
- split_parallel_edges: bool = False,
- ) -> Dict[Tuple[str, str], List[_NGPath]]:
- """Return up to K shortest paths per group pair."""
- return _solver_k_shortest_paths(
- self,
- source_path,
- sink_path,
- mode=mode,
- max_k=max_k,
- max_path_cost=max_path_cost,
- max_path_cost_factor=max_path_cost_factor,
- split_parallel_edges=split_parallel_edges,
- )
-
- @classmethod
- def from_excluded_sets(
- cls,
- base: "Network",
- excluded_nodes: Iterable[str] = (),
- excluded_links: Iterable[str] = (),
- ) -> "NetworkView":
- """Create a NetworkView with specified exclusions.
-
- Args:
- base: Base Network to create view over.
- excluded_nodes: Node names to exclude from analysis.
- excluded_links: Link IDs to exclude from analysis.
-
- Returns:
- NetworkView with specified exclusions applied.
- """
- return cls(
- _base=base,
- _excluded_nodes=frozenset(excluded_nodes),
- _excluded_links=frozenset(excluded_links),
- )
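
With the overlay class gone, the exclusion semantics it provided appear to be absorbed by `build_core_graph`'s `excluded_nodes`/`excluded_links` parameters. A hedged migration sketch under that assumption; the analysis entry points for the core representation are not shown in this diff:

```python
# Removed in this diff: overlay-based what-if analysis.
# view = NetworkView.from_excluded_sets(
#     net, excluded_nodes=["node1", "node2"], excluded_links=["link1"]
# )
# flows = view.max_flow("source.*", "sink.*")

# Assumed replacement: bake the same exclusions into the core-graph build.
_, multidigraph, edge_mapper, node_mapper = net.build_core_graph(
    excluded_nodes={"node1", "node2"},
    excluded_links={"link1"},
)
```
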
diff --git a/ngraph/monte_carlo/__init__.py b/ngraph/monte_carlo/__init__.py
deleted file mode 100644
index a7b754e..0000000
--- a/ngraph/monte_carlo/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""Monte Carlo analysis helpers for FailureManager simulations.
-
-Provides picklable analysis functions and structured result classes used by
-``FailureManager.run_monte_carlo_analysis()`` when evaluating failure patterns.
-Functions accept only hashable arguments to support multiprocessing and caching.
-"""
-
-from .functions import (
- demand_placement_analysis,
- max_flow_analysis,
- sensitivity_analysis,
-)
-from .results import SensitivityResults
-
-__all__ = [
- "max_flow_analysis",
- "demand_placement_analysis",
- "sensitivity_analysis",
- "SensitivityResults",
-]
diff --git a/ngraph/monte_carlo/functions.py b/ngraph/monte_carlo/functions.py
deleted file mode 100644
index b8668d7..0000000
--- a/ngraph/monte_carlo/functions.py
+++ /dev/null
@@ -1,321 +0,0 @@
-"""Picklable Monte Carlo analysis functions for FailureManager simulations.
-
-These functions are designed for use with FailureManager.run_monte_carlo_analysis()
-and follow the pattern: analysis_func(network_view: NetworkView, **kwargs) -> Any.
-
-All functions accept only simple, hashable parameters to ensure compatibility
-with FailureManager's caching and multiprocessing systems for Monte Carlo
-failure analysis scenarios.
-
-This module provides only computation functions. Visualization and notebook
-analysis live in external packages.
-"""
-
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Any
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.demand.manager.manager import TrafficManager
-from ngraph.demand.matrix import TrafficMatrixSet
-from ngraph.demand.spec import TrafficDemand
-from ngraph.results.flow import FlowEntry, FlowIterationResult, FlowSummary
-
-if TYPE_CHECKING:
- from ngraph.model.view import NetworkView
-
-
-def max_flow_analysis(
- network_view: "NetworkView",
- source_regex: str,
- sink_regex: str,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- include_flow_details: bool = False,
- include_min_cut: bool = False,
- **kwargs,
-) -> FlowIterationResult:
- """Analyze maximum flow capacity between node groups.
-
- Args:
- network_view: NetworkView with potential exclusions applied.
- source_regex: Regex pattern for source node groups.
- sink_regex: Regex pattern for sink node groups.
- mode: Flow analysis mode ("combine" or "pairwise").
- shortest_path: Whether to use shortest paths only.
- flow_placement: Flow placement strategy.
- include_flow_details: Whether to collect cost distribution and similar details.
- include_min_cut: Whether to include min-cut edge list in entry data.
- **kwargs: Ignored. Accepted for interface compatibility.
-
- Returns:
- FlowIterationResult describing this iteration.
- """
- flow_entries: list[FlowEntry] = []
- total_demand = 0.0
- total_placed = 0.0
-
- if include_flow_details or include_min_cut:
- flows = network_view.max_flow_with_summary(
- source_regex,
- sink_regex,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=flow_placement,
- )
- for (src, dst), (val, summary) in flows.items():
- value = float(val)
- cost_dist = getattr(summary, "cost_distribution", {}) or {}
- min_cut = getattr(summary, "min_cut", []) or []
- entry = FlowEntry(
- source=str(src),
- destination=str(dst),
- priority=0,
- demand=value,
- placed=value,
- dropped=0.0,
- cost_distribution=(
- {float(k): float(v) for k, v in cost_dist.items()}
- if include_flow_details
- else {}
- ),
- data=(
- {"edges": [str(e) for e in min_cut], "edges_kind": "min_cut"}
- if include_min_cut and min_cut
- else {}
- ),
- )
- flow_entries.append(entry)
- total_demand += value
- total_placed += value
- else:
- flows = network_view.max_flow(
- source_regex,
- sink_regex,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=flow_placement,
- )
- for (src, dst), val in flows.items():
- value = float(val)
- entry = FlowEntry(
- source=str(src),
- destination=str(dst),
- priority=0,
- demand=value,
- placed=value,
- dropped=0.0,
- )
- flow_entries.append(entry)
- total_demand += value
- total_placed += value
-
- overall_ratio = (total_placed / total_demand) if total_demand > 0 else 1.0
- dropped_flows = sum(1 for e in flow_entries if e.dropped > 0.0)
- summary = FlowSummary(
- total_demand=total_demand,
- total_placed=total_placed,
- overall_ratio=overall_ratio,
- dropped_flows=dropped_flows,
- num_flows=len(flow_entries),
- )
- return FlowIterationResult(flows=flow_entries, summary=summary)
-
-
-def demand_placement_analysis(
- network_view: "NetworkView",
- demands_config: list[dict[str, Any]],
- placement_rounds: int | str = "auto",
- include_flow_details: bool = False,
- include_used_edges: bool = False,
- **kwargs,
-) -> FlowIterationResult:
- """Analyze traffic demand placement success rates.
-
- Produces per-demand FlowEntry records and an iteration-level summary suitable
- for downstream statistics (e.g., delivered percentiles) without reconstructing
- joint distributions.
-
- Additionally exposes placement engine counters to aid performance analysis:
- - Per-demand: ``FlowEntry.data.policy_metrics`` (dict) with totals collected by
- the active FlowPolicy (e.g., ``spf_calls_total``, ``flows_created_total``,
- ``reopt_calls_total``, ``place_iterations_total``).
- - Per-iteration: ``FlowIterationResult.data.iteration_metrics`` aggregating the
- same counters across all demands in the iteration. Use
- ``FlowIterationResult.summary.total_placed`` for placed volume totals.
-
- Args:
- network_view: NetworkView with potential exclusions applied.
- demands_config: List of demand configurations (serializable dicts).
- placement_rounds: Number of placement optimization rounds.
- include_flow_details: When True, include cost_distribution per flow.
- include_used_edges: When True, include set of used edges per demand in entry data
- as ``FlowEntry.data.edges`` with ``edges_kind='used'``.
- **kwargs: Ignored. Accepted for interface compatibility.
-
- Returns:
- FlowIterationResult describing this iteration. The ``data`` field contains
- ``{"iteration_metrics": { ... }}``.
- """
- # Reconstruct demands from config to avoid passing complex objects
- demands = []
- for config in demands_config:
- demand = TrafficDemand(
- source_path=config["source_path"],
- sink_path=config["sink_path"],
- demand=config["demand"],
- mode=config.get("mode", "pairwise"),
- flow_policy_config=config.get("flow_policy_config"),
- priority=config.get("priority", 0),
- )
- demands.append(demand)
-
- traffic_matrix_set = TrafficMatrixSet()
- traffic_matrix_set.add("main", demands)
-
- tm = TrafficManager(
- network=network_view,
- traffic_matrix_set=traffic_matrix_set,
- matrix_name="main",
- )
- tm.build_graph()
- tm.expand_demands()
- tm.place_all_demands(placement_rounds=placement_rounds)
-
- # Build per-demand entries and overall summary
- flow_entries: list[FlowEntry] = []
- total_demand = 0.0
- total_placed = 0.0
-
- # Aggregate iteration-level engine metrics across all demands
- iteration_metrics: dict[str, float] = {
- "spf_calls_total": 0.0,
- "flows_created_total": 0.0,
- "reopt_calls_total": 0.0,
- "place_iterations_total": 0.0,
- }
-
- for dmd in tm.demands:
- offered = float(getattr(dmd, "volume", 0.0))
- placed = float(getattr(dmd, "placed_demand", 0.0))
- priority = int(getattr(dmd, "priority", getattr(dmd, "demand_class", 0)))
- dropped = offered - placed
- extra: dict[str, Any] = {}
- cost_distribution: dict[float, float] = {}
- if (include_flow_details or include_used_edges) and getattr(
- dmd, "flow_policy", None
- ) is not None:
- edge_strings: set[str] = set()
- for flow in dmd.flow_policy.flows.values(): # type: ignore[union-attr]
- # Accumulate placed volume by path cost
- bundle = getattr(flow, "path_bundle", None)
- if (
- include_flow_details
- and bundle is not None
- and hasattr(bundle, "cost")
- ):
- cost_val = float(bundle.cost)
- vol_val = float(getattr(flow, "placed_flow", 0.0))
- if vol_val > 0.0:
- cost_distribution[cost_val] = (
- cost_distribution.get(cost_val, 0.0) + vol_val
- )
- # Collect used edges for reference
- if include_used_edges:
- for eid in getattr(flow.path_bundle, "edges", set()):
- edge_strings.add(str(eid))
- if include_used_edges and edge_strings:
- extra["edges"] = sorted(edge_strings)
- extra["edges_kind"] = "used"
-
- # Always expose per-demand FlowPolicy metrics when available
- fp = getattr(dmd, "flow_policy", None)
- if fp is not None:
- try:
- # Cumulative totals over the policy's lifetime within this iteration
- totals: dict[str, float] = fp.get_metrics() # type: ignore[assignment]
- except Exception:
- totals = {}
- if totals:
- extra["policy_metrics"] = {k: float(v) for k, v in totals.items()}
- # Accumulate iteration-level totals across demands on known keys
- for key in iteration_metrics.keys():
- if key in totals:
- try:
- iteration_metrics[key] += float(totals[key])
- except Exception:
- pass
-
- entry = FlowEntry(
- source=str(getattr(dmd, "src_node", "")),
- destination=str(getattr(dmd, "dst_node", "")),
- priority=priority,
- demand=offered,
- placed=placed,
- dropped=dropped,
- cost_distribution=(cost_distribution if include_flow_details else {}),
- data=extra,
- )
- flow_entries.append(entry)
- total_demand += offered
- total_placed += placed
-
- overall_ratio = (total_placed / total_demand) if total_demand > 0 else 1.0
- dropped_flows = sum(1 for e in flow_entries if e.dropped > 0.0)
- summary = FlowSummary(
- total_demand=total_demand,
- total_placed=total_placed,
- overall_ratio=overall_ratio,
- dropped_flows=dropped_flows,
- num_flows=len(flow_entries),
- )
- return FlowIterationResult(
- flows=flow_entries,
- summary=summary,
- data={"iteration_metrics": iteration_metrics},
- )
-
-
-def sensitivity_analysis(
- network_view: "NetworkView",
- source_regex: str,
- sink_regex: str,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
- **kwargs,
-) -> dict[str, dict[str, float]]:
- """Analyze component sensitivity to failures.
-
- Args:
- network_view: NetworkView with potential exclusions applied.
- source_regex: Regex pattern for source node groups.
- sink_regex: Regex pattern for sink node groups.
- mode: Flow analysis mode ("combine" or "pairwise").
- shortest_path: Whether to use shortest paths only.
- flow_placement: Flow placement strategy.
- **kwargs: Ignored. Accepted for interface compatibility.
-
- Returns:
- Dictionary mapping flow keys ("src->dst") to dictionaries of component
- identifiers mapped to sensitivity scores.
- """
- sensitivity = network_view.sensitivity_analysis(
- source_regex,
- sink_regex,
- mode=mode,
- shortest_path=shortest_path,
- flow_placement=flow_placement,
- )
-
- # Convert to serializable format - sensitivity returns nested dict structure
- # sensitivity is Dict[Tuple[str, str], Dict[Tuple[str, str, str], float]]
- result = {}
- for flow_pair, sensitivity_dict in sensitivity.items():
- flow_key = f"{flow_pair[0]}->{flow_pair[1]}"
- result[flow_key] = {
- str(component): float(score)
- for component, score in sensitivity_dict.items()
- }
- return result
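
The removed module header documents a contract worth keeping in mind when porting callers: analysis functions take a NetworkView plus only simple, hashable kwargs so they stay cacheable and multiprocessing-safe. A toy function following that (now-removed) contract; the `capacity` attribute on links is an assumption:

```python
# Toy analysis function matching the removed contract:
#   analysis_func(network_view, **kwargs) -> Any
# Only hashable kwargs (here, a float threshold) keep it cache-friendly.
def count_high_capacity_links(network_view, threshold: float = 100.0, **kwargs):
    """Count visible links whose capacity exceeds `threshold`."""
    return sum(
        1
        for link in network_view.links.values()
        if getattr(link, "capacity", 0.0) > threshold  # capacity attr assumed
    )
```
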
diff --git a/ngraph/monte_carlo/results.py b/ngraph/monte_carlo/results.py
deleted file mode 100644
index bbe51fb..0000000
--- a/ngraph/monte_carlo/results.py
+++ /dev/null
@@ -1,233 +0,0 @@
-"""Structured result objects for FailureManager analysis functions.
-
-These classes provide interfaces for accessing Monte Carlo analysis
-results from FailureManager convenience methods. Visualization is handled by
-specialized analyzer classes in the workflow.analysis module.
-"""
-
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Any, Dict, List, Optional
-
-import pandas as pd
-
-from ngraph.results.artifacts import CapacityEnvelope, FailurePatternResult
-
-
-@dataclass
-class CapacityEnvelopeResults: # Deprecated: retained temporarily for import stability
- envelopes: Dict[str, CapacityEnvelope]
- failure_patterns: Dict[str, FailurePatternResult]
- source_pattern: str
- sink_pattern: str
- mode: str
- iterations: int
- metadata: Dict[str, Any]
-
- # Minimal API to prevent import errors in non-updated modules while we remove usages
- def export_summary(self) -> Dict[str, Any]: # pragma: no cover
- return {
- "source_pattern": self.source_pattern,
- "sink_pattern": self.sink_pattern,
- "mode": self.mode,
- "iterations": self.iterations,
- "metadata": self.metadata,
- "envelopes": {key: env.to_dict() for key, env in self.envelopes.items()},
- "failure_patterns": {
- key: fp.to_dict() for key, fp in self.failure_patterns.items()
- },
- }
-
-
-@dataclass
-class DemandPlacementResults: # Deprecated: retained temporarily for import stability
- raw_results: dict[str, Any]
- iterations: int
- baseline: Optional[dict[str, Any]] = None
- failure_patterns: Optional[Dict[str, Any]] = None
- metadata: Optional[Dict[str, Any]] = None
-
- def __post_init__(self) -> None: # pragma: no cover
- if self.failure_patterns is None:
- self.failure_patterns = {}
- if self.metadata is None:
- self.metadata = {}
-
-
-@dataclass
-class SensitivityResults:
- """Results from sensitivity Monte Carlo analysis.
-
- Attributes:
- raw_results: Raw results from FailureManager
- iterations: Number of Monte Carlo iterations
- baseline: Optional baseline result (no failures)
- component_scores: Aggregated component impact scores by flow
- failure_patterns: Dictionary mapping pattern keys to failure pattern results
- source_pattern: Source node regex pattern used in analysis
- sink_pattern: Sink node regex pattern used in analysis
- mode: Flow analysis mode ("combine" or "pairwise")
- metadata: Additional analysis metadata from FailureManager
- """
-
- raw_results: dict[str, Any]
- iterations: int
- baseline: Optional[dict[str, Any]] = None
- component_scores: Optional[Dict[str, Dict[str, Dict[str, float]]]] = None
- failure_patterns: Optional[Dict[str, Any]] = None
- source_pattern: Optional[str] = None
- sink_pattern: Optional[str] = None
- mode: Optional[str] = None
- metadata: Optional[Dict[str, Any]] = None
-
- def __post_init__(self):
- """Initialize default values for optional fields."""
- if self.component_scores is None:
- self.component_scores = {}
- if self.failure_patterns is None:
- self.failure_patterns = {}
- if self.metadata is None:
- self.metadata = {}
-
- def component_impact_distribution(self) -> pd.DataFrame:
- """Get component impact distribution as DataFrame.
-
- Returns:
- DataFrame with component criticality scores.
- """
- if not self.component_scores:
- return pd.DataFrame()
-
- # Flatten component scores across all flows
- data = []
- for flow_key, components in self.component_scores.items():
- for component_key, stats in components.items():
- row = {
- "flow_key": flow_key,
- "component": component_key,
- "mean_impact": stats.get("mean", 0.0),
- "max_impact": stats.get("max", 0.0),
- "min_impact": stats.get("min", 0.0),
- "sample_count": stats.get("count", 0),
- }
- data.append(row)
-
- return pd.DataFrame(data)
-
- def flow_keys(self) -> List[str]:
- """Get list of all flow keys in results.
-
- Returns:
- List of flow keys (e.g., ["datacenter->edge", "edge->datacenter"]).
- """
- return list(self.component_scores.keys()) if self.component_scores else []
-
- def get_flow_sensitivity(self, flow_key: str) -> Dict[str, Dict[str, float]]:
- """Get component sensitivity scores for a specific flow.
-
- Args:
- flow_key: Flow key (e.g., "datacenter->edge").
-
- Returns:
- Dictionary mapping component IDs to impact statistics
-
- Raises:
- KeyError: If flow_key not found in results.
- """
- if not self.component_scores or flow_key not in self.component_scores:
- available = (
- ", ".join(self.component_scores.keys())
- if self.component_scores
- else "none"
- )
- raise KeyError(f"Flow key '{flow_key}' not found. Available: {available}")
- return self.component_scores[flow_key]
-
- def summary_statistics(self) -> Dict[str, Dict[str, float]]:
- """Get summary statistics for component impact across all flows.
-
- Returns:
- Dictionary mapping component IDs to aggregated impact statistics
- """
- from collections import defaultdict
-
- if not self.component_scores:
- return {}
-
- # Aggregate across flows for each component
- component_aggregates = defaultdict(list)
- for _flow_key, components in self.component_scores.items():
- for component_key, stats in components.items():
- component_aggregates[component_key].append(stats.get("mean", 0.0))
-
- # Calculate overall statistics
- summary = {}
- for component_key, impact_values in component_aggregates.items():
- if impact_values:
- summary[component_key] = {
- "mean_impact": sum(impact_values) / len(impact_values),
- "max_impact": max(impact_values),
- "min_impact": min(impact_values),
- "flow_count": len(impact_values),
- }
-
- return summary
-
- def to_dataframe(self) -> pd.DataFrame:
- """Convert sensitivity results to DataFrame for analysis.
-
- Returns:
- DataFrame with component impact statistics
- """
- return self.component_impact_distribution()
-
- def get_failure_pattern_summary(self) -> pd.DataFrame:
- """Get summary of failure patterns if available.
-
- Returns:
- DataFrame with failure pattern frequencies and sensitivity impact
- """
- if not self.failure_patterns:
- return pd.DataFrame()
-
- data = []
- for pattern_key, pattern in self.failure_patterns.items():
- row = {
- "pattern_key": pattern_key,
- "count": pattern.get("count", 0),
- "is_baseline": pattern.get("is_baseline", False),
- "failed_nodes": len(pattern.get("excluded_nodes", [])),
- "failed_links": len(pattern.get("excluded_links", [])),
- "total_failures": len(pattern.get("excluded_nodes", []))
- + len(pattern.get("excluded_links", [])),
- }
-
- # Add sensitivity results for each flow
- sensitivity_result = pattern.get("sensitivity_result", {})
- for flow_key, components in sensitivity_result.items():
- # Average sensitivity across components for this pattern
- if components:
- avg_sensitivity = sum(components.values()) / len(components)
- row[f"avg_sensitivity_{flow_key}"] = avg_sensitivity
-
- data.append(row)
-
- return pd.DataFrame(data)
-
- def export_summary(self) -> Dict[str, Any]:
- """Export summary for serialization.
-
- Returns:
- Dictionary with all results data in serializable format
- """
- return {
- "source_pattern": self.source_pattern,
- "sink_pattern": self.sink_pattern,
- "mode": self.mode,
- "iterations": self.iterations,
- "metadata": self.metadata or {},
- "component_scores": self.component_scores or {},
- "failure_patterns": self.failure_patterns or {},
- "summary_statistics": self.summary_statistics(),
- }
diff --git a/ngraph/paths/__init__.py b/ngraph/paths/__init__.py
deleted file mode 100644
index 064878c..0000000
--- a/ngraph/paths/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Path primitives for representing routing sequences and equal-cost bundles.
-
-This package defines lightweight structures for path-centric operations:
-- ``Path`` models a single node-and-parallel-edges sequence with a numeric cost.
-- ``PathBundle`` groups one or more equal-cost paths compactly via a predecessor
- map, enabling enumeration of concrete paths on demand.
-"""
diff --git a/ngraph/paths/bundle.py b/ngraph/paths/bundle.py
deleted file mode 100644
index 9c97bbe..0000000
--- a/ngraph/paths/bundle.py
+++ /dev/null
@@ -1,365 +0,0 @@
-"""Utilities for compact representation of equal-cost path sets.
-
-This module defines ``PathBundle``, a structure that represents one or more
-equal-cost paths between two nodes using a predecessor map. It supports
-concatenation, containment checks, sub-bundle extraction with cost
-recalculation, and enumeration into concrete ``Path`` instances.
-"""
-
-from __future__ import annotations
-
-from collections import deque
-from heapq import heappop, heappush
-from typing import Dict, Iterator, List, Optional, Set, Tuple
-
-from ngraph.algorithms.base import Cost, EdgeSelect
-from ngraph.algorithms.edge_select import edge_select_fabric
-from ngraph.algorithms.paths import resolve_to_paths
-from ngraph.graph.strict_multidigraph import (
- AttrDict,
- EdgeID,
- NodeID,
- StrictMultiDiGraph,
-)
-from ngraph.paths.path import Path
-
-
-class PathBundle:
- """A collection of equal-cost paths between two nodes.
-
- This class encapsulates one or more parallel paths (all of the same cost)
- between `src_node` and `dst_node`. The predecessor map `pred` associates
- each node with the node(s) from which it can be reached, along with a list
- of edge IDs used in that step. The constructor performs a reverse traversal
- from `dst_node` to `src_node` to collect all edges, nodes, and store them
- in this bundle.
-
- The constructor assumes the predecessor relation forms a DAG between
- ``src_node`` and ``dst_node``. No cycle detection is performed. If cycles
- are present, traversal may not terminate.
- """
-
- def __init__(
- self,
- src_node: NodeID,
- dst_node: NodeID,
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]],
- cost: Cost,
- ) -> None:
- """Initialize the PathBundle.
-
- Args:
- src_node: The source node for all paths in this bundle.
- dst_node: The destination node for all paths in this bundle.
- pred: A predecessor map of the form:
- {
- current_node: {
- prev_node: [edge_id_1, edge_id_2, ...],
- ...
- },
- ...
- }
- Typically generated by a shortest-path or multi-path algorithm.
- cost: The total path cost (e.g. distance, cost) of all paths in the bundle.
- """
- self.src_node: NodeID = src_node
- self.dst_node: NodeID = dst_node
- self.cost: Cost = cost
- # We'll rebuild `pred` to store only the relevant portion from dst_node to src_node.
- self.pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {src_node: {}}
- self.edges: Set[EdgeID] = set()
- self.edge_tuples: Set[Tuple[EdgeID, ...]] = set()
- self.nodes: Set[NodeID] = {src_node}
-
- visited: Set[NodeID] = set()
- queue: deque[NodeID] = deque([dst_node])
- visited.add(dst_node)
-
- while queue:
- node = queue.popleft()
- self.nodes.add(node)
- # Ensure key exists even if `node` has no predecessors.
- self.pred.setdefault(node, {})
-
- # Traverse all predecessors of `node`
- for prev_node, edges_list in pred[node].items():
- # Record these edges in our local `pred` structure
- self.pred[node][prev_node] = edges_list
- # Update the set of all edges seen in this bundle
- self.edges.update(edges_list)
- # Store the tuple form for quick equality checks on parallel edges
- self.edge_tuples.add(tuple(edges_list))
-
- # Enqueue the predecessor unless it's the original source.
- # No cycle check is performed, since we trust `pred` is a DAG.
- if prev_node != src_node and prev_node not in visited:
- visited.add(prev_node)
- queue.append(prev_node)
-
- def __lt__(self, other: PathBundle) -> bool:
- """Compare two PathBundles by cost (for sorting)."""
- return self.cost < other.cost
-
- def __eq__(self, other: object) -> bool:
- """Check equality of two PathBundles by (src, dst, cost, edges)."""
- if not isinstance(other, PathBundle):
- return False
- return (
- self.src_node == other.src_node
- and self.dst_node == other.dst_node
- and self.cost == other.cost
- and self.edges == other.edges
- )
-
- def __hash__(self) -> int:
- """Create a unique hash based on (src, dst, cost, frozenset of edges)."""
- return hash((self.src_node, self.dst_node, self.cost, frozenset(self.edges)))
-
- def __repr__(self) -> str:
- """String representation of this PathBundle."""
- return f"PathBundle({self.src_node}, {self.dst_node}, {self.pred}, {self.cost})"
-
- def add(self, other: PathBundle) -> PathBundle:
- """Concatenate this bundle with another bundle (end-to-start).
-
- This effectively merges the predecessor maps and combines costs.
-
- Args:
- other: Another PathBundle whose `src_node` must match this bundle's `dst_node`.
-
- Returns:
- A new PathBundle from `self.src_node` to `other.dst_node`.
-
- Raises:
- ValueError: If this bundle's `dst_node` does not match the other's `src_node`.
- """
- if self.dst_node != other.src_node:
- raise ValueError("PathBundle dst_node != other.src_node")
-
- # Make a combined predecessor map
- new_pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {}
- # Copy from self
- for dnode in self.pred:
- new_pred.setdefault(dnode, {})
- for snode, edges_list in self.pred[dnode].items():
- new_pred[dnode][snode] = list(edges_list)
- # Copy from other
- for dnode in other.pred:
- new_pred.setdefault(dnode, {})
- for snode, edges_list in other.pred[dnode].items():
- new_pred[dnode][snode] = list(edges_list)
-
- return PathBundle(
- self.src_node, other.dst_node, new_pred, self.cost + other.cost
- )
-
- @classmethod
- def from_path(
- cls,
- path: Path,
- resolve_edges: bool = False,
- graph: Optional[StrictMultiDiGraph] = None,
- edge_select: Optional[EdgeSelect] = None,
- cost_attr: str = "cost",
- capacity_attr: str = "capacity",
- ) -> PathBundle:
- """Construct a PathBundle from a single `Path` object.
-
- Args:
- path: A `Path` object which contains node-edge tuples, plus a `cost`.
- resolve_edges: If True, dynamically choose the minimal-cost edges
- between each node pair via the provided `edge_select`.
- graph: The graph used for edge resolution (required if `resolve_edges=True`).
- edge_select: The selection criterion for picking edges if `resolve_edges=True`.
- cost_attr: The attribute name on edges representing cost (e.g., 'cost').
- capacity_attr: The attribute name on edges representing capacity.
-
- Returns:
- A new PathBundle corresponding to the single path. If `resolve_edges`
- is True, the cost is recalculated; otherwise the original `path.cost` is used.
-
- Raises:
- ValueError: If `resolve_edges` is True but no `graph` is provided.
- """
- if resolve_edges:
- if not graph:
- raise ValueError(
- "A StrictMultiDiGraph `graph` is required when resolve_edges=True."
- )
- if edge_select is None:
- raise ValueError(
- "edge_select must be provided when resolve_edges=True."
- )
- edge_selector = edge_select_fabric(
- edge_select,
- cost_attr=cost_attr,
- capacity_attr=capacity_attr,
- )
- else:
- edge_selector = None
-
- src_node = path.path_tuple[0][0]
- dst_node = path.path_tuple[-1][0]
- pred_map: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {src_node: {}}
- total_cost: Cost = 0
-
- # Build the predecessor map from each hop
- for (a_node, a_edges), (z_node, _) in zip(
- path.path_tuple[:-1], path.path_tuple[1:], strict=True
- ):
- pred_map.setdefault(z_node, {})
- # If we're not resolving edges, just copy whatever the path has
- if not resolve_edges:
- pred_map[z_node][a_node] = list(a_edges)
- else:
- # Re-select edges from a_node to z_node
- if edge_selector is not None and graph is not None:
- # Convert edges_dict to the expected Dict[EdgeID, AttrDict] format
- # Since EdgeID is just Hashable, we can cast the keys directly
- typed_edges_dict: Dict[EdgeID, AttrDict] = {
- k: v for k, v in graph[a_node][z_node].items()
- }
- min_cost, edge_list = edge_selector(
- graph, a_node, z_node, typed_edges_dict, None, None
- )
- pred_map[z_node][a_node] = edge_list
- total_cost += min_cost
-
- if resolve_edges:
- return cls(src_node, dst_node, pred_map, total_cost)
- return cls(src_node, dst_node, pred_map, path.cost)
-
- def resolve_to_paths(self, split_parallel_edges: bool = False) -> Iterator[Path]:
- """Generate all concrete `Path` objects contained in this PathBundle.
-
- Args:
- split_parallel_edges: If False, any parallel edges are grouped together
- into a single path segment. If True, produce all permutations
- of parallel edges as distinct paths.
-
- Yields:
- A `Path` object for each distinct route from `src_node` to `dst_node`.
- """
- for path_tuple in resolve_to_paths(
- self.src_node,
- self.dst_node,
- self.pred,
- split_parallel_edges,
- ):
- yield Path(path_tuple, self.cost)
-
- def contains(self, other: PathBundle) -> bool:
- """Check if this bundle's edge set contains all edges of `other`.
-
- Args:
- other: Another PathBundle.
-
- Returns:
- True if `other`'s edges are a subset of this bundle's edges.
- """
- return self.edges.issuperset(other.edges)
-
- def is_subset_of(self, other: PathBundle) -> bool:
- """Check if this bundle's edge set is contained in `other`'s edge set.
-
- Args:
- other: Another PathBundle.
-
- Returns:
- True if all edges in this bundle are in `other`.
- """
- return self.edges.issubset(other.edges)
-
- def is_disjoint_from(self, other: PathBundle) -> bool:
- """Check if this bundle shares no edges with `other`.
-
- Args:
- other: Another PathBundle.
-
- Returns:
- True if there are no common edges between the two bundles.
- """
- return self.edges.isdisjoint(other.edges)
-
- def get_sub_path_bundle(
- self,
- new_dst_node: NodeID,
- graph: StrictMultiDiGraph,
- cost_attr: str = "cost",
- ) -> PathBundle:
- """Create a sub-bundle ending at `new_dst_node` with correct minimal cost.
-
- The returned bundle contains the predecessor subgraph that reaches from
- `self.src_node` to `new_dst_node` using only relations present in this
- bundle's `pred`. The `cost` of the returned bundle is recomputed as the
- minimal sum of per-hop costs along any valid path from `self.src_node`
- to `new_dst_node`, where each hop cost is the minimum of `cost_attr`
- across the parallel edges recorded for that hop.
-
- Args:
- new_dst_node: The new destination node, which must be present in `pred`.
- graph: The underlying graph to look up edge attributes.
- cost_attr: The edge attribute representing cost/metric.
-
- Returns:
- A new PathBundle from `self.src_node` to `new_dst_node` with an updated cost.
-
- Raises:
- ValueError: If `new_dst_node` is not found in this bundle's `pred` map.
- """
- if new_dst_node not in self.pred:
- raise ValueError(f"{new_dst_node} not in this PathBundle's pred")
-
- edges_dict = graph.get_edges()
-
- # 1) Build the restricted predecessor subgraph reachable from new_dst_node
- # back to self.src_node. This preserves all allowed predecessors without
- # collapsing to a single path.
- new_pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {self.src_node: {}}
- visited_build: Set[NodeID] = set([new_dst_node])
- queue_build: deque[NodeID] = deque([new_dst_node])
- while queue_build:
- node = queue_build.popleft()
- for prev_node, edges_list in self.pred[node].items():
- new_pred.setdefault(node, {})[prev_node] = edges_list
- if prev_node != self.src_node and prev_node not in visited_build:
- visited_build.add(prev_node)
- queue_build.append(prev_node)
-
- # 2) Compute minimal cost from self.src_node to new_dst_node over new_pred
- # using Dijkstra on the reversed edges (from dst backwards to src).
- def hop_cost(u: NodeID, v: NodeID) -> float:
- # cost to go from u<-v (i.e., v -> u in forward direction)
- edges_list = new_pred[u][v]
- return float(min(edges_dict[eid][3][cost_attr] for eid in edges_list))
-
- # Trivial case: src == dst
- if new_dst_node == self.src_node:
- return PathBundle(self.src_node, new_dst_node, new_pred, 0.0)
-
- dist: Dict[NodeID, float] = {new_dst_node: 0.0}
- heap: List[Tuple[float, NodeID]] = [(0.0, new_dst_node)]
- best_cost: float = float("inf")
-
- while heap:
- cost_to_node, node = heappop(heap)
- if cost_to_node > dist.get(node, float("inf")):
- continue
- if node == self.src_node:
- best_cost = cost_to_node
- break
- # Relax predecessors of `node` (reverse traversal)
- for prev_node in new_pred.get(node, {}):
- c = cost_to_node + hop_cost(node, prev_node)
- if c < dist.get(prev_node, float("inf")):
- dist[prev_node] = c
- heappush(heap, (c, prev_node))
-
- # If src_node was not reached, this subgraph does not connect src->new_dst.
- # Treat as an error to avoid silent mis-reporting.
- if best_cost == float("inf"):
- raise ValueError(
- f"No path from '{self.src_node}' to '{new_dst_node}' within this PathBundle."
- )
-
- return PathBundle(self.src_node, new_dst_node, new_pred, float(best_cost))
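The reverse Dijkstra in `get_sub_path_bundle` is self-contained enough to sketch standalone. A minimal version over a plain pred map, assuming a `hop_cost(u, v)` callable in place of the graph edge lookups:

```python
# Minimal sketch of the reverse-Dijkstra cost computation used above:
# walk backwards from dst toward src over the predecessor map.
from heapq import heappop, heappush

def min_cost_via_pred(pred, src, dst, hop_cost):
    """Minimal summed hop cost from src to dst; hop_cost(u, v) prices v -> u."""
    if dst == src:
        return 0.0
    dist = {dst: 0.0}
    heap = [(0.0, dst)]
    while heap:
        cost, node = heappop(heap)
        if cost > dist.get(node, float("inf")):
            continue  # stale heap entry
        if node == src:
            return cost
        for prev in pred.get(node, {}):
            c = cost + hop_cost(node, prev)
            if c < dist.get(prev, float("inf")):
                dist[prev] = c
                heappush(heap, (c, prev))
    raise ValueError("src not reachable within pred")

# src -> a -> dst with unit hop costs: total cost 2.0
pred = {"dst": {"a": ["e2"]}, "a": {"src": ["e1"]}, "src": {}}
assert min_cost_via_pred(pred, "src", "dst", lambda u, v: 1.0) == 2.0
```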
diff --git a/ngraph/profiling/reporter.py b/ngraph/profiling/reporter.py
deleted file mode 100644
index feaff7f..0000000
--- a/ngraph/profiling/reporter.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from __future__ import annotations
-
-"""Profiling report helpers.
-
-Use ``ngraph.profiling.profiler.PerformanceReporter`` to generate profiling
-reports from collected ``ProfileResults``. This module exists to group
-report-related utilities if needed in the future.
-"""
diff --git a/ngraph/results/artifacts.py b/ngraph/results/artifacts.py
index d277c21..64363ac 100644
--- a/ngraph/results/artifacts.py
+++ b/ngraph/results/artifacts.py
@@ -3,10 +3,10 @@
This module defines dataclasses that capture outputs from analyses and
simulations in a JSON-serializable form:
-- `PlacementResultSet`: aggregated placement results and statistics
- `CapacityEnvelope`: frequency-based capacity distributions and optional
aggregated flow statistics
- `FailurePatternResult`: capacity results for specific failure patterns
+- `PlacementEnvelope`: per-demand placement envelopes
"""
from __future__ import annotations
@@ -15,55 +15,6 @@
from dataclasses import dataclass, field
from typing import Any, Dict, List
-from ngraph.demand.manager.manager import TrafficResult
-
-
-@dataclass(frozen=True)
-class PlacementResultSet:
- """Aggregated traffic placement results from one or many runs.
-
- This immutable dataclass stores traffic placement results organized by case,
- with overall statistics and per-demand statistics.
-
- Attributes:
- results_by_case: Dictionary mapping case names to TrafficResult lists.
- overall_stats: Dictionary of overall statistics.
- demand_stats: Dictionary mapping demand keys to per-demand statistics.
- """
-
- results_by_case: dict[str, list[TrafficResult]] = field(default_factory=dict)
- overall_stats: dict[str, float] = field(default_factory=dict)
- demand_stats: dict[tuple[str, str, int], dict[str, float]] = field(
- default_factory=dict
- )
-
- def to_dict(self) -> dict[str, Any]:
- """Convert to dictionary for JSON serialization.
-
- Converts TrafficResult objects to dictionaries and formats demand
- statistics keys as strings for JSON compatibility.
-
- Returns:
- Dictionary representation with all fields as JSON-serializable primitives.
- """
- # Convert TrafficResult objects to dictionaries
- cases = {
- case: [result._asdict() for result in results]
- for case, results in self.results_by_case.items()
- }
-
- # Format demand statistics keys as strings
- demand_stats = {
- f"{src}->{dst}|prio={priority}": stats
- for (src, dst, priority), stats in self.demand_stats.items()
- }
-
- return {
- "overall_stats": self.overall_stats,
- "cases": cases,
- "demand_stats": demand_stats,
- }
-
@dataclass
class CapacityEnvelope:
diff --git a/ngraph/results/flow.py b/ngraph/results/flow.py
index 729d573..6da28fe 100644
--- a/ngraph/results/flow.py
+++ b/ngraph/results/flow.py
@@ -4,15 +4,20 @@
for capacity and demand-placement style analyses in a unit-agnostic form.
Objects expose `to_dict()` that returns JSON-safe primitives. Float-keyed
-distributions are normalized to string keys, and arbitrary `data` payloads are
-sanitized. These dicts are written under `data.flow_results` by steps.
+distributions are normalized to string keys via `_fmt_float_key()`, and
+arbitrary `data` payloads are sanitized. These dicts are written under
+`data.flow_results` by steps.
+
+Utilities:
+    _fmt_float_key: Formats floats as stable string keys for JSON serialization.
+        Uses fixed-point notation with trailing zeros stripped for human-readable,
+        canonical representations of numeric keys like cost distributions.
"""
from __future__ import annotations
import math
-from dataclasses import asdict, dataclass, field
-from decimal import ROUND_HALF_EVEN, Decimal
+from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
from ngraph.logging import get_logger
@@ -20,6 +25,28 @@
logger = get_logger(__name__)
+def _fmt_float_key(x: float, places: int = 9) -> str:
+ """Format a float as a canonical string key for JSON serialization.
+
+ Uses fixed-point notation (never exponential) with trailing zeros stripped.
+ This ensures stable, human-readable keys for cost distributions.
+
+ Args:
+ x: Float value to format.
+ places: Decimal places for precision (default 9).
+
+ Returns:
+ Canonical string representation of the float in fixed-point notation.
+ """
+ rounded = round(float(x), places)
+ # Use 'f' format for fixed-point (never exponential), then strip trailing zeros
+ formatted = f"{rounded:.{places}f}"
+ # Strip trailing zeros after decimal point, then trailing decimal point if any
+ if "." in formatted:
+ formatted = formatted.rstrip("0").rstrip(".")
+ return formatted
+
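A few sample outputs of the new helper, verifiable in a REPL:

```python
# Expected behavior of _fmt_float_key (fixed-point, trailing zeros stripped):
assert _fmt_float_key(3.0) == "3"              # "3.000000000" -> "3"
assert _fmt_float_key(2.5) == "2.5"            # trailing zeros stripped
assert _fmt_float_key(0.1 + 0.2) == "0.3"      # float artifact rounded away
assert _fmt_float_key(1 / 3) == "0.333333333"  # capped at 9 decimal places
```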
+
@dataclass(slots=True)
class FlowEntry:
"""Represents a single source→destination flow outcome within an iteration.
@@ -137,20 +164,13 @@ def __post_init__(self) -> None:
raise ValueError("FlowEntry.cost_distribution contains invalid entries")
def to_dict(self) -> Dict[str, Any]:
- """Return a JSON-serializable dictionary representation."""
+ """Return a JSON-serializable dictionary representation.
+ Builds dict directly from known fields instead of using asdict() to avoid
+ the overhead of recursive _asdict_inner calls (significant for large result sets).
+ """
# Canonicalize cost_distribution keys as strings to avoid float artifacts
-        # and ensure stable JSON. Use decimal quantization for determinism.
+        # and ensure stable JSON via _fmt_float_key's fixed-point formatting.
- def _fmt_float_key(x: float, places: int = 9) -> str:
- q = Decimal(10) ** -places
- try:
- d = Decimal(str(float(x))).quantize(q, rounding=ROUND_HALF_EVEN)
- # Normalize to remove trailing zeros and exponent when possible
- d = d.normalize()
- return format(d, "f") if d == d.to_integral() else format(d, "f")
- except Exception: # pragma: no cover - defensive
- return str(x)
-
normalized_costs: Dict[str, float] = {}
for k, v in self.cost_distribution.items():
try:
@@ -158,11 +178,18 @@ def _fmt_float_key(x: float, places: int = 9) -> str:
normalized_costs[key_str] = float(v)
except Exception: # pragma: no cover - defensive
normalized_costs[str(k)] = float(v)
- d = asdict(self)
- d["cost_distribution"] = normalized_costs
- # Ensure per-flow data payload is JSON-safe to avoid late failures
- d["data"] = _ensure_json_safe(self.data)
- return d
+
+ # Build dict directly from known fields (avoids asdict() overhead)
+ return {
+ "source": self.source,
+ "destination": self.destination,
+ "priority": self.priority,
+ "demand": self.demand,
+ "placed": self.placed,
+ "dropped": self.dropped,
+ "cost_distribution": normalized_costs,
+ "data": _ensure_json_safe(self.data),
+ }
@dataclass(slots=True)
@@ -233,8 +260,17 @@ def __post_init__(self) -> None:
raise ValueError("FlowSummary.overall_ratio inconsistent with totals")
def to_dict(self) -> Dict[str, Any]:
- """Return a JSON-serializable dictionary representation."""
- return asdict(self)
+ """Return a JSON-serializable dictionary representation.
+
+ Builds dict directly from known fields instead of using asdict().
+ """
+ return {
+ "total_demand": self.total_demand,
+ "total_placed": self.total_placed,
+ "overall_ratio": self.overall_ratio,
+ "dropped_flows": self.dropped_flows,
+ "num_flows": self.num_flows,
+ }
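The asdict() avoidance is a pure performance choice: both to_dict() methods return the same mapping a recursive asdict() would, minus the deep-copy machinery. A toy illustration (not NetGraph's classes):

```python
# Toy comparison: hand-built dict vs dataclasses.asdict on a slots dataclass.
from dataclasses import asdict, dataclass

@dataclass(slots=True)
class Point:
    x: float
    y: float

    def to_dict(self) -> dict:
        # Direct construction: no recursion, no copies of nested values.
        return {"x": self.x, "y": self.y}

p = Point(1.0, 2.0)
assert p.to_dict() == asdict(p) == {"x": 1.0, "y": 2.0}
```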
@dataclass(slots=True)
diff --git a/ngraph/results/snapshot.py b/ngraph/results/snapshot.py
new file mode 100644
index 0000000..388268c
--- /dev/null
+++ b/ngraph/results/snapshot.py
@@ -0,0 +1,72 @@
+"""Scenario snapshot helpers.
+
+Build a concise dictionary snapshot of failure policies and traffic matrices for
+export into results without keeping heavy domain objects.
+"""
+
+from __future__ import annotations
+
+from typing import Any, Dict
+
+
+def build_scenario_snapshot(
+    *,
+    seed: int | None,
+    failure_policy_set,
+    traffic_matrix_set,
+) -> Dict[str, Any]:
+    """Build a JSON-safe snapshot of failure policies and traffic matrices.
+
+    Args:
+        seed: Scenario seed, if any.
+        failure_policy_set: Object exposing a ``policies`` mapping; attributes
+            are read defensively via ``getattr``.
+        traffic_matrix_set: Object exposing a ``matrices`` mapping of
+            name -> list of demand definitions.
+
+    Returns:
+        Dict with ``seed``, ``failure_policy_set``, and ``traffic_matrices`` keys.
+    """
+    snapshot_failure_policies: Dict[str, Any] = {}
+ for name, policy in getattr(failure_policy_set, "policies", {}).items():
+ modes_list: list[dict[str, Any]] = []
+ for mode in getattr(policy, "modes", []) or []:
+ mode_dict = {
+ "weight": float(getattr(mode, "weight", 0.0)),
+ "rules": [],
+ "attrs": dict(getattr(mode, "attrs", {}) or {}),
+ }
+ for rule in getattr(mode, "rules", []) or []:
+ mode_dict["rules"].append(
+ {
+ "entity_scope": getattr(rule, "entity_scope", "node"),
+ "logic": getattr(rule, "logic", "or"),
+ "rule_type": getattr(rule, "rule_type", "all"),
+ "probability": float(getattr(rule, "probability", 1.0)),
+ "count": int(getattr(rule, "count", 1)),
+ "conditions": [
+ {
+ "attr": c.attr,
+ "operator": c.operator,
+ "value": c.value,
+ }
+ for c in getattr(rule, "conditions", []) or []
+ ],
+ }
+ )
+ modes_list.append(mode_dict)
+ snapshot_failure_policies[name] = {
+ "attrs": dict(getattr(policy, "attrs", {}) or {}),
+ "modes": modes_list,
+ }
+
+ snapshot_tms: Dict[str, list[dict[str, Any]]] = {}
+ for mname, demands in getattr(traffic_matrix_set, "matrices", {}).items():
+ entries: list[dict[str, Any]] = []
+ for d in demands:
+ entries.append(
+ {
+ "source_path": getattr(d, "source_path", ""),
+ "sink_path": getattr(d, "sink_path", ""),
+ "demand": float(getattr(d, "demand", 0.0)),
+ "priority": int(getattr(d, "priority", 0)),
+ "mode": getattr(d, "mode", "pairwise"),
+ "flow_policy_config": getattr(d, "flow_policy_config", None),
+ "attrs": dict(getattr(d, "attrs", {}) or {}),
+ }
+ )
+ snapshot_tms[mname] = entries
+
+ return {
+ "seed": seed,
+ "failure_policy_set": snapshot_failure_policies,
+ "traffic_matrices": snapshot_tms,
+ }
diff --git a/ngraph/scenario.py b/ngraph/scenario.py
index ba5d656..c413c4a 100644
--- a/ngraph/scenario.py
+++ b/ngraph/scenario.py
@@ -2,30 +2,23 @@
from __future__ import annotations
-import json
from dataclasses import dataclass, field
-from importlib import resources
-from typing import Any, Dict, List, Optional
+from typing import List, Optional
-import yaml
-
-from ngraph.components import ComponentsLibrary
-from ngraph.demand.manager.builder import build_traffic_matrix_set
-from ngraph.demand.matrix import TrafficMatrixSet
from ngraph.dsl.blueprints.expand import expand_network_dsl
-from ngraph.failure.policy import (
- FailureCondition,
- FailureMode,
- FailurePolicy,
- FailureRule,
-)
-from ngraph.failure.policy_set import FailurePolicySet
+from ngraph.dsl.loader import load_scenario_yaml
+from ngraph.exec.demand.builder import build_traffic_matrix_set
from ngraph.logging import get_logger
-from ngraph.model.network import Network, RiskGroup
+from ngraph.model.components import ComponentsLibrary
+from ngraph.model.demand.matrix import TrafficMatrixSet
+from ngraph.model.failure.parser import build_failure_policy_set, build_risk_groups
+from ngraph.model.failure.policy_set import FailurePolicySet
+from ngraph.model.network import Network
from ngraph.results import Results
-from ngraph.seed_manager import SeedManager
-from ngraph.workflow.base import WORKFLOW_STEP_REGISTRY, WorkflowStep
-from ngraph.yaml_utils import normalize_yaml_dict_keys
+from ngraph.results.snapshot import build_scenario_snapshot
+from ngraph.utils.seed_manager import SeedManager
+from ngraph.workflow.base import WorkflowStep
+from ngraph.workflow.parse import build_workflow_steps
@dataclass
@@ -117,103 +110,7 @@ def from_yaml(
or if there are any unrecognized top-level keys.
TypeError: If a workflow step's arguments are invalid for the step class.
"""
- data = yaml.safe_load(yaml_str)
- if data is None:
- data = {}
- if not isinstance(data, dict):
- raise ValueError("The provided YAML must map to a dictionary at top-level.")
-
- # Normalize YAML parsing quirks and perform early shape checks to preserve
- # error messages expected by callers/tests before schema validation.
- # 1) Normalize boolean-like keys under traffic_matrix_set
- if isinstance(data.get("traffic_matrix_set"), dict):
- data["traffic_matrix_set"] = normalize_yaml_dict_keys(
- data["traffic_matrix_set"] # type: ignore[arg-type]
- )
-
- # 2) Early network structure checks
- network_section = data.get("network")
- if isinstance(network_section, dict):
- if "nodes" in network_section and not isinstance(
- network_section["nodes"], dict
- ):
- raise ValueError("'nodes' must be a mapping")
- if "links" in network_section and not isinstance(
- network_section["links"], list
- ):
- raise ValueError("'links' must be a list")
- # Validate direct link entries have required keys
- if isinstance(network_section.get("links"), list):
- for entry in network_section["links"]:
- if not isinstance(entry, dict):
- raise ValueError(
- "Each link definition must be a mapping with 'source' and 'target'"
- )
- if "source" not in entry or "target" not in entry:
- raise ValueError(
- "Each link definition must include 'source' and 'target'"
- )
- # Validate node entries for unrecognized keys (preserve message)
- if isinstance(network_section.get("nodes"), dict):
- for _node_name, node_def in network_section["nodes"].items():
- if isinstance(node_def, dict):
- allowed = {"attrs", "disabled", "risk_groups"}
- for k in node_def.keys():
- if k not in allowed:
- raise ValueError("Unrecognized key")
-
- # 3) Risk groups must have 'name'
- if isinstance(data.get("risk_groups"), list):
- for rg in data["risk_groups"]:
- if not isinstance(rg, dict) or "name" not in rg:
- raise ValueError("RiskGroup entry missing 'name' field")
-
- # Unconditional JSON Schema validation
- try:
- import jsonschema # type: ignore
- except Exception as exc: # pragma: no cover - import error path
- raise RuntimeError(
- "jsonschema is required for scenario validation. Install dev extras or add 'jsonschema' to dependencies."
- ) from exc
-
- # Load schema from packaged resource
- schema_data: dict[str, Any]
- try:
- with (
- resources.files("ngraph.schemas")
- .joinpath("scenario.json")
- .open("r", encoding="utf-8")
- ) as f: # type: ignore[attr-defined]
- schema_data = json.load(f)
- except Exception as exc:
- raise RuntimeError(
- "Failed to locate packaged NetGraph scenario schema 'ngraph/schemas/scenario.json'."
- ) from exc
-
- try:
- jsonschema.validate(data, schema_data) # type: ignore[arg-type]
- except Exception as exc:
- # Provide actionable error
- raise ValueError(f"Scenario JSON Schema validation failed: {exc}") from exc
-
- # Ensure only recognized top-level keys are present.
- recognized_keys = {
- "vars",
- "blueprints",
- "network",
- "failure_policy_set",
- "traffic_matrix_set",
- "workflow",
- "components",
- "risk_groups",
- "seed",
- }
- extra_keys = set(data.keys()) - recognized_keys
- if extra_keys:
- raise ValueError(
- f"Unrecognized top-level key(s) in scenario: {', '.join(sorted(extra_keys))}. "
- f"Allowed keys are {sorted(recognized_keys)}"
- )
+ data = load_scenario_yaml(yaml_str)
# Extract seed first as it may be used by other components
seed = data.get("seed")
@@ -236,23 +133,11 @@ def from_yaml(
pass
# 2) Build the failure policy set
- fps_data = data.get("failure_policy_set", {})
- if not isinstance(fps_data, dict):
- raise ValueError(
- "'failure_policy_set' must be a mapping of name -> FailurePolicy definition"
- )
-
- # Normalize dictionary keys to handle YAML boolean keys
- normalized_fps = normalize_yaml_dict_keys(fps_data)
- failure_policy_set = FailurePolicySet()
seed_manager = SeedManager(seed)
- for name, fp_data in normalized_fps.items():
- if not isinstance(fp_data, dict):
- raise ValueError(
- f"Failure policy '{name}' must map to a FailurePolicy definition dict"
- )
- failure_policy = cls._build_failure_policy(fp_data, seed_manager, name)
- failure_policy_set.add(name, failure_policy)
+ failure_policy_set = build_failure_policy_set(
+ data.get("failure_policy_set", {}),
+ derive_seed=lambda n: seed_manager.derive_seed("failure_policy", n),
+ )
if failure_policy_set.policies:
try:
@@ -289,7 +174,10 @@ def from_yaml(
# 4) Build workflow steps
workflow_data = data.get("workflow", [])
- workflow_steps = cls._build_workflow_steps(workflow_data, seed_manager)
+ workflow_steps = build_workflow_steps(
+ workflow_data,
+ derive_seed=lambda name: seed_manager.derive_seed("workflow_step", name),
+ )
try:
labels: list[str] = []
for idx, step in enumerate(workflow_steps):
@@ -323,7 +211,7 @@ def from_yaml(
# 6) Parse optional risk_groups, then attach them to the network
rg_data = data.get("risk_groups", [])
if rg_data:
- risk_groups = cls._build_risk_groups(rg_data)
+ risk_groups = build_risk_groups(rg_data)
for rg in risk_groups:
network_obj.risk_groups[rg.name] = rg
if rg.disabled:
@@ -347,63 +235,12 @@ def from_yaml(
# Attach minimal scenario snapshot to results for export
try:
- snapshot_failure_policies: Dict[str, Any] = {}
- for name, policy in failure_policy_set.policies.items():
- modes_list: list[dict[str, Any]] = []
- for mode in getattr(policy, "modes", []) or []:
- mode_dict = {
- "weight": float(getattr(mode, "weight", 0.0)),
- "rules": [],
- "attrs": dict(getattr(mode, "attrs", {}) or {}),
- }
- for rule in getattr(mode, "rules", []) or []:
- rule_dict = {
- "entity_scope": getattr(rule, "entity_scope", "node"),
- "logic": getattr(rule, "logic", "or"),
- "rule_type": getattr(rule, "rule_type", "all"),
- "probability": float(getattr(rule, "probability", 1.0)),
- "count": int(getattr(rule, "count", 1)),
- "conditions": [
- {
- "attr": c.attr,
- "operator": c.operator,
- "value": c.value,
- }
- for c in getattr(rule, "conditions", []) or []
- ],
- }
- mode_dict["rules"].append(rule_dict)
- modes_list.append(mode_dict)
- snapshot_failure_policies[name] = {
- "attrs": dict(getattr(policy, "attrs", {}) or {}),
- "modes": modes_list,
- }
-
- snapshot_tms: Dict[str, list[dict[str, Any]]] = {}
- for mname, demands in tms.matrices.items():
- entries: list[dict[str, Any]] = []
- for d in demands:
- entries.append(
- {
- "source_path": getattr(d, "source_path", ""),
- "sink_path": getattr(d, "sink_path", ""),
- "demand": float(getattr(d, "demand", 0.0)),
- "priority": int(getattr(d, "priority", 0)),
- "mode": getattr(d, "mode", "pairwise"),
- "flow_policy_config": getattr(
- d, "flow_policy_config", None
- ),
- "attrs": dict(getattr(d, "attrs", {}) or {}),
- }
- )
- snapshot_tms[mname] = entries
-
scenario_obj.results.set_scenario_snapshot(
- {
- "seed": seed,
- "failure_policy_set": snapshot_failure_policies,
- "traffic_matrices": snapshot_tms,
- }
+ build_scenario_snapshot(
+ seed=seed,
+ failure_policy_set=failure_policy_set,
+ traffic_matrix_set=tms,
+ )
)
except Exception:
# Snapshot should never block scenario construction
@@ -422,228 +259,3 @@ def from_yaml(
pass
return scenario_obj
-
- @staticmethod
- def _build_risk_groups(rg_data: List[Dict[str, Any]]) -> List[RiskGroup]:
- """Recursively builds a list of RiskGroup objects from YAML data.
-
- Each entry may have keys: "name", "children", "disabled", and "attrs" (dict).
-
- Args:
- rg_data (List[Dict[str, Any]]): The list of risk-group definitions.
-
- Returns:
- List[RiskGroup]: Possibly nested risk groups.
-
- Raises:
- ValueError: If any group is missing 'name'.
- """
-
- def build_one(d: Dict[str, Any]) -> RiskGroup:
- name = d.get("name")
- if not name:
- raise ValueError("RiskGroup entry missing 'name' field.")
- disabled = d.get("disabled", False)
- children_list = d.get("children", [])
- child_objs = [build_one(cd) for cd in children_list]
- attrs = normalize_yaml_dict_keys(d.get("attrs", {}))
- return RiskGroup(
- name=name, disabled=disabled, children=child_objs, attrs=attrs
- )
-
- return [build_one(entry) for entry in rg_data]
-
- @staticmethod
- def _build_failure_policy(
- fp_data: Dict[str, Any], seed_manager: SeedManager, policy_name: str
- ) -> FailurePolicy:
- """Constructs a FailurePolicy from data that may specify multiple rules plus
- optional top-level fields like fail_risk_groups, fail_risk_group_children,
- and attrs.
-
- Example:
- failure_policy_set:
- default:
- fail_risk_groups: true
- fail_risk_group_children: false
- attrs:
- custom_key: custom_val
- rules:
- - entity_scope: "node"
- conditions:
- - attr: "capacity"
- operator: ">"
- value: 100
- logic: "and"
- rule_type: "choice"
- count: 2
-
- Args:
- fp_data (Dict[str, Any]): Dictionary from the 'failure_policy' section of the YAML.
- seed_manager (SeedManager): Seed manager for reproducible operations.
- policy_name (str): Name of the policy for seed derivation.
-
- Returns:
- FailurePolicy: The constructed policy. If no rules exist, it's an empty policy.
-
- Raises:
- ValueError: If 'rules' is present but not a list, or if conditions are not lists.
- """
- fail_srg = fp_data.get("fail_risk_groups", False)
- fail_rg_children = fp_data.get("fail_risk_group_children", False)
- attrs = normalize_yaml_dict_keys(fp_data.get("attrs", {}))
-
- def build_rules(rule_dicts: List[Dict[str, Any]]) -> List[FailureRule]:
- rules_local: List[FailureRule] = []
- for rule_dict in rule_dicts:
- entity_scope = rule_dict.get("entity_scope", "node")
- conditions_data = rule_dict.get("conditions", [])
- if not isinstance(conditions_data, list):
- raise ValueError(
- "Each rule's 'conditions' must be a list if present."
- )
- conditions: List[FailureCondition] = []
- for cond_dict in conditions_data:
- conditions.append(
- FailureCondition(
- attr=cond_dict["attr"],
- operator=cond_dict["operator"],
- value=cond_dict["value"],
- )
- )
-
- rule = FailureRule(
- entity_scope=entity_scope,
- conditions=conditions,
- logic=rule_dict.get("logic", "or"),
- rule_type=rule_dict.get("rule_type", "all"),
- probability=rule_dict.get("probability", 1.0),
- count=rule_dict.get("count", 1),
- weight_by=rule_dict.get("weight_by"),
- )
- rules_local.append(rule)
- return rules_local
-
- # Extract weighted modes (required)
- modes: List[FailureMode] = []
- modes_data = fp_data.get("modes", [])
- if not isinstance(modes_data, list) or not modes_data:
- raise ValueError("failure_policy requires non-empty 'modes' list.")
- for _m_idx, m in enumerate(modes_data):
- if not isinstance(m, dict):
- raise ValueError("Each mode must be a mapping.")
- try:
- weight = float(m.get("weight", 0.0))
- except (TypeError, ValueError) as exc:
- raise ValueError("Each mode 'weight' must be a number.") from exc
- mode_rules_data = m.get("rules", [])
- if not isinstance(mode_rules_data, list):
- raise ValueError("Each mode 'rules' must be a list.")
- mode_rules = build_rules(mode_rules_data)
- mode_attrs = normalize_yaml_dict_keys(m.get("attrs", {}))
- modes.append(FailureMode(weight=weight, rules=mode_rules, attrs=mode_attrs))
-
- # Derive seed for this failure policy
- policy_seed = seed_manager.derive_seed("failure_policy", policy_name)
-
- return FailurePolicy(
- attrs=attrs,
- fail_risk_groups=fail_srg,
- fail_risk_group_children=fail_rg_children,
- seed=policy_seed,
- modes=modes,
- )
-
- @staticmethod
- def _build_workflow_steps(
- workflow_data: List[Dict[str, Any]],
- seed_manager: SeedManager,
- ) -> List[WorkflowStep]:
- """Converts workflow step dictionaries into WorkflowStep objects.
-
- Each step dict must have a "step_type" referencing a registered workflow
- step in WORKFLOW_STEP_REGISTRY. All other keys in the dict are passed
- to that step's constructor as keyword arguments.
-
- Args:
- workflow_data (List[Dict[str, Any]]): A list of dictionaries describing
- each workflow step, for example:
- [
- {
- "step_type": "MyStep",
- "arg1": "value1",
- "arg2": "value2",
- },
- ...
- ]
- seed_manager (SeedManager): Seed manager for reproducible operations.
-
- Returns:
- List[WorkflowStep]: A list of instantiated WorkflowStep objects.
-
- Raises:
- ValueError: If any step lacks "step_type" or references an unknown type.
- TypeError: If step initialization fails due to invalid arguments.
- """
- if not isinstance(workflow_data, list):
- raise ValueError("'workflow' must be a list if present.")
-
- steps: List[WorkflowStep] = []
- # Track assigned names to enforce uniqueness and avoid result/metadata collisions
- assigned_names: set[str] = set()
- for step_index, step_info in enumerate(workflow_data):
- step_type = step_info.get("step_type")
- if not step_type:
- raise ValueError(
- "Each workflow entry must have a 'step_type' field "
- "indicating the WorkflowStep subclass to use."
- )
-
- step_cls = WORKFLOW_STEP_REGISTRY.get(step_type)
- if not step_cls:
- raise ValueError(f"Unrecognized 'step_type': {step_type}")
-
- ctor_args = {k: v for k, v in step_info.items() if k != "step_type"}
- # Normalize constructor argument keys to handle YAML boolean keys
- normalized_ctor_args = normalize_yaml_dict_keys(ctor_args)
-
- # Resolve a concrete step name to prevent collisions in results/metadata
- raw_name = normalized_ctor_args.get("name")
- # Treat blank/whitespace names as missing
- if isinstance(raw_name, str) and raw_name.strip() == "":
- raw_name = None
- step_name = raw_name or f"{step_type}_{step_index}"
-
- # Enforce uniqueness across the workflow
- if step_name in assigned_names:
- raise ValueError(
- f"Duplicate workflow step name '{step_name}'. "
- "Each step must have a unique name."
- )
- assigned_names.add(step_name)
-
- # Ensure the constructed WorkflowStep receives the resolved unique name
- normalized_ctor_args["name"] = step_name
-
- # Determine seed provenance and possibly derive a step seed
- seed_source: str = "none"
- if (
- "seed" in normalized_ctor_args
- and normalized_ctor_args["seed"] is not None
- ):
- seed_source = "explicit-step"
- else:
- derived_seed = seed_manager.derive_seed("workflow_step", step_name)
- if derived_seed is not None:
- normalized_ctor_args["seed"] = derived_seed
- seed_source = "scenario-derived"
-
- step_obj = step_cls(**normalized_ctor_args)
- # Attach internal provenance for metadata collection
- try:
- step_obj._seed_source = seed_source
- except Exception:
- pass
- steps.append(step_obj)
-
- return steps
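The extracted builders take a `derive_seed` callback rather than a SeedManager, which keeps them testable in isolation. A hypothetical stand-in showing the contract (SeedManager's real hashing is not shown in this diff):

```python
# Hypothetical derive_seed stand-in: deterministic per (namespace, name),
# None when the scenario itself is unseeded.
import hashlib

def make_derive_seed(root_seed, namespace):
    def derive(name: str):
        if root_seed is None:
            return None
        digest = hashlib.sha256(f"{root_seed}:{namespace}:{name}".encode()).digest()
        return int.from_bytes(digest[:8], "big")
    return derive

derive = make_derive_seed(42, "workflow_step")
assert derive("step_0") == derive("step_0")  # deterministic
assert derive("step_0") != derive("step_1")  # independent per step name
```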
diff --git a/ngraph/solver/__init__.py b/ngraph/solver/__init__.py
index 849c518..4ef4388 100644
--- a/ngraph/solver/__init__.py
+++ b/ngraph/solver/__init__.py
@@ -1,7 +1,7 @@
"""High-level solver interfaces binding models to algorithm implementations.
This package exposes problem-oriented APIs (e.g., max-flow between groups in a
-`Network` or `NetworkView`) that wrap lower-level algorithm modules. These
+`Network`) that wrap lower-level algorithm modules. These
wrappers avoid mutating the input model by constructing an internal graph with
pseudo source/sink nodes when required.
"""
diff --git a/ngraph/solver/helpers.py b/ngraph/solver/helpers.py
deleted file mode 100644
index 952f29f..0000000
--- a/ngraph/solver/helpers.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from __future__ import annotations
-
-"""Private utilities shared by solver front-ends.
-
-This module is intentionally minimal. It provides a stable import path for
-solver helper utilities so callers can depend on the namespace even when
-no helpers are currently required. No public APIs are exposed.
-"""
diff --git a/ngraph/solver/maxflow.py b/ngraph/solver/maxflow.py
index f555d90..2b58220 100644
--- a/ngraph/solver/maxflow.py
+++ b/ngraph/solver/maxflow.py
@@ -1,788 +1,918 @@
-"""Problem-level max-flow API bound to the model layer.
-
-Functions here operate on a model context that provides:
-
-- to_strict_multidigraph(add_reverse: bool = True) -> StrictMultiDiGraph
-- select_node_groups_by_path(path: str) -> dict[str, list[Node]]
-
-They accept either a `Network` or a `NetworkView`. The input context is not
-mutated. Pseudo source and sink nodes are attached on a working graph when
-computing flows between groups.
+"""Max-flow computation between node groups with NetGraph-Core integration.
+
+This module provides max-flow analysis for Network models by transforming
+multi-source/multi-sink problems into single-source/single-sink problems
+using pseudo nodes.
+
+Key functions:
+- max_flow(): Compute max flow values between node groups
+- max_flow_with_details(): Max flow with cost distribution details
+- sensitivity_analysis(): Identify critical edges and flow reduction
+- build_maxflow_cache(): Build cache for efficient repeated analysis
+
+Graph caching (via MaxFlowGraphCache) enables efficient repeated analysis with
+different exclusion sets by building the graph with pseudo nodes once and using
+O(|excluded|) masks for exclusions. Disabled nodes/links are automatically
+handled via the underlying GraphCache from ngraph.adapters.core.
"""
from __future__ import annotations
-from typing import Any, Dict, List, Optional, Tuple
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Set, Tuple
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.algorithms.max_flow import (
- calc_max_flow,
- run_sensitivity,
-)
-from ngraph.algorithms.max_flow import (
- saturated_edges as _algo_saturated_edges,
+import netgraph_core
+
+from ngraph.adapters.core import (
+ AugmentationEdge,
+ GraphCache,
+ build_edge_mask,
+ build_graph,
+ build_graph_cache,
+ build_node_mask,
+ get_disabled_exclusions,
)
-from ngraph.algorithms.types import FlowSummary
+from ngraph.model.network import Network
+from ngraph.types.base import FlowPlacement
+from ngraph.types.dto import FlowSummary
-try:
- from typing import TYPE_CHECKING
+# Large capacity for pseudo edges (avoid float('inf') due to Core limitation)
+LARGE_CAPACITY = 1e15
- if TYPE_CHECKING: # pragma: no cover - typing only
- from ngraph.graph.strict_multidigraph import StrictMultiDiGraph # noqa: F401
- from ngraph.model.network import Network, Node # noqa: F401
-except Exception: # pragma: no cover - safety in unusual environments
- pass
+@dataclass
+class MaxFlowGraphCache:
+ """Pre-built graph with pseudo nodes for efficient repeated max-flow analysis.
-def max_flow(
- context: Any,
+ Composes a GraphCache with additional pseudo node mappings for max-flow.
+
+ Attributes:
+ base_cache: Underlying GraphCache with graph, mappers, and disabled topology.
+ pair_to_pseudo_ids: Mapping from (src_label, snk_label) to (pseudo_src_id, pseudo_snk_id).
+ """
+
+ base_cache: GraphCache
+ pair_to_pseudo_ids: Dict[Tuple[str, str], Tuple[int, int]] = field(
+ default_factory=dict
+ )
+
+ # Convenience properties for backward compatibility
+ @property
+ def graph_handle(self) -> netgraph_core.Graph:
+ return self.base_cache.graph_handle
+
+ @property
+ def multidigraph(self) -> netgraph_core.StrictMultiDiGraph:
+ return self.base_cache.multidigraph
+
+ @property
+ def edge_mapper(self):
+ return self.base_cache.edge_mapper
+
+ @property
+ def node_mapper(self):
+ return self.base_cache.node_mapper
+
+ @property
+ def algorithms(self) -> netgraph_core.Algorithms:
+ return self.base_cache.algorithms
+
+ @property
+ def disabled_node_ids(self) -> Set[int]:
+ return self.base_cache.disabled_node_ids
+
+ @property
+ def disabled_link_ids(self) -> Set[str]:
+ return self.base_cache.disabled_link_ids
+
+ @property
+ def link_id_to_edge_indices(self) -> Dict[str, List[int]]:
+ return self.base_cache.link_id_to_edge_indices
+
+
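The delegating properties are a composition-over-inheritance choice: call sites written against the old flat cache layout keep working without changes. Illustratively (assuming an already-built `network`):

```python
# The convenience properties simply forward to base_cache.
cache = build_maxflow_cache(network, "dc.*", "edge.*")
assert cache.graph_handle is cache.base_cache.graph_handle
assert cache.node_mapper is cache.base_cache.node_mapper
```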
+def build_maxflow_cache(
+ network: Network,
source_path: str,
sink_path: str,
*,
mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
-) -> Dict[Tuple[str, str], float]:
- """Compute max flow between groups selected from the context.
+) -> MaxFlowGraphCache:
+ """Build cached graph with pseudo nodes for efficient repeated max-flow analysis.
- Creates a working graph from the context, adds a pseudo source attached to
- the selected source nodes and a pseudo sink attached to the selected sink
- nodes, then runs the max-flow routine.
+ Constructs a single graph with all pseudo source/sink nodes for all
+ source/sink pairs, enabling O(|excluded|) mask building per iteration
+ instead of O(V+E) graph reconstruction.
Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
- source_path: Selection expression for source groups.
- sink_path: Selection expression for sink groups.
- mode: Aggregation strategy. "combine" considers all sources as one
- group and all sinks as one group. "pairwise" evaluates each
- source-label and sink-label pair separately.
- shortest_path: If True, perform a single augmentation along the first
- shortest path instead of the full max-flow.
- flow_placement: Strategy for splitting flow among equal-cost parallel
- edges.
+ network: Network instance.
+ source_path: Selection expression for source node groups.
+ sink_path: Selection expression for sink node groups.
+ mode: "combine" (single pair) or "pairwise" (N×M pairs).
Returns:
- Dict[Tuple[str, str], float]: Total flow per (source_label, sink_label).
+ MaxFlowGraphCache with pre-built graph and pseudo node mappings.
Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is not one of {"combine", "pairwise"}.
+ ValueError: If no matching sources or sinks are found.
"""
- src_groups = context.select_node_groups_by_path(source_path)
- snk_groups = context.select_node_groups_by_path(sink_path)
+ src_groups = network.select_node_groups_by_path(source_path)
+ snk_groups = network.select_node_groups_by_path(sink_path)
if not src_groups:
raise ValueError(f"No source nodes found matching '{source_path}'.")
if not snk_groups:
raise ValueError(f"No sink nodes found matching '{sink_path}'.")
- base_graph = context.to_strict_multidigraph(compact=True).copy()
+ # Collect all augmentation edges for ALL pairs
+ augmentations: List[AugmentationEdge] = []
+ pair_to_pseudo_names: Dict[Tuple[str, str], Tuple[str, str]] = {}
+
+ def _get_active_node_names(nodes: List) -> List[str]:
+ """Get names of non-disabled nodes."""
+ return [n.name for n in nodes if not n.disabled]
if mode == "combine":
- combined_src_nodes: list = []
- combined_snk_nodes: list = []
+ # Single combined pair
combined_src_label = "|".join(sorted(src_groups.keys()))
combined_snk_label = "|".join(sorted(snk_groups.keys()))
+ combined_src_names = []
for group_nodes in src_groups.values():
- combined_src_nodes.extend(group_nodes)
+ combined_src_names.extend(_get_active_node_names(group_nodes))
+ combined_snk_names = []
for group_nodes in snk_groups.values():
- combined_snk_nodes.extend(group_nodes)
+ combined_snk_names.extend(_get_active_node_names(group_nodes))
- if not combined_src_nodes or not combined_snk_nodes:
- return {(combined_src_label, combined_snk_label): 0.0}
+ # Remove overlap
+ combined_src_names = [
+ n for n in combined_src_names if n not in combined_snk_names
+ ]
- # Overlap -> zero flow
- if {n.name for n in combined_src_nodes} & {n.name for n in combined_snk_nodes}:
- flow_val = 0.0
- else:
- flow_val = _compute_flow_single_group(
- context,
- combined_src_nodes,
- combined_snk_nodes,
- shortest_path,
- flow_placement,
- prebuilt_graph=base_graph,
+ if combined_src_names and combined_snk_names:
+ pseudo_src = "__PSEUDO_SRC__"
+ pseudo_snk = "__PSEUDO_SNK__"
+
+ for src_name in combined_src_names:
+ augmentations.append(
+ AugmentationEdge(pseudo_src, src_name, LARGE_CAPACITY, 0)
+ )
+ for snk_name in combined_snk_names:
+ augmentations.append(
+ AugmentationEdge(snk_name, pseudo_snk, LARGE_CAPACITY, 0)
+ )
+
+ pair_to_pseudo_names[(combined_src_label, combined_snk_label)] = (
+ pseudo_src,
+ pseudo_snk,
)
- return {(combined_src_label, combined_snk_label): flow_val}
- if mode == "pairwise":
- results: Dict[Tuple[str, str], float] = {}
+ elif mode == "pairwise":
+ # N × M pairs
for src_label, src_nodes in src_groups.items():
for snk_label, snk_nodes in snk_groups.items():
- if src_nodes and snk_nodes:
- if {n.name for n in src_nodes} & {n.name for n in snk_nodes}:
- flow_val = 0.0
- else:
- flow_val = _compute_flow_single_group(
- context,
- src_nodes,
- snk_nodes,
- shortest_path,
- flow_placement,
- prebuilt_graph=base_graph,
- )
- else:
- flow_val = 0.0
- results[(src_label, snk_label)] = flow_val
- return results
+ active_src_names = _get_active_node_names(src_nodes)
+ active_snk_names = _get_active_node_names(snk_nodes)
- raise ValueError(f"Invalid mode '{mode}'. Must be 'combine' or 'pairwise'.")
+ # Skip overlapping pairs
+ if set(active_src_names) & set(active_snk_names):
+ continue
+ if not active_src_names or not active_snk_names:
+ continue
+
+ pseudo_src = f"__PSEUDO_SRC_{src_label}__"
+ pseudo_snk = f"__PSEUDO_SNK_{snk_label}__"
+ for src_name in active_src_names:
+ augmentations.append(
+ AugmentationEdge(pseudo_src, src_name, LARGE_CAPACITY, 0)
+ )
+ for snk_name in active_snk_names:
+ augmentations.append(
+ AugmentationEdge(snk_name, pseudo_snk, LARGE_CAPACITY, 0)
+ )
-def max_flow_with_summary(
- context: Any,
+ pair_to_pseudo_names[(src_label, snk_label)] = (pseudo_src, pseudo_snk)
+
+ else:
+ raise ValueError(f"Invalid mode '{mode}'. Must be 'combine' or 'pairwise'.")
+
+ # Build base cache with all pseudo nodes (exclusions handled via masks)
+ base_cache = build_graph_cache(
+ network,
+ augmentations=augmentations if augmentations else None,
+ )
+
+ # Pre-compute pseudo node IDs from names
+ pair_to_pseudo_ids: Dict[Tuple[str, str], Tuple[int, int]] = {}
+ for pair_key, (pseudo_src_name, pseudo_snk_name) in pair_to_pseudo_names.items():
+ pseudo_src_id = base_cache.node_mapper.to_id(pseudo_src_name)
+ pseudo_snk_id = base_cache.node_mapper.to_id(pseudo_snk_name)
+ pair_to_pseudo_ids[pair_key] = (pseudo_src_id, pseudo_snk_id)
+
+ return MaxFlowGraphCache(
+ base_cache=base_cache,
+ pair_to_pseudo_ids=pair_to_pseudo_ids,
+ )
+
+
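A hedged usage sketch of the cache fast path: build once, then sweep exclusion sets. `network` is assumed to be an already-constructed Network; the link ID is hypothetical:

```python
# Build the pseudo-node graph once, then reuse it across iterations.
cache = build_maxflow_cache(network, "dc.*", "edge.*", mode="pairwise")

baseline = max_flow(network, "dc.*", "edge.*", mode="pairwise", _cache=cache)
degraded = max_flow(
    network,
    "dc.*",
    "edge.*",
    mode="pairwise",
    excluded_links={"link-123"},  # hypothetical link ID
    _cache=cache,  # O(|excluded|) mask build instead of O(V+E) rebuild
)
```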
+def max_flow(
+ network: Network,
source_path: str,
sink_path: str,
*,
mode: str = "combine",
shortest_path: bool = False,
flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
-) -> Dict[Tuple[str, str], Tuple[float, FlowSummary]]:
- """Compute max flow and return a summary for each group pair.
+ excluded_nodes: Optional[Set[str]] = None,
+ excluded_links: Optional[Set[str]] = None,
+ _cache: Optional[MaxFlowGraphCache] = None,
+) -> Dict[Tuple[str, str], float]:
+ """Compute max flow between node groups in a network.
- The summary includes total flow, per-edge flow, residual capacity,
- reachable set from the source in the residual graph, min-cut edges, and a
- cost distribution over augmentation steps.
+ This function calculates the maximum flow from a set of source nodes
+ to a set of sink nodes within the provided network.
+
+ When `_cache` is provided, uses O(|excluded|) mask building instead of
+ O(V+E) graph reconstruction for efficient repeated analysis.
Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
- source_path: Selection expression for source groups.
- sink_path: Selection expression for sink groups.
- mode: "combine" or "pairwise". See ``max_flow``.
- shortest_path: If True, perform only one augmentation step.
- flow_placement: Strategy for splitting among equal-cost parallel edges.
+ network: Network instance containing topology and node/link data.
+ source_path: Selection expression for source node groups.
+ sink_path: Selection expression for sink node groups.
+ mode: "combine" (all sources to all sinks) or "pairwise" (each pair separately).
+ shortest_path: If True, restricts flow to shortest paths only.
+ flow_placement: Strategy for distributing flow among equal-cost edges.
+ excluded_nodes: Optional set of node names to exclude.
+ excluded_links: Optional set of link IDs to exclude.
+ _cache: Pre-built cache for efficient repeated analysis.
Returns:
- Dict[Tuple[str, str], Tuple[float, FlowSummary]]: For each
- (source_label, sink_label), the total flow and the associated summary.
+ Dict mapping (source_label, sink_label) to total flow value.
Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is invalid.
+ ValueError: If no matching sources or sinks are found.
"""
- src_groups = context.select_node_groups_by_path(source_path)
- snk_groups = context.select_node_groups_by_path(sink_path)
+ core_flow_placement = _map_flow_placement(flow_placement)
+
+ # Fast path: use cached graph with masks
+ if _cache is not None:
+ node_mask = None
+ edge_mask = None
+ # Build masks if there are disabled nodes/links in cache or explicit exclusions
+ if (
+ excluded_nodes
+ or excluded_links
+ or _cache.disabled_node_ids
+ or _cache.disabled_link_ids
+ ):
+ node_mask = build_node_mask(_cache.base_cache, excluded_nodes)
+ edge_mask = build_edge_mask(_cache.base_cache, excluded_links)
+
+ results: Dict[Tuple[str, str], float] = {}
+ for pair_key, (
+ pseudo_src_id,
+ pseudo_snk_id,
+ ) in _cache.pair_to_pseudo_ids.items():
+ flow_value, _ = _cache.algorithms.max_flow(
+ _cache.graph_handle,
+ pseudo_src_id,
+ pseudo_snk_id,
+ flow_placement=core_flow_placement,
+ shortest_path=shortest_path,
+ node_mask=node_mask,
+ edge_mask=edge_mask,
+ )
+ results[pair_key] = flow_value
+
+ # Handle pairs that weren't cached (overlapping src/snk)
+ src_groups = network.select_node_groups_by_path(source_path)
+ snk_groups = network.select_node_groups_by_path(sink_path)
+
+ if mode == "combine":
+ combined_src_label = "|".join(sorted(src_groups.keys()))
+ combined_snk_label = "|".join(sorted(snk_groups.keys()))
+ if (combined_src_label, combined_snk_label) not in results:
+ results[(combined_src_label, combined_snk_label)] = 0.0
+ elif mode == "pairwise":
+ for src_label in src_groups:
+ for snk_label in snk_groups:
+ if (src_label, snk_label) not in results:
+ results[(src_label, snk_label)] = 0.0
+
+ return results
+
+    # Slow path: build graph from scratch
+ src_groups = network.select_node_groups_by_path(source_path)
+ snk_groups = network.select_node_groups_by_path(sink_path)
if not src_groups:
raise ValueError(f"No source nodes found matching '{source_path}'.")
if not snk_groups:
raise ValueError(f"No sink nodes found matching '{sink_path}'.")
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+
+ def _filter_active_nodes(nodes: List) -> List[str]:
+ return [
+ n.name
+ for n in nodes
+ if not n.disabled
+ and (excluded_nodes is None or n.name not in excluded_nodes)
+ ]
+
if mode == "combine":
- combined_src_nodes: list = []
- combined_snk_nodes: list = []
combined_src_label = "|".join(sorted(src_groups.keys()))
combined_snk_label = "|".join(sorted(snk_groups.keys()))
+
+ combined_src_names = []
for group_nodes in src_groups.values():
- combined_src_nodes.extend(group_nodes)
+ combined_src_names.extend(_filter_active_nodes(group_nodes))
+ combined_snk_names = []
for group_nodes in snk_groups.values():
- combined_snk_nodes.extend(group_nodes)
- if not combined_src_nodes or not combined_snk_nodes:
- empty = _empty_summary()
- return {(combined_src_label, combined_snk_label): (0.0, empty)}
- if {n.name for n in combined_src_nodes} & {n.name for n in combined_snk_nodes}:
- empty = _empty_summary()
- return {(combined_src_label, combined_snk_label): (0.0, empty)}
- flow_val, summary = _compute_flow_with_summary_single_group(
- context,
- combined_src_nodes,
- combined_snk_nodes,
- shortest_path,
- flow_placement,
- )
- return {(combined_src_label, combined_snk_label): (flow_val, summary)}
-
- if mode == "pairwise":
- results: Dict[Tuple[str, str], Tuple[float, FlowSummary]] = {}
- for src_label, src_nodes in src_groups.items():
- for snk_label, snk_nodes in snk_groups.items():
- if src_nodes and snk_nodes:
- if {n.name for n in src_nodes} & {n.name for n in snk_nodes}:
- results[(src_label, snk_label)] = (0.0, _empty_summary())
- else:
- results[(src_label, snk_label)] = (
- _compute_flow_with_summary_single_group(
- context,
- src_nodes,
- snk_nodes,
- shortest_path,
- flow_placement,
- )
- )
- else:
- results[(src_label, snk_label)] = (0.0, _empty_summary())
- return results
-
- raise ValueError(f"Invalid mode '{mode}'. Must be 'combine' or 'pairwise'.")
+ combined_snk_names.extend(_filter_active_nodes(group_nodes))
+ if not combined_src_names or not combined_snk_names:
+ return {(combined_src_label, combined_snk_label): 0.0}
+ if set(combined_src_names) & set(combined_snk_names):
+ return {(combined_src_label, combined_snk_label): 0.0}
-def max_flow_with_graph(
- context: Any,
- source_path: str,
- sink_path: str,
- *,
- mode: str = "combine",
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
-) -> Dict[Tuple[str, str], Tuple[float, "StrictMultiDiGraph"]]:
- """Compute max flow and return the mutated flow graph for each pair.
+ augmentations = []
+ pseudo_src = "__PSEUDO_SRC__"
+ pseudo_snk = "__PSEUDO_SNK__"
- Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
- source_path: Selection expression for source groups.
- sink_path: Selection expression for sink groups.
- mode: "combine" or "pairwise". See ``max_flow``.
- shortest_path: If True, perform only one augmentation step.
- flow_placement: Strategy for splitting among equal-cost parallel edges.
+ for src_name in combined_src_names:
+ augmentations.append(
+ AugmentationEdge(pseudo_src, src_name, LARGE_CAPACITY, 0)
+ )
+ for snk_name in combined_snk_names:
+ augmentations.append(
+ AugmentationEdge(snk_name, pseudo_snk, LARGE_CAPACITY, 0)
+ )
- Returns:
- Dict[Tuple[str, str], Tuple[float, StrictMultiDiGraph]]: For each
- (source_label, sink_label), the total flow and the flow-assigned graph.
+ # Include disabled nodes/links in exclusions
+ full_excluded_nodes, full_excluded_links = get_disabled_exclusions(
+ network, excluded_nodes, excluded_links
+ )
- Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is invalid.
- """
- src_groups = context.select_node_groups_by_path(source_path)
- snk_groups = context.select_node_groups_by_path(sink_path)
+ graph_handle, _, _, node_mapper = build_graph(
+ network,
+ augmentations=augmentations,
+ excluded_nodes=full_excluded_nodes,
+ excluded_links=full_excluded_links,
+ )
- if not src_groups:
- raise ValueError(f"No source nodes found matching '{source_path}'.")
- if not snk_groups:
- raise ValueError(f"No sink nodes found matching '{sink_path}'.")
+ pseudo_src_id = node_mapper.to_id(pseudo_src)
+ pseudo_snk_id = node_mapper.to_id(pseudo_snk)
- if mode == "combine":
- combined_src_nodes: list = []
- combined_snk_nodes: list = []
- combined_src_label = "|".join(sorted(src_groups.keys()))
- combined_snk_label = "|".join(sorted(snk_groups.keys()))
- for group_nodes in src_groups.values():
- combined_src_nodes.extend(group_nodes)
- for group_nodes in snk_groups.values():
- combined_snk_nodes.extend(group_nodes)
- if not combined_src_nodes or not combined_snk_nodes:
- base_graph = context.to_strict_multidigraph(compact=True).copy()
- return {(combined_src_label, combined_snk_label): (0.0, base_graph)}
- if {n.name for n in combined_src_nodes} & {n.name for n in combined_snk_nodes}:
- base_graph = context.to_strict_multidigraph(compact=True).copy()
- return {(combined_src_label, combined_snk_label): (0.0, base_graph)}
- flow_val, flow_graph = _compute_flow_with_graph_single_group(
- context,
- combined_src_nodes,
- combined_snk_nodes,
- shortest_path,
- flow_placement,
+ flow_value, _ = algs.max_flow(
+ graph_handle,
+ pseudo_src_id,
+ pseudo_snk_id,
+ flow_placement=core_flow_placement,
+ shortest_path=shortest_path,
)
- return {(combined_src_label, combined_snk_label): (flow_val, flow_graph)}
+ return {(combined_src_label, combined_snk_label): flow_value}
if mode == "pairwise":
- results: Dict[Tuple[str, str], Tuple[float, "StrictMultiDiGraph"]] = {}
+ # Include disabled nodes/links in exclusions
+ full_excluded_nodes, full_excluded_links = get_disabled_exclusions(
+ network, excluded_nodes, excluded_links
+ )
+
+ results = {}
for src_label, src_nodes in src_groups.items():
for snk_label, snk_nodes in snk_groups.items():
- if src_nodes and snk_nodes:
- if {n.name for n in src_nodes} & {n.name for n in snk_nodes}:
- base_graph = context.to_strict_multidigraph(compact=True).copy()
- results[(src_label, snk_label)] = (0.0, base_graph)
- else:
- results[(src_label, snk_label)] = (
- _compute_flow_with_graph_single_group(
- context,
- src_nodes,
- snk_nodes,
- shortest_path,
- flow_placement,
- )
- )
- else:
- base_graph = context.to_strict_multidigraph(compact=True).copy()
- results[(src_label, snk_label)] = (0.0, base_graph)
+ active_src_names = _filter_active_nodes(src_nodes)
+ active_snk_names = _filter_active_nodes(snk_nodes)
+
+ if not active_src_names or not active_snk_names:
+ results[(src_label, snk_label)] = 0.0
+ continue
+ if set(active_src_names) & set(active_snk_names):
+ results[(src_label, snk_label)] = 0.0
+ continue
+
+ augmentations = []
+ pseudo_src = f"__PSEUDO_SRC_{src_label}__"
+ pseudo_snk = f"__PSEUDO_SNK_{snk_label}__"
+
+ for src_name in active_src_names:
+ augmentations.append(
+ AugmentationEdge(pseudo_src, src_name, LARGE_CAPACITY, 0)
+ )
+ for snk_name in active_snk_names:
+ augmentations.append(
+ AugmentationEdge(snk_name, pseudo_snk, LARGE_CAPACITY, 0)
+ )
+
+ graph_handle, _, _, node_mapper = build_graph(
+ network,
+ augmentations=augmentations,
+ excluded_nodes=full_excluded_nodes,
+ excluded_links=full_excluded_links,
+ )
+
+ pseudo_src_id = node_mapper.to_id(pseudo_src)
+ pseudo_snk_id = node_mapper.to_id(pseudo_snk)
+
+ flow_value, _ = algs.max_flow(
+ graph_handle,
+ pseudo_src_id,
+ pseudo_snk_id,
+ flow_placement=core_flow_placement,
+ shortest_path=shortest_path,
+ )
+ results[(src_label, snk_label)] = flow_value
return results
raise ValueError(f"Invalid mode '{mode}'. Must be 'combine' or 'pairwise'.")
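For orientation, the two modes differ only in result shape (labels and values below are hypothetical):

```python
# "combine": one entry keyed by the joined group labels.
flows = max_flow(network, "dc[12]", "edge[12]", mode="combine")
# e.g. {("dc1|dc2", "edge1|edge2"): 40.0}

# "pairwise": an N x M grid of (source_label, sink_label) keys.
flows = max_flow(network, "dc[12]", "edge[12]", mode="pairwise")
# e.g. {("dc1", "edge1"): 10.0, ("dc1", "edge2"): 10.0,
#       ("dc2", "edge1"): 10.0, ("dc2", "edge2"): 10.0}
```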
-def max_flow_detailed(
- context: Any,
+def max_flow_with_details(
+ network: Network,
source_path: str,
sink_path: str,
*,
mode: str = "combine",
shortest_path: bool = False,
flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
-) -> Dict[Tuple[str, str], Tuple[float, FlowSummary, "StrictMultiDiGraph"]]:
- """Compute max flow, return summary and flow graph for each pair.
+ excluded_nodes: Optional[Set[str]] = None,
+ excluded_links: Optional[Set[str]] = None,
+ _cache: Optional[MaxFlowGraphCache] = None,
+) -> Dict[Tuple[str, str], FlowSummary]:
+ """Compute max flow with detailed results including cost distribution.
+
+ When `_cache` is provided, uses O(|excluded|) mask building instead of
+ O(V+E) graph reconstruction for efficient repeated analysis.
Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
+ network: Network instance.
source_path: Selection expression for source groups.
sink_path: Selection expression for sink groups.
- mode: "combine" or "pairwise". See ``max_flow``.
- shortest_path: If True, perform only one augmentation step.
- flow_placement: Strategy for splitting among equal-cost parallel edges.
+ mode: "combine" or "pairwise".
+ shortest_path: If True, restricts flow to shortest paths.
+ flow_placement: Flow placement strategy.
+ excluded_nodes: Optional set of node names to exclude.
+ excluded_links: Optional set of link IDs to exclude.
+ _cache: Pre-built cache for efficient repeated analysis.
Returns:
- Dict[Tuple[str, str], Tuple[float, FlowSummary, StrictMultiDiGraph]]:
- For each (source_label, sink_label), the total flow, a summary, and the
- flow-assigned graph.
-
- Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is invalid.
+ Dict mapping (source_label, sink_label) to FlowSummary.
"""
- src_groups = context.select_node_groups_by_path(source_path)
- snk_groups = context.select_node_groups_by_path(sink_path)
+ core_flow_placement = _map_flow_placement(flow_placement)
+
+ def _construct_flow_summary(flow_value: float, core_summary=None) -> FlowSummary:
+ cost_dist = {}
+ if core_summary is not None and len(core_summary.costs) > 0:
+ cost_dist = {
+ float(c): float(f)
+ for c, f in zip(core_summary.costs, core_summary.flows, strict=False)
+ }
+ return FlowSummary(
+ total_flow=flow_value,
+ cost_distribution=cost_dist,
+ min_cut=(),
+ )
+
+ # Fast path: use cached graph with masks
+ if _cache is not None:
+ node_mask = None
+ edge_mask = None
+ # Build masks if there are disabled nodes/links in cache or explicit exclusions
+ if (
+ excluded_nodes
+ or excluded_links
+ or _cache.disabled_node_ids
+ or _cache.disabled_link_ids
+ ):
+ node_mask = build_node_mask(_cache.base_cache, excluded_nodes)
+ edge_mask = build_edge_mask(_cache.base_cache, excluded_links)
+
+ results: Dict[Tuple[str, str], FlowSummary] = {}
+ for pair_key, (
+ pseudo_src_id,
+ pseudo_snk_id,
+ ) in _cache.pair_to_pseudo_ids.items():
+ flow_value, core_summary = _cache.algorithms.max_flow(
+ _cache.graph_handle,
+ pseudo_src_id,
+ pseudo_snk_id,
+ flow_placement=core_flow_placement,
+ shortest_path=shortest_path,
+ node_mask=node_mask,
+ edge_mask=edge_mask,
+ )
+ results[pair_key] = _construct_flow_summary(flow_value, core_summary)
+
+ # Handle pairs that weren't cached
+ src_groups = network.select_node_groups_by_path(source_path)
+ snk_groups = network.select_node_groups_by_path(sink_path)
+
+ if mode == "combine":
+ combined_src_label = "|".join(sorted(src_groups.keys()))
+ combined_snk_label = "|".join(sorted(snk_groups.keys()))
+ if (combined_src_label, combined_snk_label) not in results:
+ results[(combined_src_label, combined_snk_label)] = (
+ _construct_flow_summary(0.0)
+ )
+ elif mode == "pairwise":
+ for src_label in src_groups:
+ for snk_label in snk_groups:
+ if (src_label, snk_label) not in results:
+ results[(src_label, snk_label)] = _construct_flow_summary(0.0)
+
+ return results
+
+ # Slow path: build graph from scratch
+ src_groups = network.select_node_groups_by_path(source_path)
+ snk_groups = network.select_node_groups_by_path(sink_path)
if not src_groups:
raise ValueError(f"No source nodes found matching '{source_path}'.")
if not snk_groups:
raise ValueError(f"No sink nodes found matching '{sink_path}'.")
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+
+ def _filter_active_nodes(nodes: List) -> List[str]:
+ return [
+ n.name
+ for n in nodes
+ if not n.disabled
+ and (excluded_nodes is None or n.name not in excluded_nodes)
+ ]
+
if mode == "combine":
- combined_src_nodes: list = []
- combined_snk_nodes: list = []
combined_src_label = "|".join(sorted(src_groups.keys()))
combined_snk_label = "|".join(sorted(snk_groups.keys()))
+
+ combined_src_names = []
for group_nodes in src_groups.values():
- combined_src_nodes.extend(group_nodes)
+ combined_src_names.extend(_filter_active_nodes(group_nodes))
+ combined_snk_names = []
for group_nodes in snk_groups.values():
- combined_snk_nodes.extend(group_nodes)
- if not combined_src_nodes or not combined_snk_nodes:
- base_graph = context.to_strict_multidigraph().copy()
+ combined_snk_names.extend(_filter_active_nodes(group_nodes))
+
+ if not combined_src_names or not combined_snk_names:
return {
- (combined_src_label, combined_snk_label): (
- 0.0,
- _empty_summary(),
- base_graph,
- )
+ (combined_src_label, combined_snk_label): _construct_flow_summary(0.0)
}
- if {n.name for n in combined_src_nodes} & {n.name for n in combined_snk_nodes}:
- base_graph = context.to_strict_multidigraph().copy()
+ if set(combined_src_names) & set(combined_snk_names):
return {
- (combined_src_label, combined_snk_label): (
- 0.0,
- _empty_summary(),
- base_graph,
- )
+ (combined_src_label, combined_snk_label): _construct_flow_summary(0.0)
}
- flow_val, summary, flow_graph = _compute_flow_detailed_single_group(
- context,
- combined_src_nodes,
- combined_snk_nodes,
- shortest_path,
- flow_placement,
+
+ augmentations = []
+ pseudo_src = "__PSEUDO_SRC__"
+ pseudo_snk = "__PSEUDO_SNK__"
+
+ for src_name in combined_src_names:
+ augmentations.append(
+ AugmentationEdge(pseudo_src, src_name, LARGE_CAPACITY, 0)
+ )
+ for snk_name in combined_snk_names:
+ augmentations.append(
+ AugmentationEdge(snk_name, pseudo_snk, LARGE_CAPACITY, 0)
+ )
+
+ # Include disabled nodes/links in exclusions
+ full_excluded_nodes, full_excluded_links = get_disabled_exclusions(
+ network, excluded_nodes, excluded_links
)
+
+ graph_handle, _, _, node_mapper = build_graph(
+ network,
+ augmentations=augmentations,
+ excluded_nodes=full_excluded_nodes,
+ excluded_links=full_excluded_links,
+ )
+
+ pseudo_src_id = node_mapper.to_id(pseudo_src)
+ pseudo_snk_id = node_mapper.to_id(pseudo_snk)
+
+ flow_value, core_summary = algs.max_flow(
+ graph_handle,
+ pseudo_src_id,
+ pseudo_snk_id,
+ flow_placement=core_flow_placement,
+ shortest_path=shortest_path,
+ )
+
return {
- (combined_src_label, combined_snk_label): (flow_val, summary, flow_graph)
+ (combined_src_label, combined_snk_label): _construct_flow_summary(
+ flow_value, core_summary
+ )
}
if mode == "pairwise":
- results: Dict[
- Tuple[str, str], Tuple[float, FlowSummary, "StrictMultiDiGraph"]
- ] = {}
+ # Include disabled nodes/links in exclusions
+ full_excluded_nodes, full_excluded_links = get_disabled_exclusions(
+ network, excluded_nodes, excluded_links
+ )
+
+ results = {}
for src_label, src_nodes in src_groups.items():
for snk_label, snk_nodes in snk_groups.items():
- if src_nodes and snk_nodes:
- if {n.name for n in src_nodes} & {n.name for n in snk_nodes}:
- base_graph = context.to_strict_multidigraph().copy()
- results[(src_label, snk_label)] = (
- 0.0,
- _empty_summary(),
- base_graph,
- )
- else:
- results[(src_label, snk_label)] = (
- _compute_flow_detailed_single_group(
- context,
- src_nodes,
- snk_nodes,
- shortest_path,
- flow_placement,
- )
- )
- else:
- base_graph = context.to_strict_multidigraph().copy()
- results[(src_label, snk_label)] = (
- 0.0,
- _empty_summary(),
- base_graph,
+ active_src_names = _filter_active_nodes(src_nodes)
+ active_snk_names = _filter_active_nodes(snk_nodes)
+
+ if not active_src_names or not active_snk_names:
+ results[(src_label, snk_label)] = _construct_flow_summary(0.0)
+ continue
+ if set(active_src_names) & set(active_snk_names):
+ results[(src_label, snk_label)] = _construct_flow_summary(0.0)
+ continue
+
+ augmentations = []
+ pseudo_src = f"__PSEUDO_SRC_{src_label}__"
+ pseudo_snk = f"__PSEUDO_SNK_{snk_label}__"
+
+ for src_name in active_src_names:
+ augmentations.append(
+ AugmentationEdge(pseudo_src, src_name, LARGE_CAPACITY, 0)
)
+ for snk_name in active_snk_names:
+ augmentations.append(
+ AugmentationEdge(snk_name, pseudo_snk, LARGE_CAPACITY, 0)
+ )
+
+ graph_handle, _, _, node_mapper = build_graph(
+ network,
+ augmentations=augmentations,
+ excluded_nodes=full_excluded_nodes,
+ excluded_links=full_excluded_links,
+ )
+
+ pseudo_src_id = node_mapper.to_id(pseudo_src)
+ pseudo_snk_id = node_mapper.to_id(pseudo_snk)
+
+ flow_value, core_summary = algs.max_flow(
+ graph_handle,
+ pseudo_src_id,
+ pseudo_snk_id,
+ flow_placement=core_flow_placement,
+ shortest_path=shortest_path,
+ )
+
+ results[(src_label, snk_label)] = _construct_flow_summary(
+ flow_value, core_summary
+ )
return results
raise ValueError(f"Invalid mode '{mode}'. Must be 'combine' or 'pairwise'.")
-def saturated_edges(
- context: Any,
+def sensitivity_analysis(
+ network: Network,
source_path: str,
sink_path: str,
*,
mode: str = "combine",
- tolerance: float = 1e-10,
shortest_path: bool = False,
flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
-) -> Dict[Tuple[str, str], List[Tuple[str, str, str]]]:
- """Identify saturated edges for each selected group pair.
+ excluded_nodes: Optional[Set[str]] = None,
+ excluded_links: Optional[Set[str]] = None,
+ _cache: Optional[MaxFlowGraphCache] = None,
+) -> Dict[Tuple[str, str], Dict[str, float]]:
+ """Analyze sensitivity of max flow to edge failures.
+
+ Identifies critical edges and computes the flow reduction caused by
+ removing each one.
+
+ When `_cache` is provided, uses O(|excluded|) mask building instead of
+ O(V+E) graph reconstruction for efficient repeated analysis.
+
+ The `shortest_path` parameter controls routing semantics:
+ - shortest_path=False (default): Full max-flow; reports all saturated edges.
+ - shortest_path=True: Shortest-path-only (IP/IGP); reports only edges
+ used under ECMP routing.
Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
+ network: Network instance.
source_path: Selection expression for source groups.
sink_path: Selection expression for sink groups.
- mode: "combine" or "pairwise". See ``max_flow``.
- tolerance: Residual capacity threshold to consider an edge saturated.
- shortest_path: If True, perform only one augmentation step.
- flow_placement: Strategy for splitting among equal-cost parallel edges.
+ mode: "combine" or "pairwise".
+ shortest_path: If True, use single-tier shortest-path flow (IP/IGP).
+ If False, use full iterative max-flow (SDN/TE).
+ flow_placement: Flow placement strategy.
+ excluded_nodes: Optional set of node names to exclude.
+ excluded_links: Optional set of link IDs to exclude.
+ _cache: Pre-built cache for efficient repeated analysis.
Returns:
- Dict[Tuple[str, str], list[tuple[str, str, str]]]: For each
- (source_label, sink_label), a list of saturated edges ``(u, v, k)``.
-
- Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is invalid.
+ Dict mapping (source_label, sink_label) to {link_id: flow_reduction}.
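+
+ Example (illustrative; assumes ``net`` is a built ``Network`` where link
+ "L1" carries 5.0 units of the max flow between the groups):
+ >>> sens = sensitivity_analysis(net, "^A$", "^B$", mode="combine")
+ >>> sens[("A", "B")]
+ {'L1': 5.0}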
"""
- src_groups = context.select_node_groups_by_path(source_path)
- snk_groups = context.select_node_groups_by_path(sink_path)
+ core_flow_placement = _map_flow_placement(flow_placement)
+
+ # Fast path: use cached graph with masks
+ if _cache is not None:
+ node_mask = None
+ edge_mask = None
+ # Build masks if there are disabled nodes/links in cache or explicit exclusions
+ if (
+ excluded_nodes
+ or excluded_links
+ or _cache.disabled_node_ids
+ or _cache.disabled_link_ids
+ ):
+ node_mask = build_node_mask(_cache.base_cache, excluded_nodes)
+ edge_mask = build_edge_mask(_cache.base_cache, excluded_links)
+
+ results: Dict[Tuple[str, str], Dict[str, float]] = {}
+ ext_edge_ids = _cache.multidigraph.ext_edge_ids_view()
+
+ for pair_key, (
+ pseudo_src_id,
+ pseudo_snk_id,
+ ) in _cache.pair_to_pseudo_ids.items():
+ sens_results = _cache.algorithms.sensitivity_analysis(
+ _cache.graph_handle,
+ pseudo_src_id,
+ pseudo_snk_id,
+ flow_placement=core_flow_placement,
+ shortest_path=shortest_path,
+ node_mask=node_mask,
+ edge_mask=edge_mask,
+ )
+
+ sensitivity_map: Dict[str, float] = {}
+ for edge_id, delta in sens_results:
+ ext_id = ext_edge_ids[edge_id]
+ link_id = _cache.edge_mapper.to_name(ext_id)
+ if link_id is not None:
+ sensitivity_map[link_id] = delta
+
+ results[pair_key] = sensitivity_map
+
+ # Fill any selected pairs missing from the cache with empty results
+ src_groups = network.select_node_groups_by_path(source_path)
+ snk_groups = network.select_node_groups_by_path(sink_path)
+
+ if mode == "combine":
+ combined_src_label = "|".join(sorted(src_groups.keys()))
+ combined_snk_label = "|".join(sorted(snk_groups.keys()))
+ if (combined_src_label, combined_snk_label) not in results:
+ results[(combined_src_label, combined_snk_label)] = {}
+ elif mode == "pairwise":
+ for src_label in src_groups:
+ for snk_label in snk_groups:
+ if (src_label, snk_label) not in results:
+ results[(src_label, snk_label)] = {}
+
+ return results
+
+ # Slow path: build graph from scratch
+ src_groups = network.select_node_groups_by_path(source_path)
+ snk_groups = network.select_node_groups_by_path(sink_path)
if not src_groups:
raise ValueError(f"No source nodes found matching '{source_path}'.")
if not snk_groups:
raise ValueError(f"No sink nodes found matching '{sink_path}'.")
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+
+ def _filter_active_nodes(nodes: List) -> List[str]:
+ return [
+ n.name
+ for n in nodes
+ if not n.disabled
+ and (excluded_nodes is None or n.name not in excluded_nodes)
+ ]
+
if mode == "combine":
- combined_src_nodes: list = []
- combined_snk_nodes: list = []
combined_src_label = "|".join(sorted(src_groups.keys()))
combined_snk_label = "|".join(sorted(snk_groups.keys()))
+
+ combined_src_names = []
for group_nodes in src_groups.values():
- combined_src_nodes.extend(group_nodes)
+ combined_src_names.extend(_filter_active_nodes(group_nodes))
+ combined_snk_names = []
for group_nodes in snk_groups.values():
- combined_snk_nodes.extend(group_nodes)
- if not combined_src_nodes or not combined_snk_nodes:
- return {(combined_src_label, combined_snk_label): []}
- if {n.name for n in combined_src_nodes} & {n.name for n in combined_snk_nodes}:
- saturated_list: List[Tuple[str, str, str]] = []
- else:
- saturated_list = _compute_saturated_edges_single_group(
- context,
- combined_src_nodes,
- combined_snk_nodes,
- tolerance,
- shortest_path,
- flow_placement,
- )
- return {(combined_src_label, combined_snk_label): saturated_list}
-
- if mode == "pairwise":
- results: Dict[Tuple[str, str], List[Tuple[str, str, str]]] = {}
- for src_label, src_nodes in src_groups.items():
- for snk_label, snk_nodes in snk_groups.items():
- if src_nodes and snk_nodes:
- if {n.name for n in src_nodes} & {n.name for n in snk_nodes}:
- saturated_list = []
- else:
- saturated_list = _compute_saturated_edges_single_group(
- context,
- src_nodes,
- snk_nodes,
- tolerance,
- shortest_path,
- flow_placement,
- )
- else:
- saturated_list = []
- results[(src_label, snk_label)] = saturated_list
- return results
+ combined_snk_names.extend(_filter_active_nodes(group_nodes))
- raise ValueError(f"Invalid mode '{mode}'. Must be 'combine' or 'pairwise'.")
+ if not combined_src_names or not combined_snk_names:
+ return {(combined_src_label, combined_snk_label): {}}
+ if set(combined_src_names) & set(combined_snk_names):
+ return {(combined_src_label, combined_snk_label): {}}
+ augmentations = []
+ pseudo_src = "__PSEUDO_SRC__"
+ pseudo_snk = "__PSEUDO_SNK__"
-def sensitivity_analysis(
- context: Any,
- source_path: str,
- sink_path: str,
- *,
- mode: str = "combine",
- change_amount: float = 1.0,
- shortest_path: bool = False,
- flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL,
-) -> Dict[Tuple[str, str], Dict[Tuple[str, str, str], float]]:
- """Perform a simple sensitivity analysis per saturated edge.
+ for src_name in combined_src_names:
+ augmentations.append(
+ AugmentationEdge(pseudo_src, src_name, LARGE_CAPACITY, 0)
+ )
+ for snk_name in combined_snk_names:
+ augmentations.append(
+ AugmentationEdge(snk_name, pseudo_snk, LARGE_CAPACITY, 0)
+ )
- For each saturated edge, test a capacity change of ``change_amount`` and
- report the change in total flow. Positive amounts increase capacity; negative
- amounts decrease capacity (with lower bound at zero).
+ # Include disabled nodes/links in exclusions
+ full_excluded_nodes, full_excluded_links = get_disabled_exclusions(
+ network, excluded_nodes, excluded_links
+ )
- Args:
- context: `Network` or `NetworkView` providing selection and graph APIs.
- source_path: Selection expression for source groups.
- sink_path: Selection expression for sink groups.
- mode: "combine" or "pairwise". See ``max_flow``.
- change_amount: Capacity delta to apply when testing each saturated edge.
- shortest_path: If True, perform only one augmentation step.
- flow_placement: Strategy for splitting among equal-cost parallel edges.
+ graph_handle, multidigraph, link_mapper, node_mapper = build_graph(
+ network,
+ augmentations=augmentations,
+ excluded_nodes=full_excluded_nodes,
+ excluded_links=full_excluded_links,
+ )
- Returns:
- Dict[Tuple[str, str], Dict[Tuple[str, str, str], float]]: For each
- (source_label, sink_label), a mapping from saturated edge ``(u, v, k)``
- to the change in total flow after applying the capacity delta.
+ pseudo_src_id = node_mapper.to_id(pseudo_src)
+ pseudo_snk_id = node_mapper.to_id(pseudo_snk)
- Raises:
- ValueError: If no matching sources or sinks are found, or if ``mode``
- is invalid.
- """
- src_groups = context.select_node_groups_by_path(source_path)
- snk_groups = context.select_node_groups_by_path(sink_path)
+ sens_results = algs.sensitivity_analysis(
+ graph_handle,
+ pseudo_src_id,
+ pseudo_snk_id,
+ flow_placement=core_flow_placement,
+ shortest_path=shortest_path,
+ )
- if not src_groups:
- raise ValueError(f"No source nodes found matching '{source_path}'.")
- if not snk_groups:
- raise ValueError(f"No sink nodes found matching '{sink_path}'.")
+ sensitivity_map = {}
+ ext_edge_ids = multidigraph.ext_edge_ids_view()
+ for edge_id, delta in sens_results:
+ ext_id = ext_edge_ids[edge_id]
+ link_id = link_mapper.to_name(ext_id)
+ if link_id is not None:
+ sensitivity_map[link_id] = delta
- if mode == "combine":
- combined_src_nodes: list = []
- combined_snk_nodes: list = []
- combined_src_label = "|".join(sorted(src_groups.keys()))
- combined_snk_label = "|".join(sorted(snk_groups.keys()))
- for group_nodes in src_groups.values():
- combined_src_nodes.extend(group_nodes)
- for group_nodes in snk_groups.values():
- combined_snk_nodes.extend(group_nodes)
- if not combined_src_nodes or not combined_snk_nodes:
- return {(combined_src_label, combined_snk_label): {}}
- if {n.name for n in combined_src_nodes} & {n.name for n in combined_snk_nodes}:
- sensitivity_dict: Dict[Tuple[str, str, str], float] = {}
- else:
- sensitivity_dict = _compute_sensitivity_single_group(
- context,
- combined_src_nodes,
- combined_snk_nodes,
- change_amount,
- shortest_path,
- flow_placement,
- )
- return {(combined_src_label, combined_snk_label): sensitivity_dict}
+ return {(combined_src_label, combined_snk_label): sensitivity_map}
if mode == "pairwise":
- results: Dict[Tuple[str, str], Dict[Tuple[str, str, str], float]] = {}
+ # Include disabled nodes/links in exclusions
+ full_excluded_nodes, full_excluded_links = get_disabled_exclusions(
+ network, excluded_nodes, excluded_links
+ )
+
+ out: Dict[Tuple[str, str], Dict[str, float]] = {}
for src_label, src_nodes in src_groups.items():
for snk_label, snk_nodes in snk_groups.items():
- if src_nodes and snk_nodes:
- if {n.name for n in src_nodes} & {n.name for n in snk_nodes}:
- sensitivity_dict = {}
- else:
- sensitivity_dict = _compute_sensitivity_single_group(
- context,
- src_nodes,
- snk_nodes,
- change_amount,
- shortest_path,
- flow_placement,
- )
- else:
- sensitivity_dict = {}
- results[(src_label, snk_label)] = sensitivity_dict
- return results
-
- raise ValueError(f"Invalid mode '{mode}'. Must be 'combine' or 'pairwise'.")
-
-
-# --- Single-group helpers ---------------------------------------------------
+ active_src_names = _filter_active_nodes(src_nodes)
+ active_snk_names = _filter_active_nodes(snk_nodes)
+
+ if not active_src_names or not active_snk_names:
+ out[(src_label, snk_label)] = {}
+ continue
+ if set(active_src_names) & set(active_snk_names):
+ out[(src_label, snk_label)] = {}
+ continue
+
+ augmentations = []
+ pseudo_src = f"__PSEUDO_SRC_{src_label}__"
+ pseudo_snk = f"__PSEUDO_SNK_{snk_label}__"
+
+ for src_name in active_src_names:
+ augmentations.append(
+ AugmentationEdge(pseudo_src, src_name, LARGE_CAPACITY, 0)
+ )
+ for snk_name in active_snk_names:
+ augmentations.append(
+ AugmentationEdge(snk_name, pseudo_snk, LARGE_CAPACITY, 0)
+ )
+ graph_handle, multidigraph, link_mapper, node_mapper = build_graph(
+ network,
+ augmentations=augmentations,
+ excluded_nodes=full_excluded_nodes,
+ excluded_links=full_excluded_links,
+ )
-def _compute_flow_single_group(
- context: Any,
- sources: list,
- sinks: list,
- shortest_path: bool,
- flow_placement: FlowPlacement,
- *,
- prebuilt_graph: Optional["StrictMultiDiGraph"] = None,
-) -> float:
- active_sources = [s for s in sources if not s.disabled]
- active_sinks = [s for s in sinks if not s.disabled]
- if not active_sources or not active_sinks:
- return 0.0
- graph = (
- prebuilt_graph.copy()
- if prebuilt_graph is not None
- else context.to_strict_multidigraph()
- )
- graph.add_node("source")
- graph.add_node("sink")
- for s_node in active_sources:
- graph.add_edge("source", s_node.name, capacity=float("inf"), cost=0)
- for t_node in active_sinks:
- graph.add_edge(t_node.name, "sink", capacity=float("inf"), cost=0)
- return calc_max_flow(
- graph,
- "source",
- "sink",
- flow_placement=flow_placement,
- shortest_path=shortest_path,
- copy_graph=False,
- )
+ pseudo_src_id = node_mapper.to_id(pseudo_src)
+ pseudo_snk_id = node_mapper.to_id(pseudo_snk)
+ sens_results = algs.sensitivity_analysis(
+ graph_handle,
+ pseudo_src_id,
+ pseudo_snk_id,
+ flow_placement=core_flow_placement,
+ shortest_path=shortest_path,
+ )
-def _compute_flow_with_summary_single_group(
- context: Any,
- sources: list,
- sinks: list,
- shortest_path: bool,
- flow_placement: FlowPlacement,
-) -> Tuple[float, FlowSummary]:
- active_sources = [s for s in sources if not s.disabled]
- active_sinks = [s for s in sinks if not s.disabled]
- if not active_sources or not active_sinks:
- return 0.0, _empty_summary()
- graph = context.to_strict_multidigraph(compact=True).copy()
- graph.add_node("source")
- graph.add_node("sink")
- for s_node in active_sources:
- graph.add_edge("source", s_node.name, capacity=float("inf"), cost=0)
- for t_node in active_sinks:
- graph.add_edge(t_node.name, "sink", capacity=float("inf"), cost=0)
- flow_val, summary = calc_max_flow(
- graph,
- "source",
- "sink",
- return_summary=True,
- flow_placement=flow_placement,
- shortest_path=shortest_path,
- copy_graph=False,
- )
- return flow_val, summary
-
-
-def _compute_flow_with_graph_single_group(
- context: Any,
- sources: list,
- sinks: list,
- shortest_path: bool,
- flow_placement: FlowPlacement,
-) -> Tuple[float, "StrictMultiDiGraph"]:
- active_sources = [s for s in sources if not s.disabled]
- active_sinks = [s for s in sinks if not s.disabled]
- if not active_sources or not active_sinks:
- base_graph = context.to_strict_multidigraph(compact=True).copy()
- return 0.0, base_graph
- graph = context.to_strict_multidigraph(compact=True).copy()
- graph.add_node("source")
- graph.add_node("sink")
- for s_node in active_sources:
- graph.add_edge("source", s_node.name, capacity=float("inf"), cost=0)
- for t_node in active_sinks:
- graph.add_edge(t_node.name, "sink", capacity=float("inf"), cost=0)
- flow_val, flow_graph = calc_max_flow(
- graph,
- "source",
- "sink",
- return_graph=True,
- flow_placement=flow_placement,
- shortest_path=shortest_path,
- copy_graph=False,
- )
- return flow_val, flow_graph
-
-
-def _compute_flow_detailed_single_group(
- context: Any,
- sources: list,
- sinks: list,
- shortest_path: bool,
- flow_placement: FlowPlacement,
-) -> Tuple[float, FlowSummary, "StrictMultiDiGraph"]:
- active_sources = [s for s in sources if not s.disabled]
- active_sinks = [s for s in sinks if not s.disabled]
- if not active_sources or not active_sinks:
- base_graph = context.to_strict_multidigraph(compact=True).copy()
- return 0.0, _empty_summary(), base_graph
- graph = context.to_strict_multidigraph(compact=True).copy()
- graph.add_node("source")
- graph.add_node("sink")
- for s_node in active_sources:
- graph.add_edge("source", s_node.name, capacity=float("inf"), cost=0)
- for t_node in active_sinks:
- graph.add_edge(t_node.name, "sink", capacity=float("inf"), cost=0)
- flow_val, summary, flow_graph = calc_max_flow(
- graph,
- "source",
- "sink",
- return_summary=True,
- return_graph=True,
- flow_placement=flow_placement,
- shortest_path=shortest_path,
- copy_graph=False,
- )
- return flow_val, summary, flow_graph
-
-
-def _compute_saturated_edges_single_group(
- context: Any,
- sources: list,
- sinks: list,
- tolerance: float,
- shortest_path: bool,
- flow_placement: FlowPlacement,
-) -> List[Tuple[str, str, str]]:
- active_sources = [s for s in sources if not s.disabled]
- active_sinks = [s for s in sinks if not s.disabled]
- if not active_sources or not active_sinks:
- return []
- graph = context.to_strict_multidigraph(compact=True).copy()
- graph.add_node("source")
- graph.add_node("sink")
- for s_node in active_sources:
- graph.add_edge("source", s_node.name, capacity=float("inf"), cost=0)
- for t_node in active_sinks:
- graph.add_edge(t_node.name, "sink", capacity=float("inf"), cost=0)
- return _algo_saturated_edges(
- graph,
- "source",
- "sink",
- tolerance=tolerance,
- flow_placement=flow_placement,
- shortest_path=shortest_path,
- copy_graph=False,
- )
+ sensitivity_map = {}
+ ext_edge_ids = multidigraph.ext_edge_ids_view()
+ for edge_id, delta in sens_results:
+ ext_id = ext_edge_ids[edge_id]
+ link_id = link_mapper.to_name(ext_id)
+ if link_id is not None:
+ sensitivity_map[link_id] = delta
+ out[(src_label, snk_label)] = sensitivity_map
+ return out
-def _compute_sensitivity_single_group(
- context: Any,
- sources: list,
- sinks: list,
- change_amount: float,
- shortest_path: bool,
- flow_placement: FlowPlacement,
-) -> Dict[Tuple[str, str, str], float]:
- active_sources = [s for s in sources if not s.disabled]
- active_sinks = [s for s in sinks if not s.disabled]
- if not active_sources or not active_sinks:
- return {}
- graph = context.to_strict_multidigraph(compact=True).copy()
- graph.add_node("source")
- graph.add_node("sink")
- for s_node in active_sources:
- graph.add_edge("source", s_node.name, capacity=float("inf"), cost=0)
- for t_node in active_sinks:
- graph.add_edge(t_node.name, "sink", capacity=float("inf"), cost=0)
- return run_sensitivity(
- graph,
- "source",
- "sink",
- change_amount=change_amount,
- flow_placement=flow_placement,
- shortest_path=shortest_path,
- copy_graph=False,
- )
+ raise ValueError(f"Invalid mode '{mode}'. Must be 'combine' or 'pairwise'.")
-def _empty_summary() -> FlowSummary:
- return FlowSummary(
- total_flow=0.0,
- edge_flow={},
- residual_cap={},
- reachable=set(),
- min_cut=[],
- cost_distribution={},
- )
+def _map_flow_placement(flow_placement: FlowPlacement) -> netgraph_core.FlowPlacement:
+ """Map NetGraph FlowPlacement to Core FlowPlacement."""
+ if flow_placement == FlowPlacement.PROPORTIONAL:
+ return netgraph_core.FlowPlacement.PROPORTIONAL
+ if flow_placement == FlowPlacement.EQUAL_BALANCED:
+ return netgraph_core.FlowPlacement.EQUAL_BALANCED
+ raise ValueError(f"Unsupported FlowPlacement: {flow_placement}")
diff --git a/ngraph/solver/paths.py b/ngraph/solver/paths.py
index 180ffd7..855f510 100644
--- a/ngraph/solver/paths.py
+++ b/ngraph/solver/paths.py
@@ -1,13 +1,17 @@
"""Shortest-path solver wrappers bound to the model layer.
Expose convenience functions for computing shortest paths between node groups
-selected from a ``Network`` or ``NetworkView`` context. Selection semantics
-mirror the max-flow wrappers with ``mode`` in {"combine", "pairwise"}.
+selected from a ``Network`` context. Selection semantics mirror the max-flow
+wrappers with ``mode`` in {"combine", "pairwise"}.
Functions return minimal costs or concrete ``Path`` objects built from SPF
predecessor maps. Parallel equal-cost edges can be expanded into distinct
paths.
+Graph caching is used internally for efficient mask-based exclusions. For
+repeated queries with different exclusions, consider using the lower-level
+adapters/core.py functions with explicit cache management.
+
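+Example (illustrative; ``net`` is assumed to be a built ``Network``):
+
+    costs = shortest_path_costs(net, "^A.*", "^B.*", excluded_links={"L1"})
+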
All functions fail fast on invalid selection inputs and do not mutate the
input context.
@@ -18,29 +22,41 @@
from __future__ import annotations
-from typing import Any, Dict, Iterable, List, Optional, Tuple
+from typing import Dict, Iterable, List, Optional, Set, Tuple
+
+import netgraph_core
-from ngraph.algorithms.base import EdgeSelect
-from ngraph.algorithms.spf import ksp, spf
-from ngraph.paths.path import Path
+from ngraph.adapters.core import (
+ build_edge_mask,
+ build_graph_cache,
+ build_node_mask,
+)
+from ngraph.model.network import Network
+from ngraph.model.path import Path
+from ngraph.types.base import EdgeSelect
+from ngraph.types.dto import EdgeRef
def shortest_path_costs(
- context: Any,
+ network: Network,
source_path: str,
sink_path: str,
*,
mode: str = "combine",
edge_select: EdgeSelect = EdgeSelect.ALL_MIN_COST,
+ excluded_nodes: Optional[Set[str]] = None,
+ excluded_links: Optional[Set[str]] = None,
) -> Dict[Tuple[str, str], float]:
"""Return minimal path cost(s) between selected node groups.
Args:
- context: Network or NetworkView.
+ network: Network instance.
source_path: Selection expression for source groups.
sink_path: Selection expression for sink groups.
mode: "combine" or "pairwise".
edge_select: SPF edge selection strategy.
+ excluded_nodes: Optional set of node names to exclude temporarily.
+ excluded_links: Optional set of link IDs to exclude temporarily.
Returns:
Mapping from (source_label, sink_label) to minimal cost; ``inf`` if no
@@ -51,67 +67,93 @@ def shortest_path_costs(
ValueError: If no sink nodes match ``sink_path``.
ValueError: If ``mode`` is not "combine" or "pairwise".
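+
+ Example (illustrative; ``net`` and the resulting labels are assumed):
+ >>> shortest_path_costs(net, "^A$", "^B$", mode="combine")
+ {('A', 'B'): 3.0}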
"""
- src_groups = context.select_node_groups_by_path(source_path)
- snk_groups = context.select_node_groups_by_path(sink_path)
+ src_groups = network.select_node_groups_by_path(source_path)
+ snk_groups = network.select_node_groups_by_path(sink_path)
if not src_groups:
raise ValueError(f"No source nodes found matching '{source_path}'.")
if not snk_groups:
raise ValueError(f"No sink nodes found matching '{sink_path}'.")
- graph = context.to_strict_multidigraph(compact=True).copy()
+ # Build graph cache and masks
+ cache = build_graph_cache(network)
+ node_mask = build_node_mask(cache, excluded_nodes)
+ edge_mask = build_edge_mask(cache, excluded_links)
- def _active(nodes: Iterable[Any]) -> List[Any]:
- return [n for n in nodes if not getattr(n, "disabled", False)]
+ # Map edge_select to Core's EdgeSelection
+ core_edge_select = _map_edge_select(edge_select)
+
+ def _active_node_names(nodes: Iterable) -> List[str]:
+ """Filter to active (non-disabled) node names."""
+ return [
+ n.name
+ for n in nodes
+ if not n.disabled
+ and (excluded_nodes is None or n.name not in excluded_nodes)
+ ]
if mode == "combine":
- combined_src_nodes: List[Any] = []
- combined_snk_nodes: List[Any] = []
combined_src_label = "|".join(sorted(src_groups.keys()))
combined_snk_label = "|".join(sorted(snk_groups.keys()))
+ combined_src_names = []
for group_nodes in src_groups.values():
- combined_src_nodes.extend(group_nodes)
+ combined_src_names.extend(_active_node_names(group_nodes))
+ combined_snk_names = []
for group_nodes in snk_groups.values():
- combined_snk_nodes.extend(group_nodes)
+ combined_snk_names.extend(_active_node_names(group_nodes))
- active_sources = _active(combined_src_nodes)
- active_sinks = _active(combined_snk_nodes)
- if not active_sources or not active_sinks:
+ if not combined_src_names or not combined_snk_names:
return {(combined_src_label, combined_snk_label): float("inf")}
- if {n.name for n in active_sources} & {n.name for n in active_sinks}:
+ if set(combined_src_names) & set(combined_snk_names):
return {(combined_src_label, combined_snk_label): float("inf")}
+ # Run SPF from each source, find min cost to any sink
best_cost = float("inf")
- for s in active_sources:
- costs, _ = spf(graph, s.name, edge_select=edge_select, multipath=True)
- for t in active_sinks:
- c = costs.get(t.name)
- if c is not None and c < best_cost:
- best_cost = c
+ for src_name in combined_src_names:
+ src_id = cache.node_mapper.to_id(src_name)
+ dists, _ = cache.algorithms.spf(
+ cache.graph_handle,
+ src=src_id,
+ selection=core_edge_select,
+ node_mask=node_mask,
+ edge_mask=edge_mask,
+ )
+ for snk_name in combined_snk_names:
+ snk_id = cache.node_mapper.to_id(snk_name)
+ cost = dists[snk_id]
+ if cost < best_cost:
+ best_cost = cost
return {(combined_src_label, combined_snk_label): best_cost}
if mode == "pairwise":
results: Dict[Tuple[str, str], float] = {}
for src_label, src_nodes in src_groups.items():
for snk_label, snk_nodes in snk_groups.items():
- active_sources = _active(src_nodes)
- active_sinks = _active(snk_nodes)
- if not active_sources or not active_sinks:
+ active_src_names = _active_node_names(src_nodes)
+ active_snk_names = _active_node_names(snk_nodes)
+ if not active_src_names or not active_snk_names:
results[(src_label, snk_label)] = float("inf")
continue
- if {n.name for n in active_sources} & {n.name for n in active_sinks}:
+ if set(active_src_names) & set(active_snk_names):
results[(src_label, snk_label)] = float("inf")
continue
+
best_cost = float("inf")
- for s in active_sources:
- costs, _ = spf(
- graph, s.name, edge_select=edge_select, multipath=True
+ for src_name in active_src_names:
+ src_id = cache.node_mapper.to_id(src_name)
+ dists, _ = cache.algorithms.spf(
+ cache.graph_handle,
+ src=src_id,
+ selection=core_edge_select,
+ node_mask=node_mask,
+ edge_mask=edge_mask,
)
- for t in active_sinks:
- c = costs.get(t.name)
- if c is not None and c < best_cost:
- best_cost = c
+ for snk_name in active_snk_names:
+ snk_id = cache.node_mapper.to_id(snk_name)
+ cost = dists[snk_id]
+ if cost < best_cost:
+ best_cost = cost
results[(src_label, snk_label)] = best_cost
return results
@@ -119,23 +161,27 @@ def _active(nodes: Iterable[Any]) -> List[Any]:
def shortest_paths(
- context: Any,
+ network: Network,
source_path: str,
sink_path: str,
*,
mode: str = "combine",
edge_select: EdgeSelect = EdgeSelect.ALL_MIN_COST,
split_parallel_edges: bool = False,
+ excluded_nodes: Optional[Set[str]] = None,
+ excluded_links: Optional[Set[str]] = None,
) -> Dict[Tuple[str, str], List[Path]]:
"""Return concrete shortest path(s) between selected node groups.
Args:
- context: Network or NetworkView.
+ network: Network instance.
source_path: Selection expression for source groups.
sink_path: Selection expression for sink groups.
mode: "combine" or "pairwise".
edge_select: SPF edge selection strategy.
split_parallel_edges: Expand parallel edges into distinct paths when True.
+ excluded_nodes: Optional set of node names to exclude temporarily.
+ excluded_links: Optional set of link IDs to exclude temporarily.
Returns:
Mapping from (source_label, sink_label) to list of Path. Empty if
@@ -146,53 +192,80 @@ def shortest_paths(
ValueError: If no sink nodes match ``sink_path``.
ValueError: If ``mode`` is not "combine" or "pairwise".
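+
+ Example (illustrative; ``net`` and the labels are assumed):
+ >>> paths = shortest_paths(net, "^A$", "^B$", split_parallel_edges=True)
+ >>> [p.cost for p in paths[("A", "B")]]
+ [3.0, 3.0]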
"""
- src_groups = context.select_node_groups_by_path(source_path)
- snk_groups = context.select_node_groups_by_path(sink_path)
+ src_groups = network.select_node_groups_by_path(source_path)
+ snk_groups = network.select_node_groups_by_path(sink_path)
if not src_groups:
raise ValueError(f"No source nodes found matching '{source_path}'.")
if not snk_groups:
raise ValueError(f"No sink nodes found matching '{sink_path}'.")
- graph = context.to_strict_multidigraph(compact=True).copy()
+ # Build graph cache and masks
+ cache = build_graph_cache(network)
+ node_mask = build_node_mask(cache, excluded_nodes)
+ edge_mask = build_edge_mask(cache, excluded_links)
+
+ # Map edge_select to Core's EdgeSelection
+ core_edge_select = _map_edge_select(edge_select)
- def _active(nodes: Iterable[Any]) -> List[Any]:
- return [n for n in nodes if not getattr(n, "disabled", False)]
+ def _active_node_names(nodes: Iterable) -> List[str]:
+ """Filter to active (non-disabled) node names."""
+ return [
+ n.name
+ for n in nodes
+ if not n.disabled
+ and (excluded_nodes is None or n.name not in excluded_nodes)
+ ]
def _best_paths_for_groups(
- src_nodes: List[Any], snk_nodes: List[Any]
+ src_names: List[str], snk_names: List[str]
) -> List[Path]:
- active_sources = _active(src_nodes)
- active_sinks = _active(snk_nodes)
- if not active_sources or not active_sinks:
+ """Find best-cost paths from any source to any sink."""
+ if not src_names or not snk_names:
return []
- if {n.name for n in active_sources} & {n.name for n in active_sinks}:
+ if set(src_names) & set(snk_names):
return []
best_cost = float("inf")
best_paths: List[Path] = []
- from ngraph.algorithms.paths import resolve_to_paths as _resolve
-
- for s in active_sources:
- costs, pred = spf(graph, s.name, edge_select=edge_select, multipath=True)
- for t in active_sinks:
- if t.name not in pred:
+ for src_name in src_names:
+ src_id = cache.node_mapper.to_id(src_name)
+ dists, pred_dag = cache.algorithms.spf(
+ cache.graph_handle,
+ src=src_id,
+ selection=core_edge_select,
+ node_mask=node_mask,
+ edge_mask=edge_mask,
+ )
+ for snk_name in snk_names:
+ snk_id = cache.node_mapper.to_id(snk_name)
+ cost = dists[snk_id]
+ if cost == float("inf"):
continue
- cost_to_t = costs.get(t.name, float("inf"))
- if cost_to_t < best_cost:
- best_cost = cost_to_t
- best_paths = [
- Path(path_tuple, cost_to_t)
- for path_tuple in _resolve(
- s.name, t.name, pred, split_parallel_edges
- )
- ]
- elif cost_to_t == best_cost:
+ if cost < best_cost:
+ best_cost = cost
+ best_paths = _extract_paths_from_pred_dag(
+ pred_dag,
+ src_name,
+ snk_name,
+ cost,
+ cache.node_mapper,
+ cache.edge_mapper,
+ cache.multidigraph,
+ split_parallel_edges,
+ )
+ elif cost == best_cost:
best_paths.extend(
- Path(path_tuple, cost_to_t)
- for path_tuple in _resolve(
- s.name, t.name, pred, split_parallel_edges
+ _extract_paths_from_pred_dag(
+ pred_dag,
+ src_name,
+ snk_name,
+ cost,
+ cache.node_mapper,
+ cache.edge_mapper,
+ cache.multidigraph,
+ split_parallel_edges,
)
)
@@ -201,25 +274,27 @@ def _best_paths_for_groups(
return best_paths
if mode == "combine":
- combined_src_nodes: List[Any] = []
- combined_snk_nodes: List[Any] = []
combined_src_label = "|".join(sorted(src_groups.keys()))
combined_snk_label = "|".join(sorted(snk_groups.keys()))
+ combined_src_names = []
for group_nodes in src_groups.values():
- combined_src_nodes.extend(group_nodes)
+ combined_src_names.extend(_active_node_names(group_nodes))
+ combined_snk_names = []
for group_nodes in snk_groups.values():
- combined_snk_nodes.extend(group_nodes)
+ combined_snk_names.extend(_active_node_names(group_nodes))
- paths_list = _best_paths_for_groups(combined_src_nodes, combined_snk_nodes)
+ paths_list = _best_paths_for_groups(combined_src_names, combined_snk_names)
return {(combined_src_label, combined_snk_label): paths_list}
if mode == "pairwise":
results: Dict[Tuple[str, str], List[Path]] = {}
for src_label, src_nodes in src_groups.items():
for snk_label, snk_nodes in snk_groups.items():
+ active_src_names = _active_node_names(src_nodes)
+ active_snk_names = _active_node_names(snk_nodes)
results[(src_label, snk_label)] = _best_paths_for_groups(
- src_nodes, snk_nodes
+ active_src_names, active_snk_names
)
return results
@@ -227,7 +302,7 @@ def _best_paths_for_groups(
def k_shortest_paths(
- context: Any,
+ network: Network,
source_path: str,
sink_path: str,
*,
@@ -237,11 +312,13 @@ def k_shortest_paths(
max_path_cost: float = float("inf"),
max_path_cost_factor: Optional[float] = None,
split_parallel_edges: bool = False,
+ excluded_nodes: Optional[Set[str]] = None,
+ excluded_links: Optional[Set[str]] = None,
) -> Dict[Tuple[str, str], List[Path]]:
"""Return up to K shortest paths per group pair.
Args:
- context: Network or NetworkView.
+ network: Network instance.
source_path: Selection expression for source groups.
sink_path: Selection expression for sink groups.
mode: "pairwise" (default) or "combine".
@@ -250,6 +327,8 @@ def k_shortest_paths(
max_path_cost: Absolute cost threshold.
max_path_cost_factor: Relative threshold versus best path.
split_parallel_edges: Expand parallel edges into distinct paths when True.
+ excluded_nodes: Optional set of node names to exclude temporarily.
+ excluded_links: Optional set of link IDs to exclude temporarily.
Returns:
Mapping from (source_label, sink_label) to list of Path (<= max_k).
@@ -259,63 +338,93 @@ def k_shortest_paths(
ValueError: If no sink nodes match ``sink_path``.
ValueError: If ``mode`` is not "combine" or "pairwise".
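+
+ Example (illustrative; ``net`` and the labels are assumed):
+ >>> ksp = k_shortest_paths(net, "^A$", "^B$", max_k=3)
+ >>> len(ksp[("A", "B")]) <= 3
+ True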
"""
- src_groups = context.select_node_groups_by_path(source_path)
- snk_groups = context.select_node_groups_by_path(sink_path)
+ src_groups = network.select_node_groups_by_path(source_path)
+ snk_groups = network.select_node_groups_by_path(sink_path)
if not src_groups:
raise ValueError(f"No source nodes found matching '{source_path}'.")
if not snk_groups:
raise ValueError(f"No sink nodes found matching '{sink_path}'.")
- graph = context.to_strict_multidigraph(compact=True).copy()
-
- def _active(nodes: Iterable[Any]) -> List[Any]:
- return [n for n in nodes if not getattr(n, "disabled", False)]
-
- def _ksp_for_groups(src_nodes: List[Any], snk_nodes: List[Any]) -> List[Path]:
- active_sources = _active(src_nodes)
- active_sinks = _active(snk_nodes)
- if not active_sources or not active_sinks:
+ # Build graph cache and masks
+ cache = build_graph_cache(network)
+ node_mask = build_node_mask(cache, excluded_nodes)
+ edge_mask = build_edge_mask(cache, excluded_links)
+
+ # Map edge_select to Core's EdgeSelection
+ core_edge_select = _map_edge_select(edge_select)
+
+ def _active_node_names(nodes: Iterable) -> List[str]:
+ """Filter to active (non-disabled) node names."""
+ return [
+ n.name
+ for n in nodes
+ if not n.disabled
+ and (excluded_nodes is None or n.name not in excluded_nodes)
+ ]
+
+ def _ksp_for_groups(src_names: List[str], snk_names: List[str]) -> List[Path]:
+ """Find K shortest paths from any source to any sink."""
+ if not src_names or not snk_names:
return []
- if {n.name for n in active_sources} & {n.name for n in active_sinks}:
+ if set(src_names) & set(snk_names):
return []
- # Choose best pair to seed thresholds
+ # Find best pair to seed thresholds
best_pair: Optional[Tuple[str, str]] = None
best_cost = float("inf")
- for s in active_sources:
- costs, pred = spf(graph, s.name, edge_select=edge_select, multipath=True)
- for t in active_sinks:
- if t.name not in pred:
- continue
- c = costs.get(t.name, float("inf"))
- if c < best_cost:
- best_cost = c
- best_pair = (s.name, t.name)
+ for src_name in src_names:
+ src_id = cache.node_mapper.to_id(src_name)
+ dists, _ = cache.algorithms.spf(
+ cache.graph_handle,
+ src=src_id,
+ selection=core_edge_select,
+ node_mask=node_mask,
+ edge_mask=edge_mask,
+ )
+ for snk_name in snk_names:
+ snk_id = cache.node_mapper.to_id(snk_name)
+ cost = dists[snk_id]
+ if cost < best_cost:
+ best_cost = cost
+ best_pair = (src_name, snk_name)
if best_pair is None:
return []
+ # Run KSP on the best pair
+ src_name, snk_name = best_pair
+ src_id = cache.node_mapper.to_id(src_name)
+ snk_id = cache.node_mapper.to_id(snk_name)
+
results: List[Path] = []
- s_name, t_name = best_pair
count = 0
- from ngraph.algorithms.paths import resolve_to_paths as _resolve
-
- for costs_i, pred_i in ksp(
- graph,
- s_name,
- t_name,
- edge_select=edge_select,
- max_k=max_k,
- max_path_cost=max_path_cost,
- max_path_cost_factor=max_path_cost_factor,
- multipath=True,
+
+ for dists, pred_dag in cache.algorithms.ksp(
+ cache.graph_handle,
+ src=src_id,
+ dst=snk_id,
+ k=max_k,
+ max_cost_factor=max_path_cost_factor,
+ node_mask=node_mask,
+ edge_mask=edge_mask,
):
- if t_name not in pred_i:
+ cost = dists[snk_id]
+ if cost == float("inf") or cost > max_path_cost:
continue
- cost_val = costs_i[t_name]
- for path_tuple in _resolve(s_name, t_name, pred_i, split_parallel_edges):
- results.append(Path(path_tuple, cost_val))
+ for path in _extract_paths_from_pred_dag(
+ pred_dag,
+ src_name,
+ snk_name,
+ cost,
+ cache.node_mapper,
+ cache.edge_mapper,
+ cache.multidigraph,
+ split_parallel_edges,
+ ):
+ results.append(path)
count += 1
if count >= max_k:
break
@@ -327,19 +436,19 @@ def _ksp_for_groups(src_nodes: List[Any], snk_nodes: List[Any]) -> List[Path]:
return results
if mode == "combine":
- combined_src_nodes: List[Any] = []
- combined_snk_nodes: List[Any] = []
combined_src_label = "|".join(sorted(src_groups.keys()))
combined_snk_label = "|".join(sorted(snk_groups.keys()))
+ combined_src_names = []
for group_nodes in src_groups.values():
- combined_src_nodes.extend(group_nodes)
+ combined_src_names.extend(_active_node_names(group_nodes))
+ combined_snk_names = []
for group_nodes in snk_groups.values():
- combined_snk_nodes.extend(group_nodes)
+ combined_snk_names.extend(_active_node_names(group_nodes))
return {
(combined_src_label, combined_snk_label): _ksp_for_groups(
- combined_src_nodes, combined_snk_nodes
+ combined_src_names, combined_snk_names
)
}
@@ -347,7 +456,89 @@ def _ksp_for_groups(src_nodes: List[Any], snk_nodes: List[Any]) -> List[Path]:
results: Dict[Tuple[str, str], List[Path]] = {}
for src_label, src_nodes in src_groups.items():
for snk_label, snk_nodes in snk_groups.items():
- results[(src_label, snk_label)] = _ksp_for_groups(src_nodes, snk_nodes)
+ active_src_names = _active_node_names(src_nodes)
+ active_snk_names = _active_node_names(snk_nodes)
+ results[(src_label, snk_label)] = _ksp_for_groups(
+ active_src_names, active_snk_names
+ )
return results
raise ValueError(f"Invalid mode '{mode}'. Must be 'combine' or 'pairwise'.")
+
+
+# Helper functions
+
+
+def _map_edge_select(edge_select: EdgeSelect) -> netgraph_core.EdgeSelection:
+ """Map NetGraph EdgeSelect to Core EdgeSelection."""
+ if edge_select == EdgeSelect.ALL_MIN_COST:
+ return netgraph_core.EdgeSelection(
+ multi_edge=True,
+ require_capacity=False,
+ tie_break=netgraph_core.EdgeTieBreak.DETERMINISTIC,
+ )
+ if edge_select == EdgeSelect.SINGLE_MIN_COST:
+ return netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=False,
+ tie_break=netgraph_core.EdgeTieBreak.DETERMINISTIC,
+ )
+ raise ValueError(f"Unsupported EdgeSelect: {edge_select}")
+
+
+def _extract_paths_from_pred_dag(
+ pred_dag: netgraph_core.PredDAG,
+ src_name: str,
+ snk_name: str,
+ cost: float,
+ node_mapper,
+ edge_mapper,
+ multidigraph,
+ split_parallel_edges: bool,
+) -> List[Path]:
+ """Extract Path objects from a PredDAG.
+
+ Args:
+ pred_dag: Core PredDAG instance.
+ src_name: Source node name.
+ snk_name: Sink node name.
+ cost: Path cost.
+ node_mapper: NodeMapper for ID <-> name translation.
+ edge_mapper: EdgeMapper for ext_edge_id <-> EdgeRef translation.
+ multidigraph: Core StrictMultiDiGraph instance.
+ split_parallel_edges: If True, expand parallel edges into distinct paths.
+
+ Returns:
+ List of Path objects.
+ """
+ src_id = node_mapper.to_id(src_name)
+ snk_id = node_mapper.to_id(snk_name)
+
+ # Get fully resolved paths from PredDAG
+ # Returns list of paths, where each path is a list of (node_id, edge_ids_tuple)
+ raw_paths = pred_dag.resolve_to_paths(
+ src_id, snk_id, split_parallel_edges=split_parallel_edges
+ )
+
+ paths = []
+ ext_edge_ids = multidigraph.ext_edge_ids_view()
+
+ for raw_path in raw_paths:
+ path_elements: List[Tuple[str, Tuple[EdgeRef, ...]]] = []
+
+ for node_id, edge_ids in raw_path:
+ node_name = node_mapper.to_name(node_id)
+
+ # Resolve EdgeRefs
+ edge_refs = []
+ for edge_id in edge_ids:
+ ext_id = ext_edge_ids[edge_id]
+ edge_ref = edge_mapper.decode_ext_id(int(ext_id))
+ if edge_ref is not None:
+ edge_refs.append(edge_ref)
+
+ path_elements.append((node_name, tuple(edge_refs)))
+
+ paths.append(Path(tuple(path_elements), cost))
+
+ return paths
diff --git a/ngraph/algorithms/base.py b/ngraph/types/base.py
similarity index 75%
rename from ngraph/algorithms/base.py
rename to ngraph/types/base.py
index 84ce563..a0ca2ac 100644
--- a/ngraph/algorithms/base.py
+++ b/ngraph/types/base.py
@@ -3,24 +3,11 @@
from __future__ import annotations
from enum import IntEnum
-from typing import Tuple, Union
-
-from ngraph.graph.strict_multidigraph import EdgeID, NodeID
+from typing import Union
#: Represents numeric cost in the network (e.g. distance, latency, etc.).
Cost = Union[int, float]
-#: A single path element is a tuple of:
-#: - The current node ID.
-#: - A tuple of zero or more parallel edge IDs from this node to the next node.
-#: In a complete path, intermediate elements usually have a non-empty edge tuple,
-#: while the final element has an empty tuple to indicate termination.
-PathElement = Tuple[NodeID, Tuple[EdgeID, ...]]
-
-#: A path is a tuple of PathElements forming a complete route from
-#: a source node to a destination node.
-PathTuple = Tuple[PathElement, ...]
-
#: Capacity threshold below which capacity values are treated as effectively zero.
MIN_CAP = 2**-12
diff --git a/ngraph/types/dto.py b/ngraph/types/dto.py
new file mode 100644
index 0000000..013d500
--- /dev/null
+++ b/ngraph/types/dto.py
@@ -0,0 +1,56 @@
+"""Types and data structures for algorithm analytics.
+
+Defines immutable summary containers and aliases for algorithm outputs.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Dict, Literal, Tuple
+
+from ngraph.types.base import Cost
+
+# Edge direction: 'fwd' for forward (source→target as in Link), 'rev' for reverse
+EdgeDir = Literal["fwd", "rev"]
+
+
+@dataclass(frozen=True)
+class EdgeRef:
+ """Reference to a directed edge via scenario link_id and direction.
+
+ Replaces the old Edge = Tuple[str, str, Hashable] to provide stable,
+ scenario-native edge identification across Core reorderings.
+
+ Attributes:
+ link_id: Scenario link identifier (matches Network.links keys)
+ direction: 'fwd' for source→target as defined in Link; 'rev' for reverse
+ """
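+
+ Example (illustrative):
+ >>> EdgeRef(link_id="link-1", direction="fwd")
+ EdgeRef(link_id='link-1', direction='fwd')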
+
+ link_id: str
+ direction: EdgeDir
+
+
+@dataclass(frozen=True)
+class FlowSummary:
+ """Summary of max-flow computation results.
+
+ Captures edge flows, residual capacities, reachable set, and min-cut.
+
+ Breaking change from v1.x: Fields now use EdgeRef instead of (src, dst, key) tuples
+ for stable scenario-level edge identification.
+
+ Attributes:
+ total_flow: Maximum flow value achieved.
+ cost_distribution: Mapping of path cost to flow volume placed at that cost.
+ min_cut: Saturated edges crossing the s-t cut.
+ reachable_nodes: Nodes reachable from source in residual graph (optional).
+ edge_flow: Flow amount per edge (optional, only populated when requested).
+ residual_cap: Remaining capacity per edge after placement (optional).
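+
+ Example (illustrative; optional fields omitted):
+ >>> s = FlowSummary(total_flow=10.0, cost_distribution={2.0: 10.0}, min_cut=())
+ >>> s.total_flow
+ 10.0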
+ """
+
+ total_flow: float
+ cost_distribution: Dict[Cost, float]
+ min_cut: Tuple[EdgeRef, ...]
+ reachable_nodes: Tuple[str, ...] | None = None
+ edge_flow: Dict[EdgeRef, float] | None = None
+ residual_cap: Dict[EdgeRef, float] | None = None
diff --git a/ngraph/seed_manager.py b/ngraph/utils/seed_manager.py
similarity index 100%
rename from ngraph/seed_manager.py
rename to ngraph/utils/seed_manager.py
diff --git a/ngraph/yaml_utils.py b/ngraph/utils/yaml_utils.py
similarity index 100%
rename from ngraph/yaml_utils.py
rename to ngraph/utils/yaml_utils.py
diff --git a/ngraph/workflow/build_graph.py b/ngraph/workflow/build_graph.py
index 49a5c1f..c7b1eb1 100644
--- a/ngraph/workflow/build_graph.py
+++ b/ngraph/workflow/build_graph.py
@@ -1,17 +1,24 @@
"""Graph building workflow component.
-Converts scenario network definitions into StrictMultiDiGraph structures suitable
-for analysis algorithms. No additional parameters required beyond basic workflow step options.
+Validates and exports network topology as a node-link representation using NetworkX.
+After NetGraph-Core integration, actual graph building happens in analysis
+functions. This step primarily validates the network and stores a serializable
+representation for inspection.
YAML Configuration Example:
```yaml
workflow:
- step_type: BuildGraph
name: "build_network_graph" # Optional: Custom name for this step
+ add_reverse: true # Optional: Add reverse edges (default: true)
```
+The `add_reverse` parameter controls whether reverse edges are added for each link.
+When `True` (default), each Link(A→B) gets both forward(A→B) and reverse(B→A) edges
+for bidirectional connectivity. Set to `False` for directed-only graphs.
+
Results stored in `scenario.results` under the step name as two keys:
- - metadata: Step-level execution metadata (empty dict)
+ - metadata: Step-level execution metadata (node/link counts)
- data: { graph: node-link JSON dict, context: { add_reverse: bool } }
"""
@@ -20,6 +27,8 @@
from dataclasses import dataclass
from typing import TYPE_CHECKING
+import networkx as nx
+
from ngraph.workflow.base import WorkflowStep, register_workflow_step
if TYPE_CHECKING:
@@ -28,14 +37,21 @@
@dataclass
class BuildGraph(WorkflowStep):
- """A workflow step that builds a StrictMultiDiGraph from scenario.network.
+ """Validates network topology and stores a node-link representation.
- This step converts the scenario's network definition into a graph structure
- suitable for analysis algorithms. No additional parameters are required.
+ After NetGraph-Core integration, this step validates the network structure
+ and stores a JSON-serializable node-link representation using NetworkX.
+ Actual Core graph building happens in analysis functions as needed.
+
+ Attributes:
+ add_reverse: If True, adds reverse edges for bidirectional connectivity.
+ Defaults to True for backward compatibility.
"""
+ add_reverse: bool = True
+
def run(self, scenario: Scenario) -> None:
- """Build the network graph and store it in results.
+ """Validate the network and store a node-link representation.
Args:
scenario: Scenario containing the network model.
@@ -43,14 +59,62 @@ def run(self, scenario: Scenario) -> None:
Returns:
None
"""
- graph = scenario.network.to_strict_multidigraph(add_reverse=True)
- scenario.results.put("metadata", {})
+ network = scenario.network
+
+ # Build NetworkX MultiDiGraph from Network
+ graph = nx.MultiDiGraph()
+
+ # Add nodes with attributes
+ for node_name in sorted(network.nodes.keys()):
+ node = network.nodes[node_name]
+ graph.add_node(
+ node_name,
+ disabled=node.disabled,
+ **node.attrs,
+ )
+
+ # Add edges (links) with attributes
+ for link_id in sorted(network.links.keys()):
+ link = network.links[link_id]
+ # Add forward edge
+ graph.add_edge(
+ link.source,
+ link.target,
+ id=link_id,
+ capacity=float(link.capacity),
+ cost=float(link.cost),
+ disabled=link.disabled,
+ **link.attrs,
+ )
+ # Add reverse edge if configured (for bidirectional connectivity)
+ if self.add_reverse:
+ reverse_id = f"{link_id}_reverse"
+ graph.add_edge(
+ link.target,
+ link.source,
+ id=reverse_id,
+ capacity=float(link.capacity),
+ cost=float(link.cost),
+ disabled=link.disabled,
+ **link.attrs,
+ )
+
+ # Convert to node-link format for serialization
+ # Use edges="edges" for forward compatibility with NetworkX 3.6+
+ graph_dict = nx.node_link_data(graph, edges="edges")
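+ # Illustrative shape of the serialized dict (keys per NetworkX's
+ # node-link format): {"directed": True, "multigraph": True, "graph": {},
+ #  "nodes": [{"id": ...}], "edges": [{"source": ..., "target": ...}]}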
+
+ scenario.results.put(
+ "metadata",
+ {
+ "node_count": len(graph.nodes),
+ "link_count": len(graph.edges),
+ },
+ )
scenario.results.put(
"data",
{
- # Store as JSON-safe node-link dict rather than raw graph object
- "graph": graph.to_dict(),
- "context": {"add_reverse": True},
+ "graph": graph_dict,
+ "context": {"add_reverse": self.add_reverse},
},
)
diff --git a/ngraph/workflow/cost_power.py b/ngraph/workflow/cost_power.py
index 08bf833..f3d74a5 100644
--- a/ngraph/workflow/cost_power.py
+++ b/ngraph/workflow/cost_power.py
@@ -51,14 +51,14 @@
from dataclasses import dataclass
from typing import Any, Dict, List
-from ngraph.components import (
+from ngraph.explorer import NetworkExplorer
+from ngraph.logging import get_logger
+from ngraph.model.components import (
ComponentsLibrary,
resolve_link_end_components,
resolve_node_hardware,
totals_with_multiplier,
)
-from ngraph.explorer import NetworkExplorer
-from ngraph.logging import get_logger
from ngraph.workflow.base import WorkflowStep, register_workflow_step
logger = get_logger(__name__)
@@ -103,16 +103,12 @@ def run(self, scenario: Any) -> None:
explorer = NetworkExplorer.explore_network(network, components_library=library)
- # Helper: enabled checks honor both flags and attrs for consistency
+ # Helper: enabled checks
def node_enabled(nd: Any) -> bool:
- return not (
- bool(getattr(nd, "disabled", False)) or bool(nd.attrs.get("disabled"))
- )
+ return not bool(nd.disabled)
def link_enabled(lk: Any) -> bool:
- return not (
- bool(getattr(lk, "disabled", False)) or bool(lk.attrs.get("disabled"))
- )
+ return not bool(lk.disabled)
# Precompute endpoint eligibility for optics (node must have platform HW)
node_has_hw: Dict[str, bool] = {}
diff --git a/ngraph/workflow/max_flow_step.py b/ngraph/workflow/max_flow_step.py
index 61a8b30..0fac921 100644
--- a/ngraph/workflow/max_flow_step.py
+++ b/ngraph/workflow/max_flow_step.py
@@ -30,10 +30,10 @@
from dataclasses import dataclass
from typing import TYPE_CHECKING
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.failure.manager.manager import FailureManager
+from ngraph.exec.failure.manager import FailureManager
from ngraph.logging import get_logger
from ngraph.results.flow import FlowIterationResult
+from ngraph.types.base import FlowPlacement
from ngraph.workflow.base import WorkflowStep, register_workflow_step
if TYPE_CHECKING:
diff --git a/ngraph/workflow/maximum_supported_demand_step.py b/ngraph/workflow/maximum_supported_demand_step.py
index 8b959cf..82f41ba 100644
--- a/ngraph/workflow/maximum_supported_demand_step.py
+++ b/ngraph/workflow/maximum_supported_demand_step.py
@@ -15,10 +15,12 @@
from dataclasses import dataclass
from typing import Any
-from ngraph.demand.manager.manager import TrafficManager, TrafficResult
-from ngraph.demand.matrix import TrafficMatrixSet
-from ngraph.demand.spec import TrafficDemand
+import netgraph_core
+
+from ngraph.exec.demand.expand import expand_demands
from ngraph.logging import get_logger
+from ngraph.model.demand.spec import TrafficDemand
+from ngraph.model.flow.policy_config import FlowPolicyPreset, create_flow_policy
from ngraph.workflow.base import WorkflowStep, register_workflow_step
logger = get_logger(__name__)
@@ -74,17 +76,17 @@ def run(self, scenario: "Any") -> None:
def _serialize_policy(cfg: Any) -> Any:
try:
- from ngraph.flows.policy import (
- FlowPolicyConfig, # local import to avoid heavy deps
+ from ngraph.model.flow.policy_config import (
+ FlowPolicyPreset, # local import to avoid heavy deps
)
except Exception: # pragma: no cover - defensive
return str(cfg) if cfg is not None else None
if cfg is None:
return None
- if isinstance(cfg, FlowPolicyConfig):
+ if isinstance(cfg, FlowPolicyPreset):
return cfg.name
try:
- return FlowPolicyConfig(int(cfg)).name
+ return FlowPolicyPreset(int(cfg)).name
except Exception:
return str(cfg)
@@ -102,6 +104,13 @@ def _serialize_policy(cfg: Any) -> Any:
for td in base_tds
]
+ # Validation: Ensure traffic matrix contains demands
+ if not base_demands:
+ raise ValueError(
+ f"Traffic matrix '{self.matrix_name}' contains no demands. "
+ "Cannot compute maximum supported demand without traffic specifications."
+ )
+
start_alpha = float(self.alpha_start)
g = float(self.growth_factor)
if not (g > 1.0):
@@ -202,10 +211,10 @@ def probe(alpha: float) -> tuple[bool, dict[str, Any]]:
)
@staticmethod
- def _build_scaled_matrix(
+ def _build_scaled_demands(
base_demands: list[dict[str, Any]], alpha: float
- ) -> TrafficMatrixSet:
- tmset = TrafficMatrixSet()
+ ) -> list[TrafficDemand]:
+ """Build scaled traffic demands for an alpha probe."""
demands: list[TrafficDemand] = []
for d in base_demands:
demands.append(
@@ -218,8 +227,7 @@ def _build_scaled_matrix(
mode=str(d.get("mode", "pairwise")),
)
)
- tmset.add("temp", demands)
- return tmset
+ return demands
@classmethod
def _evaluate_alpha(
@@ -231,6 +239,18 @@ def _evaluate_alpha(
placement_rounds: int | str,
seeds: int,
) -> tuple[bool, dict[str, Any]]:
+ """Evaluate if alpha is feasible using Core-based placement.
+
+ Args:
+ alpha: Scale factor to test.
+ scenario: Scenario containing network and traffic matrix.
+ matrix_name: Name of traffic matrix to use.
+ placement_rounds: Placement rounds (unused; Core handles placement internally).
+ seeds: Number of seeds to test.
+
+ Returns:
+ Tuple of (feasible, details_dict).
+ """
base_tds = scenario.traffic_matrix_set.get_matrix(matrix_name)
base_demands: list[dict[str, Any]] = [
{
@@ -243,30 +263,85 @@ def _evaluate_alpha(
}
for td in base_tds
]
+
+ # Build scaled demands
+ scaled_demands = cls._build_scaled_demands(base_demands, alpha)
+
+ # Phase 1: Expand demands (get names + augmentations)
+ expansion = expand_demands(
+ scenario.network,
+ scaled_demands,
+ default_policy_preset=FlowPolicyPreset.SHORTEST_PATHS_ECMP,
+ )
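+ # expansion.demands lists concrete src/dst entries with volume and priority;
+ # expansion.augmentations add pseudo nodes used by "combine"-mode demands.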
+
+ # Phase 2: Build Core infrastructure with augmentations
+ from ngraph.adapters.core import build_graph, get_disabled_exclusions
+
+ # Include disabled nodes/links in exclusions
+ excluded_nodes, excluded_links = get_disabled_exclusions(scenario.network)
+
+ graph_handle, multidigraph, edge_mapper, node_mapper = build_graph(
+ scenario.network,
+ augmentations=expansion.augmentations,
+ excluded_nodes=excluded_nodes,
+ excluded_links=excluded_links,
+ )
+ # Augmentations include pseudo nodes for combine mode
+
+ backend = netgraph_core.Backend.cpu()
+ algorithms = netgraph_core.Algorithms(backend)
+
decisions: list[bool] = []
min_ratios: list[float] = []
- tmset = cls._build_scaled_matrix(base_demands, alpha)
- tm = TrafficManager(
- network=scenario.network, traffic_matrix_set=tmset, matrix_name="temp"
- )
- tm.build_graph(add_reverse=True)
+
for _ in range(max(1, int(seeds))):
- tm.reset_all_flow_usages()
- tm.expand_demands()
- tm.place_all_demands(placement_rounds=placement_rounds)
- results: list[TrafficResult] = tm.get_traffic_results(detailed=False)
- ratios: list[float] = []
- for r in results:
- total = float(r.total_volume)
- placed = float(r.placed_volume)
- ratio = 1.0 if total == 0.0 else (placed / total)
- ratios.append(ratio)
- is_feasible = all(r >= 1.0 - 1e-12 for r in ratios)
+ # Create fresh FlowGraph for each seed
+ flow_graph = netgraph_core.FlowGraph(multidigraph)
+
+ # Phase 3: Place demands using Core
+ total_demand = 0.0
+ total_placed = 0.0
+
+ for demand in expansion.demands:
+ # Resolve node names to IDs (includes pseudo nodes)
+ src_id = node_mapper.to_id(demand.src_name)
+ dst_id = node_mapper.to_id(demand.dst_name)
+
+ policy = create_flow_policy(
+ algorithms,
+ graph_handle,
+ demand.policy_preset,
+ )
+
+ placed, _ = policy.place_demand(
+ flow_graph,
+ src_id,
+ dst_id,
+ demand.priority,
+ demand.volume,
+ )
+
+ total_demand += demand.volume
+ total_placed += placed
+
+ # Validation: Ensure we have non-zero demand to evaluate
+ if total_demand == 0.0:
+ raise ValueError(
+ f"Cannot evaluate feasibility for alpha={alpha:.6g}: total demand is zero. "
+ "This indicates that no demands were successfully expanded or all demand volumes are zero."
+ )
+
+ # Check feasibility
+ ratio = total_placed / total_demand
+ is_feasible = ratio >= 1.0 - 1e-12
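+ # The 1e-12 slack absorbs floating-point rounding in the placed/total ratio.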
decisions.append(is_feasible)
- min_ratios.append(min(ratios) if ratios else 1.0)
+ min_ratios.append(ratio)
+
+ # Majority vote across seeds
yes = sum(1 for d in decisions if d)
required = (len(decisions) // 2) + 1
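+ # Strict majority: e.g. 3 seeds need 2 feasible runs; 1 seed needs 1.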
feasible = yes >= required
+
details = {
"seeds": len(decisions),
"feasible_seeds": yes,
diff --git a/ngraph/workflow/network_stats.py b/ngraph/workflow/network_stats.py
index 6d8b534..18eb8e4 100644
--- a/ngraph/workflow/network_stats.py
+++ b/ngraph/workflow/network_stats.py
@@ -28,7 +28,6 @@
from typing import TYPE_CHECKING, Dict, Iterable, List
from ngraph.logging import get_logger
-from ngraph.model.view import NetworkView
from ngraph.workflow.base import WorkflowStep, register_workflow_step
if TYPE_CHECKING:
@@ -39,7 +38,7 @@
class NetworkStats(WorkflowStep):
"""Compute basic node and link statistics for the network.
- Supports optional exclusion simulation using NetworkView without modifying the base network.
+ Supports optional exclusion simulation without modifying the base network.
Attributes:
include_disabled: If True, include disabled nodes and links in statistics.
@@ -55,8 +54,8 @@ class NetworkStats(WorkflowStep):
def run(self, scenario: Scenario) -> None:
"""Compute and store network statistics.
- If `excluded_nodes` or `excluded_links` are specified, uses `NetworkView` to
- simulate exclusions without modifying the base network.
+ If `excluded_nodes` or `excluded_links` are specified, filters them out
+ without modifying the base network.
Args:
scenario: The scenario containing the network to analyze.
@@ -64,33 +63,42 @@ def run(self, scenario: Scenario) -> None:
Returns:
None
"""
- # Create view if we have exclusions, otherwise use base network
- if self.excluded_nodes or self.excluded_links:
- network_or_view = NetworkView.from_excluded_sets(
- scenario.network,
- excluded_nodes=self.excluded_nodes,
- excluded_links=self.excluded_links,
- )
- nodes = network_or_view.nodes
- links = network_or_view.links
+ # Convert exclusion iterables to sets for efficient lookup
+ excluded_nodes_set = set(self.excluded_nodes) if self.excluded_nodes else set()
+ excluded_links_set = set(self.excluded_links) if self.excluded_links else set()
+
+ # Filter nodes based on disabled status and exclusions
+ if self.include_disabled:
+ nodes = {
+ name: node
+ for name, node in scenario.network.nodes.items()
+ if name not in excluded_nodes_set
+ }
else:
- # Use base network, optionally filtering disabled
- if self.include_disabled:
- nodes = scenario.network.nodes
- links = scenario.network.links
- else:
- nodes = {
- name: node
- for name, node in scenario.network.nodes.items()
- if not node.disabled
- }
- links = {
- link_id: link
- for link_id, link in scenario.network.links.items()
- if not link.disabled
- and link.source in nodes # Source node must be enabled
- and link.target in nodes # Target node must be enabled
- }
+ nodes = {
+ name: node
+ for name, node in scenario.network.nodes.items()
+ if not node.disabled and name not in excluded_nodes_set
+ }
+
+ # Filter links based on disabled status, exclusions, and node availability
+ if self.include_disabled:
+ links = {
+ link_id: link
+ for link_id, link in scenario.network.links.items()
+ if link_id not in excluded_links_set
+ and link.source in nodes
+ and link.target in nodes
+ }
+ else:
+ links = {
+ link_id: link
+ for link_id, link in scenario.network.links.items()
+ if not link.disabled
+ and link_id not in excluded_links_set
+ and link.source in nodes
+ and link.target in nodes
+ }
# Compute node statistics
node_count = len(nodes)
diff --git a/ngraph/workflow/parse.py b/ngraph/workflow/parse.py
new file mode 100644
index 0000000..2fd6e7f
--- /dev/null
+++ b/ngraph/workflow/parse.py
@@ -0,0 +1,82 @@
+"""Workflow parsing helpers.
+
+Converts a normalized workflow section (list[dict]) into WorkflowStep
+instances using the WORKFLOW_STEP_REGISTRY and attaches unique names/seeds.
+"""
+
+from __future__ import annotations
+
+from typing import Any, Callable, Dict, List, Optional
+
+from ngraph.logging import get_logger
+from ngraph.utils.yaml_utils import normalize_yaml_dict_keys
+from ngraph.workflow.base import WORKFLOW_STEP_REGISTRY, WorkflowStep
+
+_logger = get_logger(__name__)
+
+
+def build_workflow_steps(
+ workflow_data: List[Dict[str, Any]],
+ derive_seed: Callable[[str], Optional[int]],
+) -> List[WorkflowStep]:
+ """Instantiate workflow steps from normalized dictionaries.
+
+ Args:
+ workflow_data: List of step dicts; each must have "step_type".
+ derive_seed: Callable that takes a step name and returns a seed or None.
+
+ Returns:
+ A list of WorkflowStep instances with unique names and optional seeds.
+ """
+ if not isinstance(workflow_data, list):
+ raise ValueError("'workflow' must be a list if present.")
+
+ steps: List[WorkflowStep] = []
+ assigned_names: set[str] = set()
+
+ for step_index, step_info in enumerate(workflow_data):
+ step_type = step_info.get("step_type")
+ if not step_type:
+ raise ValueError(
+ "Each workflow entry must have a 'step_type' field "
+ "indicating the WorkflowStep subclass to use."
+ )
+
+ step_cls = WORKFLOW_STEP_REGISTRY.get(step_type)
+ if not step_cls:
+ raise ValueError(f"Unrecognized 'step_type': {step_type}")
+
+ ctor_args = {k: v for k, v in step_info.items() if k != "step_type"}
+ normalized_ctor_args = normalize_yaml_dict_keys(ctor_args)
+
+ raw_name = normalized_ctor_args.get("name")
+ if isinstance(raw_name, str) and raw_name.strip() == "":
+ raw_name = None
+ step_name = raw_name or f"{step_type}_{step_index}"
+
+ if step_name in assigned_names:
+ raise ValueError(
+ f"Duplicate workflow step name '{step_name}'. Each step must have a unique name."
+ )
+ assigned_names.add(step_name)
+
+ normalized_ctor_args["name"] = step_name
+
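+ # Explicit per-step seeds take precedence; otherwise try to derive one
+ # from the step name (derive_seed may return None, leaving the seed unset).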
+ if "seed" not in normalized_ctor_args or normalized_ctor_args["seed"] is None:
+ derived = derive_seed(step_name)
+ if derived is not None:
+ normalized_ctor_args["seed"] = derived
+
+ step_obj = step_cls(**normalized_ctor_args)
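+ # Best-effort bookkeeping: record where the seed came from; any failure
+ # to set this attribute is deliberately ignored.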
+ try:
+ step_obj._seed_source = (
+ "explicit-step"
+ if "seed" in ctor_args and ctor_args["seed"] is not None
+ else "scenario-derived"
+ )
+ except Exception:
+ pass
+
+ steps.append(step_obj)
+
+ return steps
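+
+
+# Example (illustrative; assumes a step type registered under the name
+# "NetworkStats", as in ngraph.workflow.network_stats):
+#
+#     steps = build_workflow_steps(
+#         [
+#             {"step_type": "NetworkStats"},
+#             {"step_type": "NetworkStats", "name": "post"},
+#         ],
+#         derive_seed=lambda name: None,
+#     )
+#
+# The first entry gets the generated name "NetworkStats_0"; the second keeps
+# "post". Duplicate names raise ValueError.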
diff --git a/ngraph/workflow/traffic_matrix_placement_step.py b/ngraph/workflow/traffic_matrix_placement_step.py
index cb3dfdb..feef6f7 100644
--- a/ngraph/workflow/traffic_matrix_placement_step.py
+++ b/ngraph/workflow/traffic_matrix_placement_step.py
@@ -11,7 +11,7 @@
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
-from ngraph.failure.manager.manager import FailureManager
+from ngraph.exec.failure.manager import FailureManager
from ngraph.logging import get_logger
from ngraph.results.flow import FlowIterationResult
from ngraph.workflow.base import WorkflowStep, register_workflow_step
@@ -104,17 +104,17 @@ def run(self, scenario: "Scenario") -> None:
) from exc
def _serialize_policy(cfg: Any) -> Any:
- from ngraph.flows.policy import FlowPolicyConfig # local import
+ from ngraph.model.flow.policy_config import FlowPolicyPreset # local import
if cfg is None:
return None
- if isinstance(cfg, FlowPolicyConfig):
+ if isinstance(cfg, FlowPolicyPreset):
return cfg.name
# Fall back to string when it cannot be coerced to enum
try:
- return FlowPolicyConfig(int(cfg)).name
+ return FlowPolicyPreset(int(cfg)).name
except Exception as exc:
- logger.debug("Unrecognized flow_policy_config value: %r (%s)", cfg, exc)
+ logger.debug("Unrecognized flow_policy_preset value: %r (%s)", cfg, exc)
return str(cfg)
base_demands: list[dict[str, Any]] = [
diff --git a/pyproject.toml b/pyproject.toml
index a8d7494..4d9ed84 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta"
# ---------------------------------------------------------------------
[project]
name = "ngraph"
-version = "0.9.1"
+version = "0.10.0"
description = "A tool and a library for network modeling and analysis."
readme = "README.md"
authors = [{ name = "Andrey Golovanov" }]
@@ -14,6 +14,12 @@ license-files = ["LICENSE"]
requires-python = ">=3.11"
classifiers = [
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Science/Research",
+ "Topic :: Scientific/Engineering",
+ "Topic :: System :: Networking",
+ "Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
@@ -28,8 +34,12 @@ dependencies = [
"pyyaml>=6.0",
"pandas>=2.0",
"jsonschema>=4.0",
+ "netgraph-core>=0.1.0",
]
+[project.urls]
+Homepage = "https://github.com/networmix/NetGraph"
+
# Dev / CI extras
[project.optional-dependencies]
dev = [
@@ -122,6 +132,8 @@ skip-magic-trailing-comma = false
[tool.pyright]
typeCheckingMode = "standard" # balanced level
pythonVersion = "3.11"
+venvPath = "."
+venv = "venv"
exclude = [
"tests/**", # tests often use dynamic patterns
"dev/**", # development utilities (not shipped)
diff --git a/tests/adapters/test_adapters.py b/tests/adapters/test_adapters.py
new file mode 100644
index 0000000..da53de5
--- /dev/null
+++ b/tests/adapters/test_adapters.py
@@ -0,0 +1,3 @@
+def test_adapters_placeholder():
+ """Placeholder test for adapters module."""
+ pass
diff --git a/tests/algorithms/sample_graphs.py b/tests/algorithms/sample_graphs.py
deleted file mode 100644
index 9c83c3c..0000000
--- a/tests/algorithms/sample_graphs.py
+++ /dev/null
@@ -1,281 +0,0 @@
-import pytest
-
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-@pytest.fixture
-def line1():
- # Metric:
- # [1] [1,1,2]
- # A◄───────►B◄───────►C
- #
- # Capacity:
- # [5] [1,3,7]
- # A◄───────►B◄───────►C
-
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_node("C")
-
- g.add_edge("A", "B", key=0, cost=1, capacity=5)
- g.add_edge("B", "A", key=1, cost=1, capacity=5)
- g.add_edge("B", "C", key=2, cost=1, capacity=1)
- g.add_edge("C", "B", key=3, cost=1, capacity=1)
- g.add_edge("B", "C", key=4, cost=1, capacity=3)
- g.add_edge("C", "B", key=5, cost=1, capacity=3)
- g.add_edge("B", "C", key=6, cost=2, capacity=7)
- g.add_edge("C", "B", key=7, cost=2, capacity=7)
- return g
-
-
-@pytest.fixture
-def triangle1():
- # Metric:
- # [1] [1]
- # ┌──────►B◄──────┐
- # │ │
- # │ │
- # │ │
- # ▼ [1] ▼
- # A◄─────────────►C
- #
- # Capacity:
- # [15] [15]
- # ┌──────►B◄──────┐
- # │ │
- # │ │
- # │ │
- # ▼ [5] ▼
- # A◄─────────────►C
-
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_node("C")
-
- g.add_edge("A", "B", key=0, cost=1, capacity=15, label="1")
- g.add_edge("B", "A", key=1, cost=1, capacity=15, label="1")
- g.add_edge("B", "C", key=2, cost=1, capacity=15, label="2")
- g.add_edge("C", "B", key=3, cost=1, capacity=15, label="2")
- g.add_edge("A", "C", key=4, cost=1, capacity=5, label="3")
- g.add_edge("C", "A", key=5, cost=1, capacity=5, label="3")
- return g
-
-
-@pytest.fixture
-def square1():
- # Metric:
- # [1] [1]
- # ┌────────►B─────────┐
- # │ │
- # │ ▼
- # A C
- # │ ▲
- # │ [2] [2] │
- # └────────►D─────────┘
- #
- # Capacity is similar (1,1,2,2).
-
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D"):
- g.add_node(node)
-
- g.add_edge("A", "B", key=0, cost=1, capacity=1)
- g.add_edge("B", "C", key=1, cost=1, capacity=1)
- g.add_edge("A", "D", key=2, cost=2, capacity=2)
- g.add_edge("D", "C", key=3, cost=2, capacity=2)
- return g
-
-
-@pytest.fixture
-def square2():
- # Metric:
- # [1] [1]
- # ┌────────►B─────────┐
- # │ │
- # │ ▼
- # A C
- # │ ▲
- # │ [1] [1] │
- # └────────►D─────────┘
- #
- # Capacity:
- # [1] [1]
- # ┌────────►B─────────┐
- # │ │
- # │ ▼
- # A C
- # │ ▲
- # │ [2] [2] │
- # └────────►D─────────┘
-
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D"):
- g.add_node(node)
-
- g.add_edge("A", "B", key=0, cost=1, capacity=1)
- g.add_edge("B", "C", key=1, cost=1, capacity=1)
- g.add_edge("A", "D", key=2, cost=1, capacity=2)
- g.add_edge("D", "C", key=3, cost=1, capacity=2)
- return g
-
-
-@pytest.fixture
-def square3():
- # Metric:
- # [1] [1]
- # ┌────────►B─────────┐
- # │ ▲ │
- # │ │ ▼
- # A │[1] C
- # │ │ ▲
- # │ [1] ▼ [1] │
- # └────────►D─────────┘
- #
- # Capacity:
- # [100] [125]
- # ┌────────►B─────────┐
- # │ ▲ │
- # │ │ ▼
- # A │[50] C
- # │ │ ▲
- # │ [75] ▼ [50] │
- # └────────►D─────────┘
-
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D"):
- g.add_node(node)
-
- g.add_edge("A", "B", key=0, cost=1, capacity=100)
- g.add_edge("B", "C", key=1, cost=1, capacity=125)
- g.add_edge("A", "D", key=2, cost=1, capacity=75)
- g.add_edge("D", "C", key=3, cost=1, capacity=50)
- g.add_edge("B", "D", key=4, cost=1, capacity=50)
- g.add_edge("D", "B", key=5, cost=1, capacity=50)
- return g
-
-
-@pytest.fixture
-def square4():
- # Metric and capacity as in tests that assume parallel and varied capacities.
-
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D"):
- g.add_node(node)
-
- g.add_edge("A", "B", key=0, cost=1, capacity=100)
- g.add_edge("B", "C", key=1, cost=1, capacity=125)
- g.add_edge("A", "D", key=2, cost=1, capacity=75)
- g.add_edge("D", "C", key=3, cost=1, capacity=50)
- g.add_edge("B", "D", key=4, cost=1, capacity=50)
- g.add_edge("D", "B", key=5, cost=1, capacity=50)
- g.add_edge("A", "B", key=6, cost=2, capacity=200)
- g.add_edge("B", "D", key=7, cost=2, capacity=200)
- g.add_edge("D", "C", key=8, cost=2, capacity=200)
- return g
-
-
-@pytest.fixture
-def square5():
- # Metric:
- # A -> B, A -> C, B -> D, C -> D, and cross edges B <-> C
- # All costs 1, capacities 1.
-
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D"):
- g.add_node(node)
-
- g.add_edge("A", "B", key=0, cost=1, capacity=1)
- g.add_edge("A", "C", key=1, cost=1, capacity=1)
- g.add_edge("B", "D", key=2, cost=1, capacity=1)
- g.add_edge("C", "D", key=3, cost=1, capacity=1)
- g.add_edge("B", "C", key=4, cost=1, capacity=1)
- g.add_edge("C", "B", key=5, cost=1, capacity=1)
- return g
-
-
-@pytest.fixture
-def graph1():
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D", "E"):
- g.add_node(node)
-
- g.add_edge("A", "B", key=0, cost=1, capacity=1)
- g.add_edge("A", "C", key=1, cost=1, capacity=1)
- g.add_edge("B", "D", key=2, cost=1, capacity=1)
- g.add_edge("C", "D", key=3, cost=1, capacity=1)
- g.add_edge("B", "C", key=4, cost=1, capacity=1)
- g.add_edge("C", "B", key=5, cost=1, capacity=1)
- g.add_edge("D", "E", key=6, cost=1, capacity=1)
- return g
-
-
-@pytest.fixture
-def graph2():
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D", "E"):
- g.add_node(node)
-
- g.add_edge("A", "B", key=0, cost=1, capacity=1)
- g.add_edge("B", "C", key=1, cost=1, capacity=1)
- g.add_edge("B", "D", key=2, cost=1, capacity=1)
- g.add_edge("C", "D", key=3, cost=1, capacity=1)
- g.add_edge("D", "C", key=4, cost=1, capacity=1)
- g.add_edge("C", "E", key=5, cost=1, capacity=1)
- g.add_edge("D", "E", key=6, cost=1, capacity=1)
- return g
-
-
-@pytest.fixture
-def graph3():
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D", "E", "F"):
- g.add_node(node)
-
- g.add_edge("A", "B", key=0, cost=1, capacity=2)
- g.add_edge("A", "B", key=1, cost=1, capacity=4)
- g.add_edge("A", "B", key=2, cost=1, capacity=6)
- g.add_edge("B", "C", key=3, cost=1, capacity=1)
- g.add_edge("B", "C", key=4, cost=1, capacity=2)
- g.add_edge("B", "C", key=5, cost=1, capacity=3)
- g.add_edge("C", "D", key=6, cost=2, capacity=3)
- g.add_edge("A", "E", key=7, cost=1, capacity=5)
- g.add_edge("E", "C", key=8, cost=1, capacity=4)
- g.add_edge("A", "D", key=9, cost=4, capacity=2)
- g.add_edge("C", "F", key=10, cost=1, capacity=1)
- g.add_edge("F", "D", key=11, cost=1, capacity=2)
- return g
-
-
-@pytest.fixture
-def graph4():
- g = StrictMultiDiGraph()
- for node in ("A", "B", "B1", "B2", "C"):
- g.add_node(node)
-
- g.add_edge("A", "B", key=0, cost=1, capacity=1)
- g.add_edge("B", "C", key=1, cost=1, capacity=1)
- g.add_edge("A", "B1", key=2, cost=2, capacity=2)
- g.add_edge("B1", "C", key=3, cost=2, capacity=2)
- g.add_edge("A", "B2", key=4, cost=3, capacity=3)
- g.add_edge("B2", "C", key=5, cost=3, capacity=3)
- return g
-
-
-@pytest.fixture
-def graph5():
- """Fully connected graph with 5 nodes, cost=1, capacity=1 per edge."""
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D", "E"):
- g.add_node(node)
-
- edge_id = 0
- nodes = ["A", "B", "C", "D", "E"]
- for src in nodes:
- for dst in nodes:
- if src != dst:
- g.add_edge(src, dst, key=edge_id, cost=1, capacity=1)
- edge_id += 1
-
- return g
diff --git a/tests/algorithms/test_bugs_algorithms.py b/tests/algorithms/test_bugs_algorithms.py
deleted file mode 100644
index a584ff8..0000000
--- a/tests/algorithms/test_bugs_algorithms.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import pytest
-
-from ngraph.algorithms.max_flow import calc_max_flow
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-def test_min_cut_should_not_include_edge_from_source_side_with_only_reverse_reachability():
- """Repro: reverse-residual reachability is ignored in reachable/min_cut.
-
- Topology (costs in brackets, capacities all 1):
- S -> A [0]
- A -> B [1]
- B -> T [1]
- S -> B [2]
-
- Max-flow augmentation picks S->A->B->T (cost=2), saturating S->A, A->B, B->T.
- In the true residual graph, S can reach B (forward residual on S->B), and then reach A via
- the reverse residual edge B->A (from the flow on A->B). Therefore A is reachable from S
- in the residual graph, so edge S->A must NOT be in the s-t min-cut. The min-cut should be
- only {B->T}.
-
- Current implementation computes reachable via forward residual edges only
- (ngraph/algorithms/max_flow.py:_build_flow_summary), incorrectly including S->A in min-cut.
- """
-
- g = StrictMultiDiGraph()
- for n in ("S", "A", "B", "T"):
- g.add_node(n)
-
- # Record keys for explicit assertions
- sa_k = g.add_edge("S", "A", capacity=1.0, flow=0.0, flows={}, cost=0)
- ab_k = g.add_edge("A", "B", capacity=1.0, flow=0.0, flows={}, cost=1)
- bt_k = g.add_edge("B", "T", capacity=1.0, flow=0.0, flows={}, cost=1)
- sb_k = g.add_edge("S", "B", capacity=1.0, flow=0.0, flows={}, cost=2)
-
- # Sanity: avoid flake8/ruff unused warnings
- assert all(k is not None for k in (sa_k, ab_k, bt_k, sb_k))
-
- flow, summary = calc_max_flow(g, "S", "T", return_summary=True)
-
- assert flow == 1.0
-
- # Expected correct min-cut: only B->T
- expected_bt = ("B", "T", bt_k)
- unexpected_sa = ("S", "A", sa_k)
-
- # Now correct behavior: only B->T is in the cut, S->A is not.
- assert expected_bt in summary.min_cut
- assert unexpected_sa not in summary.min_cut
-
-
-@pytest.mark.xfail(
- strict=True,
- reason="SPF fast path hardcodes 'capacity'/'flow' ignoring calc_max_flow capacity_attr/flow_attr; see ngraph/algorithms/spf.py:_spf_fast_all_min_cost_with_cap_remaining_dijkstra",
-)
-def test_calc_max_flow_respects_custom_attribute_names():
- """Repro: passing custom capacity/flow attribute names breaks SPF fast path.
-
- The public API allows custom attribute names via capacity_attr/flow_attr.
- The SPF fast-path still reads 'capacity' and 'flow' directly, raising KeyError.
- """
-
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- # Use non-default attribute names: 'cap' and 'flowX'
- g.add_edge("A", "B", cap=5.0, flowX=0.0, flows={}, cost=1)
-
- # Should compute 5.0 using the provided attribute names without error.
- max_flow = calc_max_flow(
- g,
- "A",
- "B",
- capacity_attr="cap",
- flow_attr="flowX",
- )
-
- assert max_flow == 5.0
diff --git a/tests/algorithms/test_calc_capacity.py b/tests/algorithms/test_calc_capacity.py
deleted file mode 100644
index 67a559a..0000000
--- a/tests/algorithms/test_calc_capacity.py
+++ /dev/null
@@ -1,551 +0,0 @@
-# pylint: disable=protected-access,invalid-name
-from typing import Dict, List
-
-import pytest
-
-from ngraph.algorithms.capacity import (
- FlowPlacement,
- _init_graph_data,
- calc_graph_capacity,
-)
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.algorithms.spf import spf
-from ngraph.graph.strict_multidigraph import EdgeID, NodeID, StrictMultiDiGraph
-
-# Type alias to ensure consistency with library expectations
-PredDict = Dict[NodeID, Dict[NodeID, List[EdgeID]]]
-
-
-class TestGraphCapacity:
- def test_calc_graph_capacity_empty_graph(self):
- r = init_flow_graph(StrictMultiDiGraph())
-
- # Expected an exception ValueError because the graph is empty
- with pytest.raises(ValueError):
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", {}, flow_placement=FlowPlacement.PROPORTIONAL
- )
-
- def test_calc_graph_capacity_empty_pred(self):
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_node("C")
- g.add_edge("A", "B", capacity=1)
- g.add_edge("B", "C", capacity=1)
- r = init_flow_graph(g)
-
- # Expected max_flow = 0 because the path is invalid
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", {}, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 0
-
- def test_calc_graph_capacity_no_cap(self):
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_node("C")
- g.add_edge("A", "B", key=0, capacity=0)
- g.add_edge("B", "C", key=1, capacity=1)
- r = init_flow_graph(g)
- pred: PredDict = {"A": {}, "B": {"A": [0]}, "C": {"B": [1]}}
-
- # Expected max_flow = 0 because there is no capacity along the path
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 0
-
- def test_calc_graph_capacity_line1(self, line1):
- _, pred = spf(line1, "A")
- pred: PredDict = pred # Type annotation for clarity
- r = init_flow_graph(line1)
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 4
- assert flow_dict == {
- "A": {"B": 1.0},
- "B": {"A": -1.0, "C": 1.0},
- "C": {"B": -1.0},
- }
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow == 2
- assert flow_dict == {
- "A": {"B": 1.0},
- "B": {"A": -1.0, "C": 1.0},
- "C": {"B": -1.0},
- }
-
- def test_calc_graph_capacity_triangle1(self, triangle1):
- _, pred = spf(triangle1, "A")
- r = init_flow_graph(triangle1)
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 5
- assert flow_dict == {"A": {"C": 1.0}, "C": {"A": -1.0}}
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow == 5
- assert flow_dict == {"A": {"C": 1.0}, "C": {"A": -1.0}}
-
- def test_calc_graph_capacity_square1(self, square1):
- _, pred = spf(square1, "A")
- r = init_flow_graph(square1)
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 1
- assert flow_dict == {
- "C": {"B": -1.0},
- "B": {"C": 1.0, "A": -1.0},
- "A": {"B": 1.0},
- }
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow == 1
- assert flow_dict == {
- "C": {"B": -1.0},
- "B": {"C": 1.0, "A": -1.0},
- "A": {"B": 1.0},
- }
-
- def test_calc_graph_capacity_square2_1(self, square2):
- _, pred = spf(square2, "A")
- r = init_flow_graph(square2)
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 3
- assert flow_dict == {
- "A": {"B": 0.3333333333333333, "D": 0.6666666666666666},
- "B": {"A": -0.3333333333333333, "C": 0.3333333333333333},
- "C": {"B": -0.3333333333333333, "D": -0.6666666666666666},
- "D": {"A": -0.6666666666666666, "C": 0.6666666666666666},
- }
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow == 2
- assert flow_dict == {
- "A": {"B": 0.5, "D": 0.5},
- "B": {"A": -0.5, "C": 0.5},
- "C": {"B": -0.5, "D": -0.5},
- "D": {"A": -0.5, "C": 0.5},
- }
-
- def test_calc_graph_capacity_square2_2(self, square2):
- _, pred = spf(square2, "A")
- r = init_flow_graph(square2)
- r["A"]["B"][0]["flow"] = 1
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 2
- assert flow_dict == {
- "A": {"B": -0.0, "D": 1.0},
- "B": {"A": -0.0, "C": -0.0},
- "C": {"B": -0.0, "D": -1.0},
- "D": {"A": -1.0, "C": 1.0},
- }
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow == 0
- assert flow_dict == {
- "A": {"B": 0.5, "D": 0.5},
- "B": {"A": -0.5, "C": 0.5},
- "C": {"B": -0.5, "D": -0.5},
- "D": {"A": -0.5, "C": 0.5},
- }
-
- def test_calc_graph_capacity_square3(self, square3):
- _, pred = spf(square3, "A")
- r = init_flow_graph(square3)
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 150
- assert flow_dict == {
- "A": {"B": 0.6666666666666666, "D": 0.3333333333333333},
- "B": {"A": -0.6666666666666666, "C": 0.6666666666666666},
- "C": {"B": -0.6666666666666666, "D": -0.3333333333333333},
- "D": {"A": -0.3333333333333333, "C": 0.3333333333333333},
- }
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow == 100
- assert flow_dict == {
- "A": {"B": 0.5, "D": 0.5},
- "B": {"A": -0.5, "C": 0.5},
- "C": {"B": -0.5, "D": -0.5},
- "D": {"A": -0.5, "C": 0.5},
- }
-
- def test_calc_graph_capacity_square4(self, square4):
- _, pred = spf(square4, "A")
- r = init_flow_graph(square4)
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 150
- assert flow_dict == {
- "A": {"B": 0.6666666666666666, "D": 0.3333333333333333},
- "B": {"A": -0.6666666666666666, "C": 0.6666666666666666},
- "C": {"B": -0.6666666666666666, "D": -0.3333333333333333},
- "D": {"A": -0.3333333333333333, "C": 0.3333333333333333},
- }
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "C", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow == 100
- assert flow_dict == {
- "A": {"B": 0.5, "D": 0.5},
- "B": {"A": -0.5, "C": 0.5},
- "C": {"B": -0.5, "D": -0.5},
- "D": {"A": -0.5, "C": 0.5},
- }
-
- def test_calc_graph_capacity_square5(self, square5):
- _, pred = spf(square5, "A")
- r = init_flow_graph(square5)
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "D", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 2
- assert flow_dict == {
- "A": {"B": 0.5, "C": 0.5},
- "B": {"A": -0.5, "D": 0.5},
- "C": {"A": -0.5, "D": 0.5},
- "D": {"B": -0.5, "C": -0.5},
- }
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "D", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow == 2
- assert flow_dict == {
- "A": {"B": 0.5, "C": 0.5},
- "B": {"A": -0.5, "D": 0.5},
- "C": {"A": -0.5, "D": 0.5},
- "D": {"B": -0.5, "C": -0.5},
- }
-
- def test_calc_graph_capacity_graph1(self, graph1):
- _, pred = spf(graph1, "A")
- r = init_flow_graph(graph1)
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "E", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 1
- assert flow_dict == {
- "A": {"B": 1.0, "C": -0.0},
- "B": {"A": -1.0, "D": 1.0},
- "C": {"A": -0.0, "D": -0.0},
- "D": {"B": -1.0, "C": -0.0, "E": 1.0},
- "E": {"D": -1.0},
- }
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "E", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow == 1
- assert flow_dict == {
- "E": {"D": -1.0},
- "D": {"E": 1.0, "B": -0.5, "C": -0.5},
- "B": {"D": 0.5, "A": -0.5},
- "C": {"D": 0.5, "A": -0.5},
- "A": {"B": 0.5, "C": 0.5},
- }
-
- def test_calc_graph_capacity_graph3(self, graph3):
- _, pred = spf(graph3, "A")
- r = init_flow_graph(graph3)
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "D", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 6
- assert flow_dict == {
- "A": {"B": 0.6666666666666666, "D": 0.3333333333333333, "E": -0.0},
- "B": {"A": -0.6666666666666666, "C": 0.6666666666666666},
- "C": {
- "B": -0.6666666666666666,
- "D": 0.5,
- "E": -0.0,
- "F": 0.16666666666666666,
- },
- "D": {"A": -0.3333333333333333, "C": -0.5, "F": -0.16666666666666666},
- "E": {"A": -0.0, "C": -0.0},
- "F": {"C": -0.16666666666666666, "D": 0.16666666666666666},
- }
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "D", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow == 2.5
- assert flow_dict == {
- "A": {"B": 0.6, "D": 0.2, "E": 0.2},
- "B": {"A": -0.6, "C": 0.6},
- "C": {"B": -0.6, "D": 0.4, "E": -0.2, "F": 0.4},
- "D": {"A": -0.2, "C": -0.4, "F": -0.4},
- "E": {"A": -0.2, "C": 0.2},
- "F": {"C": -0.4, "D": 0.4},
- }
-
- def test_calc_graph_capacity_self_loop_proportional(self):
- """
- Test self-loop behavior with PROPORTIONAL flow placement.
- When source equals destination, max flow should always be 0.
- """
- # Create a graph with a self-loop
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_edge("A", "A", key=0, capacity=10.0, flow=0.0, flows={}, cost=1)
- r = init_flow_graph(g)
-
- # Create a simple pred with self-loop
- pred: PredDict = {"A": {"A": [0]}}
-
- # Test PROPORTIONAL placement
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "A", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
-
- assert max_flow == 0.0
- # flow_dict should be empty or contain only zero flows
- for node_flows in flow_dict.values():
- for flow_value in node_flows.values():
- assert flow_value == 0.0
-
- def test_calc_graph_capacity_self_loop_equal_balanced(self):
- """
- Test self-loop behavior with EQUAL_BALANCED flow placement.
- When source equals destination, max flow should always be 0.
- """
- # Create a graph with multiple self-loops
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_edge("A", "A", key=0, capacity=5.0, flow=0.0, flows={}, cost=1)
- g.add_edge("A", "A", key=1, capacity=3.0, flow=0.0, flows={}, cost=1)
- r = init_flow_graph(g)
-
- # Create pred with multiple self-loop edges
- pred: PredDict = {"A": {"A": [0, 1]}}
-
- # Test EQUAL_BALANCED placement
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "A", pred, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
-
- assert max_flow == 0.0
- # flow_dict should be empty or contain only zero flows
- for node_flows in flow_dict.values():
- for flow_value in node_flows.values():
- assert flow_value == 0.0
-
- def test_calc_graph_capacity_self_loop_with_other_edges(self):
- """
- Test self-loop behavior in a graph that also has regular edges.
- The self-loop itself should still return 0 flow.
- """
- # Create a graph with both self-loop and regular edges
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_edge("A", "A", key=0, capacity=10.0, flow=0.0, flows={}, cost=1)
- g.add_edge("A", "B", key=1, capacity=5.0, flow=0.0, flows={}, cost=2)
- g.add_edge("B", "A", key=2, capacity=3.0, flow=0.0, flows={}, cost=2)
- r = init_flow_graph(g)
-
- # Test self-loop A->A
- pred_self: PredDict = {"A": {"A": [0]}}
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "A", pred_self, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 0.0
-
- # Test regular flow A->B to verify graph still works for non-self-loops
- pred_regular: PredDict = {"A": {}, "B": {"A": [1]}}
- max_flow_regular, _ = calc_graph_capacity(
- r, "A", "B", pred_regular, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow_regular == 5.0 # Should be limited by A->B capacity
-
- def test_reverse_residual_init_graph_data_proportional(self):
- """_init_graph_data should expose dst->leaf residual capacity in PROPORTIONAL.
-
- Build a tiny graph with forward edges leaf->dc and dc->sink, and reverse dc->leaf.
- SPF pred contains dc predecessors (leaves) and sink predecessor (dc).
- The reversed residual must have positive capacity dc->leaf equal to sum(capacity - flow).
- """
- g = StrictMultiDiGraph()
- for n in ("source", "A/dc", "A/leaf", "sink"):
- g.add_node(n)
- # Forward edges
- e1 = g.add_edge("A/leaf", "A/dc", capacity=10.0, cost=1, flow=0.0, flows={})
- g.add_edge("A/dc", "sink", capacity=float("inf"), cost=0, flow=0.0, flows={})
- # Reverse edge to simulate bidirectional link
- g.add_edge("A/dc", "A/leaf", capacity=10.0, cost=1, flow=0.0, flows={})
-
- # SPF-like predecessor dict: include both directions present in graph
- # sink<-A/dc, A/dc<-A/leaf, and A/leaf<-A/dc (reverse link)
- pred: PredDict = {
- "source": {},
- "A/dc": {"A/leaf": [e1]},
- "A/leaf": {"A/dc": list(g.edges_between("A/dc", "A/leaf"))},
- "sink": {"A/dc": list(g.edges_between("A/dc", "sink"))},
- }
-
- # Run init
- succ, levels, residual_cap, flow_dict = _init_graph_data(
- g,
- pred,
- init_node="sink",
- flow_placement=FlowPlacement.PROPORTIONAL,
- capacity_attr="capacity",
- flow_attr="flow",
- )
- # residuals must reflect both forward directions, and zero-init must not overwrite
- assert residual_cap["A/dc"]["A/leaf"] == 10.0
- assert residual_cap["A/leaf"]["A/dc"] == 10.0
-
- def test_reverse_residual_init_graph_data_equal_balanced(self):
- """_init_graph_data should set reverse residual in EQUAL_BALANCED as min*count.
-
- With two parallel edges leaf->dc with caps (5, 7), min=5 and count=2 -> reverse cap = 10.
- """
- g = StrictMultiDiGraph()
- for n in ("source", "A/dc", "A/leaf", "sink"):
- g.add_node(n)
- # Two parallel forward edges leaf->dc
- e1 = g.add_edge("A/leaf", "A/dc", capacity=5.0, cost=1, flow=0.0, flows={})
- e2 = g.add_edge("A/leaf", "A/dc", capacity=7.0, cost=1, flow=0.0, flows={})
- g.add_edge("A/dc", "sink", capacity=float("inf"), cost=0, flow=0.0, flows={})
- # Reverse edge present too
- g.add_edge("A/dc", "A/leaf", capacity=7.0, cost=1, flow=0.0, flows={})
-
- pred: PredDict = {
- "source": {},
- "A/dc": {"A/leaf": [e1, e2]},
- "sink": {"A/dc": list(g.edges_between("A/dc", "sink"))},
- }
-
- succ, levels, residual_cap, flow_dict = _init_graph_data(
- g,
- pred,
- init_node="sink",
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- capacity_attr="capacity",
- flow_attr="flow",
- )
- # In EQUAL_BALANCED, the reverse residual is assigned on leaf->dc orientation (adj->node)
- # i.e., residual_cap[leaf][dc] = min(capacities) * count = 5*2 = 10
- assert residual_cap["A/leaf"]["A/dc"] == 10.0
- # forward side initialized to 0 in reversed orientation
- assert residual_cap["A/dc"]["A/leaf"] == 0.0
-
- def test_dc_to_dc_reverse_edge_first_hop_proportional(self):
- """Reverse-edge-first hop at destination should yield positive flow.
-
- Topology (with reverse edges to simulate bidirectional links):
- A_leaf -> A_dc (10)
- A_leaf -> B_leaf (10)
- B_leaf -> B_dc (10)
- A_dc -> A_leaf (10) # reverse present
- B_dc -> B_leaf (10) # reverse present
-
- Pseudo nodes: source -> A_dc, B_dc -> sink
- Expected max_flow(source, sink) = 10.0 in PROPORTIONAL mode.
- """
- g = StrictMultiDiGraph()
- for n in ("A_dc", "A_leaf", "B_leaf", "B_dc", "source", "sink"):
- g.add_node(n)
-
- # Forward edges
- g.add_edge("A_leaf", "A_dc", capacity=10.0, cost=1)
- g.add_edge("A_leaf", "B_leaf", capacity=10.0, cost=1)
- g.add_edge("B_leaf", "B_dc", capacity=10.0, cost=1)
- # Reverse edges
- g.add_edge("A_dc", "A_leaf", capacity=10.0, cost=1)
- g.add_edge("B_dc", "B_leaf", capacity=10.0, cost=1)
-
- # Pseudo source/sink
- g.add_edge("source", "A_dc", capacity=float("inf"), cost=0)
- g.add_edge("B_dc", "sink", capacity=float("inf"), cost=0)
-
- r = init_flow_graph(g)
- # Compute SPF with dst_node to mirror real usage in calc_max_flow
- _costs, pred = spf(r, "source", dst_node="sink")
- max_flow, _flow_dict = calc_graph_capacity(
- r, "source", "sink", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 10.0
-
- def test_dc_to_dc_unidirectional_zero(self):
- """Without reverse edges, DC cannot send to leaf; flow must be zero."""
- g = StrictMultiDiGraph()
- for n in ("A_dc", "A_leaf", "B_leaf", "B_dc", "source", "sink"):
- g.add_node(n)
-
- # Forward edges only
- g.add_edge("A_leaf", "A_dc", capacity=10.0, cost=1)
- g.add_edge("A_leaf", "B_leaf", capacity=10.0, cost=1)
- g.add_edge("B_leaf", "B_dc", capacity=10.0, cost=1)
-
- # Pseudo source/sink
- g.add_edge("source", "A_dc", capacity=float("inf"), cost=0)
- g.add_edge("B_dc", "sink", capacity=float("inf"), cost=0)
-
- r = init_flow_graph(g)
- _costs, pred = spf(r, "source", dst_node="sink")
- max_flow, _flow_dict = calc_graph_capacity(
- r, "source", "sink", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert max_flow == 0.0
-
- def test_calc_graph_capacity_self_loop_empty_pred(self):
- """
- Test self-loop behavior when pred is empty.
- Should return 0 flow for self-loop even with empty pred.
- """
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_edge("A", "A", key=0, capacity=10.0, flow=0.0, flows={}, cost=1)
- r = init_flow_graph(g)
-
- # Empty pred
- pred: PredDict = {}
-
- max_flow, flow_dict = calc_graph_capacity(
- r, "A", "A", pred, flow_placement=FlowPlacement.PROPORTIONAL
- )
-
- assert max_flow == 0.0
- # flow_dict should be empty or contain only zero flows
- for node_flows in flow_dict.values():
- for flow_value in node_flows.values():
- assert flow_value == 0.0
diff --git a/tests/algorithms/test_edge_select.py b/tests/algorithms/test_edge_select.py
deleted file mode 100644
index 6839306..0000000
--- a/tests/algorithms/test_edge_select.py
+++ /dev/null
@@ -1,385 +0,0 @@
-from math import isclose
-from typing import Dict, Set, Tuple
-from unittest.mock import MagicMock
-
-import pytest
-
-from ngraph.algorithms.base import Cost
-from ngraph.algorithms.edge_select import EdgeSelect, edge_select_fabric
-from ngraph.graph.strict_multidigraph import (
- AttrDict,
- EdgeID,
- NodeID,
- StrictMultiDiGraph,
-)
-
-
-@pytest.fixture
-def mock_graph() -> StrictMultiDiGraph:
- """A mock StrictMultiDiGraph to pass to selection functions for testing."""
- return MagicMock(spec=StrictMultiDiGraph)
-
-
-@pytest.fixture
-def edge_map() -> Dict[EdgeID, AttrDict]:
- """
- A basic edge_map with varying costs/capacities/flows.
- Edge leftover capacity = capacity - flow.
- """
- return {
- "edgeA": {"cost": 10, "capacity": 100, "flow": 0}, # leftover=100
- "edgeB": {"cost": 10, "capacity": 50, "flow": 25}, # leftover=25
- "edgeC": {"cost": 5, "capacity": 10, "flow": 0}, # leftover=10
- "edgeD": {"cost": 20, "capacity": 10, "flow": 5}, # leftover=5
- "edgeE": {"cost": 5, "capacity": 2, "flow": 1}, # leftover=1
- }
-
-
-# ------------------------------------------------------------------------------
-# Invalid usage / error conditions
-# ------------------------------------------------------------------------------
-
-
-def test_invalid_enum_value() -> None:
- """
- Ensure using an invalid int for the EdgeSelect enum raises a ValueError.
- E.g., 999 is not a valid EdgeSelect.
- """
- with pytest.raises(ValueError, match="999 is not a valid EdgeSelect"):
- EdgeSelect(999)
-
-
-def test_user_defined_no_func() -> None:
- """
- Provide edge_select=USER_DEFINED without 'edge_select_func'.
- This must trigger ValueError.
- """
- with pytest.raises(ValueError, match="requires 'edge_select_func'"):
- edge_select_fabric(edge_select=EdgeSelect.USER_DEFINED)
-
-
-# ------------------------------------------------------------------------------
-# Basic functionality and edge cases
-# ------------------------------------------------------------------------------
-
-
-def test_empty_edge_map(mock_graph: StrictMultiDiGraph) -> None:
- """
- An empty edges_map must yield (inf, []) for any EdgeSelect variant.
- """
- variants = [
- EdgeSelect.ALL_MIN_COST,
- EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- EdgeSelect.ALL_ANY_COST_WITH_CAP_REMAINING,
- EdgeSelect.SINGLE_MIN_COST,
- EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING,
- EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING_LOAD_FACTORED,
- ]
- for variant in variants:
- select_func = edge_select_fabric(variant)
- cost, edges = select_func(
- mock_graph, "A", "B", {}, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == float("inf")
- assert edges == []
-
-
-def test_excluded_nodes_all_min_cost(
- mock_graph: StrictMultiDiGraph, edge_map: Dict[EdgeID, AttrDict]
-) -> None:
- """
- If dst_node is in excluded_nodes, we must get (inf, []) regardless of edges.
- """
- select_func = edge_select_fabric(EdgeSelect.ALL_MIN_COST)
- cost, edges = select_func(
- mock_graph,
- src_node="A",
- dst_node="excludedB",
- edges_map=edge_map,
- excluded_edges=None,
- excluded_nodes={"excludedB"},
- )
- assert cost == float("inf")
- assert edges == []
-
-
-def test_all_min_cost_tie_break(mock_graph: StrictMultiDiGraph) -> None:
- """
- Edges with costs within 1e-12 of each other are treated as equal.
- Both edges must be returned.
- """
- edge_map_ = {
- "e1": {"cost": 10.0, "capacity": 50, "flow": 0},
- "e2": {"cost": 10.0000000000005, "capacity": 50, "flow": 0}, # diff=5e-13
- "e3": {"cost": 12.0, "capacity": 50, "flow": 0},
- }
- select_func = edge_select_fabric(EdgeSelect.ALL_MIN_COST)
- cost, edges = select_func(
- mock_graph, "A", "B", edge_map_, excluded_edges=set(), excluded_nodes=set()
- )
- assert isclose(cost, 10.0, abs_tol=1e-12)
- assert set(edges) == {"e1", "e2"}
-
-
-def test_all_min_cost_no_valid(mock_graph: StrictMultiDiGraph) -> None:
- """
- If all edges are in excluded_edges, we get (inf, []) from ALL_MIN_COST.
- """
- edge_map_ = {
- "e1": {"cost": 10, "capacity": 50, "flow": 0},
- "e2": {"cost": 20, "capacity": 50, "flow": 0},
- }
- select_func = edge_select_fabric(EdgeSelect.ALL_MIN_COST)
- cost, edges = select_func(
- mock_graph,
- "A",
- "B",
- edge_map_,
- excluded_edges={"e1", "e2"},
- excluded_nodes=set(),
- )
- assert cost == float("inf")
- assert edges == []
-
-
-# ------------------------------------------------------------------------------
-# Tests for each EdgeSelect variant
-# ------------------------------------------------------------------------------
-
-
-def test_edge_select_excluded_edges(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- Using ALL_MIN_COST. 'edgeC' has cost=5 but is excluded.
- So the next minimum is also 5 => 'edgeE'.
- """
- select_func = edge_select_fabric(EdgeSelect.ALL_MIN_COST)
- cost, edges = select_func(
- mock_graph,
- "nodeA",
- "nodeB",
- edge_map,
- excluded_edges={"edgeC"},
- excluded_nodes=set(),
- )
- assert cost == 5
- assert edges == ["edgeE"]
-
-
-def test_edge_select_all_min_cost(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- ALL_MIN_COST => all edges with minimal cost => 5 => edgesC, E.
- """
- select_func = edge_select_fabric(EdgeSelect.ALL_MIN_COST)
- cost, chosen = select_func(
- mock_graph, "A", "B", edge_map, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == 5
- assert set(chosen) == {"edgeC", "edgeE"}
-
-
-def test_edge_select_single_min_cost(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- SINGLE_MIN_COST => exactly one edge with minimal cost (5) => edgeC or edgeE.
- """
- select_func = edge_select_fabric(EdgeSelect.SINGLE_MIN_COST)
- cost, chosen = select_func(
- mock_graph, "A", "B", edge_map, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == 5
- assert len(chosen) == 1
- assert chosen[0] in {"edgeC", "edgeE"}
-
-
-def test_edge_select_all_min_cost_with_cap(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- ALL_MIN_COST_WITH_CAP_REMAINING => leftover >= 10 => edgesA, B, C => among them cost=5 => edgeC.
- """
- select_func = edge_select_fabric(
- EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING, select_value=10
- )
- cost, chosen = select_func(
- mock_graph, "A", "B", edge_map, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == 5
- assert chosen == ["edgeC"]
-
-
-def test_edge_select_all_any_cost_with_cap(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- ALL_ANY_COST_WITH_CAP_REMAINING => leftover >= 10 => edgesA, B, C.
- All returned, min cost among them is 5.
- """
- select_func = edge_select_fabric(
- EdgeSelect.ALL_ANY_COST_WITH_CAP_REMAINING, select_value=10
- )
- cost, chosen = select_func(
- mock_graph, "A", "B", edge_map, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == 5
- assert set(chosen) == {"edgeA", "edgeB", "edgeC"}
-
-
-def test_edge_select_single_min_cost_with_cap_remaining(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- SINGLE_MIN_COST_WITH_CAP_REMAINING => leftover >= 5 => edgesA(100), B(25), C(10), D(5).
- Among them, minimum cost=5 => edgeC.
- """
- select_func = edge_select_fabric(
- EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING, select_value=5
- )
- cost, chosen = select_func(
- mock_graph, "A", "B", edge_map, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == 5
- assert chosen == ["edgeC"]
-
-
-def test_edge_select_single_min_cost_with_cap_remaining_no_valid(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- If leftover >= 999, none qualify => (inf, []).
- """
- select_func = edge_select_fabric(
- EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING, select_value=999
- )
- cost, chosen = select_func(
- mock_graph, "A", "B", edge_map, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == float("inf")
- assert chosen == []
-
-
-def test_edge_select_single_min_cost_load_factored(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- cost_val = cost*100 + round((flow/capacity)*10).
- Among leftover >= MIN_CAP => effectively all edges, the lowest combined cost is for edgeC => 5*100+0=500.
- """
- select_func = edge_select_fabric(
- EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING_LOAD_FACTORED
- )
- cost, chosen = select_func(
- mock_graph, "A", "B", edge_map, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == 500.0
- assert chosen == ["edgeC"]
-
-
-def test_load_factored_edge_under_min_cap(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- If leftover < select_value => skip the edge. We'll set leftover(edgeE)=0.5 => skip it => pick edgeC.
- """
- edge_map["edgeE"]["flow"] = 1.5 # leftover=0.5
- select_func = edge_select_fabric(
- EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING_LOAD_FACTORED, select_value=1.0
- )
- cost, chosen = select_func(
- mock_graph, "A", "B", edge_map, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == 500
- assert chosen == ["edgeC"]
-
-
-def test_all_any_cost_with_cap_no_valid(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- leftover >= 999 => none qualify => (inf, []).
- """
- select_func = edge_select_fabric(
- EdgeSelect.ALL_ANY_COST_WITH_CAP_REMAINING, select_value=999
- )
- cost, chosen = select_func(
- mock_graph, "A", "B", edge_map, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == float("inf")
- assert chosen == []
-
-
-# ------------------------------------------------------------------------------
-# User-defined function tests
-# ------------------------------------------------------------------------------
-
-
-def test_user_defined_custom(
- mock_graph: StrictMultiDiGraph,
- edge_map: Dict[EdgeID, AttrDict],
-) -> None:
- """
- Provide a user-defined function that picks edges with cost <=10
- and uses sum of costs as the returned cost.
- """
-
- def custom_func(
- graph: StrictMultiDiGraph,
- src: NodeID,
- dst: NodeID,
- edg_map: Dict[EdgeID, AttrDict],
- excluded_edges: Set[EdgeID],
- excluded_nodes: Set[NodeID],
- ) -> Tuple[Cost, list]:
- chosen = []
- total = 0.0
- for eid, attrs in edg_map.items():
- if eid in excluded_edges:
- continue
- if attrs["cost"] <= 10:
- chosen.append(eid)
- total += attrs["cost"]
- if not chosen:
- return float("inf"), []
- return total, chosen
-
- select_func = edge_select_fabric(
- EdgeSelect.USER_DEFINED, edge_select_func=custom_func
- )
- cost, chosen = select_func(
- mock_graph, "A", "B", edge_map, excluded_edges=set(), excluded_nodes=set()
- )
- # Edges <=10 => A,B,C,E => sum=10+10+5+5=30
- assert cost == 30
- assert set(chosen) == {"edgeA", "edgeB", "edgeC", "edgeE"}
-
-
-def test_user_defined_excludes_all(mock_graph: StrictMultiDiGraph) -> None:
- """
- If a user-defined function always returns (inf, []), confirm no edges are chosen.
- """
-
- def exclude_all_func(*args, **kwargs):
- return float("inf"), []
-
- select_func = edge_select_fabric(
- EdgeSelect.USER_DEFINED, edge_select_func=exclude_all_func
- )
- cost, chosen = select_func(
- mock_graph, "X", "Y", {}, excluded_edges=set(), excluded_nodes=set()
- )
- assert cost == float("inf")
- assert chosen == []
diff --git a/tests/algorithms/test_max_flow.py b/tests/algorithms/test_max_flow.py
deleted file mode 100644
index b536e5d..0000000
--- a/tests/algorithms/test_max_flow.py
+++ /dev/null
@@ -1,839 +0,0 @@
-import pytest
-from pytest import approx
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.algorithms.max_flow import calc_max_flow
-from ngraph.algorithms.types import FlowSummary
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-class TestMaxFlowBasic:
- """
- Tests that directly verify specific flow values on known small graphs.
- """
-
- def test_max_flow_line1_full_flow(self, line1):
- """
- On line1 fixture:
- - Full iterative max flow from A to C should be 5.
- """
- max_flow = calc_max_flow(line1, "A", "C")
- assert max_flow == 5
-
- def test_max_flow_line1_shortest_path(self, line1):
- """
- On line1 fixture:
- - With shortest_path=True (single augmentation), expect flow=4.
- """
- max_flow = calc_max_flow(line1, "A", "C", shortest_path=True)
- assert max_flow == 4
-
- def test_max_flow_square4_full_flow(self, square4):
- """
- On square4 fixture:
- - Full iterative max flow from A to B should be 350 by default.
- """
- max_flow = calc_max_flow(square4, "A", "B")
- assert max_flow == 350
-
- def test_max_flow_square4_shortest_path(self, square4):
- """
- On square4 fixture:
- - With shortest_path=True, only one flow augmentation => 100.
- """
- max_flow = calc_max_flow(square4, "A", "B", shortest_path=True)
- assert max_flow == 100
-
- def test_max_flow_graph5_full_flow(self, graph5):
- """
- On graph5 (fully connected 5 nodes with capacity=1 on each edge):
- - Full iterative max flow from A to B = 4.
- """
- max_flow = calc_max_flow(graph5, "A", "B")
- assert max_flow == 4
-
- def test_max_flow_graph5_shortest_path(self, graph5):
- """
- On graph5:
- - With shortest_path=True => flow=1 for a single augmentation.
- """
- max_flow = calc_max_flow(graph5, "A", "B", shortest_path=True)
- assert max_flow == 1
-
-
-class TestMaxFlowCopyBehavior:
- """
- Tests verifying how flow is (or isn't) preserved when copy_graph=False.
- """
-
- def test_max_flow_graph_copy_disabled(self, graph5):
- """
- - The first call saturates flow from A to B => 4.
- - A second call on the same graph (copy_graph=False) expects 0
- because the flow is already placed.
- """
- graph5_copy = graph5.copy()
- max_flow1 = calc_max_flow(graph5_copy, "A", "B", copy_graph=False)
- assert max_flow1 == 4
-
- max_flow2 = calc_max_flow(graph5_copy, "A", "B", copy_graph=False)
- assert max_flow2 == 0
-
- def test_max_flow_reset_flow(self, line1):
- """
- Ensures that reset_flow_graph=True zeroes out existing flow
- before computing again.
- """
- # First run places flow on line1:
- calc_max_flow(line1, "A", "C", copy_graph=False)
-
- # Now run again with reset_flow_graph=True:
- max_flow_after_reset = calc_max_flow(
- line1, "A", "C", copy_graph=False, reset_flow_graph=True
- )
- # Should return the same result as a fresh run (5)
- assert max_flow_after_reset == 5
-
-
-class TestMaxFlowShortestPathRepeated:
- """
- Verifies that repeated shortest-path calls do not accumulate flow
- when copy_graph=False.
- """
-
- def test_shortest_path_repeated_calls(self, line1):
- """
- First call with shortest_path=True => 4
- Second call => 1 (since there is a longer path found after saturation of the shortest).
- """
- flow1 = calc_max_flow(line1, "A", "C", shortest_path=True, copy_graph=False)
- assert flow1 == 4
-
- flow2 = calc_max_flow(line1, "A", "C", shortest_path=True, copy_graph=False)
- assert flow2 == 1
-
-
-@pytest.mark.parametrize(
- "placement", [FlowPlacement.PROPORTIONAL, FlowPlacement.EQUAL_BALANCED]
-)
-def test_square4_flow_placement(square4, placement):
- """
- Example showing how to test different FlowPlacement modes on the same fixture.
- For square4, the PROPORTIONAL and EQUAL_BALANCED results might differ,
- but here we simply check if we get the original tested value or not.
- Adjust as needed if the EQUAL_BALANCED result is known to differ.
- """
- max_flow = calc_max_flow(square4, "A", "B", flow_placement=placement)
-
- if placement == FlowPlacement.PROPORTIONAL:
- # Known from above
- assert max_flow == 350
- else:
- # If equal-balanced yields a different known answer, verify that here.
- # If it's actually the same, use the same assertion or approx check:
- assert max_flow == approx(350, abs=1e-9)
-
-
-class TestMaxFlowEdgeCases:
- """
- Additional tests for error conditions or graphs with no feasible flow.
- """
-
- def test_missing_src_node(self, line1):
- """
- Trying to compute flow with a non-existent source raises KeyError.
- """
- with pytest.raises(KeyError):
- calc_max_flow(line1, "Z", "C")
-
- def test_missing_dst_node(self, line1):
- """
- Trying to compute flow with a non-existent destination raises ValueError.
- """
- with pytest.raises(ValueError):
- calc_max_flow(line1, "A", "Z")
-
- def test_zero_capacity_edges(self):
- """
- Graph with edges that all have zero capacity => max flow=0.
- """
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_edge("A", "B", capacity=0.0, cost=1)
- max_flow = calc_max_flow(g, "A", "B")
- assert max_flow == 0.0
-
- def test_disconnected_graph(self):
- """
- Graph with no edges => disconnected => max flow=0.
- """
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- max_flow = calc_max_flow(g, "A", "B")
- assert max_flow == 0.0
-
- def test_very_small_capacity_precision(self):
- """Capacities near MIN_CAP threshold are honored precisely."""
- from ngraph.algorithms.base import MIN_CAP
-
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
-
- # Capacity slightly above MIN_CAP
- g.add_edge("A", "B", capacity=MIN_CAP * 2, cost=1)
- flow = calc_max_flow(g, "A", "B")
- assert flow == MIN_CAP * 2
-
- # Set capacity at MIN_CAP using the actual edge key
- edge_key = list(g.edges("A", keys=True))[0][2]
- g.edges["A", "B", edge_key]["capacity"] = MIN_CAP
- flow = calc_max_flow(g, "A", "B")
- assert flow == MIN_CAP
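- # Both runs confirm that capacities at or just above MIN_CAP survive
- # the computation without being rounded down to zero.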
-
-
-class TestMaxFlowExtended:
- """
- Tests for the extended max flow functionality with return_summary and return_graph flags.
- """
-
- def test_max_flow_return_summary_basic(self, line1):
- """Test return_summary=True returns flow value and FlowSummary."""
- result = calc_max_flow(line1, "A", "C", return_summary=True)
-
- # Should return a tuple
- assert isinstance(result, tuple)
- assert len(result) == 2
-
- flow_value, summary = result
- assert flow_value == 5
- assert isinstance(summary, FlowSummary)
- assert summary.total_flow == 5
-
- # Check that we have edge flows
- assert len(summary.edge_flow) > 0
- assert len(summary.residual_cap) > 0
-
- # Check that source is reachable
- assert "A" in summary.reachable
-
- # Check min-cut is properly identified
- assert isinstance(summary.min_cut, list)
-
- def test_max_flow_return_graph_basic(self, line1):
- """Test return_graph=True returns flow value and flow graph."""
- result = calc_max_flow(line1, "A", "C", return_graph=True)
-
- # Should return a tuple
- assert isinstance(result, tuple)
- assert len(result) == 2
-
- flow_value, flow_graph = result
- assert flow_value == 5
- assert isinstance(flow_graph, StrictMultiDiGraph)
-
- # Flow graph should have flow attributes on edges
- for _, _, _, d in flow_graph.edges(data=True, keys=True):
- assert "flow" in d
- assert "capacity" in d
-
- def test_max_flow_return_both_flags(self, line1):
- """Test both return_summary=True and return_graph=True."""
- result = calc_max_flow(line1, "A", "C", return_summary=True, return_graph=True)
-
- # Should return a tuple with 3 elements
- assert isinstance(result, tuple)
- assert len(result) == 3
-
- flow_value, summary, flow_graph = result
- assert flow_value == 5
- assert isinstance(summary, FlowSummary)
- assert isinstance(flow_graph, StrictMultiDiGraph)
- assert summary.total_flow == 5
-
- def test_max_flow_backward_compatibility(self, line1):
- """Test that default behavior (no flags) maintains backward compatibility."""
- result = calc_max_flow(line1, "A", "C")
-
- # Should return just the flow value as a scalar
- assert isinstance(result, (int, float))
- assert result == 5
-
- def test_flow_summary_edge_flows(self, line1):
- """Test that FlowSummary contains correct edge flow information."""
- _, summary = calc_max_flow(line1, "A", "C", return_summary=True)
-
- # Verify edge flows sum to total flow at source
- total_outflow = sum(
- flow for (u, _, _), flow in summary.edge_flow.items() if u == "A"
- )
- assert total_outflow == summary.total_flow
-
- # Verify residual capacities are non-negative
- for residual in summary.residual_cap.values():
- assert residual >= 0
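- # A symmetric conservation check (sketch; assumes "C" is the sole sink
- # of line1, as in the tests above): inflow at the sink should also
- # equal the total flow.
- total_inflow = sum(
- flow for (_, v, _), flow in summary.edge_flow.items() if v == "C"
- )
- assert total_inflow == summary.total_flow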
-
- def test_flow_summary_min_cut_identification(self, square4):
- """Test min-cut identification on a more complex graph."""
- _, summary = calc_max_flow(square4, "A", "B", return_summary=True)
-
- # Min-cut should be non-empty for a bottleneck graph
- assert len(summary.min_cut) > 0
-
- # All min-cut edges should be saturated (zero residual capacity)
- for edge in summary.min_cut:
- assert summary.residual_cap[edge] == 0
-
- def test_flow_summary_reachable_nodes(self, line1):
- """Test that reachable nodes are correctly identified."""
- _, summary = calc_max_flow(line1, "A", "C", return_summary=True)
-
- # Source should always be reachable
- assert "A" in summary.reachable
-
- # When flow reaches the destination, the reachable set is non-empty;
- # at minimum it contains the source.
- if summary.total_flow > 0:
- assert len(summary.reachable) >= 1
-
- def test_shortest_path_with_summary(self, line1):
- """Test return_summary works with shortest_path=True."""
- result = calc_max_flow(line1, "A", "C", shortest_path=True, return_summary=True)
-
- flow_value, summary = result
- assert flow_value == 4 # Single path flow
- assert summary.total_flow == 4
- assert isinstance(summary.edge_flow, dict)
- assert isinstance(summary.min_cut, list)
-
- def test_empty_graph_with_summary(self):
- """Test behavior with disconnected nodes."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
-
- flow_value, summary = calc_max_flow(g, "A", "B", return_summary=True)
-
- assert flow_value == 0
- assert summary.total_flow == 0
- assert len(summary.edge_flow) == 0
- assert len(summary.residual_cap) == 0
- assert "A" in summary.reachable
- assert "B" not in summary.reachable
- assert len(summary.min_cut) == 0
-
- def test_saturated_edges_helper(self, line1):
- """Test the saturated_edges helper function."""
- from ngraph.algorithms.max_flow import saturated_edges
-
- saturated = saturated_edges(line1, "A", "C")
-
- # Should return a list of edge tuples
- assert isinstance(saturated, list)
-
- # All saturated edges should have (numerically) zero residual capacity
- _, summary = calc_max_flow(line1, "A", "C", return_summary=True)
- for edge in saturated:
- assert summary.residual_cap[edge] <= 1e-10
-
- def test_sensitivity_analysis_helper(self, line1):
- """Test the run_sensitivity helper function."""
- from ngraph.algorithms.max_flow import run_sensitivity
-
- sensitivity = run_sensitivity(line1, "A", "C", change_amount=1.0)
-
- # Should return a dictionary mapping edges to flow increases
- assert isinstance(sensitivity, dict)
-
- # All sensitivity values should be non-negative
- for edge, flow_increase in sensitivity.items():
- assert isinstance(edge, tuple)
- assert len(edge) == 3 # (u, v, k)
- assert flow_increase >= 0
-
- def test_sensitivity_analysis_identifies_bottlenecks(self, square4):
- """Test that sensitivity analysis identifies meaningful bottlenecks."""
- from ngraph.algorithms.max_flow import run_sensitivity
-
- sensitivity = run_sensitivity(square4, "A", "B", change_amount=10.0)
-
- # Should have some edges with positive sensitivity
- positive_impacts = [impact for impact in sensitivity.values() if impact > 0]
- assert len(positive_impacts) > 0
-
- # Highest impact edges should be meaningful bottlenecks
- if sensitivity:
- max_impact = max(sensitivity.values())
- assert max_impact > 0
-
- def test_sensitivity_analysis_negative_capacity_protection(self, line1):
- """Test that sensitivity analysis sets capacity to zero instead of negative values."""
- from ngraph.algorithms.max_flow import run_sensitivity
-
- # Test with a large negative change that would make capacities negative
- sensitivity = run_sensitivity(line1, "A", "C", change_amount=-100.0)
-
- # Should still return results (not skip edges)
- assert isinstance(sensitivity, dict)
- assert len(sensitivity) > 0
-
- # All sensitivity values should be non-positive (flow reduced or unchanged)
- for edge, flow_change in sensitivity.items():
- assert isinstance(edge, tuple)
- assert len(edge) == 3 # (u, v, k)
- assert flow_change <= 0 # Should reduce or maintain flow
-
- def test_sensitivity_analysis_zero_capacity_behavior(self):
- """Test specific behavior when edge capacity is reduced to zero."""
- from ngraph.algorithms.max_flow import run_sensitivity
-
- # Create a simple graph with known capacities
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_node("C")
-
- # Add edges: A->B (capacity 10), B->C (capacity 5)
- g.add_edge("A", "B", capacity=10.0, flow=0.0, flows={}, cost=1.0)
- bc_edge_key = g.add_edge("B", "C", capacity=5.0, flow=0.0, flows={}, cost=1.0)
-
- # Test reducing edge B->C capacity by 10 (more than its current capacity of 5)
- sensitivity = run_sensitivity(g, "A", "C", change_amount=-10.0)
-
- # Should reduce flow to zero (complete bottleneck removal)
- bc_edge = ("B", "C", bc_edge_key)
- assert bc_edge in sensitivity
- assert sensitivity[bc_edge] == -5.0 # Should reduce flow by 5 (from 5 to 0)
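- # Baseline max flow is min(10, 5) = 5; removing all of B->C's capacity
- # therefore removes all flow, hence the -5.0 delta.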
-
- def test_sensitivity_analysis_partial_capacity_reduction(self):
- """Test behavior when capacity is partially reduced but not to zero."""
- from ngraph.algorithms.max_flow import run_sensitivity
-
- # Create a simple graph
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_node("C")
-
- # Add edges with specific capacities
- g.add_edge("A", "B", capacity=10.0, flow=0.0, flows={}, cost=1.0)
- bc_edge_key = g.add_edge("B", "C", capacity=8.0, flow=0.0, flows={}, cost=1.0)
-
- # Test reducing edge B->C capacity by 3 (from 8 to 5)
- sensitivity = run_sensitivity(g, "A", "C", change_amount=-3.0)
-
- # Should reduce flow by 3 (the bottleneck reduction)
- bc_edge = ("B", "C", bc_edge_key)
- assert bc_edge in sensitivity
- assert sensitivity[bc_edge] == -3.0
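- # Baseline max flow is min(10, 8) = 8; with B->C reduced to 5, the flow
- # drops to 5, a delta of exactly -3.0.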
-
- def test_sensitivity_analysis_capacity_increase_and_decrease(self):
- """Test that both positive and negative changes work correctly."""
- from ngraph.algorithms.max_flow import run_sensitivity
-
- # Create a bottleneck graph
- g = StrictMultiDiGraph()
- for node in ["A", "B", "C", "D"]:
- g.add_node(node)
-
- g.add_edge("A", "B", capacity=20.0, flow=0.0, flows={}, cost=1.0)
- g.add_edge("A", "C", capacity=20.0, flow=0.0, flows={}, cost=1.0)
- g.add_edge("B", "D", capacity=10.0, flow=0.0, flows={}, cost=1.0) # Bottleneck
- g.add_edge("C", "D", capacity=15.0, flow=0.0, flows={}, cost=1.0)
-
- # Test capacity increase
- sensitivity_increase = run_sensitivity(g, "A", "D", change_amount=5.0)
-
- # Test capacity decrease
- sensitivity_decrease = run_sensitivity(g, "A", "D", change_amount=-3.0)
-
- # Both should return results
- assert len(sensitivity_increase) > 0
- assert len(sensitivity_decrease) > 0
-
- # Increases should be positive or zero
- for flow_change in sensitivity_increase.values():
- assert flow_change >= 0
-
- # Decreases should be negative or zero
- for flow_change in sensitivity_decrease.values():
- assert flow_change <= 0
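- # This reflects the monotonicity of max flow in edge capacities: adding
- # capacity can never reduce the optimum, and removing capacity can
- # never increase it.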
-
- def test_max_flow_overlapping_source_sink_simple(self):
- """Test max flow with overlapping source/sink nodes that caused infinite loops."""
- g = StrictMultiDiGraph()
- g.add_node("N1")
- g.add_node("N2")
-
- # Simple topology: N1 -> N2
- g.add_edge("N1", "N2", capacity=1.0, flow=0.0, flows={}, cost=1)
-
- # Test all combinations that would occur in pairwise mode with overlapping patterns
- # N1 -> N1 (self-loop)
- max_flow_n1_n1 = calc_max_flow(g, "N1", "N1")
- assert max_flow_n1_n1 == 0.0
-
- # N1 -> N2 (valid path)
- max_flow_n1_n2 = calc_max_flow(g, "N1", "N2")
- assert max_flow_n1_n2 == 1.0
-
- # N2 -> N1 (no path)
- max_flow_n2_n1 = calc_max_flow(g, "N2", "N1")
- assert max_flow_n2_n1 == 0.0
-
- # N2 -> N2 (self-loop)
- max_flow_n2_n2 = calc_max_flow(g, "N2", "N2")
- assert max_flow_n2_n2 == 0.0
-
- def test_max_flow_overlapping_source_sink_with_bidirectional(self):
- """Test overlapping source/sink with bidirectional edges."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
-
- # Bidirectional edges
- g.add_edge("A", "B", capacity=5.0, flow=0.0, flows={}, cost=1)
- g.add_edge("B", "A", capacity=3.0, flow=0.0, flows={}, cost=1)
-
- # Test all combinations
- # A -> A (self-loop)
- max_flow_a_a = calc_max_flow(g, "A", "A")
- assert max_flow_a_a == 0.0
-
- # A -> B (forward direction)
- max_flow_a_b = calc_max_flow(g, "A", "B")
- assert max_flow_a_b == 5.0
-
- # B -> A (reverse direction)
- max_flow_b_a = calc_max_flow(g, "B", "A")
- assert max_flow_b_a == 3.0
-
- # B -> B (self-loop)
- max_flow_b_b = calc_max_flow(g, "B", "B")
- assert max_flow_b_b == 0.0
-
- def test_dc_leaf_bidirectional_parallel_branches(self):
- """DC→leaf reversed hop with two parallel branches; compare placements.
-
- Topology (all costs=1 unless noted):
- S -> A_dc (inf)
- A_dc -> A_leaf1 (cap=50)
- A_dc -> A_leaf2 (cap=50)
- A_leaf1 -> B_leaf1 (cap=100)
- A_leaf2 -> B_leaf1 (cap=1)
- B_leaf1 -> B_dc (cap=100)
- B_dc -> T (inf)
-
- Expected:
- - PROPORTIONAL: branch1 min(50,100)=50, branch2 min(50,1)=1 → total 51
- - EQUAL_BALANCED: nominal split 0.5 at A_dc; limiting ratio at edge (A_leaf2->B_leaf1): 1/0.5=2 → total 2
- """
- g = StrictMultiDiGraph()
- for n in ("S", "A_dc", "A_leaf1", "A_leaf2", "B_leaf1", "B_dc", "T"):
- g.add_node(n)
-
- # Pseudo edges
- g.add_edge("S", "A_dc", capacity=float("inf"), cost=0)
- g.add_edge("B_dc", "T", capacity=float("inf"), cost=0)
-
- # Reversed hop from DC to leaves (present as real edges here)
- g.add_edge("A_dc", "A_leaf1", capacity=50.0, cost=1)
- g.add_edge("A_dc", "A_leaf2", capacity=50.0, cost=1)
-
- # Leaf to leaf aggregation
- g.add_edge("A_leaf1", "B_leaf1", capacity=100.0, cost=1)
- g.add_edge("A_leaf2", "B_leaf1", capacity=1.0, cost=1)
-
- # Final hop into destination DC
- g.add_edge("B_leaf1", "B_dc", capacity=100.0, cost=1)
-
- # Full max flow: both placements reach the same min-cut (51)
- flow_prop = calc_max_flow(
- g, "S", "T", flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert flow_prop == 51.0
-
- flow_eq = calc_max_flow(
- g, "S", "T", flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert flow_eq == 51.0
-
- # Single augmentation differs by placement
- flow_prop_sp = calc_max_flow(
- g, "S", "T", shortest_path=True, flow_placement=FlowPlacement.PROPORTIONAL
- )
- assert flow_prop_sp == 51.0
-
- flow_eq_sp = calc_max_flow(
- g, "S", "T", shortest_path=True, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert flow_eq_sp == 2.0
-
- def test_max_flow_self_loop_all_return_modes(self):
- """
- Test self-loop (s == t) behavior with all possible return value combinations.
- Ensures that our optimization properly handles all return modes.
- """
- # Create a simple graph with a self-loop
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_edge("A", "A", capacity=10.0, flow=0.0, flows={}, cost=1)
-
- # Test 1: Basic scalar return (return_summary=False, return_graph=False)
- flow_scalar = calc_max_flow(g, "A", "A")
- assert flow_scalar == 0.0
- assert isinstance(flow_scalar, float)
-
- # Test 2: With summary only (return_summary=True, return_graph=False)
- flow_with_summary = calc_max_flow(g, "A", "A", return_summary=True)
- assert isinstance(flow_with_summary, tuple)
- assert len(flow_with_summary) == 2
- flow, summary = flow_with_summary
- assert flow == 0.0
- assert isinstance(summary, FlowSummary)
- assert summary.total_flow == 0.0
- assert "A" in summary.reachable # Source should be reachable from itself
- assert len(summary.min_cut) == 0 # No min-cut edges for self-loop
-
- # Test 3: With graph only (return_summary=False, return_graph=True)
- flow_with_graph = calc_max_flow(g, "A", "A", return_graph=True)
- assert isinstance(flow_with_graph, tuple)
- assert len(flow_with_graph) == 2
- flow, flow_graph = flow_with_graph
- assert flow == 0.0
- assert isinstance(flow_graph, StrictMultiDiGraph)
- assert flow_graph.has_node("A")
- assert flow_graph.has_edge("A", "A")
-
- # Test 4: With both summary and graph (return_summary=True, return_graph=True)
- flow_with_both = calc_max_flow(
- g, "A", "A", return_summary=True, return_graph=True
- )
- assert isinstance(flow_with_both, tuple)
- assert len(flow_with_both) == 3
- flow, summary, flow_graph = flow_with_both
- assert flow == 0.0
- assert isinstance(summary, FlowSummary)
- assert isinstance(flow_graph, StrictMultiDiGraph)
- assert summary.total_flow == 0.0
- assert "A" in summary.reachable
- assert len(summary.min_cut) == 0
- assert flow_graph.has_node("A")
- assert flow_graph.has_edge("A", "A")
-
- # Verify that the flow on the self-loop edge is 0
- self_loop_edges = list(flow_graph.edges(nbunch="A", data=True, keys=True))
- a_to_a_edges = [
- (u, v, k, d) for u, v, k, d in self_loop_edges if u == "A" and v == "A"
- ]
- assert len(a_to_a_edges) >= 1
- for _u, _v, _k, data in a_to_a_edges:
- assert data.get("flow", 0.0) == 0.0
-
-
-def test_max_flow_with_parallel_edges():
- """
- Tests max flow calculations on a graph with parallel edges.
-
- Graph topology (costs/capacities):
-
-     [1,1] & [1,2]         [1,1] & [1,2]
- A ──────────────────► B ─────────────► C
- │                                       ▲
- │ [2,3]                                 │ [2,3]
- └───────────────────► D ───────────────┘
-
- Edges:
- - A→B: two parallel edges with (cost=1, capacity=1) and (cost=1, capacity=2)
- - B→C: two parallel edges with (cost=1, capacity=1) and (cost=1, capacity=2)
- - A→D: (cost=2, capacity=3)
- - D→C: (cost=2, capacity=3)
-
- The test computes:
- - The true maximum flow (expected flow: 6.0)
- - The flow along the shortest paths (expected flow: 3.0)
- - Flow placement using an equal-balanced strategy on the shortest paths (expected flow: 2.0)
- """
- from ngraph.algorithms.base import FlowPlacement
- from ngraph.algorithms.max_flow import calc_max_flow
- from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D"):
- g.add_node(node)
-
- # Create parallel edges between A→B and B→C
- g.add_edge("A", "B", key=0, cost=1, capacity=1)
- g.add_edge("A", "B", key=1, cost=1, capacity=2)
- g.add_edge("B", "C", key=2, cost=1, capacity=1)
- g.add_edge("B", "C", key=3, cost=1, capacity=2)
- # Create an alternative path A→D→C
- g.add_edge("A", "D", key=4, cost=2, capacity=3)
- g.add_edge("D", "C", key=5, cost=2, capacity=3)
-
- # 1. The true maximum flow
- max_flow_prop = calc_max_flow(g, "A", "C")
- assert max_flow_prop == 6.0, f"Expected 6.0, got {max_flow_prop}"
-
- # 2. The flow along the shortest paths
- max_flow_sp = calc_max_flow(g, "A", "C", shortest_path=True)
- assert max_flow_sp == 3.0, f"Expected 3.0, got {max_flow_sp}"
-
- # 3. Flow placement using an equal-balanced strategy on the shortest paths
- max_flow_eq = calc_max_flow(
- g, "A", "C", shortest_path=True, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert max_flow_eq == 2.0, f"Expected 2.0, got {max_flow_eq}"
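- # Worked numbers for the EQUAL_BALANCED case (using the equal-split
- # model described in the DC-leaf test above): with two parallel edges
- # per hop, each carries f/2, so the capacity-1 edge forces f/2 <= 1,
- # i.e. f <= 2, which gives the 2.0 result.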
-
-
-class TestMaxFlowCostDistribution:
- """Tests for cost distribution calculation in max flow analysis."""
-
- def test_cost_distribution_multiple_paths(self):
- """Test cost distribution with paths of different costs."""
- # Create graph with two path options at different costs
- g = StrictMultiDiGraph()
- for node in ["S", "A", "B", "T"]:
- g.add_node(node)
-
- # Path 1: S -> A -> T (cost = 1 + 1 = 2, capacity = 5)
- g.add_edge("S", "A", capacity=5.0, flow=0.0, flows={}, cost=1)
- g.add_edge("A", "T", capacity=5.0, flow=0.0, flows={}, cost=1)
-
- # Path 2: S -> B -> T (cost = 2 + 2 = 4, capacity = 3)
- g.add_edge("S", "B", capacity=3.0, flow=0.0, flows={}, cost=2)
- g.add_edge("B", "T", capacity=3.0, flow=0.0, flows={}, cost=2)
-
- flow_value, summary = calc_max_flow(g, "S", "T", return_summary=True)
-
- # Algorithm should use lowest cost path first, then higher cost
- assert flow_value == 8.0
- assert summary.cost_distribution == {2.0: 5.0, 4.0: 3.0}
-
- def test_cost_distribution_single_path(self):
- """Test cost distribution with a single path."""
- g = StrictMultiDiGraph()
- for node in ["A", "B", "C"]:
- g.add_node(node)
-
- # Single path: A -> B -> C (cost = 3 + 2 = 5, capacity = 10)
- g.add_edge("A", "B", capacity=10.0, flow=0.0, flows={}, cost=3)
- g.add_edge("B", "C", capacity=10.0, flow=0.0, flows={}, cost=2)
-
- flow_value, summary = calc_max_flow(g, "A", "C", return_summary=True)
-
- assert flow_value == 10.0
- assert summary.cost_distribution == {5.0: 10.0}
-
- def test_cost_distribution_equal_cost_paths(self):
- """Test cost distribution with multiple equal-cost paths."""
- g = StrictMultiDiGraph()
- for node in ["S", "A", "B", "T"]:
- g.add_node(node)
-
- # Two paths with same cost but different capacities
- # Path 1: S -> A -> T (cost = 1 + 1 = 2, capacity = 4)
- g.add_edge("S", "A", capacity=4.0, flow=0.0, flows={}, cost=1)
- g.add_edge("A", "T", capacity=4.0, flow=0.0, flows={}, cost=1)
-
- # Path 2: S -> B -> T (cost = 1 + 1 = 2, capacity = 6)
- g.add_edge("S", "B", capacity=6.0, flow=0.0, flows={}, cost=1)
- g.add_edge("B", "T", capacity=6.0, flow=0.0, flows={}, cost=1)
-
- flow_value, summary = calc_max_flow(g, "S", "T", return_summary=True)
-
- # Should aggregate all flow at the same cost
- assert flow_value == 10.0
- assert summary.cost_distribution == {2.0: 10.0}
-
- def test_cost_distribution_three_tiers(self):
- """Test cost distribution with three different cost tiers."""
- g = StrictMultiDiGraph()
- for node in ["S", "A", "B", "C", "T"]:
- g.add_node(node)
-
- # Path 1: S -> A -> T (cost = 1, capacity = 2)
- g.add_edge("S", "A", capacity=2.0, flow=0.0, flows={}, cost=1)
- g.add_edge("A", "T", capacity=2.0, flow=0.0, flows={}, cost=0)
-
- # Path 2: S -> B -> T (cost = 3, capacity = 4)
- g.add_edge("S", "B", capacity=4.0, flow=0.0, flows={}, cost=2)
- g.add_edge("B", "T", capacity=4.0, flow=0.0, flows={}, cost=1)
-
- # Path 3: S -> C -> T (cost = 6, capacity = 3)
- g.add_edge("S", "C", capacity=3.0, flow=0.0, flows={}, cost=3)
- g.add_edge("C", "T", capacity=3.0, flow=0.0, flows={}, cost=3)
-
- flow_value, summary = calc_max_flow(g, "S", "T", return_summary=True)
-
- # Should use paths in cost order: cost 1, then 3, then 6
- assert flow_value == 9.0
- assert summary.cost_distribution == {1.0: 2.0, 3.0: 4.0, 6.0: 3.0}
-
- def test_cost_distribution_no_flow(self):
- """Test cost distribution when no flow is possible."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- # No edges - no path possible
-
- flow_value, summary = calc_max_flow(g, "A", "B", return_summary=True)
-
- assert flow_value == 0.0
- assert summary.cost_distribution == {}
-
- def test_cost_distribution_self_loop(self):
- """Test cost distribution for self-loop case."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_edge("A", "A", capacity=10.0, flow=0.0, flows={}, cost=5)
-
- flow_value, summary = calc_max_flow(g, "A", "A", return_summary=True)
-
- # Self-loop always returns 0 flow
- assert flow_value == 0.0
- assert summary.cost_distribution == {}
-
- def test_cost_distribution_shortest_path_mode(self):
- """Test cost distribution with shortest_path=True (single augmentation)."""
- g = StrictMultiDiGraph()
- for node in ["S", "A", "B", "T"]:
- g.add_node(node)
-
- # Path 1: S -> A -> T (cost = 2, capacity = 5)
- g.add_edge("S", "A", capacity=5.0, flow=0.0, flows={}, cost=1)
- g.add_edge("A", "T", capacity=5.0, flow=0.0, flows={}, cost=1)
-
- # Path 2: S -> B -> T (cost = 4, capacity = 3)
- g.add_edge("S", "B", capacity=3.0, flow=0.0, flows={}, cost=2)
- g.add_edge("B", "T", capacity=3.0, flow=0.0, flows={}, cost=2)
-
- flow_value, summary = calc_max_flow(
- g, "S", "T", shortest_path=True, return_summary=True
- )
-
- # Should only use the first (lowest cost) path
- assert flow_value == 5.0
- assert summary.cost_distribution == {2.0: 5.0}
-
- def test_cost_distribution_capacity_bottleneck(self):
- """Test cost distribution when bottleneck limits flow on cheaper path."""
- g = StrictMultiDiGraph()
- for node in ["S", "A", "B", "T"]:
- g.add_node(node)
-
- # Path 1: S -> A -> T (cost = 1, but bottleneck capacity = 2)
- g.add_edge("S", "A", capacity=10.0, flow=0.0, flows={}, cost=1)
- g.add_edge("A", "T", capacity=2.0, flow=0.0, flows={}, cost=0) # Bottleneck
-
- # Path 2: S -> B -> T (cost = 3, capacity = 5)
- g.add_edge("S", "B", capacity=5.0, flow=0.0, flows={}, cost=2)
- g.add_edge("B", "T", capacity=5.0, flow=0.0, flows={}, cost=1)
-
- flow_value, summary = calc_max_flow(g, "S", "T", return_summary=True)
-
- # Should use cheap path first (limited by bottleneck), then expensive path
- assert flow_value == 7.0
- assert summary.cost_distribution == {1.0: 2.0, 3.0: 5.0}
diff --git a/tests/algorithms/test_path_utils.py b/tests/algorithms/test_path_utils.py
deleted file mode 100644
index fb8377a..0000000
--- a/tests/algorithms/test_path_utils.py
+++ /dev/null
@@ -1,192 +0,0 @@
-from ngraph.algorithms.paths import resolve_to_paths
-
-
-def test_no_path_if_dst_not_in_pred():
- """If the dst_node is not present in pred, no paths should be yielded."""
- # Source is "Z", which SPF would record as pred["Z"] = {} if Z is in the graph.
- # But "B" is absent entirely, meaning 'B' was unreachable.
- pred = {
- "Z": {}, # source node with empty predecessor set
- "A": {"Z": ["edgeA_Z"]},
- }
- # dst_node="B" is not in pred, so there's no path
- paths = list(resolve_to_paths("Z", "B", pred))
- assert paths == [], "Expected no paths when dst_node is missing from pred."
-
-
-def test_trivial_path_src_eq_dst():
- """
- If src_node == dst_node and it's in pred, the function yields a single empty-edge path.
- SPF typically sets pred[src_node] = {} to indicate no predecessor for source.
- """
- # Here the source and destination are "A". SPF would store pred["A"] = {}.
- pred = {"A": {}} # No actual predecessors, cost[A] = 0 in SPF
- paths = list(resolve_to_paths("A", "A", pred))
- # Expect exactly one trivial path: ((A, ()),)
- assert len(paths) == 1
- assert paths[0] == (("A", tuple()),)
-
-
-def test_single_linear_path():
- """
- Tests a simple linear path: Z -> A -> B -> C, with src=Z, dst=C.
- Each node that is reachable from Z must be in pred, including Z itself.
- """
- pred = {
- # If spf found a route from Z -> A, it sets pred["A"] = {"Z": ["edgeZA"]}.
- "Z": {}, # source node
- "A": {"Z": ["edgeZA"]},
- "B": {"A": ["edgeAB"]},
- "C": {"B": ["edgeBC"]},
- }
- # There's only one path: Z -> A -> B -> C
- paths = list(resolve_to_paths("Z", "C", pred))
- assert len(paths) == 1
-
- expected = (
- ("Z", ("edgeZA",)),
- ("A", ("edgeAB",)),
- ("B", ("edgeBC",)),
- ("C", ()),
- )
- assert paths[0] == expected
-
-
-def test_multiple_predecessors_branching():
- """
- Tests a branching scenario where the dst node (D) can come from
- two predecessors: B or C, and each of those from A.
- """
- pred = {
- "A": {}, # source
- "B": {"A": ["edgeAB"]},
- "C": {"A": ["edgeAC"]},
- "D": {"B": ["edgeBD1", "edgeBD2"], "C": ["edgeCD"]},
- }
- # So potential paths from A to D:
- # 1) A->B->D (with edges edgeAB, plus one of [edgeBD1 or edgeBD2])
- # 2) A->C->D (with edges edgeAC, edgeCD)
- # Without parallel-edge splitting, multiple edges B->D are grouped
- paths_no_split = list(resolve_to_paths("A", "D", pred, split_parallel_edges=False))
- assert len(paths_no_split) == 2
-
- # With parallel-edge splitting, we expand B->D from 2 edges into 2 separate paths
- # plus 1 path from A->C->D = total 3.
- paths_split = list(resolve_to_paths("A", "D", pred, split_parallel_edges=True))
- assert len(paths_split) == 3
-
-
-def test_parallel_edges_expansion():
- """
- Tests a single segment with multiple parallel edges: A->B has e1, e2, e3.
- No branching, just parallel edges.
- """
- pred = {
- "A": {}, # source
- "B": {"A": ["e1", "e2", "e3"]},
- }
- # Without split, there's a single path from A->B
- paths_no_split = list(resolve_to_paths("A", "B", pred, split_parallel_edges=False))
- assert len(paths_no_split) == 1
- expected_no_split = (
- ("A", ("e1", "e2", "e3")),
- ("B", ()),
- )
- assert paths_no_split[0] == expected_no_split
-
- # With split, we get 3 expansions: one for e1, one for e2, one for e3
- paths_split = list(resolve_to_paths("A", "B", pred, split_parallel_edges=True))
- assert len(paths_split) == 3
- # They should be:
- # 1) (A, (e1,)), (B, ())
- # 2) (A, (e2,)), (B, ())
- # 3) (A, (e3,)), (B, ())
- actual = set(paths_split)
- expected_variants = {
- (("A", ("e1",)), ("B", ())),
- (("A", ("e2",)), ("B", ())),
- (("A", ("e3",)), ("B", ())),
- }
- assert actual == expected_variants
-
-
-def test_cycle_prevention():
- """
- Although the code assumes a DAG, we test a scenario with an actual cycle to
- ensure it terminates; this exercises the 'seen'-set cycle guard.
- A -> B -> A is a cycle, plus B -> C is normal. We want at least one path from A->C.
- The code might yield duplicates if it partially re-traverses; we only check
- that at least the main path is produced (A->B->C).
- """
- pred = {
- "A": {"B": ["edgeBA"]}, # cycle part
- "B": {"A": ["edgeAB"]}, # cycle part
- "C": {"B": ["edgeBC"]},
- }
- # Even though there's a cycle A <-> B, let's confirm we find at least one path A->B->C
- paths = list(resolve_to_paths("A", "C", pred))
- # The code might produce duplicates because each partial stack expansion can yield a path.
- # We'll just check that we do have the correct path at least once.
- assert len(paths) >= 1, "Expected at least one path, found none."
-
- # Check that the main path is in the results
- expected = (
- ("A", ("edgeAB",)),
- ("B", ("edgeBC",)),
- ("C", ()),
- )
- assert expected in paths, "Missing the main path from A->B->C"
-
-
-def test_no_predecessors_for_dst():
- """
- If the dst_node is in pred but has an empty dict of predecessors,
- it means there's no actual incoming edge. Should yield no results.
- """
- pred = {
- "A": {}, # Suppose A is source, but not relevant here
- "C": {}, # 'C' was discovered in SPF's node set, but no predecessors
- }
- paths = list(resolve_to_paths("A", "C", pred))
- assert paths == [], "Expected no paths since 'C' has no incoming edges."
-
-
-def test_multiple_path_expansions():
- """
- A more complex scenario with parallel edges at multiple steps:
- A -> B has e1, e2
- B -> C has e3, e4
- C -> D has e5
- So from A to D (via B, C), we get expansions for each combination
- of (e1 or e2) and (e3 or e4). 2 x 2 = 4 expansions if split_parallel_edges=True.
- """
- pred = {
- "A": {}, # source
- "B": {"A": ["e1", "e2"]},
- "C": {"B": ["e3", "e4"]},
- "D": {"C": ["e5"]},
- }
- # With no splitting, each set of parallel edges is collapsed into one path
- no_split = list(resolve_to_paths("A", "D", pred, split_parallel_edges=False))
- assert len(no_split) == 1
-
- # With splitting
- split = list(resolve_to_paths("A", "D", pred, split_parallel_edges=True))
- # We expect 4 expansions: (e1,e3), (e1,e4), (e2,e3), (e2,e4)
- assert len(split) == 4
-
- # Let's check the final shape of one of them:
- # For example, (("A", ("e1",)), ("B", ("e3",)), ("C", ("e5",)), ("D", ()))
- # And similarly for the others.
- expected_combos = {
- ("e1", "e3", "e5"),
- ("e1", "e4", "e5"),
- ("e2", "e3", "e5"),
- ("e2", "e4", "e5"),
- }
- actual_combos = set()
- for path in split:
- # path looks like (("A",(eX,)), ("B",(eY,)), ("C",(e5,)), ("D",()))
- edges_used = tuple(elem[1][0] for elem in path[:-1]) # omit the final empty
- actual_combos.add(edges_used)
- assert actual_combos == expected_combos
diff --git a/tests/algorithms/test_place_flow.py b/tests/algorithms/test_place_flow.py
deleted file mode 100644
index 994d19f..0000000
--- a/tests/algorithms/test_place_flow.py
+++ /dev/null
@@ -1,959 +0,0 @@
-from ngraph.algorithms.capacity import FlowPlacement
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.algorithms.placement import (
- place_flow_on_graph,
- remove_flow_from_graph,
-)
-from ngraph.algorithms.spf import spf
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-class TestPlaceFlowOnGraph:
- def test_place_flow_on_graph_line1_proportional(self, line1):
- """
- Place flow from A->C on line1 using PROPORTIONAL flow placement.
- Verifies the final distribution does not exceed capacity
- and checks metadata (placed_flow, remaining_flow, edges/nodes touched).
- """
- _, pred = spf(line1, "A")
- r = init_flow_graph(line1)
-
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow_index=("A", "C", "TEST"),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- assert flow_placement_meta.placed_flow == 4
- assert flow_placement_meta.remaining_flow == float("inf")
- assert not any(
- edge[3]["flow"] > edge[3]["capacity"] for edge in r.get_edges().values()
- )
- # Asserting exact final edge attributes:
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "cost": 1,
- "capacity": 5,
- "flow": 4.0,
- "flows": {("A", "C", "TEST"): 4.0},
- },
- ),
- 1: ("B", "A", 1, {"cost": 1, "capacity": 5, "flow": 0, "flows": {}}),
- 2: (
- "B",
- "C",
- 2,
- {
- "cost": 1,
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", "TEST"): 1.0},
- },
- ),
- 3: ("C", "B", 3, {"cost": 1, "capacity": 1, "flow": 0, "flows": {}}),
- 4: (
- "B",
- "C",
- 4,
- {
- "cost": 1,
- "capacity": 3,
- "flow": 3.0,
- "flows": {("A", "C", "TEST"): 3.0},
- },
- ),
- 5: ("C", "B", 5, {"cost": 1, "capacity": 3, "flow": 0, "flows": {}}),
- 6: ("B", "C", 6, {"cost": 2, "capacity": 7, "flow": 0, "flows": {}}),
- 7: ("C", "B", 7, {"cost": 2, "capacity": 7, "flow": 0, "flows": {}}),
- }
- assert flow_placement_meta.nodes == {"A", "C", "B"}
- assert flow_placement_meta.edges == {0, 2, 4}
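- # Note the proportional split: the 4 units entering B are divided across
- # the two cost-1 B->C edges in proportion to capacity (1:3 -> 1.0 and 3.0).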
-
- def test_place_flow_on_graph_line1_equal(self, line1):
- """
- Place flow using EQUAL_BALANCED on line1. Checks that
- flow is split evenly among parallel edges from B->C.
- """
- _, pred = spf(line1, "A")
- r = init_flow_graph(line1)
-
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow_index=("A", "C", "TEST"),
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
-
- assert flow_placement_meta.placed_flow == 2
- assert flow_placement_meta.remaining_flow == float("inf")
- assert not any(
- edge[3]["flow"] > edge[3]["capacity"] for edge in r.get_edges().values()
- )
- # Check final flows match expectations:
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "capacity": 5,
- "flow": 2.0,
- "flows": {("A", "C", "TEST"): 2.0},
- "cost": 1,
- },
- ),
- 1: ("B", "A", 1, {"capacity": 5, "flow": 0, "flows": {}, "cost": 1}),
- 2: (
- "B",
- "C",
- 2,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", "TEST"): 1.0},
- "cost": 1,
- },
- ),
- 3: ("C", "B", 3, {"capacity": 1, "flow": 0, "flows": {}, "cost": 1}),
- 4: (
- "B",
- "C",
- 4,
- {
- "capacity": 3,
- "flow": 1.0,
- "flows": {("A", "C", "TEST"): 1.0},
- "cost": 1,
- },
- ),
- 5: ("C", "B", 5, {"capacity": 3, "flow": 0, "flows": {}, "cost": 1}),
- 6: ("B", "C", 6, {"capacity": 7, "flow": 0, "flows": {}, "cost": 2}),
- 7: ("C", "B", 7, {"capacity": 7, "flow": 0, "flows": {}, "cost": 2}),
- }
- assert flow_placement_meta.nodes == {"A", "C", "B"}
- assert flow_placement_meta.edges == {0, 2, 4}
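- # With EQUAL_BALANCED the two cost-1 B->C edges are loaded 50/50, so
- # the capacity-1 edge caps the total at 2 (1.0 per edge).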
-
- def test_place_flow_on_graph_line1_proportional_partial(self, line1):
- """
- In two steps, place 3 units of flow, then attempt another 3.
- Check partial flow placement when capacity is partially exhausted.
- """
- _, pred = spf(line1, "A")
- r = init_flow_graph(line1)
-
- # First attempt: place 3 units
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow=3,
- flow_index=("A", "C", None),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
- assert flow_placement_meta.placed_flow == 3
- assert flow_placement_meta.remaining_flow == 0
-
- # Second attempt: place another 3 units (only 1 unit left)
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow=3,
- flow_index=("A", "C", None),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
- assert flow_placement_meta.placed_flow == 1
- assert flow_placement_meta.remaining_flow == 2
- assert not any(
- edge[3]["flow"] > edge[3]["capacity"] for edge in r.get_edges().values()
- )
- # Check final distribution
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "cost": 1,
- "capacity": 5,
- "flow": 4.0,
- "flows": {("A", "C", None): 4.0},
- },
- ),
- 1: ("B", "A", 1, {"cost": 1, "capacity": 5, "flow": 0, "flows": {}}),
- 2: (
- "B",
- "C",
- 2,
- {
- "cost": 1,
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", None): 1.0},
- },
- ),
- 3: ("C", "B", 3, {"cost": 1, "capacity": 1, "flow": 0, "flows": {}}),
- 4: (
- "B",
- "C",
- 4,
- {
- "cost": 1,
- "capacity": 3,
- "flow": 3.0,
- "flows": {("A", "C", None): 3.0},
- },
- ),
- 5: ("C", "B", 5, {"cost": 1, "capacity": 3, "flow": 0, "flows": {}}),
- 6: ("B", "C", 6, {"cost": 2, "capacity": 7, "flow": 0, "flows": {}}),
- 7: ("C", "B", 7, {"cost": 2, "capacity": 7, "flow": 0, "flows": {}}),
- }
-
- def test_place_flow_on_graph_graph3_proportional_1(self, graph3):
- """
- Place flow from A->C on 'graph3' with PROPORTIONAL distribution.
- Ensures the total feasible flow is 10 and that edges do not exceed capacity.
- """
- _, pred = spf(graph3, "A")
- r = init_flow_graph(graph3)
-
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow_index=("A", "C", None),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- assert flow_placement_meta.placed_flow == 10
- assert flow_placement_meta.remaining_flow == float("inf")
- assert not any(
- edge[3]["flow"] > edge[3]["capacity"] for edge in r.get_edges().values()
- )
- # Check the final edges, as given below:
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "capacity": 2,
- "flow": 1.0,
- "flows": {("A", "C", None): 1.0},
- "cost": 1,
- },
- ),
- 1: (
- "A",
- "B",
- 1,
- {
- "capacity": 4,
- "flow": 2.0,
- "flows": {("A", "C", None): 2.0},
- "cost": 1,
- },
- ),
- 2: (
- "A",
- "B",
- 2,
- {
- "capacity": 6,
- "flow": 3.0,
- "flows": {("A", "C", None): 3.0},
- "cost": 1,
- },
- ),
- 3: (
- "B",
- "C",
- 3,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", None): 1.0},
- "cost": 1,
- },
- ),
- 4: (
- "B",
- "C",
- 4,
- {
- "capacity": 2,
- "flow": 2.0,
- "flows": {("A", "C", None): 2.0},
- "cost": 1,
- },
- ),
- 5: (
- "B",
- "C",
- 5,
- {
- "capacity": 3,
- "flow": 3.0,
- "flows": {("A", "C", None): 3.0},
- "cost": 1,
- },
- ),
- 6: ("C", "D", 6, {"capacity": 3, "flow": 0, "flows": {}, "cost": 2}),
- 7: (
- "A",
- "E",
- 7,
- {
- "capacity": 5,
- "flow": 4.0,
- "flows": {("A", "C", None): 4.0},
- "cost": 1,
- },
- ),
- 8: (
- "E",
- "C",
- 8,
- {
- "capacity": 4,
- "flow": 4.0,
- "flows": {("A", "C", None): 4.0},
- "cost": 1,
- },
- ),
- 9: ("A", "D", 9, {"capacity": 2, "flow": 0, "flows": {}, "cost": 4}),
- 10: ("C", "F", 10, {"capacity": 1, "flow": 0, "flows": {}, "cost": 1}),
- 11: ("F", "D", 11, {"capacity": 2, "flow": 0, "flows": {}, "cost": 1}),
- }
- assert flow_placement_meta.nodes == {"A", "E", "B", "C"}
- assert flow_placement_meta.edges == {0, 1, 2, 3, 4, 5, 7, 8}
-
- def test_place_flow_on_graph_graph3_proportional_2(self, graph3):
- """
- Another flow on 'graph3', from A->D. Checks partial flows
- split among multiple edges and the correctness of the final distribution.
- """
- _, pred = spf(graph3, "A")
- r = init_flow_graph(graph3)
-
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "D",
- pred,
- flow_index=("A", "D", None),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- assert flow_placement_meta.placed_flow == 6
- assert flow_placement_meta.remaining_flow == float("inf")
- assert not any(
- edge[3]["flow"] > edge[3]["capacity"] for edge in r.get_edges().values()
- )
- # Confirm final distribution:
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "capacity": 2,
- "flow": 0.6666666666666666,
- "flows": {("A", "D", None): 0.6666666666666666},
- "cost": 1,
- },
- ),
- 1: (
- "A",
- "B",
- 1,
- {
- "capacity": 4,
- "flow": 1.3333333333333333,
- "flows": {("A", "D", None): 1.3333333333333333},
- "cost": 1,
- },
- ),
- 2: (
- "A",
- "B",
- 2,
- {
- "capacity": 6,
- "flow": 2.0,
- "flows": {("A", "D", None): 2.0},
- "cost": 1,
- },
- ),
- 3: (
- "B",
- "C",
- 3,
- {
- "capacity": 1,
- "flow": 0.6666666666666666,
- "flows": {("A", "D", None): 0.6666666666666666},
- "cost": 1,
- },
- ),
- 4: (
- "B",
- "C",
- 4,
- {
- "capacity": 2,
- "flow": 1.3333333333333333,
- "flows": {("A", "D", None): 1.3333333333333333},
- "cost": 1,
- },
- ),
- 5: (
- "B",
- "C",
- 5,
- {
- "capacity": 3,
- "flow": 2.0,
- "flows": {("A", "D", None): 2.0},
- "cost": 1,
- },
- ),
- 6: (
- "C",
- "D",
- 6,
- {
- "capacity": 3,
- "flow": 3.0,
- "flows": {("A", "D", None): 3.0},
- "cost": 2,
- },
- ),
- 7: ("A", "E", 7, {"capacity": 5, "flow": 0, "flows": {}, "cost": 1}),
- 8: ("E", "C", 8, {"capacity": 4, "flow": 0, "flows": {}, "cost": 1}),
- 9: (
- "A",
- "D",
- 9,
- {
- "capacity": 2,
- "flow": 2.0,
- "flows": {("A", "D", None): 2.0},
- "cost": 4,
- },
- ),
- 10: (
- "C",
- "F",
- 10,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "D", None): 1.0},
- "cost": 1,
- },
- ),
- 11: (
- "F",
- "D",
- 11,
- {
- "capacity": 2,
- "flow": 1.0,
- "flows": {("A", "D", None): 1.0},
- "cost": 1,
- },
- ),
- }
-
- def test_place_flow_on_graph_line1_balanced_1(self, line1):
- """
- Place flow using EQUAL_BALANCED on line1, verifying capacity usage
- and final flows from A->C.
- """
- _, pred = spf(line1, "A")
- r = init_flow_graph(line1)
-
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow_index=("A", "C", None),
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
- assert flow_placement_meta.placed_flow == 2
- assert flow_placement_meta.remaining_flow == float("inf")
- assert not any(
- edge[3]["flow"] > edge[3]["capacity"] for edge in r.get_edges().values()
- )
- # Check final state
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "cost": 1,
- "capacity": 5,
- "flow": 2.0,
- "flows": {("A", "C", None): 2.0},
- },
- ),
- 1: ("B", "A", 1, {"cost": 1, "capacity": 5, "flow": 0, "flows": {}}),
- 2: (
- "B",
- "C",
- 2,
- {
- "cost": 1,
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", None): 1.0},
- },
- ),
- 3: ("C", "B", 3, {"cost": 1, "capacity": 1, "flow": 0, "flows": {}}),
- 4: (
- "B",
- "C",
- 4,
- {
- "cost": 1,
- "capacity": 3,
- "flow": 1.0,
- "flows": {("A", "C", None): 1.0},
- },
- ),
- 5: ("C", "B", 5, {"cost": 1, "capacity": 3, "flow": 0, "flows": {}}),
- 6: ("B", "C", 6, {"cost": 2, "capacity": 7, "flow": 0, "flows": {}}),
- 7: ("C", "B", 7, {"cost": 2, "capacity": 7, "flow": 0, "flows": {}}),
- }
-
- def test_place_flow_on_graph_line1_balanced_2(self, line1):
- """
- Place flow in two steps (1, then 2) using EQUAL_BALANCED.
- The second step can only place 1 more unit due to capacity constraints.
- """
- _, pred = spf(line1, "A")
- r = init_flow_graph(line1)
-
- # Place 1 unit first
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow=1,
- flow_index=("A", "C", None),
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
- assert flow_placement_meta.placed_flow == 1
- assert flow_placement_meta.remaining_flow == 0
-
- # Attempt to place 2 more
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow=2,
- flow_index=("A", "C", None),
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
- assert flow_placement_meta.placed_flow == 1
- assert flow_placement_meta.remaining_flow == 1
- assert not any(
- edge[3]["flow"] > edge[3]["capacity"] for edge in r.get_edges().values()
- )
- # Check final distribution
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "cost": 1,
- "capacity": 5,
- "flow": 2.0,
- "flows": {("A", "C", None): 2.0},
- },
- ),
- 1: ("B", "A", 1, {"cost": 1, "capacity": 5, "flow": 0, "flows": {}}),
- 2: (
- "B",
- "C",
- 2,
- {
- "cost": 1,
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", None): 1.0},
- },
- ),
- 3: ("C", "B", 3, {"cost": 1, "capacity": 1, "flow": 0, "flows": {}}),
- 4: (
- "B",
- "C",
- 4,
- {
- "cost": 1,
- "capacity": 3,
- "flow": 1.0,
- "flows": {("A", "C", None): 1.0},
- },
- ),
- 5: ("C", "B", 5, {"cost": 1, "capacity": 3, "flow": 0, "flows": {}}),
- 6: ("B", "C", 6, {"cost": 2, "capacity": 7, "flow": 0, "flows": {}}),
- 7: ("C", "B", 7, {"cost": 2, "capacity": 7, "flow": 0, "flows": {}}),
- }
-
- def test_place_flow_on_graph_graph4_balanced(self, graph4):
- """
- EQUAL_BALANCED flow on graph4 from A->C, placing 1 unit total.
- Verifies correct edges and final flow distribution.
- """
- _, pred = spf(graph4, "A")
- r = init_flow_graph(graph4)
-
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow=1,
- flow_index=("A", "C", None),
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
-
- assert flow_placement_meta.placed_flow == 1
- assert flow_placement_meta.remaining_flow == 0
- assert not any(
- edge[3]["flow"] > edge[3]["capacity"] for edge in r.get_edges().values()
- )
- assert flow_placement_meta.nodes == {"C", "B", "A"}
- assert flow_placement_meta.edges == {0, 1}
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", None): 1.0},
- "cost": 1,
- },
- ),
- 1: (
- "B",
- "C",
- 1,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", None): 1.0},
- "cost": 1,
- },
- ),
- 2: ("A", "B1", 2, {"capacity": 2, "flow": 0, "flows": {}, "cost": 2}),
- 3: ("B1", "C", 3, {"capacity": 2, "flow": 0, "flows": {}, "cost": 2}),
- 4: ("A", "B2", 4, {"capacity": 3, "flow": 0, "flows": {}, "cost": 3}),
- 5: ("B2", "C", 5, {"capacity": 3, "flow": 0, "flows": {}, "cost": 3}),
- }
-
- def test_place_flow_on_graph_self_loop_proportional(self):
- """
- Test self-loop behavior with PROPORTIONAL flow placement.
- When source equals destination, no flow should be placed.
- """
- # Create a graph with a self-loop
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_edge("A", "A", key=0, capacity=10.0, flow=0.0, flows={}, cost=1)
- r = init_flow_graph(g)
-
- # Create pred with self-loop
- pred = {"A": {"A": [0]}}
-
- # Attempt to place flow on self-loop
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "A",
- pred,
- flow=5.0,
- flow_index=("A", "A", "SELF_LOOP"),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- # Should place 0 flow and return the requested flow as remaining
- assert flow_placement_meta.placed_flow == 0.0
- assert flow_placement_meta.remaining_flow == 5.0
-
- # Verify the self-loop edge has no flow placed on it
- edges = r.get_edges()
- self_loop_edge = edges[0]
- assert self_loop_edge[3]["flow"] == 0.0
- assert self_loop_edge[3]["flows"] == {}
-
- def test_place_flow_on_graph_self_loop_equal_balanced(self):
- """
- Test self-loop behavior with EQUAL_BALANCED flow placement.
- When source equals destination, no flow should be placed.
- """
- # Create a graph with multiple self-loops
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_edge("A", "A", key=0, capacity=5.0, flow=0.0, flows={}, cost=1)
- g.add_edge("A", "A", key=1, capacity=3.0, flow=0.0, flows={}, cost=1)
- r = init_flow_graph(g)
-
- # Create pred with multiple self-loop edges
- pred = {"A": {"A": [0, 1]}}
-
- # Attempt to place flow on self-loops
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "A",
- pred,
- flow=10.0,
- flow_index=("A", "A", "MULTI_SELF_LOOP"),
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
-
- # Should place 0 flow and return all requested flow as remaining
- assert flow_placement_meta.placed_flow == 0.0
- assert flow_placement_meta.remaining_flow == 10.0
-
- # Verify all self-loop edges have no flow placed on them
- edges = r.get_edges()
- for edge_data in edges.values():
- assert edge_data[3]["flow"] == 0.0
- assert edge_data[3]["flows"] == {}
-
- def test_place_flow_on_graph_self_loop_infinite_flow(self):
- """
- Test self-loop behavior when requesting infinite flow.
- Should still place 0 flow and return infinite remaining flow.
- """
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_edge("A", "A", key=0, capacity=100.0, flow=0.0, flows={}, cost=1)
- r = init_flow_graph(g)
-
- pred = {"A": {"A": [0]}}
-
- # Request infinite flow on self-loop
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "A",
- pred,
- flow=float("inf"),
- flow_index=("A", "A", "INF_SELF_LOOP"),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- # Should place 0 flow and return infinite remaining flow
- assert flow_placement_meta.placed_flow == 0.0
- assert flow_placement_meta.remaining_flow == float("inf")
-
- # Verify metadata is correctly handled for self-loops
- # The early return should not populate nodes/edges metadata
- assert len(flow_placement_meta.nodes) <= 1 # Should be 0 or just contain source
- assert flow_placement_meta.edges == set() # No edges should carry flow
-
- def test_place_flow_on_graph_self_loop_with_other_edges(self):
- """
- Test self-loop behavior in a graph that also has regular edges.
- Self-loop should still place 0 flow while regular flows work normally.
- """
- # Create graph with both self-loop and regular edges
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_edge("A", "A", key=0, capacity=10.0, flow=0.0, flows={}, cost=1)
- g.add_edge("A", "B", key=1, capacity=5.0, flow=0.0, flows={}, cost=2)
- g.add_edge("B", "A", key=2, capacity=3.0, flow=0.0, flows={}, cost=2)
- r = init_flow_graph(g)
-
- # Test self-loop A->A
- pred_self = {"A": {"A": [0]}}
- flow_meta_self = place_flow_on_graph(
- r, "A", "A", pred_self, flow=7.0, flow_index=("A", "A", "SELF")
- )
- assert flow_meta_self.placed_flow == 0.0
- assert flow_meta_self.remaining_flow == 7.0
-
- # Test regular flow A->B to verify graph still works for non-self-loops
- pred_regular = {"A": {}, "B": {"A": [1]}}
- flow_meta_regular = place_flow_on_graph(
- r, "A", "B", pred_regular, flow=4.0, flow_index=("A", "B", "REGULAR")
- )
- assert flow_meta_regular.placed_flow == 4.0
- assert flow_meta_regular.remaining_flow == 0.0
-
- # Verify self-loop edge still has no flow
- edges = r.get_edges()
- assert edges[0][3]["flow"] == 0.0 # Self-loop edge
- assert edges[1][3]["flow"] == 4.0 # A->B edge should have flow
-
- def test_place_flow_on_graph_self_loop_empty_pred(self):
- """
- Test self-loop behavior when pred is empty.
- Should return 0 flow even with empty pred.
- """
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_edge("A", "A", key=0, capacity=10.0, flow=0.0, flows={}, cost=1)
- r = init_flow_graph(g)
-
- # Empty pred
- pred = {}
-
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "A",
- pred,
- flow=5.0,
- flow_index=("A", "A", "EMPTY_PRED"),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- # Should place 0 flow due to self-loop optimization, not pred limitations
- assert flow_placement_meta.placed_flow == 0.0
- assert flow_placement_meta.remaining_flow == 5.0
-
- # Verify the self-loop edge has no flow
- edges = r.get_edges()
- assert edges[0][3]["flow"] == 0.0
- assert edges[0][3]["flows"] == {}
-
-
-#
-# Tests for removing flow from the graph, fully or partially.
-#
-
-
-class TestRemoveFlowFromGraph:
- def test_remove_flow_from_graph_4(self, graph4):
- """
- Place a large flow from A->C on 'graph4' (only 1 feasible),
- then remove it entirely using remove_flow_from_graph(r).
- Verifies that all edges are cleared.
- """
- _, pred = spf(graph4, "A")
- r = init_flow_graph(graph4)
-
- flow_placement_meta = place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- 10,
- flow_index=("A", "C", None),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
- assert flow_placement_meta.placed_flow == 1
- assert flow_placement_meta.remaining_flow == 9
-
- # Remove all flows
- remove_flow_from_graph(r)
-
- for _, edata in r.get_edges().items():
- assert edata[3]["flow"] == 0
- assert edata[3]["flows"] == {}
-
- # Or check exact dictionary:
- assert r.get_edges() == {
- 0: ("A", "B", 0, {"capacity": 1, "flow": 0, "flows": {}, "cost": 1}),
- 1: ("B", "C", 1, {"capacity": 1, "flow": 0, "flows": {}, "cost": 1}),
- 2: ("A", "B1", 2, {"capacity": 2, "flow": 0, "flows": {}, "cost": 2}),
- 3: ("B1", "C", 3, {"capacity": 2, "flow": 0, "flows": {}, "cost": 2}),
- 4: ("A", "B2", 4, {"capacity": 3, "flow": 0, "flows": {}, "cost": 3}),
- 5: ("B2", "C", 5, {"capacity": 3, "flow": 0, "flows": {}, "cost": 3}),
- }
-
- def test_remove_specific_flow(self, graph4):
- """
- Demonstrates removing only a specific flow_index (e.g., flowA).
- Another flow (flowB) remains intact.
- """
- _, pred = spf(graph4, "A")
- r = init_flow_graph(graph4)
-
- # Place two flows
- place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow=1,
- flow_index=("A", "C", "flowA"),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
- place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow=2,
- flow_index=("A", "C", "flowB"),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- # Remove only flowA
- remove_flow_from_graph(r, flow_index=("A", "C", "flowA"))
-
- # flowA should be gone, flowB remains
- for _, (_, _, _, edge_attr) in r.get_edges().items():
- assert ("A", "C", "flowA") not in edge_attr["flows"]
- # Wherever flowB is present, its placed value is positive
- if ("A", "C", "flowB") in edge_attr["flows"]:
- assert edge_attr["flows"][("A", "C", "flowB")] > 0
-
- # Now remove all flows
- remove_flow_from_graph(r)
- for _, (_, _, _, edge_attr) in r.get_edges().items():
- assert edge_attr["flow"] == 0
- assert edge_attr["flows"] == {}
-
- def test_remove_flow_zero_flow_placed(self, line1):
- """
- If no flow was placed (e.g., 0 flow or unreachable), removing flow should be safe
- and simply leave edges as-is.
- """
- _, pred = spf(line1, "A")
- r = init_flow_graph(line1)
-
- # Place zero flow:
- place_flow_on_graph(
- r,
- "A",
- "C",
- pred,
- flow=0,
- flow_index=("A", "C", "empty"),
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
- # Remove flows (none effectively exist)
- remove_flow_from_graph(
- r, flow_index=("A", "C", "empty")
- ) # Ensure edges remain at zero flow
- for _, edata in r.get_edges().items():
- assert edata[3]["flow"] == 0
- assert edata[3]["flows"] == {}
diff --git a/tests/algorithms/test_spf.py b/tests/algorithms/test_spf.py
deleted file mode 100644
index 1b92c7a..0000000
--- a/tests/algorithms/test_spf.py
+++ /dev/null
@@ -1,351 +0,0 @@
-from ngraph.algorithms.edge_select import EdgeSelect, edge_select_fabric
-from ngraph.algorithms.spf import ksp, spf
-
-
-class TestSPF:
- def test_spf_1(self, line1):
- """Test SPF on the 'line1' fixture."""
- costs, pred = spf(line1, "A")
- assert costs == {"A": 0, "B": 1, "C": 2}
- # numeric edge IDs: B is reached by [0], then C is reached by [2,4]
- assert pred == {"A": {}, "B": {"A": [0]}, "C": {"B": [2, 4]}}
-
- def test_spf_2(self, square1):
- """Test SPF on 'square1' fixture."""
- costs, pred = spf(square1, "A")
- assert costs == {"A": 0, "B": 1, "D": 2, "C": 2}
- # numeric edge IDs: B from [0], D from [2], C from [1]
- assert pred == {"A": {}, "B": {"A": [0]}, "D": {"A": [2]}, "C": {"B": [1]}}
-
- def test_spf_3(self, square2):
- """Test SPF on 'square2' fixture."""
- costs, pred = spf(square2, "A")
- assert costs == {"A": 0, "B": 1, "D": 1, "C": 2}
- # B from [0], D from [2], C can come from B([1]) or D([3])
- assert pred == {
- "A": {},
- "B": {"A": [0]},
- "D": {"A": [2]},
- "C": {"B": [1], "D": [3]},
- }
-
- def test_spf_4(self, graph3):
- """Test SPF on 'graph3', which has parallel edges."""
- costs, pred = spf(graph3, "A")
- # minimal costs to each node
- assert costs == {"A": 0, "B": 1, "E": 1, "C": 2, "F": 3, "D": 4}
- # multiple parallel edges used: B from [0,1,2], C from [3,4,5], E->C=8, etc.
- assert pred == {
- "A": {},
- "B": {"A": [0, 1, 2]},
- "E": {"A": [7]},
- "C": {"B": [3, 4, 5], "E": [8]},
- "F": {"C": [10]},
- "D": {"A": [9], "C": [6], "F": [11]},
- }
-
- def test_spf_5(self, graph3):
- """
- Use SINGLE_MIN_COST selection and multipath=False on graph3.
- Picks exactly one minimal edge among parallel edges.
- """
- costs, pred = spf(
- graph3,
- src_node="A",
- edge_select_func=edge_select_fabric(EdgeSelect.SINGLE_MIN_COST),
- multipath=False,
- )
- assert costs == {"A": 0, "B": 1, "E": 1, "C": 2, "F": 3, "D": 4}
- # Chose first parallel edge to B => ID=0.
- assert pred == {
- "A": {},
- "B": {"A": [0]},
- "E": {"A": [7]},
- "C": {"B": [3]},
- "F": {"C": [10]},
- "D": {"A": [9]},
- }
-
-
-class TestKSP:
- def test_ksp_1(self, line1):
- """KSP on 'line1' from A->C with multipath=True => 2 distinct paths."""
- paths = list(ksp(line1, "A", "C", multipath=True))
- assert paths == [
- (
- {"A": 0, "B": 1, "C": 2},
- {"A": {}, "B": {"A": [0]}, "C": {"B": [2, 4]}},
- ),
- (
- {"A": 0, "B": 1, "C": 3},
- {"A": {}, "B": {"A": [0]}, "C": {"B": [6]}},
- ),
- ]
-
- def test_ksp_2(self, square1):
- """KSP on 'square1' => 2 distinct paths from A->C."""
- paths = list(ksp(square1, "A", "C", multipath=True))
- assert paths == [
- (
- {"A": 0, "B": 1, "D": 2, "C": 2},
- {"A": {}, "B": {"A": [0]}, "D": {"A": [2]}, "C": {"B": [1]}},
- ),
- (
- {"A": 0, "B": 1, "D": 2, "C": 4},
- {"A": {}, "B": {"A": [0]}, "D": {"A": [2]}, "C": {"D": [3]}},
- ),
- ]
-
- def test_ksp_3(self, square2):
- """Only one distinct shortest path from A->C in 'square2' even with multipath=True."""
- paths = list(ksp(square2, "A", "C", multipath=True))
- assert paths == [
- (
- {"A": 0, "B": 1, "D": 1, "C": 2},
- {
- "A": {},
- "B": {"A": [0]},
- "D": {"A": [2]},
- "C": {"B": [1], "D": [3]},
- },
- )
- ]
-
- def test_ksp_4(self, graph3):
- """KSP on graph3 from A->D => single best path in multipath mode."""
- paths = list(ksp(graph3, "A", "D", multipath=True))
- assert paths == [
- (
- {"A": 0, "B": 1, "E": 1, "C": 2, "F": 3, "D": 4},
- {
- "A": {},
- "B": {"A": [0, 1, 2]},
- "E": {"A": [7]},
- "C": {"B": [3, 4, 5], "E": [8]},
- "F": {"C": [10]},
- "D": {"A": [9], "C": [6], "F": [11]},
- },
- )
- ]
-
- def test_ksp_5(self, graph5):
- """
- KSP on fully connected 'graph5' from A->B in multipath => many distinct paths.
- We verify no duplicates and compare to a known set of 11 results.
- """
- paths = list(ksp(graph5, "A", "B", multipath=True))
- visited = set()
- for _costs, pred in paths:
- edge_ids = tuple(
- str(edge_id)
- for nbrs in pred.values()
- for edge_list in nbrs.values()
- for edge_id in edge_list
- )
- edge_ids = tuple(sorted(edge_ids))
- if edge_ids in visited:
- raise Exception(f"Duplicate path found: {edge_ids}")
- visited.add(edge_ids)
-
- assert paths == [
- (
- {"A": 0, "B": 1, "C": 1, "D": 1, "E": 1},
- {
- "A": {},
- "B": {"A": [0]},
- "C": {"A": [1]},
- "D": {"A": [2]},
- "E": {"A": [3]},
- },
- ),
- (
- {"A": 0, "B": 2, "C": 1, "D": 1, "E": 1},
- {
- "A": {},
- "B": {"C": [9], "D": [13], "E": [17]},
- "C": {"A": [1]},
- "D": {"A": [2]},
- "E": {"A": [3]},
- },
- ),
- (
- {"A": 0, "B": 3, "C": 1, "D": 2, "E": 2},
- {
- "A": {},
- "B": {"D": [13], "E": [17]},
- "C": {"A": [1]},
- "D": {"C": [10]},
- "E": {"C": [11]},
- },
- ),
- (
- {"A": 0, "B": 3, "C": 2, "D": 1, "E": 2},
- {
- "A": {},
- "B": {"C": [9], "E": [17]},
- "C": {"D": [14]},
- "D": {"A": [2]},
- "E": {"D": [15]},
- },
- ),
- (
- {"A": 0, "B": 3, "C": 2, "D": 2, "E": 1},
- {
- "A": {},
- "B": {"C": [9], "D": [13]},
- "C": {"E": [18]},
- "D": {"E": [19]},
- "E": {"A": [3]},
- },
- ),
- (
- {"A": 0, "B": 4, "C": 1, "D": 2, "E": 3},
- {
- "A": {},
- "B": {"E": [17]},
- "C": {"A": [1]},
- "D": {"C": [10]},
- "E": {"D": [15]},
- },
- ),
- (
- {"A": 0, "B": 4, "C": 1, "D": 3, "E": 2},
- {
- "A": {},
- "B": {"D": [13]},
- "C": {"A": [1]},
- "D": {"E": [19]},
- "E": {"C": [11]},
- },
- ),
- (
- {"A": 0, "B": 4, "C": 2, "D": 1, "E": 3},
- {
- "A": {},
- "B": {"E": [17]},
- "C": {"D": [14]},
- "D": {"A": [2]},
- "E": {"C": [11]},
- },
- ),
- (
- {"A": 0, "B": 4, "C": 3, "D": 1, "E": 2},
- {
- "A": {},
- "B": {"C": [9]},
- "C": {"E": [18]},
- "D": {"A": [2]},
- "E": {"D": [15]},
- },
- ),
- (
- {"A": 0, "B": 4, "C": 2, "D": 3, "E": 1},
- {
- "A": {},
- "B": {"D": [13]},
- "C": {"E": [18]},
- "D": {"C": [10]},
- "E": {"A": [3]},
- },
- ),
- (
- {"A": 0, "B": 4, "C": 3, "D": 2, "E": 1},
- {
- "A": {},
- "B": {"C": [9]},
- "C": {"D": [14]},
- "D": {"E": [19]},
- "E": {"A": [3]},
- },
- ),
- ]
-
- def test_ksp_6(self, graph5):
- """KSP with max_k=2 => only 2 shortest paths from A->B."""
- paths = list(ksp(graph5, "A", "B", multipath=True, max_k=2))
- assert paths == [
- (
- {"A": 0, "B": 1, "C": 1, "D": 1, "E": 1},
- {
- "A": {},
- "B": {"A": [0]},
- "C": {"A": [1]},
- "D": {"A": [2]},
- "E": {"A": [3]},
- },
- ),
- (
- {"A": 0, "B": 2, "C": 1, "D": 1, "E": 1},
- {
- "A": {},
- "B": {"C": [9], "D": [13], "E": [17]},
- "C": {"A": [1]},
- "D": {"A": [2]},
- "E": {"A": [3]},
- },
- ),
- ]
-
- def test_ksp_7(self, graph5):
- """KSP with max_path_cost=2 => only paths <= cost=2 from A->B are returned."""
- paths = list(ksp(graph5, "A", "B", multipath=True, max_path_cost=2))
- assert paths == [
- (
- {"A": 0, "B": 1, "C": 1, "D": 1, "E": 1},
- {
- "A": {},
- "B": {"A": [0]},
- "C": {"A": [1]},
- "D": {"A": [2]},
- "E": {"A": [3]},
- },
- ),
- (
- {"A": 0, "B": 2, "C": 1, "D": 1, "E": 1},
- {
- "A": {},
- "B": {"C": [9], "D": [13], "E": [17]},
- "C": {"A": [1]},
- "D": {"A": [2]},
- "E": {"A": [3]},
- },
- ),
- ]
-
- def test_ksp_8(self, graph5):
- """KSP with max_path_cost_factor=3 => expand cost limit beyond the best path cost."""
- paths = list(ksp(graph5, "A", "B", multipath=True, max_path_cost_factor=3))
- assert len(paths) == 5
-
- def test_ksp_9(self, graph5):
- """KSP with max_path_cost=0.5 => no paths since cost is at least 1."""
- paths = list(ksp(graph5, "A", "B", multipath=True, max_path_cost=0.5))
- assert paths == []
-
- def test_ksp_10(self, graph5):
- """KSP with multipath=False, max_path_cost=2 => partial expansions only."""
- paths = list(ksp(graph5, "A", "B", multipath=False, max_path_cost=2))
- assert len(paths) == 4
-
- def test_ksp_11(self, square5):
- """Multiple routes from A->D in 'square5'. Check expansions in multipath mode."""
- paths = list(ksp(square5, "A", "D", multipath=True))
- assert paths == [
- (
- {"A": 0, "B": 1, "C": 1, "D": 2},
- {"A": {}, "B": {"A": [0]}, "C": {"A": [1]}, "D": {"B": [2], "C": [3]}},
- ),
- (
- {"A": 0, "B": 1, "C": 2, "D": 3},
- {"A": {}, "B": {"A": [0]}, "C": {"B": [4]}, "D": {"C": [3]}},
- ),
- (
- {"A": 0, "B": 2, "C": 1, "D": 3},
- {"A": {}, "B": {"C": [5]}, "C": {"A": [1]}, "D": {"B": [2]}},
- ),
- ]
-
- def test_ksp_12(self, square5):
- """No route from A->E in 'square5', so we get an empty list."""
- paths = list(ksp(square5, "A", "E", multipath=True))
- assert paths == []
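
For reference, a minimal sketch of the `spf`/`ksp` calling conventions the deleted tests exercised. The three-node graph below is illustrative (the original tests used shared fixtures such as `line1` and `graph3`); only the call signatures visible in the tests above are assumed.

```python
from ngraph.algorithms.edge_select import EdgeSelect, edge_select_fabric
from ngraph.algorithms.spf import ksp, spf
from ngraph.graph.strict_multidigraph import StrictMultiDiGraph

# Illustrative stand-in for the line-style fixtures: A -> B -> C.
g = StrictMultiDiGraph()
for n in ("A", "B", "C"):
    g.add_node(n)
g.add_edge("A", "B", cost=1, capacity=1)
g.add_edge("B", "C", cost=1, capacity=1)

# spf returns (costs, pred): per-node cost plus predecessor edge lists.
costs, pred = spf(g, "A")
assert costs == {"A": 0, "B": 1, "C": 2}

# SINGLE_MIN_COST with multipath=False keeps exactly one edge per hop.
costs, pred = spf(
    g,
    src_node="A",
    edge_select_func=edge_select_fabric(EdgeSelect.SINGLE_MIN_COST),
    multipath=False,
)

# ksp yields successive (costs, pred) solutions; max_k and max_path_cost
# bound how far the search expands.
for k_costs, _k_pred in ksp(g, "A", "C", multipath=True, max_k=2):
    assert k_costs["C"] >= 2
```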
diff --git a/tests/cli/test_cli_logging_smoke.py b/tests/cli/test_cli_logging_smoke.py
deleted file mode 100644
index ae1bcea..0000000
--- a/tests/cli/test_cli_logging_smoke.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from __future__ import annotations
-
-import logging
-from pathlib import Path
-
-from ngraph import cli
-
-
-def test_cli_verbose_and_quiet_switch_levels(
- caplog, tmp_path: Path, monkeypatch
-) -> None:
- # Minimal scenario
- scenario = tmp_path / "t.yaml"
- scenario.write_text(
- """
-network:
- nodes:
- A: {}
-workflow:
- - step_type: BuildGraph
-"""
- )
-
- # run in temp directory to avoid polluting repo
- monkeypatch.chdir(tmp_path)
-
- # verbose enables debug
- with caplog.at_level(logging.DEBUG, logger="ngraph"):
- cli.main(
- ["--verbose", "run", str(scenario), "--no-results"]
- ) # avoid writing results
- assert any("Debug logging enabled" in r.message for r in caplog.records)
-
- # quiet suppresses info
- caplog.clear()
- with caplog.at_level(logging.INFO, logger="ngraph"):
- cli.main(
- ["--quiet", "run", str(scenario), "--no-results"]
- ) # avoid writing results
- assert not any(r.levelno == logging.INFO for r in caplog.records)
diff --git a/tests/config/test_config.py b/tests/config/test_config.py
deleted file mode 100644
index 13a1a25..0000000
--- a/tests/config/test_config.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""Tests for `ngraph.config` focusing on behavior and correctness."""
-
-from ngraph.config import TrafficManagerConfig
-
-
-def test_estimate_rounds_bounds_default_config() -> None:
- """Default config clamps results within [min_rounds, max_rounds]."""
- config = TrafficManagerConfig()
-
- # Below lower bound clamps to min_rounds
- assert config.estimate_rounds(-1.0) == config.min_rounds
-
- # Far above upper bound clamps to max_rounds
- very_high_ratio = (config.max_rounds + 100) / config.ratio_multiplier
- assert config.estimate_rounds(very_high_ratio) == config.max_rounds
-
- # Return type is int and always within bounds
- result = config.estimate_rounds(0.5)
- assert isinstance(result, int)
- assert config.min_rounds <= result <= config.max_rounds
-
-
-def test_estimate_rounds_monotonic_default_config() -> None:
- """Estimated rounds are non-decreasing with increasing ratio."""
- config = TrafficManagerConfig()
- ratios = [0.0, 0.5, 1.0, 2.0, 5.0]
- values = [config.estimate_rounds(r) for r in ratios]
- assert values == sorted(values)
-
-
-def test_estimate_rounds_formula_and_clamping_custom_config() -> None:
- """Formula matches base + multiplier * ratio when not clamped; clamps otherwise.
-
- Uses a custom configuration to test exact arithmetic independent of defaults.
- """
- # Wide bounds: exact formula applies
- cfg_linear = TrafficManagerConfig(
- default_rounds=0,
- min_rounds=0,
- max_rounds=1_000_000,
- ratio_base=3,
- ratio_multiplier=2,
- )
- assert cfg_linear.estimate_rounds(0.0) == 3 # 3 + 2*0 = 3
- assert cfg_linear.estimate_rounds(1.5) == 6 # 3 + 2*1.5 = 6
- assert cfg_linear.estimate_rounds(10.0) == 23 # 3 + 2*10 = 23
-
- # Tight bounds: verify both min and max clamping
- cfg_clamped = TrafficManagerConfig(
- default_rounds=10,
- min_rounds=8,
- max_rounds=20,
- ratio_base=3,
- ratio_multiplier=2,
- )
- assert cfg_clamped.estimate_rounds(-5.0) == 8 # below min
- assert cfg_clamped.estimate_rounds(1.5) == 8 # 3 + 2*1.5 = 6 -> clamp to 8
- assert cfg_clamped.estimate_rounds(100.0) == 20 # above max
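
The deleted assertions pin down the `estimate_rounds` arithmetic exactly. As a reference, here is a sketch of that formula under the same custom parameters; the clamp-then-int ordering and rounding behavior are assumptions beyond what the tests assert.

```python
def estimate_rounds_sketch(
    ratio: float,
    *,
    ratio_base: int = 3,
    ratio_multiplier: int = 2,
    min_rounds: int = 8,
    max_rounds: int = 20,
) -> int:
    # rounds = ratio_base + ratio_multiplier * ratio, clamped into [min, max]
    raw = ratio_base + ratio_multiplier * ratio
    return int(min(max(raw, min_rounds), max_rounds))

assert estimate_rounds_sketch(1.5) == 8     # 3 + 2*1.5 = 6 -> clamped up to 8
assert estimate_rounds_sketch(100.0) == 20  # 203 -> clamped down to 20
```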
diff --git a/tests/conftest.py b/tests/conftest.py
index 30d3725..033c790 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,17 +1,3 @@
-"""Global pytest configuration.
-
-Conditionally registers optional fixture plugin `tests.algorithms.sample_graphs`.
-Avoid importing the plugin directly to let pytest apply assertion rewriting.
-When running a subset of tests where that module is unavailable, pytest still
-collects and runs tests in the targeted folder.
-"""
+"""Global pytest configuration."""
from __future__ import annotations
-
-from importlib.util import find_spec
-
-# Register plugin if available without importing it here. Pytest will import it
-# with assertion rewriting enabled, avoiding PytestAssertRewriteWarning.
-pytest_plugins: list[str] = []
-if find_spec("tests.algorithms.sample_graphs") is not None:
- pytest_plugins = ["tests.algorithms.sample_graphs"]
diff --git a/tests/demand/manager/test_builder_unit.py b/tests/demand/manager/test_builder_unit.py
deleted file mode 100644
index 70a61c2..0000000
--- a/tests/demand/manager/test_builder_unit.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from __future__ import annotations
-
-import pytest
-
-from ngraph.demand.manager.builder import (
- _coerce_flow_policy_config,
- build_traffic_matrix_set,
-)
-from ngraph.demand.matrix import TrafficMatrixSet
-from ngraph.flows.policy import FlowPolicyConfig
-
-
-def test_coerce_flow_policy_config_variants() -> None:
- assert _coerce_flow_policy_config(None) is None
- assert (
- _coerce_flow_policy_config(FlowPolicyConfig.SHORTEST_PATHS_ECMP)
- == FlowPolicyConfig.SHORTEST_PATHS_ECMP
- )
- assert (
- _coerce_flow_policy_config(int(FlowPolicyConfig.SHORTEST_PATHS_ECMP))
- == FlowPolicyConfig.SHORTEST_PATHS_ECMP
- )
- assert (
- _coerce_flow_policy_config(str(int(FlowPolicyConfig.SHORTEST_PATHS_ECMP)))
- == FlowPolicyConfig.SHORTEST_PATHS_ECMP
- )
- assert (
- _coerce_flow_policy_config("shortest_paths_ecmp")
- == FlowPolicyConfig.SHORTEST_PATHS_ECMP
- )
- with pytest.raises(ValueError):
- _coerce_flow_policy_config("not-an-enum")
-
-
-def test_build_traffic_matrix_set_happy_and_errors() -> None:
- raw = {
- "default": [
- {
- "source_path": "A",
- "sink_path": "B",
- "demand": 10.0,
- "priority": 0,
- "flow_policy_config": "shortest_paths_ecmp",
- }
- ]
- }
- tms = build_traffic_matrix_set(raw)
- assert isinstance(tms, TrafficMatrixSet)
- m = tms.get_default_matrix()
- assert m[0].flow_policy_config == FlowPolicyConfig.SHORTEST_PATHS_ECMP
-
- with pytest.raises(ValueError):
- build_traffic_matrix_set([1, 2, 3]) # type: ignore[arg-type]
- with pytest.raises(ValueError):
- build_traffic_matrix_set({"x": 1}) # type: ignore[arg-type]
- with pytest.raises(ValueError):
- build_traffic_matrix_set({"x": [1]}) # type: ignore[arg-type]
diff --git a/tests/demand/manager/test_expand.py b/tests/demand/manager/test_expand.py
deleted file mode 100644
index 29deca3..0000000
--- a/tests/demand/manager/test_expand.py
+++ /dev/null
@@ -1,115 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Dict, List, Protocol
-
-from ngraph.demand.manager.expand import expand_demands
-from ngraph.demand.spec import TrafficDemand
-from ngraph.flows.policy import FlowPolicyConfig
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-@dataclass
-class _NodeStub:
- name: str
-
-
-class _NetworkLike(Protocol):
- def select_node_groups_by_path(self, path: str) -> Dict[str, List[_NodeStub]]: ...
-
-
-class _NetworkStub:
- def __init__(self, mapping: Dict[str, Dict[str, List[_NodeStub]]]):
- self._mapping = mapping
-
- def select_node_groups_by_path(self, path: str) -> Dict[str, List[_NodeStub]]: # noqa: D401 - simple stub
- return self._mapping.get(path, {})
-
-
-def test_expand_pairwise_multiple_pairs() -> None:
- # Two sources x two sinks -> four demands
- mapping = {
- "src": {"S": [_NodeStub("A"), _NodeStub("B")]},
- "dst": {"T": [_NodeStub("C"), _NodeStub("D")]},
- }
- net: _NetworkLike = _NetworkStub(mapping)
- graph = StrictMultiDiGraph()
- # The expansion logic connects pseudo nodes to real nodes; ensure real nodes exist
- for n in ("A", "B", "C", "D"):
- graph.add_node(n)
-
- td = TrafficDemand(
- source_path="src", sink_path="dst", demand=100.0, mode="pairwise"
- )
- expanded, td_map = expand_demands(
- network=net, # type: ignore[arg-type]
- graph=graph,
- traffic_demands=[td],
- default_flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
- )
-
- assert len(expanded) == 4
- assert len(td_map[td.id]) == 4
- # Equal split across pairs
- assert all(abs(d.volume - 25.0) < 1e-9 for d in expanded)
- assert all(d.demand_class == td.priority for d in expanded)
-
-
-def test_expand_combine_uses_pseudo_nodes_and_single_demand() -> None:
- # Combine mode should create a single Demand via pseudo nodes and edges
- mapping = {
- "src": {"S": [_NodeStub("A"), _NodeStub("B")]},
- "dst": {"T": [_NodeStub("C"), _NodeStub("D")]},
- }
- net: _NetworkLike = _NetworkStub(mapping)
- graph = StrictMultiDiGraph()
- for n in ("A", "B", "C", "D"):
- graph.add_node(n)
-
- td = TrafficDemand(source_path="src", sink_path="dst", demand=42.0, mode="combine")
- expanded, td_map = expand_demands(
- network=net, # type: ignore[arg-type]
- graph=graph,
- traffic_demands=[td],
- default_flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
- )
-
- assert len(expanded) == 1
- assert len(td_map[td.id]) == 1
- d = expanded[0]
- assert d.volume == 42.0
- assert str(d.src_node).startswith("combine_src::")
- assert str(d.dst_node).startswith("combine_snk::")
- # Pseudo nodes and link edges should exist
- assert d.src_node in graph.nodes
- assert d.dst_node in graph.nodes
-
-
-def test_expand_pairwise_deterministic_ordering() -> None:
- """Expanded pairwise demands should be deterministic in ordering.
-
- We ensure stability by verifying that a fixed input produces a stable
- sequence of (src,dst) tuples.
- """
- mapping = {
- "src": {"S": [_NodeStub("B"), _NodeStub("A")]},
- "dst": {"T": [_NodeStub("D"), _NodeStub("C")]},
- }
- net: _NetworkLike = _NetworkStub(mapping)
- graph = StrictMultiDiGraph()
- # Create real nodes; ordering in graph should not affect expanded order
- for n in ("D", "C", "B", "A"):
- graph.add_node(n)
-
- td = TrafficDemand(source_path="src", sink_path="dst", demand=40.0, mode="pairwise")
- expanded, _ = expand_demands(
- network=net, # type: ignore[arg-type]
- graph=graph,
- traffic_demands=[td],
- default_flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
- )
-
- pair_seq = [(str(d.src_node), str(d.dst_node)) for d in expanded]
- # Expected deterministic pair order: sorted by src then dst
- expected = sorted(pair_seq)
- assert pair_seq == expected
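
The pairwise arithmetic asserted above is simple enough to state directly: the demand splits evenly over the Cartesian product of sources and sinks, enumerated in sorted (src, dst) order. A back-of-the-envelope check:

```python
sources, sinks, total = ["B", "A"], ["D", "C"], 100.0

# Pairwise mode enumerates sorted (src, dst) pairs deterministically...
pairs = [(s, t) for s in sorted(sources) for t in sorted(sinks)]
# ...and splits the total demand evenly across them.
per_pair = total / len(pairs)

assert len(pairs) == 4 and per_pair == 25.0
assert pairs[0] == ("A", "C")
```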
diff --git a/tests/demand/manager/test_expand_side_effects.py b/tests/demand/manager/test_expand_side_effects.py
deleted file mode 100644
index 0bb8f21..0000000
--- a/tests/demand/manager/test_expand_side_effects.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from __future__ import annotations
-
-from ngraph.demand.manager.manager import TrafficManager
-from ngraph.demand.matrix import TrafficMatrixSet
-from ngraph.demand.spec import TrafficDemand
-from ngraph.flows.policy import FlowPolicyConfig
-from ngraph.model.network import Link, Network, Node
-
-
-def _build_line() -> Network:
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_link(Link("A", "B", capacity=10.0, cost=1))
- return net
-
-
-def _sum_flow_between(graph, u: str, v: str) -> float:
- total = 0.0
- for _eid, (src, dst, _key, attr) in graph.get_edges().items():
- if src == u and dst == v:
- total += float(attr.get("flow", 0.0))
- return total
-
-
-def test_expand_combine_twice_preserves_existing_flows_and_is_idempotent() -> None:
- net = _build_line()
- tmset = TrafficMatrixSet()
- td1 = TrafficDemand(
- source_path="A",
- sink_path="B",
- demand=5.0,
- mode="combine",
- flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_WCMP,
- )
- tmset.add("default", [td1])
-
- tm = TrafficManager(network=net, traffic_matrix_set=tmset)
- tm.build_graph(add_reverse=True)
- tm.expand_demands()
- tm.place_all_demands(placement_rounds=3)
-
- assert tm.graph is not None
- g = tm.graph
- before = _sum_flow_between(g, "A", "B")
- assert before > 0.0
-
- # Re-expand the same demands again on the existing graph
- # Expected: no exception, no flow reset, and no duplicate pseudo connectors
- tm.expand_demands()
-
- after = _sum_flow_between(g, "A", "B")
- assert after == before
-
- # Verify the pseudo nodes/connectors were not duplicated
- ps = f"combine_src::{td1.id}"
- pk = f"combine_snk::{td1.id}"
- # Exactly one connector per direction should exist
- assert len(g.edges_between(ps, "A")) == 1
- assert len(g.edges_between("B", pk)) == 1
-
-
-def test_expand_combine_adds_new_demand_without_resetting_flows() -> None:
- net = _build_line()
- tmset = TrafficMatrixSet()
- td1 = TrafficDemand(
- source_path="A",
- sink_path="B",
- demand=3.0,
- mode="combine",
- flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
- )
- tmset.add("default", [td1])
-
- tm = TrafficManager(network=net, traffic_matrix_set=tmset)
- tm.build_graph(add_reverse=True)
- tm.expand_demands()
- tm.place_all_demands(placement_rounds=2)
-
- g = tm.graph
- assert g is not None
- flow_before = _sum_flow_between(g, "A", "B")
- assert flow_before > 0.0
-
- # Add another demand and expand again: flows should remain, no reset
- td2 = TrafficDemand(
- source_path="A",
- sink_path="B",
- demand=2.0,
- mode="combine",
- flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
- )
- tmset.get_default_matrix().append(td2)
-
- # Must not raise and must not zero out prior flows
- tm.expand_demands()
- flow_after = _sum_flow_between(g, "A", "B")
- assert flow_after == flow_before
-
-
-def test_reset_after_reexpand_clears_stray_flows() -> None:
- # Place with one demand, then re-expand (losing references to old demand/policy).
- # reset_all_flow_usages must clear all graph usage, including flows that belong
- # to previously expanded demands.
- net = _build_line()
- tmset = TrafficMatrixSet()
- td1 = TrafficDemand(
- source_path="A",
- sink_path="B",
- demand=4.0,
- mode="combine",
- flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
- )
- tmset.add("default", [td1])
-
- tm = TrafficManager(network=net, traffic_matrix_set=tmset)
- tm.build_graph(add_reverse=True)
- tm.expand_demands()
- tm.place_all_demands(placement_rounds=2)
-
- g = tm.graph
- assert g is not None
- assert _sum_flow_between(g, "A", "B") > 0.0
-
- # Re-expand by replacing matrix contents (typical when updating inputs)
- tmset.matrices["default"] = [
- TrafficDemand(
- source_path="A",
- sink_path="B",
- demand=1.0,
- mode="combine",
- flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
- )
- ]
- tm.expand_demands()
-
- # Now reset: must clear all flows from graph, including those from the old demand
- tm.reset_all_flow_usages()
- assert _sum_flow_between(g, "A", "B") == 0.0
diff --git a/tests/demand/manager/test_manager_api.py b/tests/demand/manager/test_manager_api.py
deleted file mode 100644
index ec768b9..0000000
--- a/tests/demand/manager/test_manager_api.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from __future__ import annotations
-
-import pytest
-
-from ngraph.demand.manager.manager import TrafficManager
-from ngraph.demand.matrix import TrafficMatrixSet
-from ngraph.demand.spec import TrafficDemand
-from ngraph.model.network import Link, Network, Node
-
-
-def _build_line_network() -> Network:
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_link(Link("A", "B", capacity=1.0, cost=1))
- net.add_link(Link("B", "C", capacity=1.0, cost=1))
- return net
-
-
-def _tmset_single(demand_value: float) -> TrafficMatrixSet:
- tmset = TrafficMatrixSet()
- tds = [
- TrafficDemand(
- source_path="A", sink_path="C", demand=demand_value, mode="combine"
- )
- ]
- tmset.add("default", tds)
- return tmset
-
-
-def test_place_all_demands_auto_rounds_clamped_by_granularity() -> None:
- net = _build_line_network()
- tmset = _tmset_single(0.001)
- tm = TrafficManager(network=net, traffic_matrix_set=tmset)
- tm.build_graph(add_reverse=True)
- tm.expand_demands()
- placed = tm.place_all_demands(placement_rounds="auto")
- # Entire small demand should be placed up to MIN_FLOW tolerance; auto rounds must not stall
- assert placed > 0.0
- results = tm.get_traffic_results()
- from ngraph.algorithms.base import MIN_FLOW
-
- assert abs(results[0].placed_volume - results[0].total_volume) <= MIN_FLOW
-
-
-def test_place_all_demands_rejects_non_positive_rounds() -> None:
- net = _build_line_network()
- tmset = _tmset_single(1.0)
- tm = TrafficManager(network=net, traffic_matrix_set=tmset)
- tm.build_graph(add_reverse=True)
- tm.expand_demands()
- with pytest.raises(ValueError):
- tm.place_all_demands(placement_rounds=0)
- with pytest.raises(ValueError):
- tm.place_all_demands(placement_rounds=-5)
diff --git a/tests/demand/manager/test_manager_behavior.py b/tests/demand/manager/test_manager_behavior.py
deleted file mode 100644
index 37c5e69..0000000
--- a/tests/demand/manager/test_manager_behavior.py
+++ /dev/null
@@ -1,124 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Dict, List
-
-import pytest
-
-from ngraph.demand.manager.manager import TrafficManager
-from ngraph.demand.matrix import TrafficMatrixSet
-from ngraph.demand.spec import TrafficDemand
-
-
-@dataclass
-class _Node:
- name: str
- disabled: bool = False
-
-
-class _Network:
- def __init__(self, groups: Dict[str, Dict[str, List[_Node]]]) -> None:
- self._groups = groups
-
- def select_node_groups_by_path(self, pattern: str) -> Dict[str, List[_Node]]:
- return self._groups.get(pattern, {})
-
- def to_strict_multidigraph(
- self, add_reverse: bool = True, *, compact: bool = False
- ): # pragma: no cover - light stub
- from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
- g = StrictMultiDiGraph()
- for label_map in self._groups.values():
- for nodes in label_map.values():
- for n in nodes:
- if n.name not in g:
- g.add_node(n.name)
- return g
-
-
-def _tm_with_single(td: TrafficDemand) -> TrafficMatrixSet:
- tms = TrafficMatrixSet()
- tms.add("default", [td])
- return tms
-
-
-def test_build_expand_and_place_auto_rounds_and_results_update() -> None:
- # Two nodes connected implicitly in mock graph
- net = _Network({"A": {"GA": [_Node("A")]}, "B": {"GB": [_Node("B")]}})
- td = TrafficDemand(source_path="A", sink_path="B", demand=5.0, mode="pairwise")
- tm = TrafficManager(network=net, traffic_matrix_set=_tm_with_single(td))
-
- tm.build_graph()
- tm.expand_demands()
- placed = tm.place_all_demands(placement_rounds=1)
-    assert placed >= 0.0  # placement on a graph with no edges is a no-op
-
- # Results reflect placements per top-level demand
- res = tm.get_traffic_results(detailed=False)
- assert len(res) == 1 and res[0].total_volume == 5.0
-
- # Detailed returns per expanded demand
- det = tm.get_traffic_results(detailed=True)
- assert isinstance(det, list)
-
-
-def test_place_all_demands_requires_graph() -> None:
- net = _Network({})
- tm = TrafficManager(network=net, traffic_matrix_set=TrafficMatrixSet())
- with pytest.raises(RuntimeError):
- tm.place_all_demands(1)
-
-
-def test_reset_and_summarize_link_usage() -> None:
- net = _Network({"A": {"GA": [_Node("A")]}})
- tm = TrafficManager(network=net, traffic_matrix_set=TrafficMatrixSet())
- tm.build_graph()
- tm.expand_demands()
- # Summarize on empty graph
- usage = tm.summarize_link_usage()
- assert isinstance(usage, dict)
- # Reset flows should not error
- tm.reset_all_flow_usages()
-
-
-def test_estimate_rounds_variants_and_get_flow_details() -> None:
- # Build a tiny graph with two nodes and one link to provide capacities
- net = _Network({"A": {"GA": [_Node("A")]}, "B": {"GB": [_Node("B")]}})
- tms = TrafficMatrixSet()
- # Two demands so median demand is between them
- tms.add(
- "default",
- [
- TrafficDemand(source_path="A", sink_path="B", demand=10.0, mode="pairwise"),
- TrafficDemand(source_path="A", sink_path="B", demand=30.0, mode="pairwise"),
- ],
- )
- tm = TrafficManager(network=net, traffic_matrix_set=tms)
- tm.build_graph()
- tm.expand_demands()
- # _estimate_rounds returns an int; we just assert it does not throw and returns > 0
- rounds = tm._estimate_rounds()
- assert isinstance(rounds, int) and rounds > 0
-
- # Attach a minimal fake policy/flow to exercise get_flow_details
- class Flow:
- def __init__(self) -> None:
- self.placed_flow = 1.0
- self.src_node = "A"
- self.dst_node = "B"
-
- class Bundle:
- def __init__(self) -> None:
- self.edges = {"e1"}
-
- self.path_bundle = Bundle()
-
- class FP:
- def __init__(self) -> None:
- self.flows = {0: Flow()}
-
- for d in tm.demands:
- d.flow_policy = FP()
- details = tm.get_flow_details()
- assert details and list(details.values())[0]["edges"] == ["e1"]
diff --git a/tests/demand/manager/test_manager_correctness.py b/tests/demand/manager/test_manager_correctness.py
deleted file mode 100644
index bbf4a95..0000000
--- a/tests/demand/manager/test_manager_correctness.py
+++ /dev/null
@@ -1,244 +0,0 @@
-from __future__ import annotations
-
-import math
-
-from ngraph.algorithms.base import MIN_FLOW
-from ngraph.demand.manager.manager import TrafficManager
-from ngraph.demand.matrix import TrafficMatrixSet
-from ngraph.demand.spec import TrafficDemand
-from ngraph.flows.policy import FlowPolicyConfig
-from ngraph.model.network import Link, Network, Node
-
-
-def _build_diamond_network(
- cap_left: float,
- cap_right: float,
- *,
- add_alt_high_cost: bool = False,
-) -> Network:
- """Return a simple S-(X|Y)-T diamond topology.
-
- Topology:
- S -> X -> T (each edge capacity = cap_left, cost = 1)
- S -> Y -> T (each edge capacity = cap_right, cost = 1)
-
- If add_alt_high_cost=True, also adds a higher-cost alternative path:
- S -> Z -> T (each edge capacity = 100, cost = 3)
- """
- net = Network()
- for n in ("S", "X", "Y", "T"):
- net.add_node(Node(n))
- net.add_link(Link("S", "X", capacity=cap_left, cost=1))
- net.add_link(Link("X", "T", capacity=cap_left, cost=1))
- net.add_link(Link("S", "Y", capacity=cap_right, cost=1))
- net.add_link(Link("Y", "T", capacity=cap_right, cost=1))
-
- if add_alt_high_cost:
- net.add_node(Node("Z"))
- net.add_link(Link("S", "Z", capacity=100.0, cost=3))
- net.add_link(Link("Z", "T", capacity=100.0, cost=3))
-
- return net
-
-
-def _tmset_single(
- demand_value: float,
- *,
- mode: str = "combine",
- policy: FlowPolicyConfig | None = None,
-) -> TrafficMatrixSet:
- tmset = TrafficMatrixSet()
- td = TrafficDemand(
- source_path="S",
- sink_path="T",
- demand=demand_value,
- mode=mode,
- flow_policy_config=policy,
- )
- tmset.add("default", [td])
- return tmset
-
-
-def _sum_flow_between(graph, u: str, v: str) -> float:
- total = 0.0
- for _eid, (src, dst, _key, attr) in graph.get_edges().items():
- if src == u and dst == v:
- total += float(attr.get("flow", 0.0))
- return total
-
-
-def _place_and_get_tm(
- net: Network,
- tmset: TrafficMatrixSet,
- *,
- default_policy: FlowPolicyConfig = FlowPolicyConfig.SHORTEST_PATHS_ECMP,
- rounds: int = 5,
-) -> TrafficManager:
- tm = TrafficManager(
- network=net, traffic_matrix_set=tmset, default_flow_policy_config=default_policy
- )
- tm.build_graph(add_reverse=True)
- tm.expand_demands()
- tm.place_all_demands(placement_rounds=rounds)
- assert tm.graph is not None
- return tm
-
-
-def _approx_equal(a: float, b: float, tol: float = MIN_FLOW) -> bool:
- return math.isfinite(a) and math.isfinite(b) and abs(a - b) <= tol
-
-
-def test_tm_policy_correctness_ecmp_equal_split() -> None:
- net = _build_diamond_network(cap_left=5.0, cap_right=5.0)
- tmset = _tmset_single(8.0, policy=FlowPolicyConfig.SHORTEST_PATHS_ECMP)
- tm = _place_and_get_tm(
- net, tmset, default_policy=FlowPolicyConfig.SHORTEST_PATHS_ECMP
- )
-
- # All demand placed; equal split across the two equal-cost branches
- results = tm.get_traffic_results()
- assert _approx_equal(results[0].placed_volume, 8.0)
-
- g = tm.graph
- left = _sum_flow_between(g, "S", "X")
- right = _sum_flow_between(g, "S", "Y")
- assert _approx_equal(left, 4.0)
- assert _approx_equal(right, 4.0)
- # Downstream edges must match
- assert _approx_equal(_sum_flow_between(g, "X", "T"), left)
- assert _approx_equal(_sum_flow_between(g, "Y", "T"), right)
-
-
-def test_tm_policy_correctness_wcmp_proportional_unbalanced() -> None:
- # Left branch capacity 2, right branch capacity 8; request 8 -> expect 2 and 6
- net = _build_diamond_network(cap_left=2.0, cap_right=8.0)
- tmset = _tmset_single(8.0, policy=FlowPolicyConfig.SHORTEST_PATHS_WCMP)
- tm = _place_and_get_tm(
- net, tmset, default_policy=FlowPolicyConfig.SHORTEST_PATHS_WCMP
- )
-
- results = tm.get_traffic_results()
- assert _approx_equal(results[0].placed_volume, 8.0)
-
- g = tm.graph
- left = _sum_flow_between(g, "S", "X")
- right = _sum_flow_between(g, "S", "Y")
- # WCMP distributes proportionally within the min-cost DAG; with total DAG cap=10,
- # request=8 -> 0.2*8=1.6 on left, 0.8*8=6.4 on right.
- assert _approx_equal(left, 1.6)
- assert _approx_equal(right, 6.4)
- assert _approx_equal(_sum_flow_between(g, "X", "T"), left)
- assert _approx_equal(_sum_flow_between(g, "Y", "T"), right)
-
-
-def test_tm_policy_correctness_te_wcmp_unlim_spills_beyond_min_cost() -> None:
-    # Two equal-cost min-cost branches totaling 6, plus a higher-cost alternative with large capacity.
-    # TE_WCMP_UNLIM is capacity-aware: it saturates the min-cost branches, then spills onto the higher-cost path.
- net = _build_diamond_network(cap_left=3.0, cap_right=3.0, add_alt_high_cost=True)
- tmset = _tmset_single(10.0, policy=FlowPolicyConfig.TE_WCMP_UNLIM)
- tm = _place_and_get_tm(net, tmset, default_policy=FlowPolicyConfig.TE_WCMP_UNLIM)
-
- results = tm.get_traffic_results()
- # Capacity-aware WCMP will use higher-cost alternatives after saturating min-cost paths
- assert _approx_equal(results[0].placed_volume, 10.0)
-
- g = tm.graph
- # Min-cost branches saturate to 6 total
- min_cost_total = _sum_flow_between(g, "S", "X") + _sum_flow_between(g, "S", "Y")
- assert _approx_equal(min_cost_total, 6.0)
- # The higher-cost alternative carries the remainder
- alt_total = _sum_flow_between(g, "S", "Z")
- assert _approx_equal(alt_total, 4.0)
- assert _approx_equal(_sum_flow_between(g, "Z", "T"), alt_total)
-
-
-def test_tm_policy_correctness_te_ecmp_256_balances_across_paths() -> None:
- net = _build_diamond_network(cap_left=5.0, cap_right=5.0)
- tmset = _tmset_single(9.0, policy=FlowPolicyConfig.TE_ECMP_UP_TO_256_LSP)
- tm = _place_and_get_tm(
- net, tmset, default_policy=FlowPolicyConfig.TE_ECMP_UP_TO_256_LSP, rounds=5
- )
-
- g = tm.graph
- left = _sum_flow_between(g, "S", "X")
- right = _sum_flow_between(g, "S", "Y")
- # Total placement equals demand; distribution may be uneven due to load-factored selection
- assert _approx_equal(left + right, 9.0)
- assert 0.0 <= left <= 5.0
- assert 0.0 <= right <= 5.0
-
-
-def test_tm_policy_correctness_te_ecmp_16_flow_count_and_balance() -> None:
- net = _build_diamond_network(cap_left=5.0, cap_right=5.0)
- tmset = _tmset_single(8.0, policy=FlowPolicyConfig.TE_ECMP_16_LSP)
- tm = _place_and_get_tm(
- net, tmset, default_policy=FlowPolicyConfig.TE_ECMP_16_LSP, rounds=5
- )
-
- # Validate the policy created 16 flows for the single expanded demand
- assert tm.demands and tm.demands[0].flow_policy is not None
- assert tm.demands[0].flow_policy.flow_count == 16
-
- g = tm.graph
- left = _sum_flow_between(g, "S", "X")
- right = _sum_flow_between(g, "S", "Y")
- assert _approx_equal(left + right, 8.0)
- assert abs(left - right) <= MIN_FLOW
-
-
-def test_tm_multiple_demands_same_priority_share_capacity() -> None:
- # Total capacity is 10; two same-class demands of 6 each should share fairly: ~5 each
- net = _build_diamond_network(cap_left=5.0, cap_right=5.0)
- tmset = TrafficMatrixSet()
- tmset.add(
- "default",
- [
- TrafficDemand(source_path="S", sink_path="T", demand=6.0, mode="combine"),
- TrafficDemand(source_path="S", sink_path="T", demand=6.0, mode="combine"),
- ],
- )
- tm = _place_and_get_tm(
- net, tmset, default_policy=FlowPolicyConfig.SHORTEST_PATHS_WCMP
- )
-
- # Check placed amounts per top-level demand via TrafficManager results
- results = tm.get_traffic_results()
- assert len(results) == 2
- # First demand placed 6, second gets the remaining 4 with current scheduler semantics
- assert _approx_equal(results[0].placed_volume, 6.0)
- assert _approx_equal(results[1].placed_volume, 4.0)
-
- # Edge accounting must reflect total placed 10
- g = tm.graph
- total_out = _sum_flow_between(g, "S", "X") + _sum_flow_between(g, "S", "Y")
- assert _approx_equal(total_out, 10.0)
-
-
-def test_tm_pairwise_mode_correctness_and_accounting() -> None:
- # Two sources to one sink, pairwise splits the demand evenly across pairs
- net = Network()
- for n in ("S1", "S2", "T"):
- net.add_node(Node(n))
- # Parallel identical branches for each source
- for src in ("S1", "S2"):
- net.add_link(Link(src, "T", capacity=10.0, cost=1))
-
- tmset = TrafficMatrixSet()
- td = TrafficDemand(
- source_path="S[12]",
- sink_path="T",
- demand=10.0,
- mode="pairwise",
- flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
- )
- tmset.add("default", [td])
-
- tm = _place_and_get_tm(net, tmset)
-
- # Detailed results have one entry per expanded demand (S1->T and S2->T), 5 each
- detailed = sorted(
- tm.get_traffic_results(detailed=True), key=lambda r: (r.src, r.dst)
- )
- assert len(detailed) == 2
- assert _approx_equal(detailed[0].placed_volume, 5.0)
- assert _approx_equal(detailed[1].placed_volume, 5.0)
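
A condensed, self-contained version of the ECMP diamond case above, showing the `TrafficManager` lifecycle (`build_graph`, `expand_demands`, `place_all_demands`) end to end; values mirror the equal-split test.

```python
from ngraph.algorithms.base import MIN_FLOW
from ngraph.demand.manager.manager import TrafficManager
from ngraph.demand.matrix import TrafficMatrixSet
from ngraph.demand.spec import TrafficDemand
from ngraph.flows.policy import FlowPolicyConfig
from ngraph.model.network import Link, Network, Node

# S -(X|Y)- T diamond, capacity 5 per edge, all costs equal.
net = Network()
for n in ("S", "X", "Y", "T"):
    net.add_node(Node(n))
for u, v in (("S", "X"), ("X", "T"), ("S", "Y"), ("Y", "T")):
    net.add_link(Link(u, v, capacity=5.0, cost=1))

tmset = TrafficMatrixSet()
tmset.add(
    "default",
    [
        TrafficDemand(
            source_path="S",
            sink_path="T",
            demand=8.0,
            mode="combine",
            flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
        )
    ],
)

tm = TrafficManager(network=net, traffic_matrix_set=tmset)
tm.build_graph(add_reverse=True)
tm.expand_demands()
tm.place_all_demands(placement_rounds=5)

# ECMP splits the 8.0 evenly: 4.0 via X and 4.0 via Y.
assert abs(tm.get_traffic_results()[0].placed_volume - 8.0) <= MIN_FLOW
```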
diff --git a/tests/demand/manager/test_reset_semantics.py b/tests/demand/manager/test_reset_semantics.py
deleted file mode 100644
index 8bfc764..0000000
--- a/tests/demand/manager/test_reset_semantics.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from __future__ import annotations
-
-from ngraph.demand.manager.manager import TrafficManager
-from ngraph.demand.matrix import TrafficMatrixSet
-from ngraph.demand.spec import TrafficDemand
-from ngraph.model.network import Link, Network, Node
-
-
-def _build_net() -> Network:
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_link(Link("A", "B", capacity=1.0, cost=1))
- return net
-
-
-def test_reset_all_flow_usages_keeps_flows_but_resets_graph_usage() -> None:
- net = _build_net()
- tmset = TrafficMatrixSet()
- tmset.add(
- "default",
- [TrafficDemand(source_path="A", sink_path="B", demand=1.0, mode="combine")],
- )
-
- tm = TrafficManager(network=net, traffic_matrix_set=tmset)
- tm.build_graph(add_reverse=True)
- tm.expand_demands()
- tm.place_all_demands(placement_rounds=5)
- assert tm.demands and tm.demands[0].placed_demand > 0.0
-
- tm.reset_all_flow_usages()
- # Graph usage reset
- edges = tm.graph.get_edges() if tm.graph else {}
- assert all(attr[3].get("flow", 0.0) == 0.0 for attr in edges.values())
- # Internal flows structure should remain (enabling reopt later)
- assert (
- tm.demands
- and tm.demands[0].flow_policy
- and tm.demands[0].flow_policy.flow_count >= 1
- )
diff --git a/tests/demand/manager/test_schedule.py b/tests/demand/manager/test_schedule.py
deleted file mode 100644
index a3748b2..0000000
--- a/tests/demand/manager/test_schedule.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from __future__ import annotations
-
-from ngraph.algorithms.base import MIN_FLOW, PathAlg
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.demand import Demand
-from ngraph.demand.manager.schedule import place_demands_round_robin
-from ngraph.flows.policy import FlowPolicy
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-def _graph_square() -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- for node in ("A", "B", "C", "D"):
- g.add_node(node)
- # Two disjoint paths A->B->C and A->D->C
- g.add_edge("A", "B", key=0, cost=1, capacity=1)
- g.add_edge("B", "C", key=1, cost=1, capacity=1)
- g.add_edge("A", "D", key=2, cost=1, capacity=1)
- g.add_edge("D", "C", key=3, cost=1, capacity=1)
- return g
-
-
-def _policy() -> FlowPolicy:
- # Defaults require additional params; set minimal working configuration
- from ngraph.algorithms.base import EdgeSelect
- from ngraph.algorithms.placement import FlowPlacement
-
- return FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST,
- multipath=True,
- )
-
-
-def test_round_robin_places_all_when_capacity_sufficient() -> None:
- g = _graph_square()
- init_flow_graph(g)
- demands = [
- Demand("A", "C", 1.0, demand_class=0, flow_policy=_policy()),
- Demand("A", "C", 1.0, demand_class=0, flow_policy=_policy()),
- ]
-
- total = place_demands_round_robin(g, demands, placement_rounds=5)
- assert abs(total - 2.0) < 1e-9
- assert all(abs(d.placed_demand - 1.0) < 1e-9 for d in demands)
-
-
-def test_round_robin_stops_when_no_progress() -> None:
- g = _graph_square()
- # Reduce capacity on one edge to limit placement
- # Set B->C capacity to 0 to enforce single path usage only
- g.add_edge("B", "C", key=10, cost=1, capacity=0)
- init_flow_graph(g)
-
- d1 = Demand("A", "C", 2.0, demand_class=0, flow_policy=_policy())
- d2 = Demand("A", "C", 2.0, demand_class=0, flow_policy=_policy())
- total = place_demands_round_robin(g, [d1, d2], placement_rounds=50)
- # The two available links should allow at most 2 units total
- assert abs(total - 2.0) < 1e-9
-
-
-def test_round_robin_small_demand_with_many_rounds_places_full_volume() -> None:
- """Ensure tiny but valid demand does not stall across many rounds.
-
- This guards against step sizes dropping below MIN_FLOW when rounds_left is large.
- """
- g = _graph_square()
- init_flow_graph(g)
-
- tiny = MIN_FLOW * 1.1
- demands = [Demand("A", "C", tiny, demand_class=0, flow_policy=_policy())]
-
- total = place_demands_round_robin(g, demands, placement_rounds=100)
- # May leave remainder < MIN_FLOW due to threshold semantics
- assert abs(total - tiny) <= MIN_FLOW
- assert abs(demands[0].placed_demand - tiny) <= MIN_FLOW
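
For reference, the minimal round-robin scheduling setup used above, collapsed into one self-contained sketch (same square fixture and policy configuration as the deleted helpers).

```python
from ngraph.algorithms.base import EdgeSelect, PathAlg
from ngraph.algorithms.flow_init import init_flow_graph
from ngraph.algorithms.placement import FlowPlacement
from ngraph.demand import Demand
from ngraph.demand.manager.schedule import place_demands_round_robin
from ngraph.flows.policy import FlowPolicy
from ngraph.graph.strict_multidigraph import StrictMultiDiGraph

# Two disjoint unit-capacity paths: A->B->C and A->D->C.
g = StrictMultiDiGraph()
for n in ("A", "B", "C", "D"):
    g.add_node(n)
for u, v in (("A", "B"), ("B", "C"), ("A", "D"), ("D", "C")):
    g.add_edge(u, v, cost=1, capacity=1)
init_flow_graph(g)  # attach flow bookkeeping before any placement

policy = FlowPolicy(
    path_alg=PathAlg.SPF,
    flow_placement=FlowPlacement.PROPORTIONAL,
    edge_select=EdgeSelect.ALL_MIN_COST,
    multipath=True,
)
demands = [Demand("A", "C", 2.0, demand_class=0, flow_policy=policy)]

# With 2 units of disjoint capacity available, the full 2.0 should place.
total = place_demands_round_robin(g, demands, placement_rounds=5)
assert abs(total - 2.0) < 1e-9
```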
diff --git a/tests/demand/manager/test_schedule_behavior.py b/tests/demand/manager/test_schedule_behavior.py
deleted file mode 100644
index 7877453..0000000
--- a/tests/demand/manager/test_schedule_behavior.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Any, Dict, Tuple
-
-from ngraph.demand.manager.schedule import place_demands_round_robin
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-@dataclass
-class _Policy:
- placed_demand: float = 0.0
- last_metrics: Dict[str, float] | None = None
-
- def __post_init__(self) -> None:
- if self.last_metrics is None:
- self.last_metrics = {
- "iterations": 1.0,
- "spf_calls": 0.0,
- "flows_created": 1.0,
- }
-
- def place_demand(
- self, graph: Any, src: str, dst: str, flow_class_key: Any, vol: float
- ) -> None:
-        # In a real policy, remove_demand clears flows and place_demand rebuilds them.
-        # Model that by setting placed_demand to the requested volume rather than accumulating.
- self.placed_demand = vol
-
- def remove_demand(
- self, graph: Any
- ) -> None: # pragma: no cover - reset internal state
- self.placed_demand = 0.0
-
-
-@dataclass
-class _Demand:
- src_node: str
- dst_node: str
- volume: float
- demand_class: int
- placed_demand: float = 0.0
- flow_policy: _Policy | None = None
-
- def place(self, flow_graph: StrictMultiDiGraph) -> Tuple[float, float]:
- # Place as much as possible up to volume
- leftover = self.volume - self.placed_demand
- if leftover <= 0:
- return (0.0, 0.0)
- self.placed_demand += leftover
- if self.flow_policy:
- self.flow_policy.place_demand(
- flow_graph,
- self.src_node,
- self.dst_node,
- (self.demand_class, self.src_node, self.dst_node, id(self)),
- leftover,
- )
- return (leftover, 0.0)
-
-
-def _graph_linear() -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_edge("A", "B", capacity=100.0, cost=1.0)
- g.add_edge("B", "A", capacity=100.0, cost=1.0)
- return g
-
-
-def test_place_demands_round_robin_basic_and_reopt() -> None:
- g = _graph_linear()
- # Two priorities; ensure ordering by prio then fairness across rounds
- d1 = _Demand("A", "B", 10.0, demand_class=0, flow_policy=_Policy())
- d2 = _Demand("A", "B", 5.0, demand_class=1, flow_policy=_Policy())
- # Include a demand without policy to exercise skip path in reoptimize helper
- d3 = _Demand("A", "B", 1.0, demand_class=0, flow_policy=None)
- total = place_demands_round_robin(
- g, [d1, d2, d3], placement_rounds=2, reoptimize_after_each_round=True
- )
- # All should be placed on this simple graph
- assert total == 16.0
- assert d1.placed_demand == 10.0 and d2.placed_demand == 5.0
- assert d3.placed_demand == 1.0
-
-
-def test_place_demands_round_robin_empty_and_zero_rounds_validation() -> None:
- g = _graph_linear()
- total = place_demands_round_robin(g, [], placement_rounds=1)
- assert total == 0.0
diff --git a/tests/demand/manager/test_schedule_reopt.py b/tests/demand/manager/test_schedule_reopt.py
deleted file mode 100644
index 62e4c08..0000000
--- a/tests/demand/manager/test_schedule_reopt.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from __future__ import annotations
-
-from ngraph.algorithms.base import EdgeSelect, PathAlg
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.algorithms.placement import FlowPlacement
-from ngraph.demand import Demand
-from ngraph.demand.manager.schedule import place_demands_round_robin
-from ngraph.flows.policy import FlowPolicy
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-def _graph_bottleneck_then_alt() -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- for n in ("A", "B", "C", "D"):
- g.add_node(n)
- # Preferred low-cost path A->B->D has tight capacity on B->D
- g.add_edge("A", "B", cost=1, capacity=1)
- g.add_edge("B", "D", cost=1, capacity=0.001)
- # Alternate slightly higher-cost path A->C->D has enough capacity
- g.add_edge("A", "C", cost=2, capacity=1)
- g.add_edge("C", "D", cost=2, capacity=1)
- return g
-
-
-def _policy_capacity_aware() -> FlowPolicy:
- return FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=False,
- reoptimize_flows_on_each_placement=False,
- max_flow_count=2,
- )
-
-
-def test_reopt_on_stall_unlocks_alt_paths() -> None:
- g = _graph_bottleneck_then_alt()
- init_flow_graph(g)
- d = Demand("A", "D", 0.5, demand_class=0, flow_policy=_policy_capacity_aware())
-    # With reoptimization after each round enabled, the scheduler should avoid stalling
- total = place_demands_round_robin(
- g, [d], placement_rounds=5, reoptimize_after_each_round=True
- )
-    # More than the tight bottleneck's capacity should be placed via the alternative path
- assert total > 0.001
diff --git a/tests/demand/test_demand_policy_passthrough.py b/tests/demand/test_demand_policy_passthrough.py
deleted file mode 100644
index f4ca58c..0000000
--- a/tests/demand/test_demand_policy_passthrough.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from __future__ import annotations
-
-from typing import Tuple
-
-from ngraph.algorithms.base import MIN_FLOW, PathAlg
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.demand import Demand
-from ngraph.flows.policy import FlowPolicy
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-class _CapturingPolicy(FlowPolicy):
- def __init__(self) -> None: # type: ignore[no-untyped-def]
- # Minimal viable init
- from ngraph.algorithms.base import EdgeSelect
- from ngraph.algorithms.placement import FlowPlacement
-
- super().__init__(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST,
- multipath=True,
- )
- self.last_volume: float | None = None
- self.last_min_flow: float | None = None
-
- def place_demand( # type: ignore[override]
- self,
- flow_graph: StrictMultiDiGraph,
- src_node: str,
- dst_node: str,
- flow_class,
- volume: float,
- target_flow_volume: float | None = None,
- min_flow: float | None = None,
- ) -> Tuple[float, float]:
- # Capture arguments and pretend we placed everything
- self.last_volume = float(volume)
- self.last_min_flow = float(min_flow) if min_flow is not None else None
- # Simulate trivial graph update using base implementation behavior
- # but without placing flows; directly reflect placed_demand
- # Here, mark one flow for accounting to let Demand compute placed delta
- if not self.flows:
- self._create_flows(flow_graph, src_node, dst_node, flow_class, min_flow)
- # Fake placement by setting placed_flow
- for flow in self.flows.values():
- flow.placed_flow += volume
- return volume, 0.0
-
-
-def test_demand_passes_min_flow_threshold_to_policy() -> None:
- g = StrictMultiDiGraph()
- for n in ("A", "B"):
- g.add_node(n)
- g.add_edge("A", "B", capacity=1.0, cost=1)
- init_flow_graph(g)
-
- demand = Demand(
- "A", "B", volume=MIN_FLOW * 1.5, demand_class=0, flow_policy=_CapturingPolicy()
- )
-    # Request with max_placement far below MIN_FLOW to exercise the floor logic
-    placed, _ = demand.place(g, max_placement=MIN_FLOW * 0.1)
-    # With this change, Demand should request at least MIN_FLOW; the policy may ignore min_flow
- assert placed >= MIN_FLOW * 0.99
- policy = demand.flow_policy # type: ignore[assignment]
- assert isinstance(policy, _CapturingPolicy)
- assert policy.last_volume is not None and policy.last_volume >= MIN_FLOW * 0.99
- # min_flow is advisory for policies and not required; do not assert it
diff --git a/tests/docs/__init__.py b/tests/docs/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/docs/test_examples.py b/tests/docs/test_examples.py
deleted file mode 100644
index 7eda169..0000000
--- a/tests/docs/test_examples.py
+++ /dev/null
@@ -1,261 +0,0 @@
-"""
-Test code examples from documentation examples directory.
-
-This module tests examples from:
-- docs/examples/basic.md
-- docs/examples/clos-fabric.md
-
-These are practical examples showing how to use NetGraph features.
-"""
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.algorithms.max_flow import run_sensitivity, saturated_edges
-from ngraph.scenario import Scenario
-
-
-class TestBasicMdExamples:
- """Test examples from docs/examples/basic.md"""
-
- def test_flow_analysis_variants(self):
- """Test different flow analysis approaches from basic.md"""
- scenario_yaml = """
-network:
- name: "fundamentals_example"
- nodes:
- A: {}
- B: {}
- C: {}
- D: {}
- links:
- - source: A
- target: B
- link_params:
- capacity: 1
- cost: 1
- - source: A
- target: B
- link_params:
- capacity: 2
- cost: 1
- - source: B
- target: C
- link_params:
- capacity: 1
- cost: 1
- - source: B
- target: C
- link_params:
- capacity: 2
- cost: 1
- - source: A
- target: D
- link_params:
- capacity: 3
- cost: 2
- - source: D
- target: C
- link_params:
- capacity: 3
- cost: 2
-"""
-
- scenario = Scenario.from_yaml(scenario_yaml)
- network = scenario.network
-
- # Test "true" maximum flow (uses all available paths)
- max_flow_all = network.max_flow(source_path="A", sink_path="C")
- assert isinstance(max_flow_all, dict)
- assert len(max_flow_all) == 1
- flow_value = list(max_flow_all.values())[0]
- # Should be 6.0 (3 from A→B→C path + 3 from A→D→C path)
- assert flow_value == 6.0
-
- # Test flow along shortest paths only
- max_flow_shortest = network.max_flow(
- source_path="A", sink_path="C", shortest_path=True
- )
- assert isinstance(max_flow_shortest, dict)
- flow_value_shortest = list(max_flow_shortest.values())[0]
- # Should be 3.0 (only uses A→B→C path, ignoring higher-cost A→D→C)
- assert flow_value_shortest == 3.0
-
- # Test with EQUAL_BALANCED flow placement
- max_flow_balanced = network.max_flow(
- source_path="A",
- sink_path="C",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
- assert isinstance(max_flow_balanced, dict)
- flow_value_balanced = list(max_flow_balanced.values())[0]
- # Should be limited by equal distribution across parallel paths
- assert flow_value_balanced <= flow_value_shortest
-
- def test_advanced_sensitivity_analysis(self):
- """Test the advanced sensitivity analysis section from basic.md"""
- scenario_yaml = """
-network:
- name: "fundamentals_example"
- nodes:
- A: {}
- B: {}
- C: {}
- D: {}
- links:
- - source: A
- target: B
- link_params:
- capacity: 1
- cost: 1
- - source: A
- target: B
- link_params:
- capacity: 2
- cost: 1
- - source: B
- target: C
- link_params:
- capacity: 1
- cost: 1
- - source: B
- target: C
- link_params:
- capacity: 2
- cost: 1
- - source: A
- target: D
- link_params:
- capacity: 3
- cost: 2
- - source: D
- target: C
- link_params:
- capacity: 3
- cost: 2
-"""
-
- scenario = Scenario.from_yaml(scenario_yaml)
- network = scenario.network
-
- # Get the underlying graph for low-level analysis
- graph = network.to_strict_multidigraph()
-
- # Identify bottleneck (saturated) edges
- bottlenecks = saturated_edges(graph, "A", "C")
- assert isinstance(bottlenecks, list)
- assert len(bottlenecks) > 0
-
- # Perform sensitivity analysis - test increasing capacity by 1 unit
- sensitivity_increase = run_sensitivity(graph, "A", "C", change_amount=1.0)
- assert isinstance(sensitivity_increase, dict)
- assert len(sensitivity_increase) > 0
-
- # All values should be non-negative (increasing capacity shouldn't decrease flow)
- for flow_change in sensitivity_increase.values():
- assert flow_change >= 0
-
- # Test sensitivity to capacity decreases
- sensitivity_decrease = run_sensitivity(graph, "A", "C", change_amount=-1.0)
- assert isinstance(sensitivity_decrease, dict)
- assert len(sensitivity_decrease) > 0
-
- # All values should be non-positive (decreasing capacity shouldn't increase flow)
- for flow_change in sensitivity_decrease.values():
- assert flow_change <= 0
-
-
-class TestClosFabricMdExamples:
- """Test examples from docs/examples/clos-fabric.md (if any specific examples exist)"""
-
- def test_clos_fabric_max_flow_analysis(self):
- """Test the max flow analysis example from clos-fabric.md"""
- # Using a simplified version of the hierarchical structure for testing
- scenario_yaml = """
-blueprints:
- brick_2tier:
- groups:
- t1:
- node_count: 2
- name_template: t1-{node_num}
- t2:
- node_count: 2
- name_template: t2-{node_num}
-
- adjacency:
- - source: /t1
- target: /t2
- pattern: mesh
- link_params:
- capacity: 2
- cost: 1
-
- 3tier_clos:
- groups:
- b1:
- use_blueprint: brick_2tier
- b2:
- use_blueprint: brick_2tier
- spine:
- node_count: 4
- name_template: t3-{node_num}
-
- adjacency:
- - source: b1/t2
- target: spine
- pattern: one_to_one
- link_params:
- capacity: 2
- cost: 1
- - source: b2/t2
- target: spine
- pattern: one_to_one
- link_params:
- capacity: 2
- cost: 1
-
-network:
- name: "3tier_clos_network"
- version: 1.0
-
- groups:
- my_clos1:
- use_blueprint: 3tier_clos
-
- my_clos2:
- use_blueprint: 3tier_clos
-
- adjacency:
- - source: my_clos1/spine
- target: my_clos2/spine
- pattern: one_to_one
- link_count: 4
- link_params:
- capacity: 1
- cost: 1
-"""
-
- scenario = Scenario.from_yaml(scenario_yaml)
- network = scenario.network
-
- # Test the max flow calculation as shown in the documentation
- # Note: using simplified regex patterns for the test
- max_flow_result = network.max_flow(
- source_path=r"my_clos1.*(b[0-9]*)/t1",
- sink_path=r"my_clos2.*(b[0-9]*)/t1",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
-
- # Verify the result structure matches documentation expectations
- assert isinstance(max_flow_result, dict)
- assert len(max_flow_result) == 1
-
- # The key should be a tuple representing the combined source/sink groups
- flow_key = list(max_flow_result.keys())[0]
- assert isinstance(flow_key, tuple)
- assert len(flow_key) == 2
-
- # Verify flow value is positive (actual value depends on topology)
- flow_value = list(max_flow_result.values())[0]
- assert flow_value > 0
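
The basic.md scenario deleted above boils down to the following condensed copy, kept here for reference: same topology, same expected values of 6.0 for true max flow and 3.0 along shortest paths.

```python
from ngraph.scenario import Scenario

scenario_yaml = """
network:
  nodes:
    A: {}
    B: {}
    C: {}
    D: {}
  links:
    - {source: A, target: B, link_params: {capacity: 1, cost: 1}}
    - {source: A, target: B, link_params: {capacity: 2, cost: 1}}
    - {source: B, target: C, link_params: {capacity: 1, cost: 1}}
    - {source: B, target: C, link_params: {capacity: 2, cost: 1}}
    - {source: A, target: D, link_params: {capacity: 3, cost: 2}}
    - {source: D, target: C, link_params: {capacity: 3, cost: 2}}
"""
network = Scenario.from_yaml(scenario_yaml).network

# True max flow uses both the parallel A->B->C links (3) and A->D->C (3).
assert list(network.max_flow(source_path="A", sink_path="C").values())[0] == 6.0
# Restricting to shortest paths drops the higher-cost A->D->C detour.
shortest = network.max_flow(source_path="A", sink_path="C", shortest_path=True)
assert list(shortest.values())[0] == 3.0
```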
diff --git a/tests/dsl/test_examples.py b/tests/dsl/test_examples.py
index 91f2a4b..6988496 100644
--- a/tests/dsl/test_examples.py
+++ b/tests/dsl/test_examples.py
@@ -33,8 +33,8 @@ def test_basic_network_example():
assert scenario.network.attrs["name"] == "NetworkName"
assert scenario.network.attrs["version"] == "1.0"
assert len(scenario.network.nodes) == 2
- graph = scenario.network.to_strict_multidigraph()
- assert len(list(graph.edges())) == 2 # Bidirectional
+ # NetGraph models links as bidirectional by default (single link = 2 directional edges)
+ assert len(scenario.network.links) == 1 # 1 link between NodeA and NodeB
def test_groups_example():
@@ -64,9 +64,8 @@ def test_groups_example():
scenario = Scenario.from_yaml(yaml_content)
# Should have 4 nodes total: 2 servers + 2 switches
assert len(scenario.network.nodes) == 4
- # Should have mesh connections: 2*2 = 4 bidirectional links = 8 edges
- graph = scenario.network.to_strict_multidigraph()
- assert len(list(graph.edges())) == 8
+ # Should have mesh connections: 2*2 = 4 links
+ assert len(scenario.network.links) == 4
def test_adjacency_selector_match_filters_nodes():
@@ -119,10 +118,8 @@ def test_adjacency_selector_match_filters_nodes():
scenario = Scenario.from_yaml(yaml_content)
# Expect only /servers (4 nodes) to be considered (servers_b excluded via rack != rack-9)
- # Mesh between 4 servers and 2 switches => 8 directed pairs but dedup as bidirectional added later.
- graph = scenario.network.to_strict_multidigraph()
- # 4*2*2 directions = 16 edges
- assert len(list(graph.edges())) == 16
+ # Mesh between 4 servers and 2 switches => 4*2 = 8 links
+ assert len(scenario.network.links) == 8
def test_bracket_expansion():
@@ -181,9 +178,8 @@ def test_blueprint_example():
scenario = Scenario.from_yaml(yaml_content)
# Should have 4 nodes: 2 from group_name_1 + 2 from group_name_2
assert len(scenario.network.nodes) == 4
- # Should have mesh connections: 2*2 = 4 bidirectional links = 8 edges
- graph = scenario.network.to_strict_multidigraph()
- assert len(list(graph.edges())) == 8
+ # Should have mesh connections: 2*2 = 4 links
+ assert len(scenario.network.links) == 4
def test_components_example():
@@ -429,16 +425,15 @@ def test_link_overrides_example():
scenario = Scenario.from_yaml(yaml_content)
# Find the specific overridden link
- overridden_edge = None
- graph = scenario.network.to_strict_multidigraph()
- for u, v, data in graph.edges(data=True):
- if u == "group1/node-1" and v == "group2/node-1":
- overridden_edge = data
+ overridden_link = None
+ for _link_id, link in scenario.network.links.items():
+ if link.source == "group1/node-1" and link.target == "group2/node-1":
+ overridden_link = link
break
- assert overridden_edge is not None
- assert overridden_edge["capacity"] == 200
- assert overridden_edge["cost"] == 5
+ assert overridden_link is not None
+ assert overridden_link.capacity == 200
+ assert overridden_link.cost == 5
def test_variable_expansion():
@@ -479,9 +474,8 @@ def test_variable_expansion():
assert len(scenario.network.nodes) == 6
# Each plane rack group (2 nodes) connects to spine group (2 nodes) in mesh
- # 2 plane groups * 2 nodes each * 2 spine nodes * 2 directions = 16 edges
- graph = scenario.network.to_strict_multidigraph()
- assert len(list(graph.edges())) == 16
+    # 2 plane groups * 2 nodes each = 4 plane nodes; each meshes with 2 spine nodes => 8 links
+ assert len(scenario.network.links) == 8
def test_unknown_blueprint_raises():
@@ -671,9 +665,8 @@ def test_attr_selector_inside_blueprint_paths():
scenario = Scenario.from_yaml(yaml_content)
# Expect 3 nodes total (2 leaf, 1 spine)
assert len(scenario.network.nodes) == 3
- # Mesh between 2 leaf and 1 spine = 2 bidirectional adjacencies => 4 edges
- graph = scenario.network.to_strict_multidigraph()
- assert len(list(graph.edges())) == 4
+ # Mesh between 2 leaf and 1 spine = 2 links
+ assert len(scenario.network.links) == 2
def test_attr_selector_with_expand_vars_inside_blueprint_paths():
@@ -716,9 +709,8 @@ def test_attr_selector_with_expand_vars_inside_blueprint_paths():
scenario = Scenario.from_yaml(yaml_content)
# Expect 3 nodes total (2 leaf, 1 spine)
assert len(scenario.network.nodes) == 3
- # Without the fix, zero edges would be created due to prefixed attr: paths.
- graph = scenario.network.to_strict_multidigraph()
- assert len(list(graph.edges())) > 0
+ # Without the fix, zero links would be created due to prefixed attr: paths.
+ assert len(scenario.network.links) > 0
def test_invalid_nodes_type_raises():
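
The rewritten assertions above count `Link` objects rather than directed edges. The invariant they rely on (one bidirectional link expands to two directed edges in the flow graph) can be checked directly:

```python
from ngraph.model.network import Link, Network, Node

net = Network()
net.add_node(Node("A"))
net.add_node(Node("B"))
net.add_link(Link("A", "B", capacity=1.0, cost=1))

assert len(net.links) == 1  # one model-level link...
graph = net.to_strict_multidigraph()  # add_reverse defaults to True
assert len(list(graph.edges())) == 2  # ...becomes two directed edges
```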
diff --git a/tests/dsl/test_parse_helpers.py b/tests/dsl/test_parse_helpers.py
index f5d2213..8b71a51 100644
--- a/tests/dsl/test_parse_helpers.py
+++ b/tests/dsl/test_parse_helpers.py
@@ -2,7 +2,7 @@
import pytest
-from ngraph.dsl.blueprints.parse import (
+from ngraph.dsl.blueprints.parser import (
check_adjacency_keys,
check_link_params,
check_no_extra_keys,
diff --git a/tests/monte_carlo/__init__.py b/tests/exec/analysis/__init__.py
similarity index 100%
rename from tests/monte_carlo/__init__.py
rename to tests/exec/analysis/__init__.py
diff --git a/tests/exec/analysis/test_functions.py b/tests/exec/analysis/test_functions.py
new file mode 100644
index 0000000..09348b4
--- /dev/null
+++ b/tests/exec/analysis/test_functions.py
@@ -0,0 +1,242 @@
+"""Tests for analysis.flow module."""
+
+import pytest
+
+from ngraph.exec.analysis.flow import (
+ demand_placement_analysis,
+ max_flow_analysis,
+ sensitivity_analysis,
+)
+from ngraph.model.network import Link, Network, Node
+from ngraph.results.flow import FlowIterationResult
+from ngraph.types.base import FlowPlacement
+
+
+class TestMaxFlowAnalysis:
+ """Test max_flow_analysis function."""
+
+ @pytest.fixture
+ def simple_network(self) -> Network:
+ """Create a simple test network with multiple paths."""
+ network = Network()
+ # Add nodes
+ for node in ["datacenter1", "datacenter2", "edge1", "edge2", "router"]:
+ network.add_node(Node(node))
+
+ # Add links to create a network with capacity
+ network.add_link(Link("datacenter1", "router", capacity=100.0, cost=1.0))
+ network.add_link(Link("datacenter2", "router", capacity=80.0, cost=1.0))
+ network.add_link(Link("router", "edge1", capacity=120.0, cost=1.0))
+ network.add_link(Link("router", "edge2", capacity=60.0, cost=1.0))
+
+ return network
+
+ def test_max_flow_analysis_basic(self, simple_network: Network) -> None:
+ """Test basic max_flow_analysis functionality."""
+ result = max_flow_analysis(
+ network=simple_network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ source_regex="datacenter.*",
+ sink_regex="edge.*",
+ mode="combine",
+ )
+
+ # Verify return format
+ assert isinstance(result, FlowIterationResult)
+ assert len(result.flows) == 1
+ # In combine mode, we get one aggregated flow
+ flow = result.flows[0]
+ assert flow.source == "datacenter.*"
+ assert flow.destination == "edge.*"
+ assert flow.placed > 0 # Should have some flow capacity
+ assert flow.demand == flow.placed # Max flow: demand equals placed
+
+ def test_max_flow_analysis_with_summary(self, simple_network: Network) -> None:
+ """Test include_flow_details and include_min_cut path and return shape."""
+ result = max_flow_analysis(
+ network=simple_network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ source_regex="datacenter.*",
+ sink_regex="edge.*",
+ include_flow_details=True,
+ include_min_cut=True,
+ )
+
+ assert isinstance(result, FlowIterationResult)
+ assert len(result.flows) == 1
+ flow = result.flows[0]
+
+ # Should have cost distribution when include_flow_details=True
+ assert isinstance(flow.cost_distribution, dict)
+
+ # Should have min_cut edges when include_min_cut=True
+ if flow.data and "edges_kind" in flow.data:
+ assert flow.data["edges_kind"] == "min_cut"
+ assert "edges" in flow.data
+
+ def test_max_flow_analysis_with_optional_params(
+ self, simple_network: Network
+ ) -> None:
+ """Test max_flow_analysis with optional parameters."""
+ result = max_flow_analysis(
+ network=simple_network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ source_regex="datacenter.*",
+ sink_regex="edge.*",
+ mode="pairwise",
+ shortest_path=True,
+ flow_placement=FlowPlacement.EQUAL_BALANCED,
+ extra_param="ignored",
+ )
+
+ assert isinstance(result, FlowIterationResult)
+ # Pairwise mode with 2 datacenters and 2 edges yields 4 source/sink pairs;
+ # assert only that at least one flow entry is produced.
+ assert len(result.flows) >= 1
+ # Check that all flows have proper source/destination matching the regex
+ for flow in result.flows:
+ assert flow.source.startswith("datacenter")
+ assert flow.destination.startswith("edge")
+
+ def test_max_flow_analysis_empty_result(self, simple_network: Network) -> None:
+ """Test max_flow_analysis with no matching nodes raises an error."""
+ # NetGraph-Core raises ValueError when no nodes match the selectors,
+ # which is clearer than silently returning an empty result.
+ with pytest.raises(ValueError, match="No source nodes found"):
+ max_flow_analysis(
+ network=simple_network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ source_regex="nonexistent.*",
+ sink_regex="also_nonexistent.*",
+ )
+
+
+class TestDemandPlacementAnalysis:
+ """Test demand_placement_analysis function."""
+
+ @pytest.fixture
+ def diamond_network(self) -> Network:
+ """Create a diamond network for testing demand placement."""
+ network = Network()
+ # Add nodes: A -> B,C -> D (diamond shape)
+ for node in ["A", "B", "C", "D"]:
+ network.add_node(Node(node))
+
+ # Add links with limited capacity
+ network.add_link(Link("A", "B", capacity=60.0, cost=1.0))
+ network.add_link(Link("A", "C", capacity=60.0, cost=1.0))
+ network.add_link(Link("B", "D", capacity=60.0, cost=1.0))
+ network.add_link(Link("C", "D", capacity=60.0, cost=1.0))
+
+ return network
+
+ def test_demand_placement_analysis_basic(self, diamond_network: Network) -> None:
+ """Test basic demand_placement_analysis functionality."""
+ # Use a smaller demand that should definitely fit
+ demands_config = [
+ {
+ "source_path": "A",
+ "sink_path": "D",
+ "demand": 50.0,
+ "mode": "pairwise",
+ "priority": 0,
+ },
+ ]
+
+ result = demand_placement_analysis(
+ network=diamond_network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ demands_config=demands_config,
+ placement_rounds=1,
+ )
+
+ # Verify results structure
+ assert isinstance(result, FlowIterationResult)
+ assert len(result.flows) == 1
+
+ flow = result.flows[0]
+ assert flow.source == "A"
+ assert flow.destination == "D"
+ assert flow.priority == 0
+ assert flow.demand == 50.0
+ # With 50 demand and two paths of 60 capacity each, should place all
+ assert flow.placed == 50.0
+ assert flow.dropped == 0.0
+
+ summary = result.summary
+ assert summary.total_demand == 50.0
+ assert summary.total_placed == 50.0
+ assert summary.overall_ratio == 1.0
+
+ def test_demand_placement_analysis_zero_total_demand(
+ self, diamond_network: Network
+ ) -> None:
+ """Handles zero total demand without division by zero."""
+ demands_config = [
+ {
+ "source_path": "A",
+ "sink_path": "B",
+ "demand": 0.0,
+ }
+ ]
+
+ result = demand_placement_analysis(
+ network=diamond_network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ demands_config=demands_config,
+ placement_rounds=1,
+ )
+
+ assert isinstance(result, FlowIterationResult)
+ assert len(result.flows) == 1
+ assert result.flows[0].placed == 0.0
+ summary = result.summary
+ assert summary.total_demand == 0.0
+ assert summary.total_placed == 0.0
+ assert summary.overall_ratio == 1.0
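+ # Convention: with zero total demand, overall_ratio reports 1.0 (the demand
+ # is vacuously fully placed) instead of attempting 0/0.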
+
+
+class TestSensitivityAnalysis:
+ """Test sensitivity_analysis function."""
+
+ @pytest.fixture
+ def simple_network(self) -> Network:
+ """Create a simple test network."""
+ network = Network()
+ for node in ["A", "B", "C"]:
+ network.add_node(Node(node))
+ network.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+ network.add_link(Link("B", "C", capacity=10.0, cost=1.0))
+ return network
+
+ def test_sensitivity_analysis_basic(self, simple_network: Network) -> None:
+ """Test basic sensitivity_analysis functionality."""
+ # Note: the current implementation returns an empty dict as a placeholder.
+ # This is documented in the function; a full implementation requires
+ # additional Core API support for component criticality scores.
+ result = sensitivity_analysis(
+ network=simple_network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ source_regex="A",
+ sink_regex="C",
+ mode="combine",
+ )
+
+ # Current implementation returns empty dict
+ assert isinstance(result, dict)
+
+ def test_sensitivity_analysis_empty_result(self, simple_network: Network) -> None:
+ """Test sensitivity_analysis with empty result."""
+ with pytest.raises(ValueError, match="No source nodes found"):
+ sensitivity_analysis(
+ network=simple_network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ source_regex="nonexistent.*",
+ sink_regex="also_nonexistent.*",
+ )
diff --git a/tests/exec/analysis/test_functions_details.py b/tests/exec/analysis/test_functions_details.py
new file mode 100644
index 0000000..35d3036
--- /dev/null
+++ b/tests/exec/analysis/test_functions_details.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+from ngraph.exec.analysis.flow import demand_placement_analysis
+from ngraph.model.network import Link, Network, Node
+
+
+def test_demand_placement_analysis_includes_flow_details_costs_and_edges() -> None:
+ """Test that demand placement analysis includes flow details, costs, and edges when requested."""
+ # Create a diamond network with two paths of different costs
+ network = Network()
+ for node in ["A", "B", "C", "D"]:
+ network.add_node(Node(node))
+
+ # Create two paths with different costs
+ # Path 1: A -> B -> D (cost 2, capacity 100)
+ network.add_link(Link("A", "B", capacity=100.0, cost=1.0))
+ network.add_link(Link("B", "D", capacity=100.0, cost=1.0))
+
+ # Path 2: A -> C -> D (cost 4, capacity 100)
+ network.add_link(Link("A", "C", capacity=100.0, cost=2.0))
+ network.add_link(Link("C", "D", capacity=100.0, cost=2.0))
+
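+ # With demand 150 and the cost-2 path capped at 100, a cost-ordered policy
+ # should place about 100 on A->B->D and the remaining 50 on A->C->D, so
+ # cost_distribution should show two buckets (total path costs 2.0 and 4.0).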
+ demands_config = [
+ {
+ "source_path": "A",
+ "sink_path": "D",
+ "demand": 150.0, # Exceeds single path capacity, will use both paths
+ "mode": "pairwise",
+ "priority": 0,
+ },
+ ]
+
+ result = demand_placement_analysis(
+ network=network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ demands_config=demands_config,
+ placement_rounds=1,
+ include_flow_details=True,
+ include_used_edges=True,
+ )
+
+ # Validate result structure
+ assert len(result.flows) == 1
+ flow = result.flows[0]
+
+ # Should have cost_distribution when include_flow_details=True
+ assert isinstance(flow.cost_distribution, dict)
+ # With both paths used, we should see different costs
+ # (exact distribution depends on flow policy)
+ if flow.cost_distribution:
+ assert len(flow.cost_distribution) > 0
+ assert all(isinstance(k, float) for k in flow.cost_distribution.keys())
+ assert all(isinstance(v, float) for v in flow.cost_distribution.values())
+
+ # Should have edges when include_used_edges=True
+ if flow.data and "edges" in flow.data:
+ assert flow.data.get("edges_kind") == "used"
+ assert isinstance(flow.data["edges"], list)
+ # Should have some edges (exact count depends on flow distribution)
+ assert len(flow.data["edges"]) > 0
diff --git a/tests/exec/demand/test_builder.py b/tests/exec/demand/test_builder.py
new file mode 100644
index 0000000..099266a
--- /dev/null
+++ b/tests/exec/demand/test_builder.py
@@ -0,0 +1,225 @@
+"""Tests for traffic matrix set builder."""
+
+import pytest
+
+from ngraph.exec.demand.builder import (
+ _coerce_flow_policy_config,
+ build_traffic_matrix_set,
+)
+from ngraph.model.flow.policy_config import FlowPolicyPreset
+
+
+def test_build_traffic_matrix_set_basic():
+ """Test building a basic traffic matrix set."""
+ raw = {
+ "tm1": [
+ {
+ "source_path": "A",
+ "sink_path": "B",
+ "demand": 100.0,
+ }
+ ]
+ }
+
+ tms = build_traffic_matrix_set(raw)
+ assert "tm1" in tms.matrices
+ demands = tms.get_matrix("tm1")
+ assert len(demands) == 1
+ assert demands[0].source_path == "A"
+ assert demands[0].sink_path == "B"
+ assert demands[0].demand == 100.0
+
+
+def test_build_traffic_matrix_set_multiple_matrices():
+ """Test building multiple traffic matrices."""
+ raw = {
+ "tm1": [{"source_path": "A", "sink_path": "B", "demand": 100.0}],
+ "tm2": [{"source_path": "C", "sink_path": "D", "demand": 200.0}],
+ }
+
+ tms = build_traffic_matrix_set(raw)
+ assert "tm1" in tms.matrices
+ assert "tm2" in tms.matrices
+ assert len(tms.get_matrix("tm1")) == 1
+ assert len(tms.get_matrix("tm2")) == 1
+
+
+def test_build_traffic_matrix_set_multiple_demands():
+ """Test building traffic matrix with multiple demands."""
+ raw = {
+ "tm1": [
+ {"source_path": "A", "sink_path": "B", "demand": 100.0},
+ {"source_path": "C", "sink_path": "D", "demand": 200.0},
+ ]
+ }
+
+ tms = build_traffic_matrix_set(raw)
+ demands = tms.get_matrix("tm1")
+ assert len(demands) == 2
+ assert demands[0].demand == 100.0
+ assert demands[1].demand == 200.0
+
+
+def test_build_traffic_matrix_set_with_flow_policy_enum():
+ """Test building with FlowPolicyPreset enum."""
+ raw = {
+ "tm1": [
+ {
+ "source_path": "A",
+ "sink_path": "B",
+ "demand": 100.0,
+ "flow_policy_config": FlowPolicyPreset.SHORTEST_PATHS_ECMP,
+ }
+ ]
+ }
+
+ tms = build_traffic_matrix_set(raw)
+ demands = tms.get_matrix("tm1")
+ assert demands[0].flow_policy_config == FlowPolicyPreset.SHORTEST_PATHS_ECMP
+
+
+def test_build_traffic_matrix_set_with_flow_policy_string():
+ """Test building with FlowPolicyPreset as string."""
+ raw = {
+ "tm1": [
+ {
+ "source_path": "A",
+ "sink_path": "B",
+ "demand": 100.0,
+ "flow_policy_config": "SHORTEST_PATHS_ECMP",
+ }
+ ]
+ }
+
+ tms = build_traffic_matrix_set(raw)
+ demands = tms.get_matrix("tm1")
+ assert demands[0].flow_policy_config == FlowPolicyPreset.SHORTEST_PATHS_ECMP
+
+
+def test_build_traffic_matrix_set_with_flow_policy_int():
+ """Test building with FlowPolicyPreset as integer."""
+ raw = {
+ "tm1": [
+ {
+ "source_path": "A",
+ "sink_path": "B",
+ "demand": 100.0,
+ "flow_policy_config": 1,
+ }
+ ]
+ }
+
+ tms = build_traffic_matrix_set(raw)
+ demands = tms.get_matrix("tm1")
+ assert demands[0].flow_policy_config == FlowPolicyPreset.SHORTEST_PATHS_ECMP
+
+
+def test_build_traffic_matrix_set_invalid_raw_type():
+ """Test error handling for invalid raw type."""
+ with pytest.raises(ValueError, match="must be a mapping"):
+ build_traffic_matrix_set("not a dict")
+
+ with pytest.raises(ValueError, match="must be a mapping"):
+ build_traffic_matrix_set([])
+
+
+def test_build_traffic_matrix_set_invalid_matrix_value():
+ """Test error handling when matrix value is not a list."""
+ raw = {"tm1": "not a list"}
+
+ with pytest.raises(ValueError, match="must map to a list"):
+ build_traffic_matrix_set(raw)
+
+
+def test_build_traffic_matrix_set_invalid_demand_type():
+ """Test error handling when demand entry is not a dict."""
+ raw = {"tm1": ["not a dict"]}
+
+ with pytest.raises(ValueError, match="must be dicts"):
+ build_traffic_matrix_set(raw)
+
+
+def test_coerce_flow_policy_config_none():
+ """Test coercing None."""
+ assert _coerce_flow_policy_config(None) is None
+
+
+def test_coerce_flow_policy_config_enum():
+ """Test coercing FlowPolicyPreset enum."""
+ preset = FlowPolicyPreset.SHORTEST_PATHS_ECMP
+ assert _coerce_flow_policy_config(preset) == preset
+
+
+def test_coerce_flow_policy_config_int():
+ """Test coercing integer to enum."""
+ assert _coerce_flow_policy_config(1) == FlowPolicyPreset.SHORTEST_PATHS_ECMP
+ assert _coerce_flow_policy_config(2) == FlowPolicyPreset.SHORTEST_PATHS_WCMP
+ assert _coerce_flow_policy_config(3) == FlowPolicyPreset.TE_WCMP_UNLIM
+ assert _coerce_flow_policy_config(4) == FlowPolicyPreset.TE_ECMP_UP_TO_256_LSP
+ assert _coerce_flow_policy_config(5) == FlowPolicyPreset.TE_ECMP_16_LSP
+
+
+def test_coerce_flow_policy_config_string():
+ """Test coercing string to enum."""
+ assert (
+ _coerce_flow_policy_config("SHORTEST_PATHS_ECMP")
+ == FlowPolicyPreset.SHORTEST_PATHS_ECMP
+ )
+ assert (
+ _coerce_flow_policy_config("shortest_paths_ecmp")
+ == FlowPolicyPreset.SHORTEST_PATHS_ECMP
+ )
+ assert (
+ _coerce_flow_policy_config("SHORTEST_PATHS_WCMP")
+ == FlowPolicyPreset.SHORTEST_PATHS_WCMP
+ )
+ assert _coerce_flow_policy_config("TE_WCMP_UNLIM") == FlowPolicyPreset.TE_WCMP_UNLIM
+ assert (
+ _coerce_flow_policy_config("TE_ECMP_UP_TO_256_LSP")
+ == FlowPolicyPreset.TE_ECMP_UP_TO_256_LSP
+ )
+ assert (
+ _coerce_flow_policy_config("TE_ECMP_16_LSP") == FlowPolicyPreset.TE_ECMP_16_LSP
+ )
+
+
+def test_coerce_flow_policy_config_string_numeric():
+ """Test coercing numeric string to enum."""
+ assert _coerce_flow_policy_config("1") == FlowPolicyPreset.SHORTEST_PATHS_ECMP
+ assert _coerce_flow_policy_config("2") == FlowPolicyPreset.SHORTEST_PATHS_WCMP
+ assert _coerce_flow_policy_config("3") == FlowPolicyPreset.TE_WCMP_UNLIM
+
+
+def test_coerce_flow_policy_config_empty_string():
+ """Test coercing empty string."""
+ assert _coerce_flow_policy_config("") is None
+ assert _coerce_flow_policy_config(" ") is None
+
+
+def test_coerce_flow_policy_config_invalid_string():
+ """Test error handling for invalid string."""
+ with pytest.raises(ValueError, match="Unknown flow policy config"):
+ _coerce_flow_policy_config("INVALID_POLICY")
+
+
+def test_coerce_flow_policy_config_invalid_numeric_string():
+ """Test error handling for invalid numeric string."""
+ with pytest.raises(ValueError, match="Unknown flow policy config value"):
+ _coerce_flow_policy_config("999")
+
+
+def test_coerce_flow_policy_config_invalid_int():
+ """Test error handling for invalid integer."""
+ with pytest.raises(ValueError, match="Unknown flow policy config value"):
+ _coerce_flow_policy_config(999)
+
+
+def test_coerce_flow_policy_config_other_types():
+ """Test that other types are passed through unchanged."""
+ # Dict config for advanced usage
+ dict_config = {"custom": "config"}
+ assert _coerce_flow_policy_config(dict_config) == dict_config
+
+ # List (unusual but should pass through)
+ list_config = ["a", "b"]
+ assert _coerce_flow_policy_config(list_config) == list_config
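+
+
+# Reference sketch (hypothetical helper, not used by these tests): a
+# re-implementation of the coercion rules asserted above, for illustration
+# only. The real implementation is _coerce_flow_policy_config in
+# ngraph.exec.demand.builder.
+def _coerce_sketch(value):
+    """Illustrative coercion matching the behaviors asserted above."""
+    if value is None:
+        return None
+    if isinstance(value, FlowPolicyPreset):
+        return value
+    if isinstance(value, str):
+        text = value.strip()
+        if not text:
+            return None  # empty/whitespace strings mean "no policy"
+        if text.isdigit():
+            value = int(text)  # numeric strings fall through to the int branch
+        else:
+            try:
+                return FlowPolicyPreset[text.upper()]  # case-insensitive name lookup
+            except KeyError as exc:
+                raise ValueError(f"Unknown flow policy config: {text!r}") from exc
+    if isinstance(value, int):
+        try:
+            return FlowPolicyPreset(value)  # values 1..5 map to the presets
+        except ValueError as exc:
+            raise ValueError(f"Unknown flow policy config value: {value}") from exc
+    return value  # dicts, lists, etc. pass through unchanged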
diff --git a/tests/algorithms/__init__.py b/tests/exec/failure/__init__.py
similarity index 100%
rename from tests/algorithms/__init__.py
rename to tests/exec/failure/__init__.py
diff --git a/tests/exec/failure/test_manager.py b/tests/exec/failure/test_manager.py
new file mode 100644
index 0000000..b218e61
--- /dev/null
+++ b/tests/exec/failure/test_manager.py
@@ -0,0 +1,396 @@
+"""High-value tests for `FailureManager` public behavior and APIs.
+
+Focus on functional outcomes and API semantics using the new FailureManager
+that works with NetGraph-Core. Tests core functionality, policy management,
+exclusion computation, and convenience methods.
+"""
+
+from typing import Any
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from ngraph.exec.failure.manager import FailureManager
+from ngraph.model.failure.policy import (
+ FailureCondition,
+ FailureMode,
+ FailurePolicy,
+ FailureRule,
+)
+from ngraph.model.failure.policy_set import FailurePolicySet
+from ngraph.model.network import Link, Network, Node
+
+
+@pytest.fixture
+def simple_network() -> Network:
+ """Create a simple test network."""
+ network = Network()
+ network.attrs["name"] = "test_network"
+ network.add_node(Node("node1", attrs={"type": "server"}))
+ network.add_node(Node("node2", attrs={"type": "router"}))
+ network.add_node(Node("node3", attrs={"type": "router"}))
+ network.add_link(Link("node1", "node2", capacity=100.0))
+ network.add_link(Link("node2", "node3", capacity=200.0))
+ return network
+
+
+@pytest.fixture
+def failure_policy() -> FailurePolicy:
+ """Create a simple failure policy for testing."""
+ rule = FailureRule(entity_scope="node", rule_type="choice", count=1)
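+ # rule_type="choice" with count=1 fails exactly one randomly selected node per draw.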
+ return FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
+
+
+@pytest.fixture
+def failure_policy_set(failure_policy: FailurePolicy) -> FailurePolicySet:
+ """Create a failure policy set for testing."""
+ policy_set = FailurePolicySet()
+ policy_set.policies["test_policy"] = failure_policy
+ return policy_set
+
+
+@pytest.fixture
+def failure_manager(
+ simple_network: Network, failure_policy_set: FailurePolicySet
+) -> FailureManager:
+ """Create a FailureManager instance for testing."""
+ return FailureManager(
+ network=simple_network,
+ failure_policy_set=failure_policy_set,
+ policy_name="test_policy",
+ )
+
+
+class TestFailureManagerInitialization:
+ """Test FailureManager initialization and basic properties."""
+
+ def test_initialization(
+ self, simple_network: Network, failure_policy_set: FailurePolicySet
+ ) -> None:
+ """Test basic initialization."""
+ fm = FailureManager(
+ network=simple_network,
+ failure_policy_set=failure_policy_set,
+ policy_name="test_policy",
+ )
+
+ assert fm.network is simple_network
+ assert fm.failure_policy_set is failure_policy_set
+ assert fm.policy_name == "test_policy"
+
+ def test_initialization_without_policy_name(
+ self, simple_network: Network, failure_policy_set: FailurePolicySet
+ ) -> None:
+ """Test initialization with no policy name."""
+ fm = FailureManager(
+ network=simple_network,
+ failure_policy_set=failure_policy_set,
+ policy_name=None,
+ )
+
+ assert fm.network is simple_network
+ assert fm.failure_policy_set is failure_policy_set
+ assert fm.policy_name is None
+
+
+class TestFailureManagerPolicyRetrieval:
+ """Test failure policy retrieval and management."""
+
+ def test_get_failure_policy_with_named_policy(
+ self, failure_manager: FailureManager
+ ) -> None:
+ """Test retrieving a named policy."""
+ policy = failure_manager.get_failure_policy()
+ assert policy is not None
+ assert isinstance(policy, FailurePolicy)
+
+ def test_get_failure_policy_with_default_policy(
+ self, simple_network: Network, failure_policy_set: FailurePolicySet
+ ) -> None:
+ """Test that None policy_name returns None."""
+ fm = FailureManager(
+ network=simple_network,
+ failure_policy_set=failure_policy_set,
+ policy_name=None,
+ )
+
+ policy = fm.get_failure_policy()
+ assert policy is None
+
+ def test_get_failure_policy_not_found(
+ self, simple_network: Network, failure_policy_set: FailurePolicySet
+ ) -> None:
+ """Test error handling when policy not found."""
+ fm = FailureManager(
+ network=simple_network,
+ failure_policy_set=failure_policy_set,
+ policy_name="nonexistent_policy",
+ )
+
+ with pytest.raises(ValueError, match="not found in scenario"):
+ fm.get_failure_policy()
+
+
+class TestFailureManagerExclusionComputation:
+ """Test compute_exclusions method."""
+
+ def test_compute_exclusions_no_policy(
+ self, simple_network: Network, failure_policy_set: FailurePolicySet
+ ) -> None:
+ """Test exclusion computation with no policy."""
+ fm = FailureManager(
+ network=simple_network,
+ failure_policy_set=failure_policy_set,
+ policy_name=None,
+ )
+
+ excluded_nodes, excluded_links = fm.compute_exclusions()
+ assert excluded_nodes == set()
+ assert excluded_links == set()
+
+ def test_compute_exclusions_with_policy(
+ self, failure_manager: FailureManager
+ ) -> None:
+ """Test exclusion computation with a policy."""
+ excluded_nodes, excluded_links = failure_manager.compute_exclusions(
+ seed_offset=42
+ )
+
+ # Should have some exclusions based on the policy
+ assert len(excluded_nodes) > 0 or len(excluded_links) > 0
+
+ def test_compute_exclusions_deterministic(
+ self, failure_manager: FailureManager
+ ) -> None:
+ """Test that exclusions are deterministic with same seed."""
+ excluded1_nodes, excluded1_links = failure_manager.compute_exclusions(
+ seed_offset=42
+ )
+ excluded2_nodes, excluded2_links = failure_manager.compute_exclusions(
+ seed_offset=42
+ )
+
+ assert excluded1_nodes == excluded2_nodes
+ assert excluded1_links == excluded2_links
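+ # compute_exclusions presumably seeds its RNG from seed_offset, so identical
+ # offsets must reproduce the same excluded sets.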
+
+
+class TestFailureManagerTopLevelMatching:
+ """Test compute_exclusions merged attribute view correctness."""
+
+ def test_node_matching_on_disabled_attribute(
+ self, simple_network: Network, failure_policy_set: FailurePolicySet
+ ) -> None:
+ """Test node matching on disabled attribute."""
+ # Mark one node as disabled
+ simple_network.nodes["node1"].disabled = True
+
+ rule = FailureRule(
+ entity_scope="node",
+ conditions=[FailureCondition(attr="disabled", operator="==", value=True)],
+ logic="and",
+ rule_type="all",
+ )
+ policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
+
+ fm = FailureManager(
+ network=simple_network,
+ failure_policy_set=failure_policy_set,
+ policy_name=None,
+ )
+
+ excluded_nodes, excluded_links = fm.compute_exclusions(policy=policy)
+
+ assert "node1" in excluded_nodes
+ assert "node2" not in excluded_nodes
+ assert "node3" not in excluded_nodes
+ assert excluded_links == set()
+
+ def test_link_matching_on_capacity_attribute(
+ self, simple_network: Network, failure_policy_set: FailurePolicySet
+ ) -> None:
+ """Test link matching on capacity attribute."""
+ rule = FailureRule(
+ entity_scope="link",
+ conditions=[FailureCondition(attr="capacity", operator=">", value=150.0)],
+ logic="and",
+ rule_type="all",
+ )
+ policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
+
+ fm = FailureManager(
+ network=simple_network,
+ failure_policy_set=failure_policy_set,
+ policy_name=None,
+ )
+
+ excluded_nodes, excluded_links = fm.compute_exclusions(policy=policy)
+
+ # Link with capacity 200.0 should be excluded
+ assert len(excluded_links) == 1
+ assert excluded_nodes == set()
+
+
+class TestFailureManagerMonteCarloValidation:
+ """Test validation logic for Monte Carlo parameters."""
+
+ def test_validation_iterations_without_policy(
+ self, simple_network: Network, failure_policy_set: FailurePolicySet
+ ) -> None:
+ """Test that iterations > 1 without policy raises error."""
+ fm = FailureManager(
+ network=simple_network,
+ failure_policy_set=failure_policy_set,
+ policy_name=None,
+ )
+
+ # Mock analysis function
+ def mock_analysis_func(*args: Any, **kwargs: Any) -> dict[str, Any]:
+ return {"result": "mock"}
+
+ with pytest.raises(
+ ValueError, match="iterations=2 has no effect without a failure policy"
+ ):
+ fm.run_monte_carlo_analysis(
+ analysis_func=mock_analysis_func, iterations=2, baseline=False
+ )
+
+ def test_validation_baseline_requires_multiple_iterations(
+ self, failure_manager: FailureManager
+ ) -> None:
+ """Test that baseline=True requires iterations >= 2."""
+
+ # Mock analysis function
+ def mock_analysis_func(*args: Any, **kwargs: Any) -> dict[str, Any]:
+ return {"result": "mock"}
+
+ with pytest.raises(ValueError, match="baseline=True requires iterations >= 2"):
+ failure_manager.run_monte_carlo_analysis(
+ analysis_func=mock_analysis_func, iterations=1, baseline=True
+ )
+
+
+class TestFailureManagerConvenienceMethods:
+ """Test convenience methods for specific analysis types."""
+
+ @patch("ngraph.exec.failure.manager.FailureManager.run_monte_carlo_analysis")
+ def test_run_max_flow_monte_carlo_delegates(
+ self, mock_mc_analysis: MagicMock, failure_manager: FailureManager
+ ) -> None:
+ """Test run_max_flow_monte_carlo delegates to run_monte_carlo_analysis."""
+ mock_mc_analysis.return_value = {
+ "results": [],
+ "failure_patterns": [],
+ "metadata": {"iterations": 2},
+ }
+
+ result = failure_manager.run_max_flow_monte_carlo(
+ source_path="datacenter.*",
+ sink_path="edge.*",
+ mode="combine",
+ iterations=2,
+ parallelism=1,
+ )
+
+ assert mock_mc_analysis.called
+ assert result == mock_mc_analysis.return_value
+
+ @patch("ngraph.exec.failure.manager.FailureManager.run_monte_carlo_analysis")
+ def test_run_demand_placement_monte_carlo_delegates(
+ self, mock_mc_analysis: MagicMock, failure_manager: FailureManager
+ ) -> None:
+ """Test run_demand_placement_monte_carlo delegates correctly."""
+ mock_mc_analysis.return_value = {
+ "results": [],
+ "failure_patterns": [],
+ "metadata": {"iterations": 1},
+ }
+
+ mock_demands = MagicMock()
+ result = failure_manager.run_demand_placement_monte_carlo(
+ demands_config=mock_demands, iterations=1, parallelism=1
+ )
+
+ assert mock_mc_analysis.called
+ assert result == mock_mc_analysis.return_value
+
+ def test_flow_placement_string_conversion_max_flow(
+ self, failure_manager: FailureManager
+ ) -> None:
+ """Test string to FlowPlacement enum conversion."""
+ from ngraph.types.base import FlowPlacement
+
+ with patch.object(failure_manager, "run_monte_carlo_analysis") as mock_mc:
+ mock_mc.return_value = {"results": [], "metadata": {}}
+
+ failure_manager.run_max_flow_monte_carlo(
+ source_path="src.*",
+ sink_path="dst.*",
+ flow_placement="EQUAL_BALANCED",
+ iterations=1,
+ )
+
+ call_kwargs = mock_mc.call_args[1]
+ assert call_kwargs["flow_placement"] == FlowPlacement.EQUAL_BALANCED
+
+ def test_invalid_flow_placement_string_raises_error(
+ self, failure_manager: FailureManager
+ ) -> None:
+ """Test that invalid flow_placement string raises clear error."""
+ with pytest.raises(ValueError) as exc_info:
+ failure_manager.run_max_flow_monte_carlo(
+ source_path="src.*",
+ sink_path="dst.*",
+ flow_placement="INVALID_OPTION",
+ iterations=1,
+ )
+
+ error_msg = str(exc_info.value)
+ assert "Invalid flow_placement 'INVALID_OPTION'" in error_msg
+ assert "Valid values are" in error_msg
+
+ def test_case_insensitive_flow_placement_conversion(
+ self, failure_manager: FailureManager
+ ) -> None:
+ """Test case-insensitive flow_placement string conversion."""
+ from ngraph.types.base import FlowPlacement
+
+ with patch.object(failure_manager, "run_monte_carlo_analysis") as mock_mc:
+ mock_mc.return_value = {"results": [], "metadata": {}}
+
+ failure_manager.run_max_flow_monte_carlo(
+ source_path="src.*",
+ sink_path="dst.*",
+ flow_placement="proportional", # lowercase
+ iterations=1,
+ )
+
+ call_kwargs = mock_mc.call_args[1]
+ assert call_kwargs["flow_placement"] == FlowPlacement.PROPORTIONAL
+
+
+class TestFailureManagerErrorHandling:
+ """Test error handling and edge cases."""
+
+ @patch("ngraph.exec.failure.manager.ThreadPoolExecutor")
+ def test_parallel_execution_error_propagation(
+ self, mock_pool_executor: MagicMock, failure_manager: FailureManager
+ ) -> None:
+ """Test that parallel execution errors propagate correctly."""
+ mock_pool = MagicMock()
+ mock_pool_executor.return_value.__enter__.return_value = mock_pool
+ mock_pool.map.side_effect = RuntimeError("Parallel execution failed")
+
+ # Mock analysis function
+ def mock_analysis_func(*args: Any, **kwargs: Any) -> dict[str, Any]:
+ return {"result": "mock"}
+
+ # Note: ThreadPoolExecutor shares the network by reference (no pickling needed)
+ with patch.object(
+ failure_manager,
+ "compute_exclusions",
+ side_effect=[({"n1"}, set()), ({"n2"}, set())],
+ ):
+ with pytest.raises(RuntimeError, match="Parallel execution failed"):
+ failure_manager.run_monte_carlo_analysis(
+ analysis_func=mock_analysis_func, iterations=2, parallelism=2
+ )
diff --git a/tests/failure/test_manager_integration.py b/tests/exec/failure/test_manager_integration.py
similarity index 90%
rename from tests/failure/test_manager_integration.py
rename to tests/exec/failure/test_manager_integration.py
index 26bc6cc..51403e6 100644
--- a/tests/failure/test_manager_integration.py
+++ b/tests/exec/failure/test_manager_integration.py
@@ -2,11 +2,11 @@
import pytest
-from ngraph.failure.manager.manager import FailureManager
-from ngraph.failure.policy import FailurePolicy, FailureRule
-from ngraph.failure.policy_set import FailurePolicySet
+from ngraph.exec.analysis.flow import max_flow_analysis
+from ngraph.exec.failure.manager import FailureManager
+from ngraph.model.failure.policy import FailurePolicy, FailureRule
+from ngraph.model.failure.policy_set import FailurePolicySet
from ngraph.model.network import Network
-from ngraph.monte_carlo.functions import max_flow_analysis
from ngraph.results.flow import FlowIterationResult
@@ -39,13 +39,13 @@ def failure_policy_set(self):
rule_type="choice",
count=1,
)
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
policy_set.policies["single_failures"] = policy
# No failure policy
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
no_fail_policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[])])
policy_set.policies["no_failures"] = no_fail_policy
@@ -134,8 +134,16 @@ def test_run_monte_carlo_analysis(self, simple_network, failure_policy_set):
for iter_res in results["results"]
if isinstance(iter_res, FlowIterationResult)
]
- assert max(capacities) == 10.0 # Full capacity without failures
- assert min(capacities) == 5.0 # Reduced capacity with failures
+ # With this topology (A->B->C and A->C), max flow is 15.0 without failures
+ # (10.0 through B + 5.0 direct). With single-link failures (the policy
+ # always fails exactly one link):
+ # - excluding A->B or B->C leaves 5.0 (direct path only)
+ # - excluding A->C leaves 10.0 (via B only)
+ # The test runs 5 iterations with failures, so we see a mix of 5.0 and 10.0.
+ assert max(capacities) == 10.0 # Best case with 1 failure
+ assert min(capacities) == 5.0 # Worst case with 1 failure
+ assert 5.0 in capacities # Should see some 5.0 results
+ assert 10.0 in capacities # Should see some 10.0 results
def test_analysis_with_parallel_execution(self, simple_network, failure_policy_set):
"""Test parallel execution of Monte Carlo analysis."""
@@ -232,7 +240,7 @@ def test_capacity_envelope_analysis_integration(self):
rule_type="choice",
count=2,
)
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
policy_set.policies["dual_link_failures"] = policy
@@ -279,7 +287,7 @@ def failing_analysis_func(*args, **kwargs):
# Policy that excludes nothing
policy_set = FailurePolicySet()
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[])])
policy_set.policies["no_failures"] = policy
diff --git a/tests/explorer/test_explorer.py b/tests/explorer/test_explorer.py
index 75b1a71..3e71b0d 100644
--- a/tests/explorer/test_explorer.py
+++ b/tests/explorer/test_explorer.py
@@ -2,10 +2,10 @@
import pytest
-from ngraph.components import Component, ComponentsLibrary
from ngraph.explorer import (
NetworkExplorer,
)
+from ngraph.model.components import Component, ComponentsLibrary
from ngraph.model.network import Link, Network, Node
@@ -270,7 +270,7 @@ def test_active_mode_excludes_disabled_nodes_and_links():
network = Network()
# Nodes: a (enabled), b (disabled) in dc1; c (enabled) in dc2
network.nodes["dc1/a"] = Node(name="dc1/a")
- network.nodes["dc1/b"] = Node(name="dc1/b", attrs={"disabled": True})
+ network.nodes["dc1/b"] = Node(name="dc1/b", disabled=True)
network.nodes["dc2/c"] = Node(name="dc2/c")
# Links:
@@ -280,7 +280,7 @@ def test_active_mode_excludes_disabled_nodes_and_links():
network.links["l1"] = Link(source="dc1/a", target="dc1/b", capacity=100.0)
network.links["l2"] = Link(source="dc1/a", target="dc2/c", capacity=200.0)
network.links["l3"] = Link(
- source="dc1/a", target="dc2/c", capacity=50.0, attrs={"disabled": True}
+ source="dc1/a", target="dc2/c", capacity=50.0, disabled=True
)
explorer = NetworkExplorer.explore_network(network)
diff --git a/tests/explorer/test_hw_count_and_validation.py b/tests/explorer/test_hw_count_and_validation.py
index f15375d..0b6bb24 100644
--- a/tests/explorer/test_hw_count_and_validation.py
+++ b/tests/explorer/test_hw_count_and_validation.py
@@ -2,8 +2,8 @@
import pytest
-from ngraph.components import Component, ComponentsLibrary
from ngraph.explorer import NetworkExplorer
+from ngraph.model.components import Component, ComponentsLibrary
from ngraph.model.network import Link, Network, Node
diff --git a/tests/failure/__init__.py b/tests/failure/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/failure/manager/test_helpers_smoke.py b/tests/failure/manager/test_helpers_smoke.py
deleted file mode 100644
index 642d13b..0000000
--- a/tests/failure/manager/test_helpers_smoke.py
+++ /dev/null
@@ -1,8 +0,0 @@
-def test_import_failure_manager_helper_modules() -> None:
- import ngraph.failure.manager.aggregate as agg
- import ngraph.failure.manager.enumerate as enum
- import ngraph.failure.manager.simulate as sim
-
- assert isinstance(agg.__doc__, str)
- assert isinstance(enum.__doc__, str)
- assert isinstance(sim.__doc__, str)
diff --git a/tests/failure/test_manager.py b/tests/failure/test_manager.py
deleted file mode 100644
index df33cdc..0000000
--- a/tests/failure/test_manager.py
+++ /dev/null
@@ -1,604 +0,0 @@
-"""High-value tests for `FailureManager` public behavior and APIs.
-
-Focus on functional outcomes and API semantics. Internal helper and
-implementation-specific behaviors are intentionally not tested here.
-"""
-
-from typing import cast
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-from ngraph.failure.manager.manager import FailureManager
-from ngraph.failure.policy import FailurePolicy
-from ngraph.failure.policy_set import FailurePolicySet
-from ngraph.model.network import Network
-from ngraph.model.view import NetworkView
-
-
-@pytest.fixture
-def mock_network() -> Network:
- """Create a mock Network for testing."""
- mock_net = MagicMock(spec=Network)
- mock_net.nodes = {
- "node1": MagicMock(attrs={"type": "server"}, risk_groups=set()),
- "node2": MagicMock(attrs={"type": "router"}, risk_groups=set()),
- }
- mock_net.links = {
- "link1": MagicMock(attrs={"capacity": 100}, risk_groups=set()),
- "link2": MagicMock(attrs={"capacity": 200}, risk_groups=set()),
- }
- mock_net.risk_groups = {}
- return mock_net
-
-
-@pytest.fixture
-def mock_failure_policy() -> FailurePolicy:
- """Create a mock FailurePolicy for testing."""
- from ngraph.failure.policy import FailureMode, FailureRule
-
- rule = FailureRule(entity_scope="node", rule_type="choice", count=1)
- policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
-
- policy.apply_failures = MagicMock(return_value=["node1", "link1"]) # type: ignore[attr-defined]
- return policy
-
-
-@pytest.fixture
-def mock_failure_policy_set(mock_failure_policy: FailurePolicy) -> FailurePolicySet:
- """Create a mock FailurePolicySet for testing."""
- policy_set = MagicMock(spec=FailurePolicySet)
- policy_set.get_policy.return_value = mock_failure_policy
- return policy_set
-
-
-@pytest.fixture
-def failure_manager(
- mock_network: Network, mock_failure_policy_set: FailurePolicySet
-) -> FailureManager:
- """Create a FailureManager instance for testing."""
- return FailureManager(
- network=mock_network,
- failure_policy_set=mock_failure_policy_set,
- policy_name="test_policy",
- )
-
-
-def mock_analysis_func(
- network_view: NetworkView, **kwargs
-) -> list[tuple[str, str, float]]:
- """Mock analysis function for testing."""
- return [("src1", "dst1", 100.0), ("src2", "dst2", 200.0)]
-
-
-class TestFailureManager:
- """Focused tests for `FailureManager` behavior and validations."""
-
- def test_initialization(
- self, mock_network: Network, mock_failure_policy_set: FailurePolicySet
- ) -> None:
- fm = FailureManager(
- network=mock_network,
- failure_policy_set=mock_failure_policy_set,
- policy_name="test_policy",
- )
-
- assert fm.network is mock_network
- assert fm.failure_policy_set is mock_failure_policy_set
- assert fm.policy_name == "test_policy"
-
- def test_get_failure_policy_with_named_policy(
- self, failure_manager: FailureManager
- ) -> None:
- policy = failure_manager.get_failure_policy()
-
- cast(
- MagicMock, failure_manager.failure_policy_set.get_policy
- ).assert_called_once_with("test_policy")
- assert policy is not None
-
- def test_get_failure_policy_with_default_policy(
- self, mock_network: Network, mock_failure_policy_set: FailurePolicySet
- ) -> None:
- fm = FailureManager(
- network=mock_network,
- failure_policy_set=mock_failure_policy_set,
- policy_name=None,
- )
-
- policy = fm.get_failure_policy()
- assert policy is None
-
- def test_get_failure_policy_not_found(
- self, mock_network: Network, mock_failure_policy_set: FailurePolicySet
- ) -> None:
- cast(MagicMock, mock_failure_policy_set.get_policy).side_effect = KeyError(
- "Policy not found"
- )
-
- fm = FailureManager(
- network=mock_network,
- failure_policy_set=mock_failure_policy_set,
- policy_name="nonexistent_policy",
- )
-
- with pytest.raises(ValueError, match="not found in scenario"):
- fm.get_failure_policy()
-
- def test_compute_exclusions_no_policy(
- self, mock_network: Network, mock_failure_policy_set: FailurePolicySet
- ) -> None:
- fm = FailureManager(
- network=mock_network,
- failure_policy_set=mock_failure_policy_set,
- policy_name=None,
- )
-
- excluded_nodes, excluded_links = fm.compute_exclusions()
- assert excluded_nodes == set()
- assert excluded_links == set()
-
- def test_compute_exclusions_with_policy(
- self, failure_manager: FailureManager
- ) -> None:
- policy = failure_manager.get_failure_policy()
-
- excluded_nodes, excluded_links = failure_manager.compute_exclusions(
- policy=policy, seed_offset=42
- )
-
- assert len(excluded_nodes) > 0 or len(excluded_links) > 0
-
- @patch("ngraph.failure.manager.manager.NetworkView.from_excluded_sets")
- def test_create_network_view_with_exclusions(
- self, mock_from_excluded_sets: MagicMock, failure_manager: FailureManager
- ) -> None:
- mock_network_view = MagicMock(spec=NetworkView)
- mock_from_excluded_sets.return_value = mock_network_view
-
- excluded_nodes = {"node1"}
- excluded_links = {"link1"}
-
- result = failure_manager.create_network_view(excluded_nodes, excluded_links)
-
- mock_from_excluded_sets.assert_called_once_with(
- failure_manager.network,
- excluded_nodes=excluded_nodes,
- excluded_links=excluded_links,
- )
- assert result is mock_network_view
-
- @patch("ngraph.failure.manager.manager.NetworkView.from_excluded_sets")
- def test_create_network_view_no_exclusions(
- self, mock_from_excluded_sets: MagicMock, failure_manager: FailureManager
- ) -> None:
- mock_network_view = MagicMock(spec=NetworkView)
- mock_from_excluded_sets.return_value = mock_network_view
-
- result = failure_manager.create_network_view()
-
- mock_from_excluded_sets.assert_called_once_with(
- failure_manager.network,
- excluded_nodes=set(),
- excluded_links=set(),
- )
- assert result is mock_network_view
-
- def test_run_single_failure_scenario(self, failure_manager: FailureManager) -> None:
- result = failure_manager.run_single_failure_scenario(
- mock_analysis_func, test_param="value"
- )
- assert result == [("src1", "dst1", 100.0), ("src2", "dst2", 200.0)]
-
- def test_run_monte_carlo_analysis_single_iteration(
- self, failure_manager: FailureManager
- ) -> None:
- result = failure_manager.run_monte_carlo_analysis(
- analysis_func=mock_analysis_func, iterations=1, test_param="value"
- )
-
- assert "results" in result
- assert "failure_patterns" in result
- assert "metadata" in result
- assert len(result["results"]) == 1
- assert result["results"][0] == [
- ("src1", "dst1", 100.0),
- ("src2", "dst2", 200.0),
- ]
- assert result["metadata"]["iterations"] == 1
-
- def test_run_monte_carlo_analysis_multiple_iterations(
- self, failure_manager: FailureManager
- ) -> None:
- result = failure_manager.run_monte_carlo_analysis(
- analysis_func=mock_analysis_func,
- iterations=3,
- parallelism=1,
- test_param="value",
- )
-
- assert len(result["results"]) == 3
- assert result["metadata"]["iterations"] == 3
- for res in result["results"]:
- assert res == [("src1", "dst1", 100.0), ("src2", "dst2", 200.0)]
-
- def test_run_monte_carlo_analysis_with_baseline(
- self, failure_manager: FailureManager
- ) -> None:
- result = failure_manager.run_monte_carlo_analysis(
- analysis_func=mock_analysis_func,
- iterations=3,
- baseline=True,
- parallelism=1,
- test_param="value",
- )
-
- assert len(result["results"]) == 3
- assert result["metadata"]["baseline"] is True
-
- def test_run_monte_carlo_analysis_store_failure_patterns(
- self, failure_manager: FailureManager
- ) -> None:
- result = failure_manager.run_monte_carlo_analysis(
- analysis_func=mock_analysis_func,
- iterations=2,
- store_failure_patterns=True,
- parallelism=1,
- test_param="value",
- )
-
- assert len(result["failure_patterns"]) == 2
- for pattern in result["failure_patterns"]:
- assert "iteration_index" in pattern
- assert "is_baseline" in pattern
- assert "excluded_nodes" in pattern
- assert "excluded_links" in pattern
-
- def test_validation_errors(self, failure_manager: FailureManager) -> None:
- cast(
- MagicMock, failure_manager.failure_policy_set.get_policy
- ).return_value = None
-
- with pytest.raises(
- ValueError, match="iterations=2 has no effect without a failure policy"
- ):
- failure_manager.run_monte_carlo_analysis(
- analysis_func=mock_analysis_func, iterations=2, baseline=False
- )
-
- with pytest.raises(ValueError, match="baseline=True requires iterations >= 2"):
- failure_manager.run_monte_carlo_analysis(
- analysis_func=mock_analysis_func, iterations=1, baseline=True
- )
-
- @patch("ngraph.failure.manager.manager.ProcessPoolExecutor")
- @patch("ngraph.failure.manager.manager.pickle")
- def test_parallel_execution(
- self,
- mock_pickle: MagicMock,
- mock_pool_executor: MagicMock,
- failure_manager: FailureManager,
- ) -> None:
- """When deduplication collapses iterations to one unique pattern, execution
- may run serially even if parallelism > 1. Validate results shape and metadata
- without asserting executor usage.
- """
- mock_pickle.dumps.return_value = b"fake_network_data"
-
- mock_pool = MagicMock()
- mock_pool_executor.return_value.__enter__.return_value = mock_pool
-
- result = failure_manager.run_monte_carlo_analysis(
- analysis_func=mock_analysis_func,
- iterations=2,
- parallelism=2,
- )
-
- assert len(result["results"]) == 2
- assert result["metadata"]["parallelism"] == 2
-
-
-class TestFailureManagerTopLevelMatching:
- """Tests for compute_exclusions merged attribute view correctness."""
-
- def test_node_matching_on_top_level_disabled(
- self, mock_network: Network, mock_failure_policy_set: FailurePolicySet
- ) -> None:
- mock_network.nodes["node1"].disabled = True
- mock_network.nodes["node2"].disabled = False
-
- from ngraph.failure.policy import (
- FailureCondition,
- FailureMode,
- FailurePolicy,
- FailureRule,
- )
-
- rule = FailureRule(
- entity_scope="node",
- conditions=[FailureCondition(attr="disabled", operator="==", value=True)],
- logic="and",
- rule_type="all",
- )
- policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
-
- fm = FailureManager(
- network=mock_network,
- failure_policy_set=mock_failure_policy_set,
- policy_name=None,
- )
-
- excluded_nodes, excluded_links = fm.compute_exclusions(policy=policy)
-
- assert "node1" in excluded_nodes
- assert "node2" not in excluded_nodes
- assert excluded_links == set()
-
- def test_link_matching_on_top_level_capacity(
- self, mock_network: Network, mock_failure_policy_set: FailurePolicySet
- ) -> None:
- mock_network.links["link1"].capacity = 100.0
- mock_network.links["link2"].capacity = 50.0
-
- from ngraph.failure.policy import (
- FailureCondition,
- FailureMode,
- FailurePolicy,
- FailureRule,
- )
-
- rule = FailureRule(
- entity_scope="link",
- conditions=[FailureCondition(attr="capacity", operator=">", value=60.0)],
- logic="and",
- rule_type="all",
- )
- policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
-
- fm = FailureManager(
- network=mock_network,
- failure_policy_set=mock_failure_policy_set,
- policy_name=None,
- )
-
- excluded_nodes, excluded_links = fm.compute_exclusions(policy=policy)
-
- assert "link1" in excluded_links
- assert "link2" not in excluded_links
- assert excluded_nodes == set()
-
- def test_risk_group_expansion_uses_top_level_risk_groups(
- self, mock_network: Network, mock_failure_policy_set: FailurePolicySet
- ) -> None:
- mock_risk_group = MagicMock()
- mock_risk_group.name = "rg1"
- mock_risk_group.children = []
- mock_network.risk_groups = {"rg1": mock_risk_group}
- mock_network.nodes["node1"].risk_groups = {"rg1"}
- mock_network.links["link1"].risk_groups = {"rg1"}
-
- from ngraph.failure.policy import FailureMode, FailurePolicy, FailureRule
-
- rule = FailureRule(entity_scope="risk_group", rule_type="all")
- policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
- policy.apply_failures = MagicMock(return_value=["rg1"]) # type: ignore[attr-defined]
-
- fm = FailureManager(
- network=mock_network,
- failure_policy_set=mock_failure_policy_set,
- policy_name=None,
- )
-
- excluded_nodes, excluded_links = fm.compute_exclusions(policy=policy)
-
- assert "node1" in excluded_nodes
- assert "link1" in excluded_links
-
-
-class TestFailureManagerErrorHandling:
- """Test error handling and edge cases in FailureManager."""
-
- def test_run_monte_carlo_parallel_execution_error(
- self, failure_manager: FailureManager
- ) -> None:
- """Force two unique patterns so the parallel path is taken, then assert
- errors in worker execution propagate.
- """
- with patch(
- "ngraph.failure.manager.manager.ProcessPoolExecutor"
- ) as mock_pool_executor:
- mock_pool = MagicMock()
- mock_pool_executor.return_value.__enter__.return_value = mock_pool
- mock_pool.map.side_effect = RuntimeError("Parallel execution failed")
-
- with (
- patch(
- "ngraph.failure.manager.manager.pickle.dumps",
- return_value=b"fake_data",
- ),
- patch.object(
- failure_manager,
- "compute_exclusions",
- side_effect=[({"n1"}, set()), ({"n2"}, set())],
- ),
- ):
- with pytest.raises(RuntimeError, match="Parallel execution failed"):
- failure_manager.run_monte_carlo_analysis(
- analysis_func=mock_analysis_func, iterations=2, parallelism=2
- )
-
-
-class TestFailureManagerConvenienceMethods:
- """Test convenience methods for specific analysis types against new contracts."""
-
- @patch("ngraph.monte_carlo.functions.max_flow_analysis")
- def test_run_max_flow_monte_carlo(
- self, mock_analysis_func, failure_manager: FailureManager
- ) -> None:
- mock_analysis_func.return_value = [
- ("src", "dst", 100.0)
- ] # unused; type compatibility
-
- mock_mc_result = {
- "results": [[("src", "dst", 100.0)], [("src", "dst", 90.0)]],
- "failure_patterns": [],
- "metadata": {"iterations": 2},
- }
-
- with patch.object(
- failure_manager, "run_monte_carlo_analysis", return_value=mock_mc_result
- ):
- out = failure_manager.run_max_flow_monte_carlo(
- source_path="datacenter.*",
- sink_path="edge.*",
- mode="combine",
- iterations=2,
- parallelism=1,
- )
-
- assert out == mock_mc_result
-
- @patch("ngraph.monte_carlo.functions.demand_placement_analysis")
- def test_run_demand_placement_monte_carlo(
- self, mock_analysis_func, failure_manager: FailureManager
- ) -> None:
- mock_analysis_func.return_value = {"total_placed": 100.0}
-
- mock_traffic_set = MagicMock()
- mock_demand = MagicMock()
- mock_demand.source_path = "A"
- mock_demand.sink_path = "B"
- mock_demand.demand = 100.0
- mock_traffic_set.demands = [mock_demand]
-
- mock_mc_result = {
- "results": [{"total_placed": 100.0}],
- "failure_patterns": [],
- "metadata": {"iterations": 1},
- }
-
- with patch.object(
- failure_manager, "run_monte_carlo_analysis", return_value=mock_mc_result
- ):
- out = failure_manager.run_demand_placement_monte_carlo(
- demands_config=mock_traffic_set, iterations=1, parallelism=1
- )
-
- assert out == mock_mc_result
-
- @patch("ngraph.monte_carlo.functions.sensitivity_analysis")
- def test_string_flow_placement_conversion(
- self, mock_analysis_func, failure_manager: FailureManager
- ) -> None:
- mock_mc_result = {
- "results": [[("src", "dst", 100.0)]],
- "failure_patterns": [],
- "metadata": {"iterations": 1},
- }
-
- with patch.object(
- failure_manager, "run_monte_carlo_analysis", return_value=mock_mc_result
- ) as mock_mc:
- failure_manager.run_max_flow_monte_carlo(
- source_path="src.*",
- sink_path="dst.*",
- flow_placement="EQUAL_BALANCED",
- iterations=1,
- )
-
- call_kwargs = mock_mc.call_args[1]
- from ngraph.algorithms.base import FlowPlacement
-
- assert call_kwargs["flow_placement"] == FlowPlacement.EQUAL_BALANCED
-
- def test_invalid_flow_placement_string_max_flow(
- self, failure_manager: FailureManager
- ) -> None:
- with pytest.raises(ValueError) as exc_info:
- failure_manager.run_max_flow_monte_carlo(
- source_path="src.*",
- sink_path="dst.*",
- flow_placement="INVALID_OPTION",
- iterations=1,
- )
-
- error_msg = str(exc_info.value)
- assert "Invalid flow_placement 'INVALID_OPTION'" in error_msg
- assert "Valid values are: PROPORTIONAL, EQUAL_BALANCED" in error_msg
-
- def test_invalid_flow_placement_string_sensitivity(
- self, failure_manager: FailureManager
- ) -> None:
- with pytest.raises(ValueError) as exc_info:
- failure_manager.run_sensitivity_monte_carlo(
- source_path="src.*",
- sink_path="dst.*",
- flow_placement="ANOTHER_INVALID",
- iterations=1,
- )
-
- error_msg = str(exc_info.value)
- assert "Invalid flow_placement 'ANOTHER_INVALID'" in error_msg
- assert "Valid values are: PROPORTIONAL, EQUAL_BALANCED" in error_msg
-
- @patch("ngraph.monte_carlo.functions.sensitivity_analysis")
- def test_valid_string_flow_placement_sensitivity(
- self, mock_analysis_func, failure_manager: FailureManager
- ) -> None:
- mock_mc_result = {
- "results": [{"component1": {"score": 0.5}}],
- "failure_patterns": [],
- "metadata": {"iterations": 1},
- }
-
- with patch.object(
- failure_manager, "run_monte_carlo_analysis", return_value=mock_mc_result
- ) as mock_mc:
- failure_manager.run_sensitivity_monte_carlo(
- source_path="src.*",
- sink_path="dst.*",
- flow_placement="proportional",
- iterations=1,
- )
-
- call_kwargs = mock_mc.call_args[1]
- from ngraph.algorithms.base import FlowPlacement
-
- assert call_kwargs["flow_placement"] == FlowPlacement.PROPORTIONAL
-
- def test_case_insensitive_flow_placement_conversion(
- self, failure_manager: FailureManager
- ) -> None:
- from ngraph.algorithms.base import FlowPlacement
-
- mock_mc_result = {
- "results": [[("src", "dst", 100.0)]],
- "failure_patterns": [],
- "metadata": {"iterations": 1},
- }
-
- with patch.object(
- failure_manager, "run_monte_carlo_analysis", return_value=mock_mc_result
- ) as mock_mc:
- failure_manager.run_max_flow_monte_carlo(
- source_path="src.*",
- sink_path="dst.*",
- flow_placement="proportional",
- iterations=1,
- )
-
- call_kwargs = mock_mc.call_args[1]
- assert call_kwargs["flow_placement"] == FlowPlacement.PROPORTIONAL
-
- with patch.object(
- failure_manager, "run_monte_carlo_analysis", return_value=mock_mc_result
- ) as mock_mc:
- failure_manager.run_max_flow_monte_carlo(
- source_path="src.*",
- sink_path="dst.*",
- flow_placement="Equal_Balanced",
- iterations=1,
- )
-
- call_kwargs = mock_mc.call_args[1]
- assert call_kwargs["flow_placement"] == FlowPlacement.EQUAL_BALANCED
diff --git a/tests/flows/__init__.py b/tests/flows/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/flows/test_flow.py b/tests/flows/test_flow.py
deleted file mode 100644
index 3f03291..0000000
--- a/tests/flows/test_flow.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.flows.flow import Flow, FlowIndex
-from ngraph.paths.bundle import PathBundle
-
-
-class TestFlow:
- def test_flow_place_and_remove(self, square1):
- flow_graph = init_flow_graph(square1)
- path_bundle = PathBundle(
- "A", "C", {"A": {}, "C": {"B": [1]}, "B": {"A": [0]}}, 2
- )
- flow = Flow(path_bundle, FlowIndex("A", "C", "test_flow", 0))
-
- # No placement below threshold
- placed_flow, remaining_flow = flow.place_flow(
- flow_graph, 0, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert placed_flow == 0
- assert remaining_flow == 0
-
- # Place positive amount
- placed_flow, remaining_flow = flow.place_flow(
- flow_graph, 1, flow_placement=FlowPlacement.EQUAL_BALANCED
- )
- assert placed_flow == 1
- assert flow.placed_flow == 1
- assert remaining_flow == 0
- assert flow_graph.get_edge_attr(0)["flow"] == 1
-
- # Remove flow from graph
- flow.remove_flow(flow_graph)
- assert flow.placed_flow == 0
- assert flow_graph.get_edge_attr(0)["flow"] == 0
diff --git a/tests/flows/test_policy.py b/tests/flows/test_policy.py
deleted file mode 100644
index 1586011..0000000
--- a/tests/flows/test_policy.py
+++ /dev/null
@@ -1,772 +0,0 @@
-import pytest
-
-from ngraph.algorithms.base import (
- MIN_FLOW,
- EdgeSelect,
- FlowPlacement,
- PathAlg,
-)
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.flows.flow import FlowIndex
-from ngraph.flows.policy import FlowPolicy
-from ngraph.paths.bundle import PathBundle
-
-
-class TestFlowPolicy:
- def test_flow_policy_place_demand_1(self, square1):
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
- r = init_flow_graph(square1)
- flow_policy.place_demand(r, "A", "C", "test_flow", 1)
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", "test_flow", 0): 1.0},
- "cost": 1,
- },
- ),
- 1: (
- "B",
- "C",
- 1,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", "test_flow", 0): 1.0},
- "cost": 1,
- },
- ),
- 2: ("A", "D", 2, {"capacity": 2, "flow": 0, "flows": {}, "cost": 2}),
- 3: ("D", "C", 3, {"capacity": 2, "flow": 0, "flows": {}, "cost": 2}),
- }
-
- def test_flow_policy_place_demand_2(self, square1):
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
- r = init_flow_graph(square1)
- flow_policy.place_demand(r, "A", "C", "test_flow", 2)
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", "test_flow", 0): 1.0},
- "cost": 1,
- },
- ),
- 1: (
- "B",
- "C",
- 1,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", "test_flow", 0): 1.0},
- "cost": 1,
- },
- ),
- 2: (
- "A",
- "D",
- 2,
- {
- "capacity": 2,
- "flow": 1.0,
- "flows": {("A", "C", "test_flow", 1): 1.0},
- "cost": 2,
- },
- ),
- 3: (
- "D",
- "C",
- 3,
- {
- "capacity": 2,
- "flow": 1.0,
- "flows": {("A", "C", "test_flow", 1): 1.0},
- "cost": 2,
- },
- ),
- }
-
- def test_flow_policy_place_demand_3(self, square1):
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- max_flow_count=1,
- )
- r = init_flow_graph(square1)
- placed_flow, remaining_flow = flow_policy.place_demand(
- r, "A", "C", "test_flow", 2
- )
- assert placed_flow == 2
- assert remaining_flow == 0
- assert r.get_edges() == {
- 0: ("A", "B", 0, {"capacity": 1, "flow": 0.0, "flows": {}, "cost": 1}),
- 1: ("B", "C", 1, {"capacity": 1, "flow": 0.0, "flows": {}, "cost": 1}),
- 2: (
- "A",
- "D",
- 2,
- {
- "capacity": 2,
- "flow": 2.0,
- "flows": {("A", "C", "test_flow", 0): 2.0},
- "cost": 2,
- },
- ),
- 3: (
- "D",
- "C",
- 3,
- {
- "capacity": 2,
- "flow": 2.0,
- "flows": {("A", "C", "test_flow", 0): 2.0},
- "cost": 2,
- },
- ),
- }
-
- def test_flow_policy_place_demand_4(self, square1):
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
- r = init_flow_graph(square1)
- placed_flow, remaining_flow = flow_policy.place_demand(
- r, "A", "C", "test_flow", 5
- )
- assert placed_flow == 3
- assert remaining_flow == 2
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", "test_flow", 0): 1.0},
- "cost": 1,
- },
- ),
- 1: (
- "B",
- "C",
- 1,
- {
- "capacity": 1,
- "flow": 1.0,
- "flows": {("A", "C", "test_flow", 0): 1.0},
- "cost": 1,
- },
- ),
- 2: (
- "A",
- "D",
- 2,
- {
- "capacity": 2,
- "flow": 2.0,
- "flows": {("A", "C", "test_flow", 1): 2.0},
- "cost": 2,
- },
- ),
- 3: (
- "D",
- "C",
- 3,
- {
- "capacity": 2,
- "flow": 2.0,
- "flows": {("A", "C", "test_flow", 1): 2.0},
- "cost": 2,
- },
- ),
- }
-
- def test_flow_policy_place_demand_5(self, square3):
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING,
- multipath=False,
- )
- r = init_flow_graph(square3)
- placed_flow, remaining_flow = flow_policy.place_demand(
- r, "A", "C", "test_flow", 200
- )
- assert placed_flow == 175
- assert remaining_flow == 25
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "capacity": 100,
- "flow": 100.0,
- "flows": {("A", "C", "test_flow", 0): 100.0},
- "cost": 1,
- },
- ),
- 1: (
- "B",
- "C",
- 1,
- {
- "capacity": 125,
- "flow": 125.0,
- "flows": {
- ("A", "C", "test_flow", 0): 100.0,
- ("A", "C", "test_flow", 2): 25.0,
- },
- "cost": 1,
- },
- ),
- 2: (
- "A",
- "D",
- 2,
- {
- "capacity": 75,
- "flow": 75.0,
- "flows": {
- ("A", "C", "test_flow", 1): 50.0,
- ("A", "C", "test_flow", 2): 25.0,
- },
- "cost": 1,
- },
- ),
- 3: (
- "D",
- "C",
- 3,
- {
- "capacity": 50,
- "flow": 50.0,
- "flows": {("A", "C", "test_flow", 1): 50.0},
- "cost": 1,
- },
- ),
- 4: ("B", "D", 4, {"capacity": 50, "flow": 0, "flows": {}, "cost": 1}),
- 5: (
- "D",
- "B",
- 5,
- {
- "capacity": 50,
- "flow": 25.0,
- "flows": {("A", "C", "test_flow", 2): 25.0},
- "cost": 1,
- },
- ),
- }
-
- def test_flow_policy_place_demand_6(self, line1):
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING,
- multipath=False,
- min_flow_count=2,
- max_flow_count=2,
- )
- r = init_flow_graph(line1)
- placed_flow, remaining_flow = flow_policy.place_demand(
- r, "A", "C", "test_flow", 7
- )
- assert placed_flow == 5
- assert remaining_flow == 2
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "capacity": 5,
- "flow": 5.0,
- "flows": {
- FlowIndex(
- src_node="A",
- dst_node="C",
- flow_class="test_flow",
- flow_id=0,
- ): 2.5,
- FlowIndex(
- src_node="A",
- dst_node="C",
- flow_class="test_flow",
- flow_id=1,
- ): 2.5,
- },
- "cost": 1,
- },
- ),
- 1: ("B", "A", 1, {"capacity": 5, "flow": 0, "flows": {}, "cost": 1}),
- 2: ("B", "C", 2, {"capacity": 1, "flow": 0.0, "flows": {}, "cost": 1}),
- 3: ("C", "B", 3, {"capacity": 1, "flow": 0, "flows": {}, "cost": 1}),
- 4: (
- "B",
- "C",
- 4,
- {
- "capacity": 3,
- "flow": 2.5,
- "flows": {
- FlowIndex(
- src_node="A",
- dst_node="C",
- flow_class="test_flow",
- flow_id=1,
- ): 2.5
- },
- "cost": 1,
- },
- ),
- 5: ("C", "B", 5, {"capacity": 3, "flow": 0, "flows": {}, "cost": 1}),
- 6: (
- "B",
- "C",
- 6,
- {
- "capacity": 7,
- "flow": 2.5,
- "flows": {
- FlowIndex(
- src_node="A",
- dst_node="C",
- flow_class="test_flow",
- flow_id=0,
- ): 2.5
- },
- "cost": 2,
- },
- ),
- 7: ("C", "B", 7, {"capacity": 7, "flow": 0, "flows": {}, "cost": 2}),
- }
-
- def test_flow_policy_place_demand_7(self, square3):
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.SINGLE_MIN_COST_WITH_CAP_REMAINING,
- multipath=False,
- min_flow_count=3,
- max_flow_count=3,
- )
- r = init_flow_graph(square3)
- placed_flow, remaining_flow = flow_policy.place_demand(
- r, "A", "C", "test_flow", 200
- )
- assert round(placed_flow, 10) == 150
- assert round(remaining_flow, 10) == 50
- assert r.get_edges() == {
- 0: (
- "A",
- "B",
- 0,
- {
- "capacity": 100,
- "flow": 100.0,
- "flows": {
- FlowIndex(
- src_node="A",
- dst_node="C",
- flow_class="test_flow",
- flow_id=0,
- ): 50.0,
- FlowIndex(
- src_node="A",
- dst_node="C",
- flow_class="test_flow",
- flow_id=2,
- ): 49.99999999999999,
- },
- "cost": 1,
- },
- ),
- 1: (
- "B",
- "C",
- 1,
- {
- "capacity": 125,
- "flow": 100.0,
- "flows": {
- FlowIndex(
- src_node="A",
- dst_node="C",
- flow_class="test_flow",
- flow_id=0,
- ): 50.0,
- FlowIndex(
- src_node="A",
- dst_node="C",
- flow_class="test_flow",
- flow_id=2,
- ): 49.99999999999999,
- },
- "cost": 1,
- },
- ),
- 2: (
- "A",
- "D",
- 2,
- {
- "capacity": 75,
- "flow": 50.0,
- "flows": {
- FlowIndex(
- src_node="A",
- dst_node="C",
- flow_class="test_flow",
- flow_id=1,
- ): 50.0
- },
- "cost": 1,
- },
- ),
- 3: (
- "D",
- "C",
- 3,
- {
- "capacity": 50,
- "flow": 50.0,
- "flows": {
- FlowIndex(
- src_node="A",
- dst_node="C",
- flow_class="test_flow",
- flow_id=1,
- ): 50.0
- },
- "cost": 1,
- },
- ),
- 4: ("B", "D", 4, {"capacity": 50, "flow": 0, "flows": {}, "cost": 1}),
- 5: ("D", "B", 5, {"capacity": 50, "flow": 0.0, "flows": {}, "cost": 1}),
- }
-
- def test_flow_policy_place_demand_8(self, line1):
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.ALL_MIN_COST,
- multipath=True,
- max_flow_count=1,
- )
- r = init_flow_graph(line1)
- placed_flow, remaining_flow = flow_policy.place_demand(
- r, "A", "C", "test_flow", 7
- )
- assert round(placed_flow, 10) == 2
- assert round(remaining_flow, 10) == 5
-
- def test_flow_policy_place_demand_9(self, line1):
- """
- Algorithm must terminate gracefully via diminishing-returns cutoff,
- leaving remaining volume without raising.
- """
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.ALL_MIN_COST,
- multipath=True,
- max_flow_count=1000000,
- )
- r = init_flow_graph(line1)
- # Should not raise; should leave some volume unplaced in this configuration
- placed_flow, remaining_flow = flow_policy.place_demand(
- r, "A", "C", ("test_flow", "9"), 7
- )
- assert placed_flow >= 0.0
- assert remaining_flow >= 0.0
- # Expect not all volume is placed under this setup
- assert remaining_flow > 0.0
-
- def test_flow_policy_place_demand_normal_termination(self, line1):
- """
- Tests normal termination when algorithm naturally runs out of capacity.
- This should terminate gracefully without raising an exception, even if
- some volume remains unplaced.
- """
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING, # Capacity-aware
- multipath=True,
- max_flow_count=10, # Reasonable limit
- )
- r = init_flow_graph(line1)
- # Should terminate gracefully when capacity is exhausted
- placed_flow, remaining_flow = flow_policy.place_demand(
- r,
- "A",
- "C",
- "test_flow",
- 100, # Large demand that exceeds capacity
- )
- # Should place some flow but not all due to capacity constraints
- assert placed_flow >= 0
- assert remaining_flow >= 0
- assert placed_flow + remaining_flow == 100
- # Should place at least some flow (line1 has capacity of 5)
- assert placed_flow > 0
-
- def test_flow_policy_place_demand_max_iterations(self, line1):
- """
- Tests the maximum iteration limit safety net. EQUAL_BALANCED placement with a
- huge flow count forces many iterations, and a very low limit trips the error quickly.
- """
- # Create a flow policy with very low max_total_iterations for testing
- # Use EQUAL_BALANCED with unlimited flows to force many iterations
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.ALL_MIN_COST,
- multipath=True,
- max_flow_count=1000000, # High flow count to create many flows
- max_total_iterations=2, # Very low limit to trigger the error easily
- )
-
- r = init_flow_graph(line1)
-
- # This should hit the maximum iteration limit (2) before completing
- # because it tries to create many flows in EQUAL_BALANCED mode
- with pytest.raises(
- RuntimeError, match="Maximum iteration limit .* exceeded in place_demand"
- ):
- flow_policy.place_demand(r, "A", "C", "test_flow", 7)
-
- def test_flow_policy_configurable_iteration_limits(self, line1):
- """
- Tests that the iteration limit parameters are properly configurable
- and affect the behavior as expected.
- """
- # Test with custom limits
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.ALL_MIN_COST,
- multipath=True,
- max_flow_count=1000000,
- max_no_progress_iterations=5, # Very low limit
- max_total_iterations=20000, # High total limit
- )
-
- r = init_flow_graph(line1)
-
- # Should hit the no-progress limit before the total limit
- with pytest.raises(
- RuntimeError, match="5 consecutive iterations with no progress"
- ):
- flow_policy.place_demand(r, "A", "C", "test_flow", 7)
-
- # Test with default values (should work same as before)
- flow_policy_default = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
-
- # Should complete normally with defaults
- r2 = init_flow_graph(line1)
- placed_flow, remaining_flow = flow_policy_default.place_demand(
- r2, "A", "C", "test_flow", 3
- )
- assert placed_flow > 0
-
- def test_flow_policy_place_demand_10(self, square1):
- PATH_BUNDLE1 = PathBundle(
- "A", "C", {"A": {}, "C": {"B": [3]}, "B": {"A": [2]}}, 2
- )
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.ALL_MIN_COST,
- multipath=True,
- static_paths=[PATH_BUNDLE1],
- )
- r = init_flow_graph(square1)
- placed_flow, remaining_flow = flow_policy.place_demand(
- r,
- "A",
- "C",
- "test_flow",
- 3,
- )
- assert round(placed_flow, 10) == 2
- assert round(remaining_flow, 10) == 1
- assert (
- flow_policy.flows[
- FlowIndex(src_node="A", dst_node="C", flow_class="test_flow", flow_id=0)
- ].path_bundle
- == PATH_BUNDLE1
- )
- assert (
- flow_policy.flows[
- FlowIndex(src_node="A", dst_node="C", flow_class="test_flow", flow_id=0)
- ].placed_flow
- == 2
- )
-
- def test_flow_policy_place_demand_11(self, square1):
- PATH_BUNDLE1 = PathBundle(
- "A", "C", {"A": {}, "C": {"B": [3]}, "B": {"A": [2]}}, 2
- )
- PATH_BUNDLE2 = PathBundle(
- "A", "C", {"A": {}, "C": {"B": [3]}, "B": {"A": [2]}}, 2
- )
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.ALL_MIN_COST,
- multipath=True,
- static_paths=[PATH_BUNDLE1, PATH_BUNDLE2],
- )
- r = init_flow_graph(square1)
- placed_flow, remaining_flow = flow_policy.place_demand(
- r,
- "A",
- "C",
- "test_flow",
- 3,
- )
- assert round(placed_flow, 10) == 2
- assert round(remaining_flow, 10) == 1
- assert (
- flow_policy.flows[
- FlowIndex(src_node="A", dst_node="C", flow_class="test_flow", flow_id=1)
- ].path_bundle
- == PATH_BUNDLE2
- )
- assert (
- flow_policy.flows[
- FlowIndex(src_node="A", dst_node="C", flow_class="test_flow", flow_id=1)
- ].placed_flow
- == 1
- )
-
- def test_flow_policy_place_demand_12(self, square1):
- PATH_BUNDLE1 = PathBundle(
- "A", "C", {"A": {}, "C": {"B": [1]}, "B": {"A": [0]}}, 2
- )
- PATH_BUNDLE2 = PathBundle(
- "A", "C", {"A": {}, "C": {"B": [3]}, "B": {"A": [2]}}, 2
- )
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.ALL_MIN_COST,
- multipath=True,
- static_paths=[PATH_BUNDLE1, PATH_BUNDLE2],
- )
- r = init_flow_graph(square1)
- placed_flow, remaining_flow = flow_policy.place_demand(
- r,
- "A",
- "C",
- "test_flow",
- 3,
- )
-
- assert (
- abs(2 - placed_flow) <= MIN_FLOW
- ) # inclusive: values < MIN_FLOW zeroed; == MIN_FLOW retained
- assert (
- abs(1 - remaining_flow) <= MIN_FLOW
- ) # inclusive: values < MIN_FLOW zeroed; == MIN_FLOW retained
- assert (
- flow_policy.flows[
- FlowIndex(src_node="A", dst_node="C", flow_class="test_flow", flow_id=1)
- ].path_bundle
- == PATH_BUNDLE2
- )
- assert (
- abs(
- flow_policy.flows[
- FlowIndex(
- src_node="A", dst_node="C", flow_class="test_flow", flow_id=1
- )
- ].placed_flow
- - 1
- )
- <= MIN_FLOW # inclusive: values < MIN_FLOW zeroed; == MIN_FLOW retained
- )
-
- # Constructor Validation: EQUAL_BALANCED requires max_flow_count
- def test_flow_policy_constructor_balanced_requires_max_flow(self):
- with pytest.raises(
- ValueError, match="max_flow_count must be set for EQUAL_BALANCED"
- ):
- FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=False,
- )
-
- # Constructor Validation: static_paths length must match max_flow_count if provided
- def test_flow_policy_constructor_static_paths_mismatch(self):
- path_bundle = PathBundle(
- "A", "C", {"A": {}, "C": {"B": [1]}, "B": {"A": [0]}}, cost=2
- )
- with pytest.raises(
- ValueError, match="must be equal to the number of static paths"
- ):
- FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- edge_select=EdgeSelect.ALL_MIN_COST,
- multipath=True,
- static_paths=[path_bundle], # length=1
- max_flow_count=2, # mismatch
- )
-
- # Test remove_demand
- # Ensures that removing demand clears flows from the graph but not from FlowPolicy.
- def test_flow_policy_remove_demand(self, square1):
- flow_policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
- r = init_flow_graph(square1)
- flow_policy.place_demand(r, "A", "C", "test_flow", 1)
- assert len(flow_policy.flows) > 0
- # Remove the demand entirely
- flow_policy.remove_demand(r)
-
- # Check that the flows are still in the policy but not in the graph
- assert len(flow_policy.flows) > 0
-
- # Check that edges in the graph are at zero flow
- for _, _, _, attr in r.get_edges().values():
- assert attr["flow"] == 0
- assert attr["flows"] == {}
diff --git a/tests/flows/test_policy_graph_churn.py b/tests/flows/test_policy_graph_churn.py
deleted file mode 100644
index f14bbca..0000000
--- a/tests/flows/test_policy_graph_churn.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from __future__ import annotations
-
-from ngraph.algorithms.base import EdgeSelect, PathAlg
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.algorithms.placement import FlowPlacement
-from ngraph.flows.policy import FlowPolicy
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-def test_policy_handles_graph_rebuild_and_stale_flows() -> None:
- # Initial graph with path A->B->C
- g1 = StrictMultiDiGraph()
- for n in ("A", "B", "C"):
- g1.add_node(n)
- g1.add_edge("A", "B", capacity=5, cost=1)
- g1.add_edge("B", "C", capacity=5, cost=1)
- init_flow_graph(g1)
-
- policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
-
- placed, rem = policy.place_demand(g1, "A", "C", "cls", 3.0)
- assert placed == 3.0 and rem == 0.0
- assert len(policy.flows) > 0
-
- # Build a new graph (e.g., from a fresh Network.to_strict_multidigraph) whose edge keys differ from the old graph's
- g2 = StrictMultiDiGraph()
- for n in ("A", "B", "C"):
- g2.add_node(n)
- # Same topology but new edge ids
- g2.add_edge("A", "B", capacity=5, cost=1)
- g2.add_edge("B", "C", capacity=5, cost=1)
- init_flow_graph(g2)
-
- # Next placement on new graph must succeed; policy should drop stale flows and recreate
- placed2, rem2 = policy.place_demand(g2, "A", "C", "cls", 2.0)
- assert placed2 == 2.0 and rem2 == 0.0
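
The churn test relies on FlowPolicy noticing that its recorded edge IDs no longer exist in the rebuilt graph. A hypothetical staleness check in that spirit (the real policy logic is internal; has_edge_by_id is the StrictMultiDiGraph method exercised elsewhere in this diff):

def flow_is_stale(graph, edge_ids):
    # A flow is stale if any edge it was placed on vanished from the graph.
    return any(not graph.has_edge_by_id(eid) for eid in edge_ids)
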
diff --git a/tests/flows/test_policy_perf_regression.py b/tests/flows/test_policy_perf_regression.py
deleted file mode 100644
index 208d8ff..0000000
--- a/tests/flows/test_policy_perf_regression.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from __future__ import annotations
-
-import time
-
-from ngraph.algorithms.base import EdgeSelect, PathAlg
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.algorithms.placement import FlowPlacement
-from ngraph.flows.policy import FlowPolicy
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-def _grid_graph(n: int) -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- # Build n x n grid with unit capacity/cost edges right/down
- for i in range(n):
- for j in range(n):
- g.add_node((i, j))
- for i in range(n):
- for j in range(n):
- if j + 1 < n:
- g.add_edge((i, j), (i, j + 1), capacity=1, cost=1)
- if i + 1 < n:
- g.add_edge((i, j), (i + 1, j), capacity=1, cost=1)
- return g
-
-
-def test_policy_spf_fastpath_is_used_for_common_selectors() -> None:
- g = _grid_graph(20) # 400 nodes, ~760 edges
- init_flow_graph(g)
-
- policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
-
- t0 = time.perf_counter()
- placed, rem = policy.place_demand(g, (0, 0), (19, 19), "cls", 1.0)
- t1 = time.perf_counter()
-
- # Sanity checks
- assert placed >= 0.0 and rem >= 0.0
-
- # Heuristic perf guardrail: should complete within a reasonable bound on a grid
- # (ensures we aren't accidentally using the generic edge_select path)
- assert (t1 - t0) < 0.5
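
The 0.5 s bound above is a single wall-clock sample, which can be flaky on loaded CI machines. One hedged alternative (illustrative only, not what the repo does) is to time several runs and gate on the median:

import time

def median_seconds(fn, runs=5):
    samples = []
    for _ in range(runs):
        t0 = time.perf_counter()
        fn()
        samples.append(time.perf_counter() - t0)
    return sorted(samples)[len(samples) // 2]
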
diff --git a/tests/flows/test_policy_selector_cache.py b/tests/flows/test_policy_selector_cache.py
deleted file mode 100644
index c30973e..0000000
--- a/tests/flows/test_policy_selector_cache.py
+++ /dev/null
@@ -1,227 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Callable
-
-import pytest
-
-from ngraph.algorithms.base import EdgeSelect, PathAlg
-from ngraph.algorithms.flow_init import init_flow_graph
-from ngraph.algorithms.placement import FlowPlacement
-from ngraph.flows.policy import FlowPolicy
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-def _simple_graph() -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_node("C")
- g.add_edge("A", "B", cost=1, capacity=5)
- # Two parallel edges B->C to allow exclusions without disconnecting
- g.add_edge("B", "C", cost=1, capacity=5)
- g.add_edge("B", "C", cost=1, capacity=5)
- return g
-
-
-def test_edge_selector_cached_without_custom_func(
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- # Count how many times edge_select_fabric is constructed
- from ngraph.algorithms import edge_select as edge_select_mod
-
- calls: dict[str, int] = {"n": 0}
- original_fabric: Callable[..., Any] = edge_select_mod.edge_select_fabric
-
- def counting_fabric(*args: Any, **kwargs: Any): # type: ignore[no-untyped-def]
- calls["n"] += 1
- return original_fabric(*args, **kwargs)
-
- monkeypatch.setattr(edge_select_mod, "edge_select_fabric", counting_fabric)
-
- policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
-
- g = _simple_graph()
- init_flow_graph(g)
-
- # First path-bundle construction may use SPF fast path (no selector build)
- pb1 = policy._get_path_bundle(g, "A", "C")
- assert pb1 is not None
- first_calls = calls["n"]
- assert first_calls in (0, 1)
-
- # Second call with same effective select_value reuses the prior behavior:
- # - fast path: no selector, count stays the same
- # - cached selector: no additional builds
- pb2 = policy._get_path_bundle(g, "A", "C")
- assert pb2 is not None
- assert calls["n"] == first_calls
-
- # Changing effective select_value (via min_flow) should trigger a new selector build
- pb3 = policy._get_path_bundle(g, "A", "C", min_flow=0.5)
- assert pb3 is not None
- # Forcing a min_flow disables fast path and should construct a selector at least once
- assert calls["n"] >= first_calls + 1
-
- # Exclusions should NOT change cached callable construction count (selectors are exclusion-agnostic)
- # Ensure that using excluded_edges/nodes does not rebuild the selector and SPF still succeeds
- # Exclude one of the parallel B->C edges
- # Find one B->C edge id
- some_edge_id = next(
- eid for eid, (u, v, _k, _a) in g.get_edges().items() if u == "B" and v == "C"
- )
- pb4 = policy._get_path_bundle(g, "A", "C", excluded_edges={some_edge_id})
- assert pb4 is not None
- # With exclusions, SPF may internally construct a selector; do not assert call count.
-
-
-def test_edge_selector_not_cached_with_custom_func(
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- from ngraph.algorithms import edge_select as edge_select_mod
-
- calls: dict[str, int] = {"n": 0}
- original_fabric = edge_select_mod.edge_select_fabric
-
- def counting_fabric(*args: Any, **kwargs: Any): # type: ignore[no-untyped-def]
- calls["n"] += 1
- return original_fabric(*args, **kwargs)
-
- monkeypatch.setattr(edge_select_mod, "edge_select_fabric", counting_fabric)
-
- # Provide a trivial custom selector; caching must be bypassed
- def custom_selector(*_args: Any, **_kwargs: Any): # type: ignore[no-untyped-def]
- return 1.0, []
-
- policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.USER_DEFINED,
- multipath=True,
- edge_select_func=custom_selector,
- )
-
- g = _simple_graph()
- init_flow_graph(g)
-
- policy._get_path_bundle(g, "A", "C")
- policy._get_path_bundle(g, "A", "C")
-
- # Fabric is invoked both times since custom func disables cache
- assert calls["n"] == 2
-
-
-def test_cache_respects_node_exclusions_without_rebuild(
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- from ngraph.algorithms import edge_select as edge_select_mod
-
- calls: dict[str, int] = {"n": 0}
- original_fabric = edge_select_mod.edge_select_fabric
-
- def counting_fabric(*args: Any, **kwargs: Any): # type: ignore[no-untyped-def]
- calls["n"] += 1
- return original_fabric(*args, **kwargs)
-
- monkeypatch.setattr(edge_select_mod, "edge_select_fabric", counting_fabric)
-
- policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
-
- g = _simple_graph()
- init_flow_graph(g)
-
- # Build once (may be fast path with zero selector builds)
- assert policy._get_path_bundle(g, "A", "C") is not None
- initial = calls["n"]
- assert initial in (0, 1)
-
- # Exclude the source node "A"; SPF cannot find a path, so None is expected,
- # and the cached selector should not be rebuilt
- assert policy._get_path_bundle(g, "A", "C", excluded_nodes={"A"}) is None
- # If fast path was used initially (initial==0), SPF may construct a selector internally here.
- if initial > 0:
- assert calls["n"] == initial
-
-
-def test_cache_rebuilds_when_edge_select_changes(
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- from ngraph.algorithms import edge_select as edge_select_mod
-
- calls: dict[str, int] = {"n": 0}
- original_fabric = edge_select_mod.edge_select_fabric
-
- def counting_fabric(*args: Any, **kwargs: Any): # type: ignore[no-untyped-def]
- calls["n"] += 1
- return original_fabric(*args, **kwargs)
-
- monkeypatch.setattr(edge_select_mod, "edge_select_fabric", counting_fabric)
-
- policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- )
- g = _simple_graph()
- init_flow_graph(g)
-
- assert policy._get_path_bundle(g, "A", "C") is not None
- base_calls = calls["n"]
- assert base_calls in (0, 1)
-
- # If fast path was used (base_calls==0), force non-fast path to exercise cache behavior
- if base_calls == 0:
- policy.edge_select_value = 0.123
- assert policy._get_path_bundle(g, "A", "C") is not None
- base_calls = calls["n"]
-
- # Change the policy's edge_select; cache should miss and rebuild when selector is in use
- policy.edge_select = EdgeSelect.ALL_MIN_COST
- assert policy._get_path_bundle(g, "A", "C") is not None
- assert calls["n"] == base_calls + 1
-
-
-def test_cache_rebuilds_when_policy_edge_select_value_changes(
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- from ngraph.algorithms import edge_select as edge_select_mod
-
- calls: dict[str, int] = {"n": 0}
- original_fabric = edge_select_mod.edge_select_fabric
-
- def counting_fabric(*args: Any, **kwargs: Any): # type: ignore[no-untyped-def]
- calls["n"] += 1
- return original_fabric(*args, **kwargs)
-
- monkeypatch.setattr(edge_select_mod, "edge_select_fabric", counting_fabric)
-
- policy = FlowPolicy(
- path_alg=PathAlg.SPF,
- flow_placement=FlowPlacement.PROPORTIONAL,
- edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING,
- multipath=True,
- # Start with None; effective select value is None
- edge_select_value=None,
- )
- g = _simple_graph()
- init_flow_graph(g)
-
- assert policy._get_path_bundle(g, "A", "C") is not None
- first = calls["n"]
- # May be 0 with fast path or 1 if selector was built
- assert first in (0, 1)
-
- # Change edge_select_value to a numeric threshold; cache must rebuild
- policy.edge_select_value = 0.123
- assert policy._get_path_bundle(g, "A", "C") is not None
- assert calls["n"] >= first + 1
diff --git a/tests/graph/__init__.py b/tests/graph/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/graph/test_convert.py b/tests/graph/test_convert.py
deleted file mode 100644
index 9959732..0000000
--- a/tests/graph/test_convert.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import networkx as nx
-
-from ngraph.graph.convert import from_digraph, from_graph, to_digraph, to_graph
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-def build_sample_graph() -> StrictMultiDiGraph:
- graph = StrictMultiDiGraph()
- graph.add_node("A")
- graph.add_node("B")
- e1 = graph.add_edge("A", "B", weight=1)
- e2 = graph.add_edge("A", "B", weight=3)
- assert e1 != e2
- return graph
-
-
-def test_to_digraph_basic_and_revertible():
- g = build_sample_graph()
- nxg = to_digraph(g)
-
- assert isinstance(nxg, nx.DiGraph)
- assert nxg.has_edge("A", "B")
-
- # Revertible data preserved
- uv_edges = nxg.edges["A", "B"].get("_uv_edges")
- assert isinstance(uv_edges, list) and len(uv_edges) == 1
- (u, v, edges_dict) = uv_edges[0]
- assert (u, v) == ("A", "B")
- assert isinstance(edges_dict, dict) and len(edges_dict) == 2
-
-
-def test_from_digraph_roundtrip():
- g = build_sample_graph()
- nxg = to_digraph(g)
- roundtrip = from_digraph(nxg)
-
- # Expect two edges restored
- edges = roundtrip.get_edges()
- assert len(edges) == 2
- # Validate endpoints are correct
- for _, (src, dst, _, _) in edges.items():
- assert (src, dst) == ("A", "B")
-
-
-def test_to_graph_undirected_and_revertible():
- g = build_sample_graph()
- nxg = to_graph(g)
-
- assert isinstance(nxg, nx.Graph)
- assert nxg.has_edge("A", "B")
- uv_edges = nxg.edges["A", "B"].get("_uv_edges")
- assert isinstance(uv_edges, list) and len(uv_edges) == 1
- (u, v, edges_dict) = uv_edges[0]
- # For undirected graphs, order may vary; compare as set
- assert {u, v} == {"A", "B"}
- assert isinstance(edges_dict, dict) and len(edges_dict) == 2
-
-
-def test_edge_func_applied_in_conversion():
- g = build_sample_graph()
-
- def edge_func(graph: StrictMultiDiGraph, u, v, edges: dict) -> dict:
- # Sum the weights of all parallel edges
- weight_sum = 0
- for _edge_id, attr in edges.items():
- weight_sum += int(attr.get("weight", 0))
- return {"weight_sum": weight_sum}
-
- nxg = to_digraph(g, edge_func=edge_func)
- assert nxg.edges["A", "B"]["weight_sum"] == 4
-
-
-def test_from_graph_roundtrip():
- g = build_sample_graph()
- nxg = to_graph(g)
- roundtrip = from_graph(nxg)
-
- edges = roundtrip.get_edges()
- assert len(edges) == 2
- for _, (src, dst, _, _) in edges.items():
- assert (src, dst) == ("A", "B")
diff --git a/tests/graph/test_graph.py b/tests/graph/test_graph.py
deleted file mode 100644
index 274d957..0000000
--- a/tests/graph/test_graph.py
+++ /dev/null
@@ -1,381 +0,0 @@
-import networkx as nx
-import pytest
-
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-def test_init_empty_graph():
- """Ensure a newly initialized graph has no nodes or edges."""
- g = StrictMultiDiGraph()
- assert len(g) == 0 # No nodes
- assert g.get_edges() == {}
-
-
-def test_add_node():
- """Test adding a single node."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- assert "A" in g
- assert g.get_nodes() == {"A": {}}
-
-
-def test_add_node_duplicate():
- """Adding a node that already exists should raise ValueError."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- with pytest.raises(ValueError, match="already exists"):
- g.add_node("A") # Duplicate node -> ValueError
-
-
-def test_remove_node_basic():
- """Ensure node removal also cleans up node attributes and reduces graph size."""
- g = StrictMultiDiGraph()
- g.add_node("A", test_attr="NODE_A")
- g.add_node("B")
- assert len(g) == 2
- assert g.get_nodes()["A"]["test_attr"] == "NODE_A"
-
- g.remove_node("A")
- assert "A" not in g
- assert len(g) == 1
- assert g.get_nodes() == {"B": {}}
-
- # removing second node
- g.remove_node("B")
- assert len(g) == 0
-
-
-def test_remove_node_missing():
- """Removing a non-existent node should raise ValueError."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- with pytest.raises(ValueError, match="does not exist"):
- g.remove_node("B")
-
-
-def test_add_edge_basic():
- """Add an edge when both source and target nodes exist."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
-
- e_id = g.add_edge("A", "B", weight=10)
- assert e_id in g.get_edges()
- assert g.get_edge_attr(e_id) == {"weight": 10}
- assert g.get_edges()[e_id] == ("A", "B", e_id, {"weight": 10})
-
- # Nx adjacency check
- assert "B" in g.succ["A"]
- assert "A" in g.pred["B"]
-
-
-def test_add_edge_with_custom_key():
- """Add an edge with a user-supplied new key and confirm it is preserved."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
-
- custom_key = "my_custom_edge_id"
- returned_key = g.add_edge("A", "B", key=custom_key, weight=999)
-
- # Verify the returned key matches what we passed in
- assert returned_key == custom_key
-
- # Confirm the edge exists in the internal mapping
- assert custom_key in g.get_edges()
-
- # Check attributes
- assert g.get_edge_attr(custom_key) == {"weight": 999}
-
-
-def test_add_edge_nonexistent_nodes():
- """Adding an edge where either node doesn't exist should fail."""
- g = StrictMultiDiGraph()
- g.add_node("A")
-
- with pytest.raises(ValueError, match="Target node 'B' does not exist"):
- g.add_edge("A", "B")
-
- with pytest.raises(ValueError, match="Source node 'X' does not exist"):
- g.add_edge("X", "A")
-
-
-def test_add_edge_duplicate_id():
- """Forbid reusing an existing edge ID."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
-
- e1 = g.add_edge("A", "B")
- # Attempt to add a second edge with the same key
- with pytest.raises(ValueError, match="already exists"):
- g.add_edge("A", "B", key=e1)
-
-
-def test_remove_edge_basic():
- """Remove a specific edge by key, then remove all edges from u->v."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
-
- e1 = g.add_edge("A", "B", label="E1")
- e2 = g.add_edge("A", "B", label="E2") # parallel edge
- assert e1 in g.get_edges()
- assert e2 in g.get_edges()
-
- # Remove e1 by ID
- g.remove_edge("A", "B", key=e1)
- assert e1 not in g.get_edges()
- assert e2 in g.get_edges()
-
- # Now remove the remaining edges from A->B
- g.remove_edge("A", "B")
- assert e2 not in g.get_edges()
- assert "B" not in g.succ["A"]
-
-
-def test_remove_edge_wrong_pair_key():
- """
- Ensure that if we try to remove an edge using the wrong (u, v) pair
- while specifying key, we get a ValueError about mismatched src/dst.
- """
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- e1 = g.add_edge("A", "B")
-
- # Attempt remove edge using reversed node pair from the actual one
- with pytest.raises(ValueError, match="is actually from A to B, not from B to A"):
- g.remove_edge("B", "A", key=e1)
-
-
-def test_remove_edge_missing_nodes():
- """Removing an edge should fail if source or target node doesn't exist."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- e1 = g.add_edge("A", "B")
-
- with pytest.raises(ValueError, match="Source node 'X' does not exist"):
- g.remove_edge("X", "B")
-
- with pytest.raises(ValueError, match="Target node 'Y' does not exist"):
- g.remove_edge("A", "Y")
-
- # e1 is still present
- assert e1 in g.get_edges()
-
-
-def test_remove_edge_nonexistent_id():
- """Removing a specific edge that doesn't exist should raise ValueError."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- e1 = g.add_edge("A", "B")
- with pytest.raises(ValueError, match="No edge with id='999' found"):
- g.remove_edge("A", "B", key="999")
- assert e1 in g.get_edges()
-
-
-def test_remove_edge_no_edges():
- """Removing all edges from A->B when none exist should raise ValueError."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- with pytest.raises(ValueError, match="No edges from 'A' to 'B' to remove"):
- g.remove_edge("A", "B")
-
-
-def test_remove_edge_by_id():
- """Remove edges by their unique ID."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- e1 = g.add_edge("A", "B", label="E1")
- e2 = g.add_edge("A", "B", label="E2")
-
- g.remove_edge_by_id(e1)
- assert e1 not in g.get_edges()
- assert e2 in g.get_edges()
-
- g.remove_edge_by_id(e2)
- assert e2 not in g.get_edges()
- assert "B" not in g.succ["A"]
-
-
-def test_remove_edge_by_id_missing():
- """Removing an edge by ID that doesn't exist should raise ValueError."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_edge("A", "B")
-
- with pytest.raises(ValueError, match="Edge with id='999' not found"):
- g.remove_edge_by_id("999")
-
-
-def test_copy_deep():
- """Test the pickle-based deep copy logic."""
- g = StrictMultiDiGraph()
- g.add_node("A", nattr="NA")
- g.add_node("B", nattr="NB")
- e1 = g.add_edge("A", "B", label="E1", meta={"x": 123})
- e2 = g.add_edge("B", "A", label="E2")
-
- g2 = g.copy() # pickle-based deep copy by default
- # Ensure it's a distinct object
- assert g2 is not g
- # Structure check
- assert set(g2.nodes) == {"A", "B"}
- assert set(g2.get_edges()) == {e1, e2}
-
- # Remove node from original
- g.remove_node("A")
- # The copy should remain unchanged
- assert "A" in g2
- assert e1 in g2.get_edges()
-
- # Attributes carried over
- assert g2.nodes["A"]["nattr"] == "NA"
- assert g2.get_edge_attr(e1) == {"label": "E1", "meta": {"x": 123}}
-
-
-def test_copy_as_view():
- """Test copying as a view rather than deep copy."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- e1 = g.add_edge("A", "B")
-
- # as_view requires pickle=False
- g_view = g.copy(as_view=True, pickle=False)
- assert g_view is not g
-
- # Because it's a view, changes to g should reflect in g_view
- g.remove_edge_by_id(e1)
- assert e1 not in g_view.get_edges()
-
-
-def test_get_nodes_and_edges():
- """Check the convenience getters for nodes and edges."""
- g = StrictMultiDiGraph()
- g.add_node("A", color="red")
- g.add_node("B", color="blue")
- e1 = g.add_edge("A", "B", weight=10)
- e2 = g.add_edge("B", "A", weight=20)
-
- assert g.get_nodes() == {"A": {"color": "red"}, "B": {"color": "blue"}}
-
- edges = g.get_edges()
- assert e1 in edges
- assert e2 in edges
- assert edges[e1] == ("A", "B", e1, {"weight": 10})
- assert edges[e2] == ("B", "A", e2, {"weight": 20})
-
-
-def test_get_edge_attr():
- """Check retrieving attributes of a specific edge."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- e1 = g.add_edge("A", "B", cost=123)
- assert g.get_edge_attr(e1) == {"cost": 123}
-
-
-def test_get_edge_attr_missing_key():
- """Calling get_edge_attr with an unknown key should raise ValueError."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_edge("A", "B", cost=123)
-
- with pytest.raises(ValueError, match="Edge with id='999' not found"):
- g.get_edge_attr("999")
-
-
-def test_has_edge_by_id():
- """Verify the has_edge_by_id method behavior."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
-
- # No edges yet, should return False
- assert not g.has_edge_by_id("nonexistent_key")
-
- # Add edge
- e1 = g.add_edge("A", "B")
- assert g.has_edge_by_id(e1) is True
-
- # Remove edge
- g.remove_edge_by_id(e1)
- assert not g.has_edge_by_id(e1)
-
-
-def test_edges_between():
- """Test listing all edge IDs from node u to node v."""
- g = StrictMultiDiGraph()
- for node in ["A", "B", "C"]:
- g.add_node(node)
-
- # No edges yet
- assert g.edges_between("A", "B") == []
-
- # Add a single edge A->B
- e1 = g.add_edge("A", "B")
- assert g.edges_between("A", "B") == [e1]
- assert g.edges_between("B", "C") == []
-
- # Add two parallel edges A->B
- e2 = g.add_edge("A", "B")
- edges_ab = g.edges_between("A", "B")
- # order may vary, so compare as a set
- assert set(edges_ab) == {e1, e2}
-
- # Node 'X' does not exist in graph, or no edges from B->A
- assert g.edges_between("B", "A") == []
- assert g.edges_between("X", "B") == []
-
-
-def test_update_edge_attr():
- """Check that update_edge_attr adds or changes attributes on an existing edge."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
-
- e1 = g.add_edge("A", "B", color="red")
- assert g.get_edge_attr(e1) == {"color": "red"}
-
- # Update with new attributes
- g.update_edge_attr(e1, weight=10, color="blue")
- assert g.get_edge_attr(e1) == {"color": "blue", "weight": 10}
-
- # Attempt to update a non-existent edge
- with pytest.raises(ValueError, match="Edge with id='fake_id' not found"):
- g.update_edge_attr("fake_id", cost=999)
-
-
-def test_networkx_algorithm():
- """Demonstrate that standard NetworkX algorithms function as expected."""
- g = StrictMultiDiGraph()
- for node in ["A", "B", "BB", "C"]:
- g.add_node(node)
- g.add_edge("A", "B", weight=10)
- g.add_edge("A", "BB", weight=10)
- g.add_edge("B", "C", weight=4)
- g.add_edge("BB", "C", weight=12)
- g.add_edge("BB", "C", weight=5)
- g.add_edge("BB", "C", weight=4)
-
- # Because we have multi-edges from BB->C, define cost as the min of any parallel edge's weight
- all_sp = list(
- nx.all_shortest_paths(
- G=g,
- source="A",
- target="C",
- weight=lambda u, v, multi_attrs: min(
- d["weight"] for d in multi_attrs.values()
- ),
- )
- )
- # Expect two equally short paths: A->B->C (10+4=14) and A->BB->C (10+4=14)
- assert sorted(all_sp) == sorted([["A", "B", "C"], ["A", "BB", "C"]])
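
The last test shows the NetworkX convention for weighted algorithms on multigraphs: a callable weight receives (u, v, attrs) where attrs maps each parallel edge key to its attribute dict. A standalone sketch of the same idea on a plain nx.MultiDiGraph:

import networkx as nx

g = nx.MultiDiGraph()
g.add_edge("A", "B", weight=10)
g.add_edge("A", "B", weight=3)  # parallel edge

def min_weight(u, v, multi_attrs):
    # Traversal cost is the cheapest of the parallel edges.
    return min(d["weight"] for d in multi_attrs.values())

assert nx.shortest_path_length(g, "A", "B", weight=min_weight) == 3
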
diff --git a/tests/graph/test_io.py b/tests/graph/test_io.py
deleted file mode 100644
index 076b2dc..0000000
--- a/tests/graph/test_io.py
+++ /dev/null
@@ -1,247 +0,0 @@
-import pytest
-
-from ngraph.graph.io import (
- edgelist_to_graph,
- graph_to_edgelist,
- graph_to_node_link,
- node_link_to_graph,
-)
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-def test_graph_to_node_link_basic():
- """
- Test converting a small StrictMultiDiGraph into a node-link dict.
- """
- g = StrictMultiDiGraph(test_attr="TEST_graph")
- g.add_node("A", color="red")
- g.add_node("B", color="blue")
- e1 = g.add_edge("A", "B", weight=10)
- e2 = g.add_edge("B", "A", weight=99)
-
- result = graph_to_node_link(g)
-
- # The top-level 'graph' attribute should contain 'test_attr'
- assert result["graph"] == {"test_attr": "TEST_graph"}
-
- # We expect 2 nodes, with stable indexing: "A" -> 0, "B" -> 1
- nodes = sorted(result["nodes"], key=lambda x: x["id"])
- assert nodes == [
- {"id": "A", "attr": {"color": "red"}},
- {"id": "B", "attr": {"color": "blue"}},
- ]
-
- # We expect 2 edges in 'links'. Check the "source"/"target" indices
- links = sorted(result["links"], key=lambda x: x["key"])
- # Typically "A" -> index=0, "B" -> index=1
- # Edge IDs e1 and e2 may be random base64 strings, so just check they appear as keys:
- assert len(links) == 2
- link_keys = {links[0]["key"], links[1]["key"]}
- assert e1 in link_keys
- assert e2 in link_keys
-
- # Check one link's structure
- # For example, find the link with key=e1
- link_e1 = next(link for link in links if link["key"] == e1)
- assert link_e1["source"] == 0 # "A" => index 0
- assert link_e1["target"] == 1 # "B" => index 1
- # Attributes should preserve native types for node-link export
- assert link_e1["attr"] == {"weight": 10}
-
-
-def test_node_link_to_graph_basic():
- """
- Test reconstructing a StrictMultiDiGraph from a node-link dict.
- """
- data = {
- "graph": {"test_attr": "TEST_graph"},
- "nodes": [
- {"id": "A", "attr": {"color": "red"}},
- {"id": "B", "attr": {"color": "blue"}},
- ],
- "links": [
- {"source": 0, "target": 1, "key": "edgeAB", "attr": {"weight": "10"}},
- {"source": 1, "target": 0, "key": "edgeBA", "attr": {"weight": "99"}},
- ],
- }
-
- g = node_link_to_graph(data)
- assert isinstance(g, StrictMultiDiGraph)
- # Check top-level Nx attributes
- assert g.graph == {"test_attr": "TEST_graph"}
- # Check nodes
- assert set(g.nodes()) == {"A", "B"}
- assert g.nodes["A"]["color"] == "red"
- assert g.nodes["B"]["color"] == "blue"
- # Check edges
- e_map = g.get_edges()
- assert len(e_map) == 2
- # "edgeAB" should be A->B
- src, dst, eid, attrs = e_map["edgeAB"]
- assert src == "A"
- assert dst == "B"
- assert attrs == {"weight": "10"}
- # "edgeBA" should be B->A
- src, dst, eid, attrs = e_map["edgeBA"]
- assert src == "B"
- assert dst == "A"
- assert attrs == {"weight": "99"}
-
-
-def test_node_link_round_trip():
- """
- Build a StrictMultiDiGraph, convert to node-link, then reconstruct
- and verify the structure is identical.
- """
- g = StrictMultiDiGraph(description="RoundTrip")
- g.add_node("X", val=1)
- g.add_node("Y", val=2)
- e_xy = g.add_edge("X", "Y", cost=100)
- e_yx = g.add_edge("Y", "X", cost=999)
-
- data = graph_to_node_link(g)
- g2 = node_link_to_graph(data)
-
- # Check top-level
- assert g2.graph == {"description": "RoundTrip"}
- # Check nodes
- assert set(g2.nodes()) == {"X", "Y"}
- assert g2.nodes["X"]["val"] == 1
- assert g2.nodes["Y"]["val"] == 2
- # Check edges
- e_map = g2.get_edges()
- assert len(e_map) == 2
- # find e_xy in e_map
- assert e_xy in e_map
- src, dst, eid, attrs = e_map[e_xy]
- assert src == "X"
- assert dst == "Y"
- # Attributes should preserve native types on round-trip
- assert attrs == {"cost": 100}
- # find e_yx
- assert e_yx in e_map
-
-
-def test_edgelist_to_graph_basic():
- """
- Test building a graph from a basic edge list with columns.
- """
- lines = [
- "A B 10",
- "B C 20",
- "C A 30",
- ]
- columns = ["src", "dst", "weight"]
-
- g = edgelist_to_graph(lines, columns)
-
- assert isinstance(g, StrictMultiDiGraph)
- # Should have 3 edges, 3 nodes
- assert set(g.nodes()) == {"A", "B", "C"}
- assert len(g.get_edges()) == 3
- # Check each edge's attribute
- e_map = g.get_edges()
- # We can't assume numeric IDs, so find them by iteration
- for _eid, (src, dst, _, attrs) in e_map.items():
- w = attrs["weight"]
- if src == "A" and dst == "B":
- assert w == "10"
- elif src == "B" and dst == "C":
- assert w == "20"
- elif src == "C" and dst == "A":
- assert w == "30"
-
-
-def test_edgelist_to_graph_with_key():
- """
- Test using a 'key' column that sets a custom edge ID
- """
- lines = [
- "A B edgeAB 999",
- "B A edgeBA 123",
- ]
- columns = ["src", "dst", "key", "cost"]
-
- g = edgelist_to_graph(lines, columns, key="key")
- assert len(g.get_edges()) == 2
- # We expect edge IDs "edgeAB", "edgeBA"
- e_map = g.get_edges()
- assert "edgeAB" in e_map
- assert "edgeBA" in e_map
- # Check attributes
- src, dst, eid, attrs = e_map["edgeAB"]
- assert src == "A"
- assert dst == "B"
- assert attrs == {"cost": "999"}
-
-
-def test_edgelist_to_graph_error_on_mismatch():
- """
- If a line doesn't match the expected column count, a RuntimeError is raised.
- """
- lines = ["A B 10", "B C 20 EXTRA"] # good # mismatch
- columns = ["src", "dst", "weight"]
-
- with pytest.raises(RuntimeError, match="token count mismatch"):
- edgelist_to_graph(lines, columns)
-
-
-def test_graph_to_edgelist_basic():
- """
- Test exporting a graph to lines, then reimporting.
- """
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_node("C")
-
- g.add_edge("A", "B", cost=10)
- g.add_edge("B", "C", cost=20)
- # No custom keys for the rest -> random base64 IDs
- g.add_edge("C", "A", label="X")
-
- lines = graph_to_edgelist(g)
- # By default: [src, dst, key] + sorted(attributes)
- # The random edge IDs are unknown in advance, so parse the lines and re-import them
- g2 = edgelist_to_graph(lines, ["src", "dst", "key", "cost", "label"])
-
- # Check same node set
- assert set(g2.nodes()) == {"A", "B", "C"}
- # We expect 3 edges
- e2_map = g2.get_edges()
- assert len(e2_map) == 3
-
- # IDs may differ on re-import when no explicit keys were given, so only check
- # adjacency and attributes: two edges carry "cost" and one carries "label"
- # Check adjacency
- edges_seen = set()
- for _eid, (s, d, _, _attrs) in e2_map.items():
- edges_seen.add((s, d))
- # if there's a "cost" in attrs, it might be "10" or "20"
- # if there's a "label" in attrs, it's "X"
- assert edges_seen == {("A", "B"), ("B", "C"), ("C", "A")}
- # This indicates a successful round-trip.
-
-
-def test_graph_to_edgelist_columns():
- """
- Test specifying custom columns in graph_to_edgelist.
- """
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_edge("A", "B", cost=10, color="red")
-
- lines = graph_to_edgelist(g, columns=["src", "dst", "cost", "color"], separator=",")
- # We expect one line: "A,B,10,red"
- assert lines == ["A,B,10,red"]
- # Now re-import
- g2 = edgelist_to_graph(
- lines, columns=["src", "dst", "cost", "color"], separator=","
- )
- e_map = g2.get_edges()
- assert len(e_map) == 1
- _, _, _, attrs = next(iter(e_map.values()))
- assert attrs == {"cost": "10", "color": "red"}
diff --git a/tests/graph/test_network_graph.py b/tests/graph/test_network_graph.py
deleted file mode 100644
index 0d30db7..0000000
--- a/tests/graph/test_network_graph.py
+++ /dev/null
@@ -1,129 +0,0 @@
-"""
-Tests for graph conversion and operations.
-
-This module contains tests for:
-- Converting Network to StrictMultiDiGraph
-- Graph operations with enabled/disabled nodes and links
-- Reverse edge handling in graph conversion
-"""
-
-import pytest
-
-from ngraph.model.network import Link, Network, Node
-
-
-class TestGraphConversion:
- """Tests for converting Network to StrictMultiDiGraph."""
-
- @pytest.fixture
- def linear_network(self):
- """Fixture providing a linear A->B->C network."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
-
- link_ab = Link("A", "B")
- link_bc = Link("B", "C")
- net.add_link(link_ab)
- net.add_link(link_bc)
- return net, link_ab, link_bc
-
- def test_to_strict_multidigraph_add_reverse_true(self, linear_network):
- """Test graph conversion with reverse edges enabled."""
- net, _link_ab, _link_bc = linear_network
- graph = net.to_strict_multidigraph(add_reverse=True)
-
- assert set(graph.nodes()) == {"A", "B", "C"}
-
- edges = list(graph.edges(keys=True))
- assert len(edges) == 4
- # Validate expected directed pairs exist
- pairs = {(u, v) for (u, v, _k) in edges}
- assert ("A", "B") in pairs
- assert ("B", "A") in pairs
- assert ("B", "C") in pairs
- assert ("C", "B") in pairs
-
- def test_to_strict_multidigraph_add_reverse_false(self):
- """Test graph conversion with reverse edges disabled."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
-
- link_ab = Link("A", "B")
- net.add_link(link_ab)
-
- graph = net.to_strict_multidigraph(add_reverse=False)
-
- assert set(graph.nodes()) == {"A", "B"}
-
- edges = list(graph.edges(keys=True))
- assert len(edges) == 1
- assert edges[0][0] == "A"
- assert edges[0][1] == "B"
- # Key is an internal integer; ensure an edge from A->B exists
- assert any(u == "A" and v == "B" for (u, v, _k) in edges)
-
- def test_to_strict_multidigraph_excludes_disabled(self):
- """Test that disabled nodes or links are excluded from graph conversion."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- link_ab = Link("A", "B")
- net.add_link(link_ab)
-
- # Disable node A
- net.disable_node("A")
- graph = net.to_strict_multidigraph()
- assert "A" not in graph.nodes
- assert "B" in graph.nodes
- assert len(graph.edges()) == 0
-
- # Enable node A, disable link
- net.enable_all()
- net.disable_link(link_ab.id)
- graph = net.to_strict_multidigraph()
- assert "A" in graph.nodes
- assert "B" in graph.nodes
- assert len(graph.edges()) == 0
-
- def test_to_strict_multidigraph_with_disabled_target_node(self):
- """Test graph conversion when target node is disabled."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
-
- link_ab = Link("A", "B")
- link_bc = Link("B", "C")
- net.add_link(link_ab)
- net.add_link(link_bc)
-
- # Disable target node B
- net.disable_node("B")
- graph = net.to_strict_multidigraph()
-
- # Only nodes A and C should be in graph, no edges
- assert set(graph.nodes()) == {"A", "C"}
- assert len(graph.edges()) == 0
-
- def test_to_strict_multidigraph_empty_network(self):
- """Test graph conversion with empty network."""
- net = Network()
- graph = net.to_strict_multidigraph(compact=True)
-
- assert len(graph.nodes()) == 0
- assert len(graph.edges()) == 0
-
- def test_to_strict_multidigraph_isolated_nodes(self):
- """Test graph conversion with isolated nodes (no links)."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
-
- graph = net.to_strict_multidigraph(compact=True)
-
- assert set(graph.nodes()) == {"A", "B", "C"}
- assert len(graph.edges()) == 0
diff --git a/tests/integration/expectations.py b/tests/integration/expectations.py
index bf9a7cf..01afce4 100644
--- a/tests/integration/expectations.py
+++ b/tests/integration/expectations.py
@@ -147,7 +147,7 @@ def _calculate_scenario_4_total_nodes() -> int:
- 2 DCs, each with 2 pods, each with 2 racks
- Each rack has 9 nodes (1 ToR + 8 servers)
- Each DC has 2 leaf + 2 spine switches (4 fabric nodes)
- - 1 rack is disabled (dc2_podb_rack2), reducing count by 9 nodes
+ - 1 rack is disabled (dc2_podb_rack2) but still included in the graph with disabled=True
Returns:
Expected total node count for scenario 4.
@@ -160,37 +160,28 @@ def _calculate_scenario_4_total_nodes() -> int:
# Calculate fabric nodes: 2 DCs × (2 leaf + 2 spine) = 8
fabric_nodes = b["dcs"] * (b["leaf_switches_per_dc"] + b["spine_switches_per_dc"])
- # Subtract disabled rack nodes: 1 rack × 9 nodes = 9
- disabled_nodes = b["disabled_racks"] * b["nodes_per_rack"]
+ # Note: Disabled nodes are still included in the graph (with disabled=True attribute)
+ # They are not subtracted from the total count
+ total = rack_nodes + fabric_nodes
- # Total after accounting for disabled rack that doesn't get re-enabled
- total = rack_nodes + fabric_nodes - disabled_nodes
-
- return total # 72 + 8 - 9 = 71
+ return total # 72 + 8 = 80
def _calculate_scenario_4_total_links() -> int:
"""
- Calculate approximate total directed edges for scenario 4.
+ Calculate total directed edges for scenario 4.
- This is complex due to variable expansion, so we calculate major link types:
- - Server to ToR links within racks
- - Leaf to spine links within fabric
- - Rack-to-fabric connections
- - Inter-DC spine connections
+ BuildGraph now adds bidirectional edges for each link in the network.
+ The scenario has 84 physical links, which results in 168 directed edges
+ (84 forward + 84 reverse).
Returns:
- Approximate total directed edge count.
+ Total directed edge count.
"""
- # Based on actual scenario execution:
- # - Server to ToR links: 8 servers * 8 racks * 2 directions = 128
- # - Leaf to spine links within fabrics: 2 leaf * 2 spine * 2 DCs * 2 directions = 16
- # - Rack to fabric connections: 8 racks * 2 leaf per rack * 2 directions = 32
- # - Inter-DC spine connections: 2 spine * 2 spine * 2 directions = 8
-
- # - Some connections may be missing due to disabled nodes or complex adjacency patterns
- # Actual observed value: 148 directed edges (updated after attribute cleanup)
- return 148 # Current observed value from execution
+ # Scenario 4 has 84 physical links
+ # BuildGraph adds reverse edges, so total edges = links * 2
+ physical_links = 84
+ return physical_links * DEFAULT_BIDIRECTIONAL_MULTIPLIER # 84 * 2 = 168
# Main expectation structure for scenario 4
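
A quick cross-check of the updated totals, using only the numbers stated in the docstrings above:

rack_nodes = 2 * 2 * 2 * 9   # DCs x pods x racks x (1 ToR + 8 servers) = 72
fabric_nodes = 2 * (2 + 2)   # 2 DCs x (2 leaf + 2 spine) = 8
assert rack_nodes + fabric_nodes == 80  # disabled rack still counted
assert 84 * 2 == 168                    # physical links x 2 directions
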
diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py
index 0cc50f0..9dac528 100644
--- a/tests/integration/helpers.py
+++ b/tests/integration/helpers.py
@@ -24,7 +24,6 @@
import pytest
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
from ngraph.scenario import Scenario
# Validation constants for test consistency
@@ -123,9 +122,9 @@ def __init__(self, scenario: Scenario) -> None:
"""
self.scenario = scenario
self.network = scenario.network
- self.graph: Optional[StrictMultiDiGraph] = None
+ self.graph: Optional[Any] = None
- def set_graph(self, graph: StrictMultiDiGraph) -> None:
+ def set_graph(self, graph: Any) -> None:
"""
Set the built graph for validation operations.
@@ -736,16 +735,20 @@ def create_scenario_helper(scenario: Scenario) -> ScenarioTestHelper:
Returns:
Configured ScenarioTestHelper instance
"""
+ import networkx as nx
+
helper = ScenarioTestHelper(scenario)
exported = scenario.results.to_dict()
- from ngraph.graph.io import node_link_to_graph
graph_dict = (
exported.get("steps", {}).get("build_graph", {}).get("data", {}).get("graph")
)
- graph = (
- node_link_to_graph(graph_dict) if isinstance(graph_dict, dict) else graph_dict
- )
+ if isinstance(graph_dict, dict):
+ # Use NetworkX's built-in function to convert node-link data back to a graph;
+ # this replaces the removed node_link_to_graph function
+ graph = nx.node_link_graph(graph_dict, edges="edges")
+ else:
+ graph = graph_dict
helper.set_graph(graph)
return helper
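
The helper now leans on NetworkX's node-link codec in place of the removed node_link_to_graph. A minimal round-trip sketch, assuming networkx 3.4+ where the edges= keyword exists:

import networkx as nx

g = nx.MultiDiGraph()
g.add_edge("A", "B", key="e1", cost=10)

data = nx.node_link_data(g, edges="edges")    # serialize with an "edges" list
g2 = nx.node_link_graph(data, edges="edges")  # reconstruct the multigraph
assert set(g2.nodes) == {"A", "B"} and g2.number_of_edges("A", "B") == 1
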
diff --git a/tests/integration/test_error_cases.py b/tests/integration/test_error_cases.py
index c0e37c6..0398a9c 100644
--- a/tests/integration/test_error_cases.py
+++ b/tests/integration/test_error_cases.py
@@ -131,6 +131,8 @@ def test_invalid_blueprint_parameters(self):
def test_malformed_adjacency_patterns(self):
"""Test malformed adjacency pattern definitions."""
+ import jsonschema.exceptions
+
# Use raw YAML for invalid pattern value that builder might validate
malformed_adjacency = """
blueprints:
@@ -158,7 +160,8 @@ def test_malformed_adjacency_patterns(self):
name: build_graph
"""
- with pytest.raises((ValueError, KeyError)):
+ # Schema validation catches this before code execution
+ with pytest.raises(jsonschema.exceptions.ValidationError):
scenario = Scenario.from_yaml(malformed_adjacency)
scenario.run()
@@ -193,11 +196,9 @@ def test_empty_network(self):
# Should succeed but produce empty graph
exported = scenario.results.to_dict()
- from ngraph.graph.io import node_link_to_graph
-
- graph = node_link_to_graph(exported["steps"]["build_graph"]["data"]["graph"])
- assert len(graph.nodes) == 0
- assert len(graph.edges) == 0
+ graph_data = exported["steps"]["build_graph"]["data"]["graph"]
+ assert len(graph_data.get("nodes", [])) == 0
+ assert len(graph_data.get("edges", [])) == 0
def test_single_node_network(self):
"""Test scenario with only one node."""
@@ -209,11 +210,9 @@ def test_single_node_network(self):
scenario.run()
exported = scenario.results.to_dict()
- from ngraph.graph.io import node_link_to_graph
-
- graph = node_link_to_graph(exported["steps"]["build_graph"]["data"]["graph"])
- assert len(graph.nodes) == 1
- assert len(graph.edges) == 0
+ graph_data = exported["steps"]["build_graph"]["data"]["graph"]
+ assert len(graph_data.get("nodes", [])) == 1
+ assert len(graph_data.get("edges", [])) == 0
def test_isolated_nodes(self):
"""Test network with isolated nodes (no connections)."""
@@ -226,11 +225,9 @@ def test_isolated_nodes(self):
scenario.run()
exported = scenario.results.to_dict()
- from ngraph.graph.io import node_link_to_graph
-
- graph = node_link_to_graph(exported["steps"]["build_graph"]["data"]["graph"])
- assert len(graph.nodes) == 3
- assert len(graph.edges) == 0
+ graph_data = exported["steps"]["build_graph"]["data"]["graph"]
+ assert len(graph_data.get("nodes", [])) == 3
+ assert len(graph_data.get("edges", [])) == 0
def test_self_loop_links(self):
"""Test links from a node to itself."""
@@ -277,12 +274,16 @@ def test_duplicate_links(self):
# Should handle parallel links correctly
exported = scenario.results.to_dict()
- from ngraph.graph.io import node_link_to_graph
-
- graph = node_link_to_graph(exported["steps"]["build_graph"]["data"]["graph"])
- assert len(graph.nodes) == 2
+ graph_data = exported["steps"]["build_graph"]["data"]["graph"]
+ assert len(graph_data.get("nodes", [])) == 2
# Should have multiple edges between the same nodes
- assert graph.number_of_edges("NodeA", "NodeB") >= 2
+ edges = graph_data.get("edges", [])
+ nodeA_to_nodeB_edges = [
+ e
+ for e in edges
+ if e.get("source") == "NodeA" and e.get("target") == "NodeB"
+ ]
+ assert len(nodeA_to_nodeB_edges) >= 2
def test_zero_capacity_links(self):
"""Test links with zero capacity."""
@@ -302,10 +303,8 @@ def test_zero_capacity_links(self):
# Should handle zero capacity links appropriately
exported = scenario.results.to_dict()
- from ngraph.graph.io import node_link_to_graph
-
- graph = node_link_to_graph(exported["steps"]["build_graph"]["data"]["graph"])
- assert len(graph.nodes) == 2
+ graph_data = exported["steps"]["build_graph"]["data"]["graph"]
+ assert len(graph_data.get("nodes", [])) == 2
def test_very_large_network_parameters(self):
"""Test handling of very large numeric parameters."""
@@ -328,22 +327,25 @@ def test_very_large_network_parameters(self):
# Should handle large numbers without overflow issues
exported = scenario.results.to_dict()
- from ngraph.graph.io import node_link_to_graph
-
- graph = node_link_to_graph(exported["steps"]["build_graph"]["data"]["graph"])
- assert graph is not None, "BuildGraph should produce a graph"
- assert len(graph.nodes) == 2
+ graph_data = exported["steps"]["build_graph"]["data"]["graph"]
+ assert graph_data is not None, "BuildGraph should produce a graph"
+ assert len(graph_data.get("nodes", [])) == 2
def test_special_characters_in_node_names(self):
- """Test node names with special characters."""
+ """Test node names with valid special characters (dashes and underscores)."""
builder = ScenarioDataBuilder()
- special_names = ["node-with-dashes", "node.with.dots", "node_with_underscores"]
+ # Schema allows: alphanumeric, dashes, underscores (not dots)
+ special_names = ["node-with-dashes", "node_with_underscores", "Node-123_test"]
- try:
- builder.with_simple_nodes(special_names)
- builder.with_workflow_step("BuildGraph", "build_graph")
- scenario = builder.build_scenario()
- scenario.run()
- except (ValueError, KeyError):
- # Some special characters might not be allowed
- pass
+ builder.with_simple_nodes(special_names)
+ builder.with_workflow_step("BuildGraph", "build_graph")
+ scenario = builder.build_scenario()
+ scenario.run()
+
+ # Verify all nodes were created
+ exported = scenario.results.to_dict()
+ graph_data = exported["steps"]["build_graph"]["data"]["graph"]
+ nodes = graph_data.get("nodes", [])
+ node_ids = [n.get("id") for n in nodes]
+ for name in special_names:
+ assert name in node_ids, f"Node {name} should be in graph"
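Moving the malformed-pattern expectation to jsonschema.exceptions.ValidationError reflects validation happening at the schema layer, before any builder code runs. A minimal standalone sketch of the pattern, with a hypothetical schema for illustration:

import jsonschema
import pytest

# Hypothetical schema standing in for the scenario schema: "pattern"
# must be one of a fixed set of adjacency kinds.
ADJACENCY_SCHEMA = {
    "type": "object",
    "properties": {"pattern": {"enum": ["mesh", "one_to_one"]}},
}

def test_bad_pattern_rejected_at_schema_layer():
    with pytest.raises(jsonschema.exceptions.ValidationError):
        jsonschema.validate({"pattern": "no_such_pattern"}, ADJACENCY_SCHEMA)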
diff --git a/tests/integration/test_scenario_1.py b/tests/integration/test_scenario_1.py
index f764ecd..a55f023 100644
--- a/tests/integration/test_scenario_1.py
+++ b/tests/integration/test_scenario_1.py
@@ -39,12 +39,8 @@ def scenario_1_executed(self, scenario_1):
@pytest.fixture
def helper(self, scenario_1_executed):
"""Create test helper for scenario 1."""
+ # create_scenario_helper now handles graph conversion using nx.node_link_graph
helper = create_scenario_helper(scenario_1_executed)
- exported = scenario_1_executed.results.to_dict()
- from ngraph.graph.io import node_link_to_graph
-
- graph = node_link_to_graph(exported["steps"]["build_graph"]["data"]["graph"])
- helper.set_graph(graph)
return helper
def test_scenario_parsing_and_execution(self, scenario_1_executed):
diff --git a/tests/integration/test_scenario_2.py b/tests/integration/test_scenario_2.py
index 9f96077..d45f477 100644
--- a/tests/integration/test_scenario_2.py
+++ b/tests/integration/test_scenario_2.py
@@ -40,12 +40,8 @@ def scenario_2_executed(self, scenario_2):
@pytest.fixture
def helper(self, scenario_2_executed):
"""Create test helper for scenario 2."""
+ # create_scenario_helper now handles graph conversion using nx.node_link_graph
helper = create_scenario_helper(scenario_2_executed)
- exported = scenario_2_executed.results.to_dict()
- from ngraph.graph.io import node_link_to_graph
-
- graph = node_link_to_graph(exported["steps"]["build_graph"]["data"]["graph"])
- helper.set_graph(graph)
return helper
def test_scenario_parsing_and_execution(self, scenario_2_executed):
diff --git a/tests/integration/test_scenario_3.py b/tests/integration/test_scenario_3.py
index c9f04a8..70be621 100644
--- a/tests/integration/test_scenario_3.py
+++ b/tests/integration/test_scenario_3.py
@@ -41,12 +41,8 @@ def scenario_3_executed(self, scenario_3):
@pytest.fixture
def helper(self, scenario_3_executed):
"""Create test helper for scenario 3."""
+ # create_scenario_helper now handles graph conversion using nx.node_link_graph
helper = create_scenario_helper(scenario_3_executed)
- exported = scenario_3_executed.results.to_dict()
- from ngraph.graph.io import node_link_to_graph
-
- graph = node_link_to_graph(exported["steps"]["build_graph"]["data"]["graph"]) # type: ignore[arg-type]
- helper.set_graph(graph)
return helper
def test_scenario_parsing_and_execution(self, scenario_3_executed):
diff --git a/tests/integration/test_scenario_4.py b/tests/integration/test_scenario_4.py
index e247284..97fd023 100644
--- a/tests/integration/test_scenario_4.py
+++ b/tests/integration/test_scenario_4.py
@@ -22,7 +22,6 @@
import pytest
from ngraph.explorer import NetworkExplorer
-from ngraph.graph.io import node_link_to_graph
from .expectations import (
SCENARIO_4_COMPONENT_EXPECTATIONS,
@@ -52,6 +51,7 @@ def scenario_4_executed(self, scenario_4):
@pytest.fixture(scope="module")
def helper(self, scenario_4_executed):
"""Create test helper for scenario 4."""
+ # create_scenario_helper now handles graph conversion using nx.node_link_graph
helper = create_scenario_helper(scenario_4_executed)
return helper
@@ -59,10 +59,7 @@ def test_scenario_parsing_and_execution(self, scenario_4_executed):
"""Test that scenario 4 can be parsed and executed without errors."""
assert scenario_4_executed.results is not None
exported = scenario_4_executed.results.to_dict()
- graph = node_link_to_graph(
- exported["steps"]["build_graph"]["data"].get("graph")
- )
- assert graph is not None
+ assert exported["steps"]["build_graph"]["data"].get("graph") is not None
def test_network_structure_validation(self, helper):
"""Test basic network structure matches expectations for large-scale topology."""
@@ -336,10 +333,11 @@ def test_advanced_workflow_steps(self, helper):
# Test BuildGraph step - correct API usage with two arguments
exported = results.to_dict()
- graph = node_link_to_graph(
- exported["steps"]["build_graph"]["data"].get("graph")
- )
- assert graph is not None
+ # node_link_to_graph was removed in the NetGraph-Core migration, so
+ # assert on the exported graph payload directly instead.
+ assert exported["steps"]["build_graph"]["data"].get("graph") is not None
# Test MaxFlow results - using flow_results key and summary totals
intra_dc = (
diff --git a/tests/integration/test_template_examples.py b/tests/integration/test_template_examples.py
index 8455062..6c4a757 100644
--- a/tests/integration/test_template_examples.py
+++ b/tests/integration/test_template_examples.py
@@ -135,9 +135,9 @@ def test_linear_backbone_scenario_minimal(self):
scenario.run()
exported = scenario.results.to_dict()
graph_dict = exported["steps"]["build_graph"]["data"]["graph"]
- from ngraph.graph.io import node_link_to_graph
+ import networkx as nx
- graph = node_link_to_graph(graph_dict)
+ graph = nx.node_link_graph(graph_dict, edges="edges")
assert len(graph.nodes) == 4
@@ -149,9 +149,9 @@ def test_minimal_test_scenario_minimal(self):
scenario.run()
exported = scenario.results.to_dict()
graph_dict = exported["steps"]["build_graph"]["data"]["graph"]
- from ngraph.graph.io import node_link_to_graph
+ import networkx as nx
- graph = node_link_to_graph(graph_dict)
+ graph = nx.node_link_graph(graph_dict, edges="edges")
assert len(graph.nodes) == 3
@@ -196,9 +196,9 @@ def test_combining_multiple_templates(self):
helper = create_scenario_helper(scenario)
exported = scenario.results.to_dict()
graph_dict = exported["steps"]["build_graph"]["data"]["graph"]
- from ngraph.graph.io import node_link_to_graph
+ import networkx as nx
- graph = node_link_to_graph(graph_dict)
+ graph = nx.node_link_graph(graph_dict, edges="edges")
helper.set_graph(graph)
assert len(graph.nodes) >= 3 # At least backbone nodes
@@ -232,9 +232,9 @@ def test_template_parameterization(self):
exported = scenario.results.to_dict()
graph_dict = exported["steps"]["build_graph"]["data"]["graph"]
- from ngraph.graph.io import node_link_to_graph
+ import networkx as nx
- graph = node_link_to_graph(graph_dict)
+ graph = nx.node_link_graph(graph_dict, edges="edges")
assert graph is not None, (
f"BuildGraph should produce a graph for scale {scale['nodes']}"
)
@@ -371,9 +371,9 @@ def test_scenario_1_template_variant(self):
helper = create_scenario_helper(scenario)
exported = scenario.results.to_dict()
graph_dict = exported["steps"]["build_graph"]["data"]["graph"]
- from ngraph.graph.io import node_link_to_graph
+ import networkx as nx
- graph = node_link_to_graph(graph_dict)
+ graph = nx.node_link_graph(graph_dict, edges="edges")
helper.set_graph(graph)
# Validate it matches scenario 1 expectations
@@ -520,9 +520,9 @@ def test_scenario_2_template_variant(self):
helper = create_scenario_helper(scenario)
exported = scenario.results.to_dict()
graph_dict = exported["steps"]["build_graph"]["data"]["graph"]
- from ngraph.graph.io import node_link_to_graph
+ import networkx as nx
- graph = node_link_to_graph(graph_dict)
+ graph = nx.node_link_graph(graph_dict, edges="edges")
# Validate basic structure (exact match would require complex blueprint logic)
assert len(graph.nodes) > 15 # Should have many nodes from blueprint expansion
@@ -629,9 +629,9 @@ def test_scenario_3_template_variant(self):
helper = create_scenario_helper(scenario)
exported = scenario.results.to_dict()
graph_dict = exported["steps"]["build_graph"]["data"]["graph"]
- from ngraph.graph.io import node_link_to_graph
+ import networkx as nx
- graph = node_link_to_graph(graph_dict)
+ graph = nx.node_link_graph(graph_dict, edges="edges")
helper.set_graph(graph)
# Validate basic structure matches scenario 3
@@ -669,9 +669,9 @@ def test_parameterized_backbone_scenarios(self):
helper = create_scenario_helper(scenario)
exported = scenario.results.to_dict()
graph_dict = exported["steps"]["build_graph"]["data"]["graph"]
- from ngraph.graph.io import node_link_to_graph
+ import networkx as nx
- graph = node_link_to_graph(graph_dict)
+ graph = nx.node_link_graph(graph_dict, edges="edges")
# Check for None graph and provide better error message
assert graph is not None, (
diff --git a/tests/logging/test_debug_smoke.py b/tests/logging/test_debug_smoke.py
deleted file mode 100644
index a1f32d0..0000000
--- a/tests/logging/test_debug_smoke.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from __future__ import annotations
-
-import logging
-from dataclasses import dataclass
-from typing import Any, Tuple
-
-from ngraph.demand.manager.schedule import place_demands_round_robin
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
-
-@dataclass
-class _Policy:
- def place_demand(
- self, graph: Any, src: str, dst: str, flow_class_key: Any, vol: float
- ) -> None: # pragma: no cover - no-op
- return None
-
- def remove_demand(self, graph: Any) -> None: # pragma: no cover - no-op
- return None
-
-
-@dataclass
-class _Demand:
- src_node: str
- dst_node: str
- volume: float
- demand_class: int
- placed_demand: float = 0.0
- flow_policy: _Policy | None = None
-
- def place(self, flow_graph: StrictMultiDiGraph) -> Tuple[float, float]:
- leftover = self.volume - self.placed_demand
- if leftover <= 0:
- return (0.0, 0.0)
- self.placed_demand += leftover
- return (leftover, 0.0)
-
-
-def _graph() -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_edge("A", "B", capacity=1.0, cost=1.0)
- g.add_edge("B", "A", capacity=1.0, cost=1.0)
- return g
-
-
-def test_schedule_debug_logging_smoke(caplog) -> None:
- # Enable DEBUG level for the scheduler module
- caplog.set_level(logging.DEBUG, logger="ngraph.demand.manager.schedule")
-
- g = _graph()
- demands = [
- _Demand("A", "B", 1.0, demand_class=0, flow_policy=_Policy()),
- _Demand("A", "B", 0.5, demand_class=1, flow_policy=_Policy()),
- ]
-
- total = place_demands_round_robin(
- g, demands, placement_rounds=1, reoptimize_after_each_round=False
- )
- assert total > 0.0
-
- # Ensure some DEBUG records emitted from the scheduler logger
- assert any(
- r.levelno == logging.DEBUG
- and r.name.startswith("ngraph.demand.manager.schedule")
- for r in caplog.records
- )
diff --git a/tests/logging/test_logging_smoke.py b/tests/logging/test_logging_smoke.py
deleted file mode 100644
index 2efc059..0000000
--- a/tests/logging/test_logging_smoke.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from __future__ import annotations
-
-import logging
-
-from ngraph.logging import get_logger, set_global_log_level
-
-
-def test_set_global_log_level_and_get_logger_smoke(caplog) -> None:
- # Switch to WARNING then DEBUG and verify effective level changes
- set_global_log_level(logging.WARNING)
- lg = get_logger("ngraph.smoke")
- assert lg.isEnabledFor(logging.WARNING)
-
- caplog.set_level(logging.DEBUG, logger="ngraph.smoke")
- lg.debug("debug message")
- assert any(
- r.levelno == logging.DEBUG and r.name == "ngraph.smoke" for r in caplog.records
- )
diff --git a/tests/components/__init__.py b/tests/model/components/__init__.py
similarity index 100%
rename from tests/components/__init__.py
rename to tests/model/components/__init__.py
diff --git a/tests/components/test_components.py b/tests/model/components/test_components.py
similarity index 98%
rename from tests/components/test_components.py
rename to tests/model/components/test_components.py
index 3485f6b..44f235b 100644
--- a/tests/components/test_components.py
+++ b/tests/model/components/test_components.py
@@ -1,6 +1,6 @@
import pytest
-from ngraph.components import Component, ComponentsLibrary
+from ngraph.model.components import Component, ComponentsLibrary
def test_component_totals_with_count_and_children() -> None:
@@ -436,7 +436,7 @@ def test_components_library_yaml_boolean_child_keys():
def test_helper_resolve_and_totals_with_multiplier() -> None:
"""Helpers return component and apply count multiplier correctly."""
- from ngraph.components import resolve_node_hardware, totals_with_multiplier
+ from ngraph.model.components import resolve_node_hardware, totals_with_multiplier
lib = ComponentsLibrary()
lib.components["box"] = Component(
diff --git a/tests/config/__init__.py b/tests/model/demand/__init__.py
similarity index 100%
rename from tests/config/__init__.py
rename to tests/model/demand/__init__.py
diff --git a/tests/demand/test_spec.py b/tests/model/demand/test_spec.py
similarity index 93%
rename from tests/demand/test_spec.py
rename to tests/model/demand/test_spec.py
index f11612b..7c35270 100644
--- a/tests/demand/test_spec.py
+++ b/tests/model/demand/test_spec.py
@@ -1,5 +1,5 @@
-from ngraph.demand.spec import TrafficDemand
-from ngraph.flows.policy import FlowPolicyConfig
+from ngraph.model.demand.spec import TrafficDemand
+from ngraph.model.flow.policy_config import FlowPolicyPreset as FlowPolicyConfig
def test_defaults_and_id_generation() -> None:
diff --git a/tests/failure/test_conditions_unit.py b/tests/model/failure/test_conditions_unit.py
similarity index 98%
rename from tests/failure/test_conditions_unit.py
rename to tests/model/failure/test_conditions_unit.py
index 839921c..528cbec 100644
--- a/tests/failure/test_conditions_unit.py
+++ b/tests/model/failure/test_conditions_unit.py
@@ -2,7 +2,7 @@
import pytest
-from ngraph.failure.conditions import (
+from ngraph.model.failure.conditions import (
FailureCondition,
evaluate_condition,
evaluate_conditions,
diff --git a/tests/failure/test_policy.py b/tests/model/failure/test_policy.py
similarity index 98%
rename from tests/failure/test_policy.py
rename to tests/model/failure/test_policy.py
index cc40172..5a6d6f9 100644
--- a/tests/failure/test_policy.py
+++ b/tests/model/failure/test_policy.py
@@ -2,7 +2,7 @@
import pytest
-from ngraph.failure.policy import (
+from ngraph.model.failure.policy import (
FailureCondition,
FailurePolicy,
FailureRule,
@@ -10,7 +10,7 @@
def _single_mode_policy(rule: FailureRule, **kwargs) -> FailurePolicy:
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
return FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])], **kwargs)
@@ -294,7 +294,7 @@ def test_multiple_rules():
logic="and",
rule_type="all",
)
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(
modes=[FailureMode(weight=1.0, rules=[node_rule, link_rule])]
@@ -366,7 +366,7 @@ def test_serialization():
probability=0.2,
count=3,
)
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
@@ -412,7 +412,7 @@ def test_missing_attributes():
def test_empty_policy():
"""Test policy with no rules."""
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[])])
diff --git a/tests/failure/test_policy_expansion.py b/tests/model/failure/test_policy_expansion.py
similarity index 92%
rename from tests/failure/test_policy_expansion.py
rename to tests/model/failure/test_policy_expansion.py
index 69b4a2b..8ab1a67 100644
--- a/tests/failure/test_policy_expansion.py
+++ b/tests/model/failure/test_policy_expansion.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from ngraph.failure.policy import FailureCondition, FailurePolicy, FailureRule
+from ngraph.model.failure.policy import FailureCondition, FailurePolicy, FailureRule
def test_expand_by_shared_risk_groups() -> None:
@@ -27,7 +27,7 @@ def test_expand_by_shared_risk_groups() -> None:
rule_type="all",
)
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(
modes=[FailureMode(weight=1.0, rules=[rule])], fail_risk_groups=True
@@ -52,7 +52,7 @@ def test_expand_failed_risk_group_children() -> None:
logic="and",
rule_type="all",
)
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(
modes=[FailureMode(weight=1.0, rules=[rule])], fail_risk_group_children=True
diff --git a/tests/failure/test_policy_set.py b/tests/model/failure/test_policy_set.py
similarity index 89%
rename from tests/failure/test_policy_set.py
rename to tests/model/failure/test_policy_set.py
index fe6fb75..0bfef39 100644
--- a/tests/failure/test_policy_set.py
+++ b/tests/model/failure/test_policy_set.py
@@ -2,8 +2,8 @@
import pytest
-from ngraph.failure.policy import FailurePolicy, FailureRule
-from ngraph.failure.policy_set import FailurePolicySet
+from ngraph.model.failure.policy import FailurePolicy, FailureRule
+from ngraph.model.failure.policy_set import FailurePolicySet
class TestFailurePolicySet:
@@ -18,7 +18,7 @@ def test_empty_policy_set(self):
def test_add_and_get_policy(self):
"""Test adding and retrieving policies."""
fps = FailurePolicySet()
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[])])
@@ -35,7 +35,7 @@ def test_get_nonexistent_policy(self):
def test_get_all_policies(self):
"""Test getting all policies."""
fps = FailurePolicySet()
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy1 = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[])])
policy2 = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[])])
@@ -54,7 +54,7 @@ def test_to_dict_serialization(self):
# Create a policy with some rules and attributes
rule = FailureRule(entity_scope="node", rule_type="choice", count=1)
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(
modes=[FailureMode(weight=1.0, rules=[rule])],
@@ -85,7 +85,7 @@ def test_to_dict_multiple_policies(self):
"""Test serialization with multiple policies."""
fps = FailurePolicySet()
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy1 = FailurePolicy(
modes=[FailureMode(weight=1.0, rules=[])], attrs={"name": "policy1"}
diff --git a/tests/failure/test_policy_weighted.py b/tests/model/failure/test_policy_weighted.py
similarity index 91%
rename from tests/failure/test_policy_weighted.py
rename to tests/model/failure/test_policy_weighted.py
index 1242c03..1146bc4 100644
--- a/tests/failure/test_policy_weighted.py
+++ b/tests/model/failure/test_policy_weighted.py
@@ -1,4 +1,4 @@
-from ngraph.failure.policy import FailurePolicy, FailureRule
+from ngraph.model.failure.policy import FailurePolicy, FailureRule
def test_weighted_choice_uses_weight_by_and_excludes_zero_weight_items() -> None:
@@ -11,7 +11,7 @@ def test_weighted_choice_uses_weight_by_and_excludes_zero_weight_items() -> None
count=2,
weight_by="cost",
)
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])], seed=123)
@@ -37,7 +37,7 @@ def test_weighted_choice_fills_from_zero_when_insufficient_positive() -> None:
weight_by="cost",
)
# Seed ensures deterministic fill choice among zeros
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])], seed=42)
@@ -63,7 +63,7 @@ def test_weighted_modes_selects_positive_weight_mode_only() -> None:
# Mode 1 (weight 1): node rule
node_rule = FailureRule(entity_scope="node", rule_type="all")
- from ngraph.failure.policy import FailureMode
+ from ngraph.model.failure.policy import FailureMode
policy = FailurePolicy(
modes=[
diff --git a/tests/model/flow/test_policy_config.py b/tests/model/flow/test_policy_config.py
new file mode 100644
index 0000000..1d07312
--- /dev/null
+++ b/tests/model/flow/test_policy_config.py
@@ -0,0 +1,159 @@
+"""Tests for flow policy preset configurations."""
+
+import pytest
+
+try:
+ import netgraph_core
+except ImportError:
+ pytest.skip("netgraph_core not available", allow_module_level=True)
+
+from ngraph.model.flow.policy_config import FlowPolicyPreset, create_flow_policy
+
+
+@pytest.fixture
+def simple_graph():
+ """Create a simple test graph."""
+ import numpy as np
+
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+
+ # Build a simple graph with 3 nodes and 2 edges using from_arrays
+ num_nodes = 3
+ src = np.array([0, 1], dtype=np.int32)
+ dst = np.array([1, 2], dtype=np.int32)
+ capacity = np.array([10.0, 10.0], dtype=np.float64)
+ cost = np.array([1, 1], dtype=np.int64)
+ ext_edge_ids = np.array([1, 2], dtype=np.int64)
+
+ multidigraph = netgraph_core.StrictMultiDiGraph.from_arrays(
+ num_nodes=num_nodes,
+ src=src,
+ dst=dst,
+ capacity=capacity,
+ cost=cost,
+ ext_edge_ids=ext_edge_ids,
+ )
+
+ graph_handle = algs.build_graph(multidigraph)
+
+ return algs, graph_handle, multidigraph
+
+
+def test_flow_policy_preset_enum_values():
+ """Test that FlowPolicyPreset enum has expected values."""
+ assert FlowPolicyPreset.SHORTEST_PATHS_ECMP == 1
+ assert FlowPolicyPreset.SHORTEST_PATHS_WCMP == 2
+ assert FlowPolicyPreset.TE_WCMP_UNLIM == 3
+ assert FlowPolicyPreset.TE_ECMP_UP_TO_256_LSP == 4
+ assert FlowPolicyPreset.TE_ECMP_16_LSP == 5
+
+
+def test_create_flow_policy_shortest_paths_ecmp(simple_graph):
+ """Test creating SHORTEST_PATHS_ECMP policy."""
+ algs, graph_handle, _ = simple_graph
+
+ policy = create_flow_policy(
+ algs, graph_handle, FlowPolicyPreset.SHORTEST_PATHS_ECMP
+ )
+
+ assert policy is not None
+ assert isinstance(policy, netgraph_core.FlowPolicy)
+
+
+def test_create_flow_policy_shortest_paths_wcmp(simple_graph):
+ """Test creating SHORTEST_PATHS_WCMP policy."""
+ algs, graph_handle, _ = simple_graph
+
+ policy = create_flow_policy(
+ algs, graph_handle, FlowPolicyPreset.SHORTEST_PATHS_WCMP
+ )
+
+ assert policy is not None
+ assert isinstance(policy, netgraph_core.FlowPolicy)
+
+
+def test_create_flow_policy_te_wcmp_unlim(simple_graph):
+ """Test creating TE_WCMP_UNLIM policy."""
+ algs, graph_handle, _ = simple_graph
+
+ policy = create_flow_policy(algs, graph_handle, FlowPolicyPreset.TE_WCMP_UNLIM)
+
+ assert policy is not None
+ assert isinstance(policy, netgraph_core.FlowPolicy)
+
+
+def test_create_flow_policy_te_ecmp_up_to_256_lsp(simple_graph):
+ """Test creating TE_ECMP_UP_TO_256_LSP policy."""
+ algs, graph_handle, _ = simple_graph
+
+ policy = create_flow_policy(
+ algs, graph_handle, FlowPolicyPreset.TE_ECMP_UP_TO_256_LSP
+ )
+
+ assert policy is not None
+ assert isinstance(policy, netgraph_core.FlowPolicy)
+
+
+def test_create_flow_policy_te_ecmp_16_lsp(simple_graph):
+ """Test creating TE_ECMP_16_LSP policy."""
+ algs, graph_handle, _ = simple_graph
+
+ policy = create_flow_policy(algs, graph_handle, FlowPolicyPreset.TE_ECMP_16_LSP)
+
+ assert policy is not None
+ assert isinstance(policy, netgraph_core.FlowPolicy)
+
+
+def test_create_flow_policy_invalid_preset(simple_graph):
+ """Test error handling for invalid preset."""
+ algs, graph_handle, _ = simple_graph
+
+ # Use a plain int that is not a valid preset value
+ invalid_preset = 999
+
+ with pytest.raises(ValueError, match="Unknown flow policy preset"):
+ create_flow_policy(algs, graph_handle, invalid_preset)
+
+
+def test_create_flow_policy_all_presets(simple_graph):
+ """Test that all defined presets can be created."""
+ algs, graph_handle, _ = simple_graph
+
+ for preset in FlowPolicyPreset:
+ policy = create_flow_policy(algs, graph_handle, preset)
+ assert policy is not None
+ assert isinstance(policy, netgraph_core.FlowPolicy)
+
+
+def test_flow_policy_preset_is_int_enum():
+ """Test that FlowPolicyPreset is an IntEnum."""
+ from enum import IntEnum
+
+ assert issubclass(FlowPolicyPreset, IntEnum)
+
+
+def test_flow_policy_preset_can_be_used_as_int():
+ """Test that FlowPolicyPreset values can be used as integers."""
+ preset = FlowPolicyPreset.SHORTEST_PATHS_ECMP
+ assert preset == 1
+ assert int(preset) == 1
+ assert preset + 1 == 2
+
+
+def test_flow_policy_preset_from_value():
+ """Test creating FlowPolicyPreset from integer value."""
+ preset = FlowPolicyPreset(1)
+ assert preset == FlowPolicyPreset.SHORTEST_PATHS_ECMP
+
+ preset = FlowPolicyPreset(2)
+ assert preset == FlowPolicyPreset.SHORTEST_PATHS_WCMP
+
+
+def test_flow_policy_preset_from_name():
+ """Test creating FlowPolicyPreset from name."""
+ preset = FlowPolicyPreset["SHORTEST_PATHS_ECMP"]
+ assert preset == FlowPolicyPreset.SHORTEST_PATHS_ECMP
+
+ preset = FlowPolicyPreset["TE_ECMP_16_LSP"]
+ assert preset == FlowPolicyPreset.TE_ECMP_16_LSP
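The new test file pins down both the preset numbering and the factory's error behavior. A minimal sketch of that contract: the enum values are copied from the tests, while validate_preset is a hypothetical stand-in for the validation the real create_flow_policy performs before delegating to netgraph_core:

from enum import IntEnum

class FlowPolicyPreset(IntEnum):
    SHORTEST_PATHS_ECMP = 1
    SHORTEST_PATHS_WCMP = 2
    TE_WCMP_UNLIM = 3
    TE_ECMP_UP_TO_256_LSP = 4
    TE_ECMP_16_LSP = 5

def validate_preset(preset: int) -> FlowPolicyPreset:
    """Hypothetical helper mirroring the ValueError the tests expect."""
    try:
        return FlowPolicyPreset(preset)
    except ValueError:
        raise ValueError(f"Unknown flow policy preset: {preset}") from None

assert validate_preset(1) is FlowPolicyPreset.SHORTEST_PATHS_ECMP
assert FlowPolicyPreset["TE_ECMP_16_LSP"] == 5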
diff --git a/tests/model/test_enhanced_max_flow.py b/tests/model/test_enhanced_max_flow.py
deleted file mode 100644
index 7ababb7..0000000
--- a/tests/model/test_enhanced_max_flow.py
+++ /dev/null
@@ -1,328 +0,0 @@
-"""Tests for the new enhanced max_flow methods."""
-
-import pytest
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.algorithms.types import FlowSummary
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.model.network import Link, Network, Node
-
-
-class TestEnhancedMaxFlowMethods:
- """Test the new max_flow_with_summary, max_flow_with_graph, and max_flow_detailed methods."""
-
- def test_max_flow_with_summary_basic(self):
- """Test max_flow_with_summary returns correct types and values."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_link(Link("A", "B", capacity=5))
- net.add_link(Link("B", "C", capacity=3))
-
- result = net.max_flow_with_summary("A", "C")
-
- # Check return type and structure
- assert isinstance(result, dict)
- assert len(result) == 1
-
- key = ("A", "C")
- assert key in result
-
- flow_val, summary = result[key]
- assert isinstance(flow_val, (int, float))
- assert isinstance(summary, FlowSummary)
- assert flow_val == 3.0
- assert summary.total_flow == 3.0
- assert len(summary.edge_flow) > 0
- assert len(summary.residual_cap) > 0
-
- def test_max_flow_with_graph_basic(self):
- """Test max_flow_with_graph returns correct types and values."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_link(Link("A", "B", capacity=5))
-
- result = net.max_flow_with_graph("A", "B")
-
- # Check return type and structure
- assert isinstance(result, dict)
- assert len(result) == 1
-
- key = ("A", "B")
- assert key in result
-
- flow_val, flow_graph = result[key]
- assert isinstance(flow_val, (int, float))
- assert isinstance(flow_graph, StrictMultiDiGraph)
- assert flow_val == 5.0
- assert flow_graph.number_of_nodes() >= 2
-
- def test_max_flow_detailed_basic(self):
- """Test max_flow_detailed returns correct types and values."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_link(Link("A", "B", capacity=10))
-
- result = net.max_flow_detailed("A", "B")
-
- # Check return type and structure
- assert isinstance(result, dict)
- assert len(result) == 1
-
- key = ("A", "B")
- assert key in result
-
- flow_val, summary, flow_graph = result[key]
- assert isinstance(flow_val, (int, float))
- assert isinstance(summary, FlowSummary)
- assert isinstance(flow_graph, StrictMultiDiGraph)
- assert flow_val == 10.0
- assert summary.total_flow == 10.0
-
- def test_consistency_with_original_max_flow(self):
- """Test that new methods return consistent flow values with original method."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_link(Link("A", "B", capacity=8))
- net.add_link(Link("B", "C", capacity=6))
-
- # Get results from all methods
- original = net.max_flow("A", "C")
- with_summary = net.max_flow_with_summary("A", "C")
- with_graph = net.max_flow_with_graph("A", "C")
- detailed = net.max_flow_detailed("A", "C")
-
- key = ("A", "C")
- original_flow = original[key]
- summary_flow = with_summary[key][0]
- graph_flow = with_graph[key][0]
- detailed_flow = detailed[key][0]
-
- # All should return the same flow value
- assert original_flow == summary_flow == graph_flow == detailed_flow == 6.0
-
- def test_flow_placement_parameter(self):
- """Test that flow_placement parameter works with new methods."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
-
- # Create parallel paths
- net.add_link(Link("A", "B", capacity=5, cost=1))
- net.add_link(Link("A", "C", capacity=3, cost=1))
- net.add_link(Link("B", "C", capacity=8, cost=1))
-
- # Test with different flow placement strategies
- for placement in [FlowPlacement.PROPORTIONAL, FlowPlacement.EQUAL_BALANCED]:
- result = net.max_flow_with_summary("A", "C", flow_placement=placement)
- flow_val, summary = result[("A", "C")]
-
- assert isinstance(flow_val, (int, float))
- assert flow_val > 0
- assert summary.total_flow == flow_val
-
- def test_shortest_path_parameter(self):
- """Test that shortest_path parameter works with new methods."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_node(Node("D"))
-
- # Short path A->B->D and longer path A->C->D
- net.add_link(Link("A", "B", capacity=5, cost=1))
- net.add_link(Link("B", "D", capacity=3, cost=1))
- net.add_link(Link("A", "C", capacity=4, cost=2))
- net.add_link(Link("C", "D", capacity=6, cost=2))
-
- # Test with shortest_path=True
- result = net.max_flow_with_summary("A", "D", shortest_path=True)
- flow_val, summary = result[("A", "D")]
-
- assert isinstance(flow_val, (int, float))
- assert flow_val > 0
- assert summary.total_flow == flow_val
-
- def test_pairwise_mode(self):
- """Test pairwise mode with new methods."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_node(Node("D"))
- net.add_link(Link("A", "C", capacity=5))
- net.add_link(Link("B", "D", capacity=3))
-
- result = net.max_flow_with_summary("^([AB])$", "^([CD])$", mode="pairwise")
-
- # Should have 4 combinations: A->C, A->D, B->C, B->D
- assert len(result) == 4
-
- # Check specific pairs
- assert ("A", "C") in result
- assert ("A", "D") in result
- assert ("B", "C") in result
- assert ("B", "D") in result
-
- # A->C should have flow, B->D should have flow, others should be 0
- assert result[("A", "C")][0] == 5.0
- assert result[("B", "D")][0] == 3.0
- assert result[("A", "D")][0] == 0.0
- assert result[("B", "C")][0] == 0.0
-
- def test_combine_mode(self):
- """Test combine mode with new methods."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_link(Link("A", "C", capacity=5))
- net.add_link(Link("B", "C", capacity=3))
-
- result = net.max_flow_with_summary("^([AB])$", "C", mode="combine")
-
- # Should have 1 combined result
- assert len(result) == 1
-
- key = ("A|B", "C")
- assert key in result
-
- flow_val, summary = result[key]
- assert flow_val == 8.0 # Both A and B can send to C
-
- def test_empty_results_handling(self):
- """Test handling of cases with no flow."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- # No link between A and B
-
- result = net.max_flow_with_summary("A", "B")
- flow_val, summary = result[("A", "B")]
-
- assert flow_val == 0.0
- assert summary.total_flow == 0.0
- assert len(summary.min_cut) == 0
-
- def test_disabled_nodes_handling(self):
- """Test handling of disabled nodes."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B", disabled=True))
- net.add_node(Node("C"))
- net.add_link(Link("A", "B", capacity=5))
- net.add_link(Link("B", "C", capacity=3))
-
- result = net.max_flow_with_summary("A", "C")
- flow_val, summary = result[("A", "C")]
-
- # Should be 0 because B is disabled
- assert flow_val == 0.0
- assert summary.total_flow == 0.0
-
- def test_error_cases(self):
- """Test error handling for invalid inputs."""
- net = Network()
- net.add_node(Node("A"))
-
- # Test invalid mode
- with pytest.raises(ValueError, match="Invalid mode"):
- net.max_flow_with_summary("A", "A", mode="invalid")
-
- # Test no matching sources
- with pytest.raises(ValueError, match="No source nodes found"):
- net.max_flow_with_summary("X", "A")
-
- # Test no matching sinks
- with pytest.raises(ValueError, match="No sink nodes found"):
- net.max_flow_with_summary("A", "X")
-
- def test_min_cut_identification(self):
- """Test that min-cut edges are correctly identified."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_link(Link("A", "B", capacity=10))
- net.add_link(Link("B", "C", capacity=5)) # This should be the bottleneck
-
- result = net.max_flow_with_summary("A", "C")
- flow_val, summary = result[("A", "C")]
-
- assert flow_val == 5.0
- assert len(summary.min_cut) == 1
-
- # The min-cut should include the B->C edge
- min_cut_edges = summary.min_cut
- assert any(u == "B" and v == "C" for u, v, k in min_cut_edges)
-
- def test_reachability_analysis(self):
- """Test that reachable nodes are correctly identified."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_node(Node("D"))
- net.add_link(Link("A", "B", capacity=5))
- net.add_link(Link("B", "C", capacity=3))
- # D is isolated
-
- result = net.max_flow_with_summary("A", "C")
- flow_val, summary = result[("A", "C")]
-
- # A and B should be reachable from source, C might be reachable depending on flow
- assert "A" in summary.reachable
- # D should not be reachable since it's isolated
- assert "D" not in summary.reachable
-
- def test_network_cost_distribution_functionality(self):
- """Test that cost distribution is exposed through Network max flow methods."""
- # Create a network with different path costs
- nodes = {
- "S": Node("S"),
- "A": Node("A"),
- "B": Node("B"),
- "T": Node("T"),
- }
-
- links = {
- "link1": Link(
- "S", "A", capacity=5.0, cost=1.0
- ), # Path 1: cost 2, capacity 5
- "link2": Link("A", "T", capacity=5.0, cost=1.0),
- "link3": Link(
- "S", "B", capacity=3.0, cost=2.0
- ), # Path 2: cost 4, capacity 3
- "link4": Link("B", "T", capacity=3.0, cost=2.0),
- }
-
- network = Network(nodes=nodes, links=links, risk_groups={}, attrs={})
-
- # Test max_flow_with_summary for cost distribution
- result = network.max_flow_with_summary("^S$", "^T$", mode="combine")
-
- assert len(result) == 1
- (src_label, sink_label), (flow_value, summary) = next(iter(result.items()))
-
- # Verify flow value and cost distribution
- assert flow_value == 8.0
- assert hasattr(summary, "cost_distribution")
- assert summary.cost_distribution == {2.0: 5.0, 4.0: 3.0}
-
- # Test max_flow_detailed for cost distribution
- detailed_result = network.max_flow_detailed("^S$", "^T$", mode="combine")
-
- assert len(detailed_result) == 1
- (src_label, sink_label), (flow_value, summary, flow_graph) = next(
- iter(detailed_result.items())
- )
-
- # Should have the same cost distribution
- assert flow_value == 8.0
- assert summary.cost_distribution == {2.0: 5.0, 4.0: 3.0}
diff --git a/tests/model/test_flow.py b/tests/model/test_flow.py
index cb87c3e..62fb3cb 100644
--- a/tests/model/test_flow.py
+++ b/tests/model/test_flow.py
@@ -1,16 +1,18 @@
"""
-Tests for flow analysis methods in the network module.
+Tests for flow analysis using the functional max_flow API.
-This module contains tests for:
-- Maximum flow calculations (max_flow)
-- Saturated edges identification (saturated_edges)
-- Sensitivity analysis (sensitivity_analysis)
-- Flow-related edge cases and overlapping patterns
+This module tests maximum flow calculations using the new functional API from
+ngraph.solver.maxflow after NetGraph-Core migration.
+
+Note: saturated_edges and sensitivity_analysis tests have been removed because
+those methods were dropped in the NetGraph-Core migration. These capabilities
+may be added back through different APIs in future versions.
"""
import pytest
from ngraph.model.network import Link, Network, Node
+from ngraph.solver.maxflow import max_flow
class TestMaxFlow:
@@ -26,7 +28,7 @@ def test_max_flow_simple(self):
net.add_link(Link("A", "B", capacity=5))
net.add_link(Link("B", "C", capacity=3))
- flow_value = net.max_flow("A", "C")
+ flow_value = max_flow(net, "A", "C")
assert flow_value == {("A", "C"): 3.0}
def test_max_flow_multi_parallel(self):
@@ -42,7 +44,7 @@ def test_max_flow_multi_parallel(self):
net.add_link(Link("A", "D", capacity=5))
net.add_link(Link("D", "C", capacity=5))
- flow_value = net.max_flow("A", "C")
+ flow_value = max_flow(net, "A", "C")
assert flow_value == {("A", "C"): 10.0}
def test_max_flow_no_source(self):
@@ -53,7 +55,7 @@ def test_max_flow_no_source(self):
net.add_link(Link("B", "C", capacity=10))
with pytest.raises(ValueError, match="No source nodes found matching 'A'"):
- net.max_flow("A", "C")
+ max_flow(net, "A", "C")
def test_max_flow_no_sink(self):
"""Test max flow when no sink nodes match the pattern."""
@@ -63,7 +65,7 @@ def test_max_flow_no_sink(self):
net.add_link(Link("A", "B", capacity=10))
with pytest.raises(ValueError, match="No sink nodes found matching 'C'"):
- net.max_flow("A", "C")
+ max_flow(net, "A", "C")
def test_max_flow_invalid_mode(self):
"""Invalid mode must raise ValueError."""
@@ -71,7 +73,7 @@ def test_max_flow_invalid_mode(self):
net.add_node(Node("A"))
net.add_node(Node("B"))
with pytest.raises(ValueError):
- net.max_flow("A", "B", mode="foobar")
+ max_flow(net, "A", "B", mode="foobar")
def test_max_flow_with_attribute_grouping_combine(self):
"""Test max flow when grouping sources/sinks by attribute directive."""
@@ -85,7 +87,7 @@ def test_max_flow_with_attribute_grouping_combine(self):
net.add_link(Link("S1", "T1", capacity=5.0))
net.add_link(Link("S2", "T1", capacity=3.0))
- flow = net.max_flow("attr:src_group", "attr:dst_group", mode="combine")
+ flow = max_flow(net, "attr:src_group", "attr:dst_group", mode="combine")
assert flow == {("src", "dst"): 8.0}
def test_max_flow_with_mixed_attr_and_regex(self):
@@ -99,7 +101,7 @@ def test_max_flow_with_mixed_attr_and_regex(self):
net.add_link(Link("S1", "T1", capacity=2.0))
net.add_link(Link("S2", "T2", capacity=3.0))
- flow = net.max_flow("attr:role", r"^T\d$", mode="pairwise")
+ flow = max_flow(net, "attr:role", r"^T\d$", mode="pairwise")
# Groups: sources -> {"edge": [S1, S2]}, sinks -> {"^T\\d$": [T1, T2]}
# Expect pairs (edge, ^T\d$)
assert ("edge", r"^T\d$") in flow
@@ -119,7 +121,8 @@ def test_max_flow_overlap_detection_coverage(self):
net.add_link(Link("B", "C", capacity=3.0))
# Create a scenario where there are valid groups but they overlap
- flow_result = net.max_flow(
+ flow_result = max_flow(
+ net,
source_path=r"^(A|B)$", # Matches A and B
sink_path=r"^(B|C)$", # Matches B and C (B overlaps!)
mode="combine",
@@ -137,7 +140,7 @@ def test_max_flow_invalid_mode_error(self):
net.add_link(Link("A", "B", capacity=10))
with pytest.raises(ValueError):
- net.max_flow("A", "B", mode="totally_invalid")
+ max_flow(net, "A", "B", mode="totally_invalid")
def test_max_flow_disabled_nodes_coverage(self):
"""Test max_flow with disabled source nodes for coverage."""
@@ -147,74 +150,9 @@ def test_max_flow_disabled_nodes_coverage(self):
net.add_link(Link("A", "B", capacity=5.0))
# This should trigger the empty sources condition
- flow_result = net.max_flow("A", "B")
+ flow_result = max_flow(net, "A", "B")
assert flow_result[("A", "B")] == 0.0
- def test_saturated_edges_empty_combine_coverage(self):
- """Test saturated_edges with empty nodes in combine mode for coverage."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B", disabled=True))
- net.add_node(Node("C", disabled=True))
- net.add_link(Link("A", "B", capacity=5.0))
-
- # This should create empty combined sink nodes
- saturated = net.saturated_edges("A", "B|C", mode="combine")
- key = ("A", "B|C")
- assert key in saturated
- assert saturated[key] == []
-
- def test_saturated_edges_invalid_mode_error(self):
- """Invalid mode raises ValueError (no need to assert exact message)."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_link(Link("A", "B", capacity=10))
-
- with pytest.raises(ValueError):
- net.saturated_edges("A", "B", mode="bad_mode")
-
- def test_sensitivity_analysis_empty_combine_coverage(self):
- """Test sensitivity_analysis with empty nodes in combine mode for coverage."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B", disabled=True))
- net.add_node(Node("C", disabled=True))
- net.add_link(Link("A", "B", capacity=5.0))
-
- # This should create empty combined sink nodes
- sensitivity = net.sensitivity_analysis("A", "B|C", mode="combine")
- key = ("A", "B|C")
- assert key in sensitivity
- assert sensitivity[key] == {}
-
- def test_sensitivity_analysis_invalid_mode_error(self):
- """Invalid mode raises ValueError (no need to assert exact message)."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_link(Link("A", "B", capacity=10))
-
- with pytest.raises(ValueError):
- net.sensitivity_analysis("A", "B", mode="wrong_mode")
-
- def test_flow_methods_overlap_conditions_coverage(self):
- """Test overlap conditions in flow methods for coverage."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_link(Link("A", "B", capacity=5.0))
- net.add_link(Link("B", "C", capacity=3.0))
-
- # Test overlap condition in saturated_edges pairwise mode
- saturated = net.saturated_edges("A", "A", mode="pairwise")
- assert saturated[("A", "A")] == []
-
- # Test overlap condition in sensitivity_analysis pairwise mode
- sensitivity = net.sensitivity_analysis("A", "A", mode="pairwise")
- assert sensitivity[("A", "A")] == {}
-
def test_no_private_method_calls(self):
"""Ensure public API suffices; don't rely on private helpers in tests."""
network = Network()
@@ -222,7 +160,7 @@ def test_no_private_method_calls(self):
network.add_node(Node("T"))
network.add_link(Link("S", "T", capacity=10))
- flow = network.max_flow("S", "T")
+ flow = max_flow(network, "S", "T")
assert flow[("S", "T")] == 10.0
@@ -236,7 +174,8 @@ def test_max_flow_overlapping_patterns_combine_mode(self):
net.add_node(Node("N2"))
net.add_link(Link("N1", "N2", capacity=5.0))
- flow_result = net.max_flow(
+ flow_result = max_flow(
+ net,
source_path=r"^N(\d+)$",
sink_path=r"^N(\d+)$",
mode="combine",
@@ -256,7 +195,8 @@ def test_max_flow_overlapping_patterns_pairwise_mode(self):
net.add_node(Node("N2"))
net.add_link(Link("N1", "N2", capacity=3.0))
- flow_result = net.max_flow(
+ flow_result = max_flow(
+ net,
source_path=r"^N(\d+)$",
sink_path=r"^N(\d+)$",
mode="pairwise",
@@ -289,7 +229,8 @@ def test_max_flow_partial_overlap_pairwise(self):
net.add_link(Link("BOTH1", "SINK1", capacity=1.5))
net.add_link(Link("BOTH2", "BOTH1", capacity=1.0))
- flow_result = net.max_flow(
+ flow_result = max_flow(
+ net,
source_path=r"^(SRC\d+|BOTH\d+)$", # Matches SRC1, BOTH1, BOTH2
sink_path=r"^(SINK\d+|BOTH\d+)$", # Matches SINK1, BOTH1, BOTH2 (partial overlap!)
mode="pairwise",
@@ -316,7 +257,8 @@ def test_max_flow_overlapping_with_disabled_nodes(self):
net.add_link(Link("N1", "N3", capacity=2.0))
net.add_link(Link("N2", "N3", capacity=1.0)) # This link won't be used
- flow_result = net.max_flow(
+ flow_result = max_flow(
+ net,
source_path=r"^N(\d+)$", # Matches N1, N2, N3
sink_path=r"^N(\d+)$", # Matches N1, N2, N3 (OVERLAPPING!)
mode="pairwise",
@@ -341,273 +283,12 @@ def test_max_flow_overlapping_with_disabled_nodes(self):
assert flow_result[("3", "1")] == 2.0 # N3->N1 (due to reverse edges)
-class TestSaturatedEdges:
- """Tests for saturated edges identification."""
-
- @pytest.fixture
- def bottleneck_network(self):
- """Fixture providing a network with a clear bottleneck."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_link(Link("A", "B", capacity=10.0))
- net.add_link(Link("B", "C", capacity=5.0)) # bottleneck
- return net
-
- def test_saturated_edges_simple(self, bottleneck_network):
- """Test saturated_edges method with a simple bottleneck scenario."""
- saturated = bottleneck_network.saturated_edges("A", "C")
-
- assert len(saturated) == 1
- key = ("A", "C")
- assert key in saturated
-
- edge_list = saturated[key]
- assert len(edge_list) == 1
-
- edge = edge_list[0]
- assert edge[0] == "B" # source
- assert edge[1] == "C" # target
-
- def test_saturated_edges_no_bottleneck(self):
- """Test saturated_edges when there's no clear bottleneck."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_link(Link("A", "B", capacity=100.0))
-
- saturated = net.saturated_edges("A", "B")
-
- assert len(saturated) == 1
- key = ("A", "B")
- assert key in saturated
-
- def test_saturated_edges_pairwise_mode(self):
- """Test saturated_edges with pairwise mode using regex patterns."""
- net = Network()
- for node in ["A1", "A2", "B", "C1", "C2"]:
- net.add_node(Node(node))
-
- net.add_link(Link("A1", "B", capacity=3.0))
- net.add_link(Link("A2", "B", capacity=4.0))
- net.add_link(Link("B", "C1", capacity=2.0))
- net.add_link(Link("B", "C2", capacity=3.0))
-
- saturated = net.saturated_edges("A(.*)", "C(.*)", mode="pairwise")
-
- assert len(saturated) >= 1
-
- for (_src_label, _sink_label), edge_list in saturated.items():
- assert isinstance(edge_list, list)
-
- def test_saturated_edges_error_cases(self, bottleneck_network):
- """Test error cases for saturated_edges."""
- with pytest.raises(ValueError, match="No source nodes found matching"):
- bottleneck_network.saturated_edges("NONEXISTENT", "C")
-
- with pytest.raises(ValueError, match="No sink nodes found matching"):
- bottleneck_network.saturated_edges("A", "NONEXISTENT")
-
- with pytest.raises(ValueError, match="Invalid mode 'invalid'"):
- bottleneck_network.saturated_edges("A", "C", mode="invalid")
-
- def test_saturated_edges_disabled_nodes(self):
- """Test saturated_edges with disabled nodes."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B", disabled=True))
- net.add_node(Node("C"))
- net.add_link(Link("A", "B", capacity=5.0))
- net.add_link(Link("B", "C", capacity=3.0))
-
- saturated = net.saturated_edges("A", "C")
-
- key = ("A", "C")
- assert key in saturated
- assert saturated[key] == []
-
- def test_saturated_edges_overlapping_groups(self):
- """Test saturated_edges when source and sink groups overlap."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_link(Link("A", "B", capacity=5.0))
-
- saturated = net.saturated_edges("A|B", "A|B")
-
- key = ("A|B", "A|B")
- assert key in saturated
- assert saturated[key] == []
-
- def test_saturated_edges_tolerance_parameter(self, bottleneck_network):
- """Test saturated_edges with different tolerance values."""
- saturated_strict = bottleneck_network.saturated_edges("A", "C", tolerance=1e-15)
- saturated_loose = bottleneck_network.saturated_edges("A", "C", tolerance=1.0)
-
- assert ("A", "C") in saturated_strict
- assert ("A", "C") in saturated_loose
-
-
-class TestSensitivityAnalysis:
- """Tests for sensitivity analysis."""
-
- @pytest.fixture
- def bottleneck_network(self):
- """Fixture providing a network with a clear bottleneck."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
- net.add_link(Link("A", "B", capacity=10.0))
- net.add_link(Link("B", "C", capacity=5.0)) # bottleneck
- return net
-
- def test_sensitivity_analysis_simple(self, bottleneck_network):
- """Test sensitivity_analysis method with a simple bottleneck scenario."""
- sensitivity = bottleneck_network.sensitivity_analysis(
- "A", "C", change_amount=1.0
- )
-
- assert len(sensitivity) == 1
- key = ("A", "C")
- assert key in sensitivity
-
- sens_dict = sensitivity[key]
- assert isinstance(sens_dict, dict)
-
- if sens_dict:
- for edge, flow_change in sens_dict.items():
- assert isinstance(edge, tuple)
- assert len(edge) == 3
- assert isinstance(flow_change, (int, float))
-
- def test_sensitivity_analysis_negative_change(self, bottleneck_network):
- """Test sensitivity_analysis with negative capacity change."""
- sensitivity = bottleneck_network.sensitivity_analysis(
- "A", "C", change_amount=-1.0
- )
-
- assert ("A", "C") in sensitivity
- sens_dict = sensitivity[("A", "C")]
- assert isinstance(sens_dict, dict)
-
- def test_sensitivity_analysis_pairwise_mode(self):
- """Test sensitivity_analysis with pairwise mode."""
- net = Network()
- for node in ["A1", "A2", "B", "C1", "C2"]:
- net.add_node(Node(node))
-
- net.add_link(Link("A1", "B", capacity=3.0))
- net.add_link(Link("A2", "B", capacity=4.0))
- net.add_link(Link("B", "C1", capacity=2.0))
- net.add_link(Link("B", "C2", capacity=3.0))
-
- sensitivity = net.sensitivity_analysis("A(.*)", "C(.*)", mode="pairwise")
-
- assert len(sensitivity) >= 1
-
- for (_src_label, _sink_label), sens_dict in sensitivity.items():
- assert isinstance(sens_dict, dict)
-
- def test_sensitivity_analysis_error_cases(self, bottleneck_network):
- """Test error cases for sensitivity_analysis."""
- with pytest.raises(ValueError, match="No source nodes found matching"):
- bottleneck_network.sensitivity_analysis("NONEXISTENT", "C")
-
- with pytest.raises(ValueError, match="No sink nodes found matching"):
- bottleneck_network.sensitivity_analysis("A", "NONEXISTENT")
-
- with pytest.raises(ValueError, match="Invalid mode 'invalid'"):
- bottleneck_network.sensitivity_analysis("A", "C", mode="invalid")
-
- def test_sensitivity_analysis_disabled_nodes(self):
- """Test sensitivity_analysis with disabled nodes."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B", disabled=True))
- net.add_node(Node("C"))
- net.add_link(Link("A", "B", capacity=5.0))
- net.add_link(Link("B", "C", capacity=3.0))
-
- sensitivity = net.sensitivity_analysis("A", "C")
-
- key = ("A", "C")
- assert key in sensitivity
- assert sensitivity[key] == {}
-
- def test_sensitivity_analysis_overlapping_groups(self):
- """Test sensitivity_analysis when source and sink groups overlap."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_link(Link("A", "B", capacity=5.0))
-
- sensitivity = net.sensitivity_analysis("A|B", "A|B")
-
- key = ("A|B", "A|B")
- assert key in sensitivity
- assert sensitivity[key] == {}
-
- def test_sensitivity_analysis_zero_change(self, bottleneck_network):
- """Test sensitivity_analysis with zero capacity change."""
- sensitivity = bottleneck_network.sensitivity_analysis(
- "A", "C", change_amount=0.0
- )
-
- assert ("A", "C") in sensitivity
- sens_dict = sensitivity[("A", "C")]
- assert isinstance(sens_dict, dict)
-
-
class TestFlowIntegration:
- """Integration tests for flow analysis methods."""
-
- def test_saturated_edges_and_sensitivity_consistency(self):
- """Test that saturated_edges and sensitivity_analysis are consistent."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_node(Node("C"))
-
- net.add_link(Link("A", "B", capacity=10.0))
- net.add_link(Link("B", "C", capacity=5.0))
-
- saturated = net.saturated_edges("A", "C")
- sensitivity = net.sensitivity_analysis("A", "C")
-
- key = ("A", "C")
- saturated_edges_list = saturated[key]
- sensitivity_dict = sensitivity[key]
-
- for _edge in saturated_edges_list:
- assert isinstance(sensitivity_dict, dict)
-
- def test_complex_network_analysis(self):
- """Test both methods on a more complex network topology."""
- net = Network()
-
- for node in ["A", "B", "C", "D"]:
- net.add_node(Node(node))
-
- net.add_link(Link("A", "B", capacity=5.0))
- net.add_link(Link("A", "C", capacity=3.0))
- net.add_link(Link("B", "D", capacity=4.0))
- net.add_link(Link("C", "D", capacity=6.0))
-
- saturated = net.saturated_edges("A", "D")
- sensitivity = net.sensitivity_analysis("A", "D", change_amount=1.0)
-
- key = ("A", "D")
- assert key in saturated
- assert key in sensitivity
-
- assert isinstance(saturated[key], list)
- assert isinstance(sensitivity[key], dict)
+ """Integration tests for max_flow with various parameters."""
def test_flow_placement_parameter(self):
- """Test that different flow_placement parameters work with both methods."""
- from ngraph.algorithms.base import FlowPlacement
+ """Test that different flow_placement parameters work correctly."""
+ from ngraph.types.base import FlowPlacement
net = Network()
net.add_node(Node("A"))
@@ -621,41 +302,28 @@ def test_flow_placement_parameter(self):
FlowPlacement.PROPORTIONAL,
FlowPlacement.EQUAL_BALANCED,
]:
- saturated = net.saturated_edges("A", "C", flow_placement=flow_placement)
- sensitivity = net.sensitivity_analysis(
- "A", "C", flow_placement=flow_placement
- )
-
- key = ("A", "C")
- assert key in saturated
- assert key in sensitivity
+ result = max_flow(net, "A", "C", flow_placement=flow_placement)
+ assert result == {("A", "C"): 5.0}
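+ # Both strategies are expected to yield the same 5.0 here. (Assumed
+ # semantics, inferred from the enum names rather than verified against the
+ # solver: PROPORTIONAL divides flow across parallel equal-cost paths in
+ # proportion to capacity, EQUAL_BALANCED divides it evenly; the two differ
+ # only when flow must actually be split.)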
def test_shortest_path_parameter(self):
- """Test that shortest_path parameter works with both methods."""
+ """Test that shortest_path parameter works correctly."""
net = Network()
for node in ["A", "B", "C", "D"]:
net.add_node(Node(node))
- # Short path: A -> B -> D (cost 2)
+ # Short path: A -> B -> D (cost 2, capacity 3)
net.add_link(Link("A", "B", capacity=5.0, cost=1))
net.add_link(Link("B", "D", capacity=3.0, cost=1))
- # Long path: A -> C -> D (cost 4)
+ # Long path: A -> C -> D (cost 4, capacity 4)
net.add_link(Link("A", "C", capacity=4.0, cost=2))
net.add_link(Link("C", "D", capacity=6.0, cost=2))
- # Test with shortest_path=True
- saturated_sp = net.saturated_edges("A", "D", shortest_path=True)
- sensitivity_sp = net.sensitivity_analysis("A", "D", shortest_path=True)
-
- key = ("A", "D")
- assert key in saturated_sp
- assert key in sensitivity_sp
-
- # Test with shortest_path=False
- saturated_all = net.saturated_edges("A", "D", shortest_path=False)
- sensitivity_all = net.sensitivity_analysis("A", "D", shortest_path=False)
+ # Test with shortest_path=True (single augmentation on lowest cost path)
+ result_sp = max_flow(net, "A", "D", shortest_path=True)
+ assert result_sp == {("A", "D"): 3.0} # Limited by B->D capacity
- assert key in saturated_all
- assert key in sensitivity_all
+ # Test with shortest_path=False (full max flow using all paths)
+ result_all = max_flow(net, "A", "D", shortest_path=False)
+ assert result_all == {("A", "D"): 7.0} # 3.0 via B + 4.0 via C
diff --git a/tests/model/test_network_integration.py b/tests/model/test_network_integration.py
index e23f46c..749f3ae 100644
--- a/tests/model/test_network_integration.py
+++ b/tests/model/test_network_integration.py
@@ -11,6 +11,7 @@
import pytest
from ngraph.model.network import Link, Network, Node, RiskGroup
+from ngraph.solver.maxflow import max_flow
class TestNetworkIntegration:
@@ -29,26 +30,6 @@ def diamond_network(self):
net.add_link(Link("C", "D"))
return net
- def test_end_to_end_flow_analysis(self, diamond_network):
- """Test complete flow analysis workflow on diamond network."""
- # Basic max flow
- flow = diamond_network.max_flow("A", "D")
- assert flow[("A", "D")] == 2.0
-
- # Saturated edges analysis returns dict format
- saturated = diamond_network.saturated_edges("A", "D")
- assert isinstance(saturated, dict)
- assert len(saturated) > 0
-
- # Sensitivity analysis
- sensitivity = diamond_network.sensitivity_analysis("A", "D")
- assert len(sensitivity) > 0
-
- # All methods should work together consistently
- assert isinstance(flow, dict)
- assert isinstance(saturated, dict) # dict format, not list
- assert isinstance(sensitivity, dict)
-
def test_risk_group_with_flow_analysis(self):
"""Test integration of risk groups with flow analysis."""
net = Network()
@@ -63,195 +44,15 @@ def test_risk_group_with_flow_analysis(self):
net.risk_groups["critical"] = RiskGroup("critical")
# Flow should work normally when risk group is enabled
- flow = net.max_flow("A", "D")
+ flow = max_flow(net, "A", "D")
assert flow[("A", "D")] == 1.0
# Flow should be 0 when critical nodes are disabled
net.disable_risk_group("critical")
- flow = net.max_flow("A", "D")
+ flow = max_flow(net, "A", "D")
assert flow[("A", "D")] == 0.0
# Flow should resume when risk group is re-enabled
net.enable_risk_group("critical")
- flow = net.max_flow("A", "D")
+ flow = max_flow(net, "A", "D")
assert flow[("A", "D")] == 1.0
-
- def test_complex_network_construction(self):
- """Test building and analyzing a complex multi-tier network."""
- net = Network()
-
- # Create a 3-tier network: sources -> transit -> sinks
- sources = ["src-1", "src-2", "src-3"]
- transit = ["transit-1", "transit-2"]
- sinks = ["sink-1", "sink-2"]
-
- # Add all nodes
- for node in sources + transit + sinks:
- net.add_node(Node(node))
-
- # Connect sources to transit (full mesh)
- for src in sources:
- for t in transit:
- net.add_link(Link(src, t, capacity=2.0))
-
- # Connect transit to sinks (full mesh)
- for t in transit:
- for sink in sinks:
- net.add_link(Link(t, sink, capacity=3.0))
-
- # Analyze flow characteristics
- total_flow = 0
- for src in sources:
- for sink in sinks:
- flow = net.max_flow(src, sink)
- total_flow += flow[(src, sink)]
-
- # Should have meaningful flow through the network
- assert total_flow > 0
-
- # Network should have correct structure
- assert len(net.nodes) == 7
- assert len(net.links) == 10 # 3*2 + 2*2
-
- def test_disabled_node_propagation(self):
- """Test how disabled nodes affect complex network operations."""
- net = Network()
-
- # Create linear chain: A->B->C->D
- nodes = ["A", "B", "C", "D"]
- for node in nodes:
- net.add_node(Node(node))
-
- for i in range(len(nodes) - 1):
- net.add_link(Link(nodes[i], nodes[i + 1]))
-
- # Initially flow should exist
- flow = net.max_flow("A", "D")
- assert flow[("A", "D")] == 1.0
-
- # Disable middle node - should break flow
- net.disable_node("B")
- flow = net.max_flow("A", "D")
- assert flow[("A", "D")] == 0.0
-
- # Re-enable, flow should resume
- net.enable_node("B")
- flow = net.max_flow("A", "D")
- assert flow[("A", "D")] == 1.0
-
- # Disable different middle node
- net.disable_node("C")
- flow = net.max_flow("A", "D")
- assert flow[("A", "D")] == 0.0
-
- def test_network_with_mixed_capacities(self):
- """Test analysis of networks with varying link capacities."""
- net = Network()
-
- nodes = ["A", "B", "C", "D", "E"]
- for node in nodes:
- net.add_node(Node(node))
-
- # Create network with bottlenecks
- capacities = [
- ("A", "B", 10.0),
- ("A", "C", 5.0),
- ("B", "D", 2.0), # bottleneck
- ("C", "D", 8.0),
- ("C", "E", 3.0),
- ("D", "E", 15.0),
- ]
-
- for src, tgt, cap in capacities:
- net.add_link(Link(src, tgt, capacity=cap))
-
- # Max flow should be limited by bottlenecks
- flow_ad = net.max_flow("A", "D")
- net.max_flow("A", "E")
-
- # A->D gets flow through multiple paths: A->B->D (2.0) + A->C->D (5.0) = 7.0
- # The bottlenecks are the B->D (2.0) and A->C (5.0) capacities
- assert flow_ad[("A", "D")] == 7.0 # A->B->D (2.0) + A->C->D (5.0) = 7.0
-
- # Test that saturated edges identify bottlenecks
- saturated = net.saturated_edges("A", "E")
- assert len(saturated) > 0
-
- def test_large_network_performance(self):
- """Test performance characteristics with larger networks."""
- net = Network()
-
- # Create a larger network (grid-like)
- size = 10
- for i in range(size):
- for j in range(size):
- net.add_node(Node(f"node-{i}-{j}"))
-
- # Add horizontal and vertical connections
- for i in range(size):
- for j in range(size - 1):
- # Horizontal links
- net.add_link(Link(f"node-{i}-{j}", f"node-{i}-{j + 1}"))
- # Vertical links
- net.add_link(Link(f"node-{j}-{i}", f"node-{j + 1}-{i}"))
-
- # Should be able to handle this size efficiently
- assert len(net.nodes) == size * size
- assert len(net.links) == 2 * size * (size - 1)
-
- # Basic operations should still work
- flow = net.max_flow("node-0-0", "node-9-9")
- assert len(flow) == 1
- assert flow[("node-0-0", "node-9-9")] > 0
-
- def test_network_modification_during_analysis(self):
- """Test network state consistency during complex operations."""
- net = Network()
-
- # Build initial network
- for node in ["A", "B", "C"]:
- net.add_node(Node(node))
-
- link_ab = Link("A", "B")
- link_bc = Link("B", "C")
- net.add_link(link_ab)
- net.add_link(link_bc)
-
- # Get initial flow
- initial_flow = net.max_flow("A", "C")
- assert initial_flow[("A", "C")] == 1.0
-
- # Modify network and verify consistency
- net.add_node(Node("D"))
- net.add_link(Link("A", "D"))
- net.add_link(Link("D", "C"))
-
- # Flow should increase with additional path
- new_flow = net.max_flow("A", "C")
- assert new_flow[("A", "C")] >= initial_flow[("A", "C")]
-
- # Network should maintain internal consistency
- assert len(net.nodes) == 4
- assert len(net.links) == 4
-
- def test_comprehensive_error_handling(self):
- """Test error handling in complex scenarios."""
- net = Network()
-
- # Empty network operations should raise errors for non-matching patterns
- with pytest.raises(ValueError, match="No source nodes found matching"):
- net.max_flow("nonexistent", "also_nonexistent")
-
- with pytest.raises(ValueError, match="No source nodes found matching"):
- net.saturated_edges("none", "zero")
-
- with pytest.raises(ValueError, match="No source nodes found matching"):
- net.sensitivity_analysis("void", "null")
-
- # Single node operations
- net.add_node(Node("lonely"))
- assert net.max_flow("lonely", "lonely") == {("lonely", "lonely"): 0}
-
- # Disconnected network
- net.add_node(Node("isolated"))
- assert net.max_flow("lonely", "isolated") == {("lonely", "isolated"): 0}
diff --git a/tests/model/test_path.py b/tests/model/test_path.py
new file mode 100644
index 0000000..2d409a9
--- /dev/null
+++ b/tests/model/test_path.py
@@ -0,0 +1,348 @@
+"""Tests for Path dataclass."""
+
+import pytest
+
+from ngraph.model.path import Path
+from ngraph.types.dto import EdgeRef
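+
+# Path data used throughout these tests is a tuple of (node, parallel_edges)
+# pairs: each pair lists the EdgeRef objects leaving that node toward the
+# next hop, and the terminal node carries an empty tuple. Illustrative
+# two-hop example (mirrors the fixtures below):
+#
+# (("A", (EdgeRef("link1", "AB"),)), # A -> B via link1
+# ("B", (EdgeRef("link2", "BC"),)), # B -> C via link2
+# ("C", ())) # destination: no outgoing edges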
+
+
+def test_path_basic_creation():
+ """Test basic Path creation."""
+ path_data = (
+ ("A", (EdgeRef("link1", "AB"),)),
+ ("B", ()),
+ )
+ path = Path(path_data, cost=1.0)
+
+ assert path.cost == 1.0
+ assert len(path.path) == 2
+ assert path.src_node == "A"
+ assert path.dst_node == "B"
+
+
+def test_path_post_init_populates_fields():
+ """Test that __post_init__ populates edges, nodes, and edge_tuples."""
+ edge1 = EdgeRef("link1", "AB")
+ edge2 = EdgeRef("link2", "BC")
+ path_data = (
+ ("A", (edge1,)),
+ ("B", (edge2,)),
+ ("C", ()),
+ )
+ path = Path(path_data, cost=2.0)
+
+ assert "A" in path.nodes
+ assert "B" in path.nodes
+ assert "C" in path.nodes
+ assert edge1 in path.edges
+ assert edge2 in path.edges
+ assert (edge1,) in path.edge_tuples
+ assert (edge2,) in path.edge_tuples
+
+
+def test_path_getitem():
+ """Test __getitem__ access."""
+ edge1 = EdgeRef("link1", "AB")
+ path_data = (
+ ("A", (edge1,)),
+ ("B", ()),
+ )
+ path = Path(path_data, cost=1.0)
+
+ assert path[0] == ("A", (edge1,))
+ assert path[1] == ("B", ())
+ assert path[-1] == ("B", ())
+
+
+def test_path_iter():
+ """Test __iter__ iteration."""
+ edge1 = EdgeRef("link1", "AB")
+ edge2 = EdgeRef("link2", "BC")
+ path_data = (
+ ("A", (edge1,)),
+ ("B", (edge2,)),
+ ("C", ()),
+ )
+ path = Path(path_data, cost=2.0)
+
+ elements = list(path)
+ assert len(elements) == 3
+ assert elements[0] == ("A", (edge1,))
+ assert elements[1] == ("B", (edge2,))
+ assert elements[2] == ("C", ())
+
+
+def test_path_len():
+ """Test __len__."""
+ path_data = (
+ ("A", (EdgeRef("link1", "AB"),)),
+ ("B", (EdgeRef("link2", "BC"),)),
+ ("C", ()),
+ )
+ path = Path(path_data, cost=2.0)
+
+ assert len(path) == 3
+
+
+def test_path_src_node():
+ """Test src_node property."""
+ path_data = (
+ ("A", (EdgeRef("link1", "AB"),)),
+ ("B", (EdgeRef("link2", "BC"),)),
+ ("C", ()),
+ )
+ path = Path(path_data, cost=2.0)
+
+ assert path.src_node == "A"
+
+
+def test_path_dst_node():
+ """Test dst_node property."""
+ path_data = (
+ ("A", (EdgeRef("link1", "AB"),)),
+ ("B", (EdgeRef("link2", "BC"),)),
+ ("C", ()),
+ )
+ path = Path(path_data, cost=2.0)
+
+ assert path.dst_node == "C"
+
+
+def test_path_lt():
+ """Test __lt__ comparison based on cost."""
+ path1 = Path((("A", ()), ("B", ())), cost=1.0)
+ path2 = Path((("A", ()), ("B", ())), cost=2.0)
+
+ assert path1 < path2
+ assert not path2 < path1
+ assert not path1 < path1
+
+
+def test_path_lt_with_non_path():
+ """Test __lt__ with non-Path returns NotImplemented."""
+ path = Path((("A", ()), ("B", ())), cost=1.0)
+
+ result = path.__lt__("not a path")
+ assert result is NotImplemented
+
+
+def test_path_eq():
+ """Test __eq__ comparison."""
+ edge1 = EdgeRef("link1", "AB")
+ path_data = (
+ ("A", (edge1,)),
+ ("B", ()),
+ )
+ path1 = Path(path_data, cost=1.0)
+ path2 = Path(path_data, cost=1.0)
+
+ assert path1 == path2
+
+
+def test_path_eq_different_cost():
+ """Test __eq__ with different costs."""
+ path_data = (
+ ("A", (EdgeRef("link1", "AB"),)),
+ ("B", ()),
+ )
+ path1 = Path(path_data, cost=1.0)
+ path2 = Path(path_data, cost=2.0)
+
+ assert path1 != path2
+
+
+def test_path_eq_different_path():
+ """Test __eq__ with different paths."""
+ path1 = Path((("A", ()), ("B", ())), cost=1.0)
+ path2 = Path((("A", ()), ("C", ())), cost=1.0)
+
+ assert path1 != path2
+
+
+def test_path_eq_with_non_path():
+ """Test __eq__ with non-Path returns NotImplemented."""
+ path = Path((("A", ()), ("B", ())), cost=1.0)
+
+ result = path.__eq__("not a path")
+ assert result is NotImplemented
+
+
+def test_path_hash():
+ """Test __hash__ for set/dict usage."""
+ edge1 = EdgeRef("link1", "AB")
+ path_data = (
+ ("A", (edge1,)),
+ ("B", ()),
+ )
+ path1 = Path(path_data, cost=1.0)
+ path2 = Path(path_data, cost=1.0)
+
+ # Same path should have same hash
+ assert hash(path1) == hash(path2)
+
+ # Can be used in sets
+ path_set = {path1, path2}
+ assert len(path_set) == 1
+
+
+def test_path_hash_different_paths():
+ """Test __hash__ for different paths."""
+ path1 = Path((("A", ()), ("B", ())), cost=1.0)
+ path2 = Path((("A", ()), ("C", ())), cost=1.0)
+
+ # Different paths should (likely) have different hashes
+ # Note: hash collisions are possible but unlikely for these simple cases
+ assert hash(path1) != hash(path2)
+
+
+def test_path_repr():
+ """Test __repr__ string representation."""
+ path_data = (
+ ("A", (EdgeRef("link1", "AB"),)),
+ ("B", ()),
+ )
+ path = Path(path_data, cost=1.0)
+
+ repr_str = repr(path)
+ assert "Path(" in repr_str
+ assert "cost=1.0" in repr_str
+
+
+def test_path_edges_seq():
+ """Test edges_seq cached property."""
+ edge1 = EdgeRef("link1", "AB")
+ edge2 = EdgeRef("link2", "BC")
+ path_data = (
+ ("A", (edge1,)),
+ ("B", (edge2,)),
+ ("C", ()),
+ )
+ path = Path(path_data, cost=2.0)
+
+ edges_seq = path.edges_seq
+ assert len(edges_seq) == 2
+ assert edges_seq[0] == (edge1,)
+ assert edges_seq[1] == (edge2,)
+
+
+def test_path_edges_seq_single_node():
+ """Test edges_seq with single node path."""
+ path = Path((("A", ()),), cost=0.0)
+
+ edges_seq = path.edges_seq
+ assert edges_seq == ()
+
+
+def test_path_edges_seq_two_nodes():
+ """Test edges_seq with two node path."""
+ edge1 = EdgeRef("link1", "AB")
+ path = Path((("A", (edge1,)), ("B", ())), cost=1.0)
+
+ edges_seq = path.edges_seq
+ assert len(edges_seq) == 1
+ assert edges_seq[0] == (edge1,)
+
+
+def test_path_nodes_seq():
+ """Test nodes_seq cached property."""
+ edge1 = EdgeRef("link1", "AB")
+ edge2 = EdgeRef("link2", "BC")
+ path_data = (
+ ("A", (edge1,)),
+ ("B", (edge2,)),
+ ("C", ()),
+ )
+ path = Path(path_data, cost=2.0)
+
+ nodes_seq = path.nodes_seq
+ assert nodes_seq == ("A", "B", "C")
+
+
+def test_path_get_sub_path():
+ """Test get_sub_path method."""
+ edge1 = EdgeRef("link1", "AB")
+ edge2 = EdgeRef("link2", "BC")
+ edge3 = EdgeRef("link3", "CD")
+ path_data = (
+ ("A", (edge1,)),
+ ("B", (edge2,)),
+ ("C", (edge3,)),
+ ("D", ()),
+ )
+ path = Path(path_data, cost=3.0)
+
+ sub_path = path.get_sub_path("C")
+ assert sub_path.src_node == "A"
+ assert sub_path.dst_node == "C"
+ assert len(sub_path) == 3
+ assert sub_path[-1] == ("C", ())
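+ # Note: the new terminal element is ("C", ()) rather than ("C", (edge3,)):
+ # truncation clears the outgoing edges of the node the sub-path ends on.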
+
+
+def test_path_get_sub_path_source_node():
+ """Test get_sub_path with source node as destination."""
+ edge1 = EdgeRef("link1", "AB")
+ path_data = (
+ ("A", (edge1,)),
+ ("B", ()),
+ )
+ path = Path(path_data, cost=1.0)
+
+ sub_path = path.get_sub_path("A")
+ assert sub_path.src_node == "A"
+ assert sub_path.dst_node == "A"
+ assert len(sub_path) == 1
+ assert sub_path[0] == ("A", ())
+
+
+def test_path_get_sub_path_node_not_found():
+ """Test get_sub_path with non-existent node."""
+ edge1 = EdgeRef("link1", "AB")
+ path_data = (
+ ("A", (edge1,)),
+ ("B", ()),
+ )
+ path = Path(path_data, cost=1.0)
+
+ with pytest.raises(ValueError, match="Node 'Z' not found in path"):
+ path.get_sub_path("Z")
+
+
+def test_path_parallel_edges():
+ """Test Path with parallel edges."""
+ edge1 = EdgeRef("link1", "AB")
+ edge2 = EdgeRef("link2", "AB")
+ path_data = (
+ ("A", (edge1, edge2)),
+ ("B", ()),
+ )
+ path = Path(path_data, cost=1.0)
+
+ assert edge1 in path.edges
+ assert edge2 in path.edges
+ assert (edge1, edge2) in path.edge_tuples
+
+
+def test_path_sorting():
+ """Test that paths can be sorted by cost."""
+ path1 = Path((("A", ()), ("B", ())), cost=3.0)
+ path2 = Path((("A", ()), ("B", ())), cost=1.0)
+ path3 = Path((("A", ()), ("B", ())), cost=2.0)
+
+ sorted_paths = sorted([path1, path2, path3])
+ assert sorted_paths[0].cost == 1.0
+ assert sorted_paths[1].cost == 2.0
+ assert sorted_paths[2].cost == 3.0
+
+
+def test_path_set_deduplication():
+ """Test that identical paths are deduplicated in sets."""
+ path_data = (
+ ("A", (EdgeRef("link1", "AB"),)),
+ ("B", ()),
+ )
+ path1 = Path(path_data, cost=1.0)
+ path2 = Path(path_data, cost=1.0)
+ path3 = Path(path_data, cost=2.0)
+
+ path_set = {path1, path2, path3}
+ assert len(path_set) == 2 # path1 and path2 are identical
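+
+
+def test_path_heap_ordering_sketch():
+ """Illustrative sketch: cost-based __lt__ makes Path heap-compatible.
+
+ This is an example of usage enabled by the ordering contract tested
+ above, not a requirement of the Path API.
+ """
+ import heapq
+
+ paths = [
+ Path((("A", ()), ("B", ())), cost=3.0),
+ Path((("A", ()), ("B", ())), cost=1.0),
+ Path((("A", ()), ("B", ())), cost=2.0),
+ ]
+ heapq.heapify(paths) # heapify orders via Path.__lt__, i.e. by cost
+ assert heapq.heappop(paths).cost == 1.0
+ assert heapq.heappop(paths).cost == 2.0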
diff --git a/tests/model/test_paths_api.py b/tests/model/test_paths_api.py
deleted file mode 100644
index 32b0cf5..0000000
--- a/tests/model/test_paths_api.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from __future__ import annotations
-
-import math
-
-from ngraph.model.network import Link, Network, Node
-from ngraph.model.view import NetworkView
-
-
-def _build_triangle_network() -> Network:
- net = Network()
- net.add_node(Node("S"))
- net.add_node(Node("X"))
- net.add_node(Node("T"))
- net.add_link(Link("S", "X", capacity=10.0, cost=1.0))
- net.add_link(Link("X", "T", capacity=10.0, cost=1.0))
- net.add_link(Link("S", "T", capacity=10.0, cost=5.0))
- return net
-
-
-def test_network_shortest_path_costs_and_paths_basic() -> None:
- net = _build_triangle_network()
-
- costs = net.shortest_path_costs("^S$", "^T$")
- assert costs[("^S$", "^T$")] == 2.0
-
- res = net.shortest_paths("^S$", "^T$")
- paths = res[("^S$", "^T$")]
- assert paths
- # Best path must be S->X->T with cost 2.0
- assert any(
- p.nodes_seq == ("S", "X", "T") and math.isclose(p.cost, 2.0) for p in paths
- )
-
-
-def test_network_k_shortest_paths_factor_limits_worse_routes() -> None:
- net = Network()
- for n in ["S", "A", "B", "C", "T"]:
- net.add_node(Node(n))
- net.add_link(Link("S", "A", capacity=10.0, cost=1.0))
- net.add_link(Link("A", "T", capacity=10.0, cost=1.0)) # cost 2
- net.add_link(Link("S", "B", capacity=10.0, cost=1.0))
- net.add_link(Link("B", "T", capacity=10.0, cost=1.0)) # cost 2
- net.add_link(Link("S", "C", capacity=10.0, cost=2.0))
- net.add_link(Link("C", "T", capacity=10.0, cost=2.0)) # cost 4
-
- res = net.k_shortest_paths("^S$", "^T$", max_k=5, max_path_cost_factor=1.0)
- paths = res[("^S$", "^T$")]
- assert len(paths) <= 2
- assert all(math.isclose(p.cost, 2.0) for p in paths)
- assert all("C" not in p.nodes_seq for p in paths)
-
-
-def test_network_split_parallel_edges_enumeration() -> None:
- net = Network()
- for n in ["S", "A", "T"]:
- net.add_node(Node(n))
- # Add two parallel edges per hop with equal cost
- net.add_link(Link("S", "A", capacity=10.0, cost=1.0))
- net.add_link(Link("S", "A", capacity=10.0, cost=1.0))
- net.add_link(Link("A", "T", capacity=10.0, cost=1.0))
- net.add_link(Link("A", "T", capacity=10.0, cost=1.0))
-
- no_split = net.shortest_paths("^S$", "^T$", split_parallel_edges=False)
- assert len(no_split[("^S$", "^T$")]) == 1
-
- split = net.shortest_paths("^S$", "^T$", split_parallel_edges=True)
- assert len(split[("^S$", "^T$")]) == 4
-
-
-def test_network_pairwise_labels_mapping() -> None:
- net = Network()
- for n in ["S1", "S2", "T1", "T2"]:
- net.add_node(Node(n))
- net.add_link(Link("S1", "T1", capacity=10.0, cost=3.0))
- net.add_link(Link("S2", "T1", capacity=10.0, cost=1.0))
- # T2 unreachable
-
- res_costs = net.shortest_path_costs("S(1|2)", "T(1|2)", mode="pairwise")
- assert res_costs[("1", "1")] == 3.0
- assert res_costs[("2", "1")] == 1.0
- assert math.isinf(res_costs[("1", "2")])
- assert math.isinf(res_costs[("2", "2")])
-
-
-def test_view_respects_exclusions_and_disabled_nodes() -> None:
- net = _build_triangle_network()
- # Exclude the middle node; no path should remain
- view = NetworkView.from_excluded_sets(net, excluded_nodes=["X"])
- costs = view.shortest_path_costs("^S$", "^T$")
- # Path via X is blocked; direct S->T remains with cost 5
- assert math.isclose(costs[("^S$", "^T$")], 5.0, rel_tol=1e-9)
- sp = view.shortest_paths("^S$", "^T$")[("^S$", "^T$")]
- assert sp and all(p.nodes_seq == ("S", "T") for p in sp)
-
- # Disable T; also no paths
- net.nodes["T"].disabled = True
- costs2 = net.shortest_path_costs("^S$", "^T$")
- assert math.isinf(costs2[("^S$", "^T$")])
diff --git a/tests/model/test_view.py b/tests/model/test_view.py
deleted file mode 100644
index 90e7688..0000000
--- a/tests/model/test_view.py
+++ /dev/null
@@ -1,602 +0,0 @@
-"""Tests for NetworkView class."""
-
-import pytest
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.model.network import Link, Network, Node, RiskGroup
-from ngraph.model.view import NetworkView
-
-
-class TestNetworkViewBasics:
- """Test basic NetworkView functionality."""
-
- def test_create_empty_view(self):
- """Test creating a NetworkView with empty exclusions."""
- net = Network()
- view = NetworkView(_base=net)
-
- assert view._base is net
- assert view._excluded_nodes == frozenset()
- assert view._excluded_links == frozenset()
-
- def test_from_excluded_sets(self):
- """Test creating NetworkView using from_excluded_sets factory method."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- link = Link("A", "B", capacity=100)
- net.add_link(link)
-
- view = NetworkView.from_excluded_sets(
- net, excluded_nodes=["A"], excluded_links=[link.id]
- )
-
- assert view._base is net
- assert view._excluded_nodes == frozenset(["A"])
- assert view._excluded_links == frozenset([link.id])
-
- def test_from_excluded_sets_empty(self):
- """Test from_excluded_sets with empty iterables."""
- net = Network()
- view = NetworkView.from_excluded_sets(net)
- assert view._excluded_nodes == frozenset()
- assert view._excluded_links == frozenset()
-
- def test_view_is_frozen(self):
- """Test that NetworkView is immutable."""
- net = Network()
- view = NetworkView(_base=net)
-
- with pytest.raises(AttributeError):
- view._base = Network() # type: ignore
- with pytest.raises(AttributeError):
- view._excluded_nodes = frozenset(["A"]) # type: ignore
-
- def test_attrs_delegation(self):
- """Test that attrs and risk_groups are delegated to base network."""
- net = Network()
- net.attrs["test"] = "value"
- net.risk_groups["rg1"] = RiskGroup("rg1")
-
- view = NetworkView(_base=net)
-
- assert view.attrs == {"test": "value"}
- assert "rg1" in view.risk_groups
- assert view.risk_groups["rg1"].name == "rg1"
-
-
-class TestNetworkViewVisibility:
- """Test node and link visibility logic."""
-
- def setup_method(self):
- """Set up test network."""
- self.net = Network()
-
- # Add nodes
- self.net.add_node(Node("A"))
- self.net.add_node(Node("B"))
- self.net.add_node(Node("C"))
- self.net.add_node(Node("D", disabled=True)) # scenario-disabled
-
- # Add links
- self.link_ab = Link("A", "B")
- self.link_bc = Link("B", "C")
- self.link_cd = Link("C", "D")
- self.link_disabled = Link("A", "C", disabled=True) # scenario-disabled
-
- self.net.add_link(self.link_ab)
- self.net.add_link(self.link_bc)
- self.net.add_link(self.link_cd)
- self.net.add_link(self.link_disabled)
-
- def test_node_visibility_no_exclusions(self):
- """Test node visibility with no analysis exclusions."""
- view = NetworkView(_base=self.net)
-
- assert not view.is_node_hidden("A")
- assert not view.is_node_hidden("B")
- assert not view.is_node_hidden("C")
- assert view.is_node_hidden("D") # scenario-disabled
- assert view.is_node_hidden("NONEXISTENT") # doesn't exist
-
- def test_node_visibility_with_exclusions(self):
- """Test node visibility with analysis exclusions."""
- view = NetworkView(_base=self.net, _excluded_nodes=frozenset(["B"]))
-
- assert not view.is_node_hidden("A")
- assert view.is_node_hidden("B") # analysis-excluded
- assert not view.is_node_hidden("C")
- assert view.is_node_hidden("D") # scenario-disabled
-
- def test_link_visibility_no_exclusions(self):
- """Test link visibility with no analysis exclusions."""
- view = NetworkView(_base=self.net)
-
- assert not view.is_link_hidden(self.link_ab.id)
- assert not view.is_link_hidden(self.link_bc.id)
- assert view.is_link_hidden(self.link_cd.id) # connected to disabled node D
- assert view.is_link_hidden(self.link_disabled.id) # scenario-disabled
- assert view.is_link_hidden("NONEXISTENT") # doesn't exist
-
- def test_link_visibility_with_exclusions(self):
- """Test link visibility with analysis exclusions."""
- view = NetworkView(
- _base=self.net,
- _excluded_nodes=frozenset(["B"]),
- _excluded_links=frozenset([self.link_ab.id]),
- )
-
- assert view.is_link_hidden(self.link_ab.id) # analysis-excluded
- assert view.is_link_hidden(self.link_bc.id) # connected to excluded node B
- assert view.is_link_hidden(
- self.link_disabled.id
- ) # A-C, both visible, but scenario-disabled
- assert view.is_link_hidden(self.link_cd.id) # connected to disabled node D
-
- def test_nodes_property(self):
- """Test nodes property returns only visible nodes."""
- view = NetworkView(_base=self.net, _excluded_nodes=frozenset(["B"]))
-
- visible_nodes = view.nodes
-
- assert "A" in visible_nodes
- assert "B" not in visible_nodes # analysis-excluded
- assert "C" in visible_nodes
- assert "D" not in visible_nodes # scenario-disabled
- assert len(visible_nodes) == 2
-
- def test_links_property(self):
- """Test links property returns only visible links."""
- view = NetworkView(_base=self.net, _excluded_nodes=frozenset(["B"]))
-
- visible_links = view.links
-
- # Only links not connected to hidden nodes and not disabled
- expected_links = {
- link_id
- for link_id, link in self.net.links.items()
- if not view.is_link_hidden(link_id)
- }
-
- assert set(visible_links.keys()) == expected_links
- # Should exclude links connected to B, D, and the disabled link
-
-
-class TestNetworkViewCaching:
- """Test NetworkView graph caching functionality."""
-
- def setup_method(self):
- """Set up test network."""
- self.net = Network()
- for i in range(10):
- self.net.add_node(Node(f"node_{i}"))
- for i in range(9):
- self.net.add_link(Link(f"node_{i}", f"node_{i + 1}"))
-
- self.view = NetworkView.from_excluded_sets(self.net, excluded_nodes=["node_0"])
-
- def test_initial_cache_state(self):
- """Test that cache doesn't exist initially."""
- assert not hasattr(self.view, "_graph_cache")
-
- def test_cache_initialization(self):
- """Test cache is initialized on first graph build."""
- graph = self.view.to_strict_multidigraph()
-
- assert hasattr(self.view, "_graph_cache")
- assert (True, False) in self.view._graph_cache # type: ignore
- assert self.view._graph_cache[(True, False)] is graph # type: ignore
-
- def test_cache_hit(self):
- """Test that subsequent calls return cached graph."""
- graph1 = self.view.to_strict_multidigraph()
- graph2 = self.view.to_strict_multidigraph()
-
- assert graph1 is graph2 # Same object reference
-
- def test_cache_per_add_reverse_parameter(self):
- """Test that cache is separate for different add_reverse values."""
- graph_with_reverse = self.view.to_strict_multidigraph(add_reverse=True)
- graph_without_reverse = self.view.to_strict_multidigraph(add_reverse=False)
-
- assert graph_with_reverse is not graph_without_reverse
- assert hasattr(self.view, "_graph_cache")
- assert (True, False) in self.view._graph_cache # type: ignore
- assert (False, False) in self.view._graph_cache # type: ignore
-
- # Subsequent calls should hit cache
- assert self.view.to_strict_multidigraph(add_reverse=True) is graph_with_reverse
- assert (
- self.view.to_strict_multidigraph(add_reverse=False) is graph_without_reverse
- )
-
- def test_different_views_independent_cache(self):
- """Test that different NetworkView instances have independent caches."""
- view1 = NetworkView.from_excluded_sets(self.net, excluded_nodes=["node_0"])
- view2 = NetworkView.from_excluded_sets(self.net, excluded_nodes=["node_1"])
-
- graph1 = view1.to_strict_multidigraph()
- graph2 = view2.to_strict_multidigraph()
-
- assert graph1 is not graph2
- assert hasattr(view1, "_graph_cache")
- assert hasattr(view2, "_graph_cache")
- assert view1._graph_cache is not view2._graph_cache # type: ignore
-
-
-class TestNetworkViewFlowMethods:
- """Test NetworkView flow analysis methods."""
-
- def setup_method(self):
- """Set up test network with flow capacity."""
- self.net = Network()
-
- # Create a simple path: A -> B -> C -> D
- for name in ["A", "B", "C", "D"]:
- self.net.add_node(Node(name))
-
- self.net.add_link(Link("A", "B", capacity=10.0))
- self.net.add_link(Link("B", "C", capacity=5.0)) # bottleneck
- self.net.add_link(Link("C", "D", capacity=15.0))
-
- self.view = NetworkView(_base=self.net)
-
- def test_max_flow_delegation(self):
- """Test that max_flow delegates to base network internal method."""
- flows = self.view.max_flow("A", "D")
-
- assert isinstance(flows, dict)
- assert len(flows) == 1
- # Should get bottleneck capacity of 5.0
- flow_value = list(flows.values())[0]
- assert flow_value == 5.0
-
- def test_max_flow_with_summary(self):
- """Test max_flow_with_summary method."""
- results = self.view.max_flow_with_summary("A", "D")
-
- assert isinstance(results, dict)
- assert len(results) == 1
-
- flow_value, summary = list(results.values())[0]
- assert flow_value == 5.0
- assert hasattr(summary, "total_flow")
- assert summary.total_flow == 5.0
-
- def test_max_flow_with_graph(self):
- """Test max_flow_with_graph method."""
- results = self.view.max_flow_with_graph("A", "D")
-
- assert isinstance(results, dict)
- assert len(results) == 1
-
- flow_value, graph = list(results.values())[0]
- assert flow_value == 5.0
- assert hasattr(graph, "nodes")
- assert hasattr(graph, "edges")
-
- def test_max_flow_detailed(self):
- """Test max_flow_detailed method."""
- results = self.view.max_flow_detailed("A", "D")
-
- assert isinstance(results, dict)
- assert len(results) == 1
-
- flow_value, summary, graph = list(results.values())[0]
- assert flow_value == 5.0
- assert hasattr(summary, "total_flow")
- assert hasattr(graph, "nodes")
-
- def test_saturated_edges(self):
- """Test saturated_edges method."""
- results = self.view.saturated_edges("A", "D")
-
- assert isinstance(results, dict)
- assert len(results) == 1
-
- saturated_list = list(results.values())[0]
- assert isinstance(saturated_list, list)
- # Should identify B->C as saturated (capacity 5.0, fully utilized)
-
- def test_sensitivity_analysis(self):
- """Test sensitivity_analysis method."""
- results = self.view.sensitivity_analysis("A", "D", change_amount=1.0)
-
- assert isinstance(results, dict)
- assert len(results) == 1
-
- sensitivity_dict = list(results.values())[0]
- assert isinstance(sensitivity_dict, dict)
-
- def test_flow_methods_with_exclusions(self):
- """Test flow methods work correctly with node/link exclusions."""
- # Exclude node B to break the path
- view = NetworkView(_base=self.net, _excluded_nodes=frozenset(["B"]))
-
- flows = view.max_flow("A", "D")
- flow_value = list(flows.values())[0]
- assert flow_value == 0.0 # No path available
-
- def test_flow_methods_parameters(self):
- """Test flow methods accept all expected parameters."""
- # Test with all parameters
- flows = self.view.max_flow(
- "A",
- "D",
- mode="combine",
- shortest_path=True,
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- assert isinstance(flows, dict)
-
- def test_max_flow_attribute_grouping_in_view(self):
- """Attribute grouping works through NetworkView and filters hidden nodes."""
- net = Network()
- net.add_node(Node("S1", attrs={"role_src": "edge"}))
- net.add_node(Node("S2", attrs={"role_src": "edge"}))
- net.add_node(Node("T1", attrs={"role_dst": "core"}))
- net.add_link(Link("S1", "T1", capacity=4.0))
- net.add_link(Link("S2", "T1", capacity=6.0))
-
- # Hide S2 in the view
- view = NetworkView(_base=net, _excluded_nodes=frozenset(["S2"]))
- flows = view.max_flow("attr:role_src", "attr:role_dst", mode="combine")
- # Only S1 contributes on source side
- assert flows == {("edge", "core"): 4.0}
-
-
-class TestNetworkViewSelectNodeGroups:
- """Test select_node_groups_by_path method."""
-
- def setup_method(self):
- """Set up test network with grouped nodes."""
- self.net = Network()
-
- # Add nodes with patterns
- nodes = [
- "dc1_rack1_server1",
- "dc1_rack1_server2",
- "dc1_rack2_server1",
- "dc1_rack2_server2",
- "dc2_rack1_server1",
- "dc2_rack1_server2",
- "edge_router1",
- "edge_router2",
- ]
-
- for name in nodes:
- self.net.add_node(Node(name))
-
- # Disable one node
- self.net.nodes["dc1_rack1_server2"].disabled = True
-
- self.view = NetworkView(_base=self.net)
-
- def test_select_all_visible_nodes(self):
- """Test selecting all visible nodes."""
- groups = self.view.select_node_groups_by_path(".*")
-
- # Should get one group with all visible nodes
- assert len(groups) == 1
- group_nodes = list(groups.values())[0]
-
- # Should exclude disabled node
- node_names = [node.name for node in group_nodes]
- assert "dc1_rack1_server2" not in node_names
- assert len(node_names) == 7 # 8 total - 1 disabled
-
- def test_select_with_capturing_groups(self):
- """Test selecting nodes with regex capturing groups."""
- groups = self.view.select_node_groups_by_path(r"(dc\d+)_.*")
-
- # Should group by datacenter
- assert "dc1" in groups
- assert "dc2" in groups
-
- dc1_nodes = [node.name for node in groups["dc1"]]
- dc2_nodes = [node.name for node in groups["dc2"]]
-
- # dc1 should have 3 nodes (4 total - 1 disabled)
- assert len(dc1_nodes) == 3
- assert "dc1_rack1_server2" not in dc1_nodes # disabled
-
- # dc2 should have 2 nodes
- assert len(dc2_nodes) == 2
-
- def test_select_with_exclusions(self):
- """Test selecting nodes with analysis exclusions."""
- view = NetworkView(
- _base=self.net, _excluded_nodes=frozenset(["dc1_rack1_server1"])
- )
-
- groups = view.select_node_groups_by_path(r"(dc1)_.*")
-
- if "dc1" in groups:
- dc1_nodes = [node.name for node in groups["dc1"]]
- # Should exclude both disabled and analysis-excluded nodes
- assert "dc1_rack1_server1" not in dc1_nodes # analysis-excluded
- assert "dc1_rack1_server2" not in dc1_nodes # scenario-disabled
- assert len(dc1_nodes) == 2 # only rack2 servers
- else:
- # If all nodes in group are hidden, group should be empty
- assert len(groups) == 0
-
- def test_select_no_matches(self):
- """Test selecting with pattern that matches no visible nodes."""
- groups = self.view.select_node_groups_by_path("nonexistent.*")
-
- assert len(groups) == 0
-
- def test_select_empty_after_filtering(self):
- """Test selecting where all matching nodes are hidden."""
- # Exclude all dc1 nodes
- view = NetworkView(
- _base=self.net,
- _excluded_nodes=frozenset(
- ["dc1_rack1_server1", "dc1_rack2_server1", "dc1_rack2_server2"]
- ),
- )
-
- groups = view.select_node_groups_by_path(r"(dc1)_.*")
-
- # Should return empty dict since all dc1 nodes are hidden
- assert len(groups) == 0
-
- def test_select_by_attribute_filters_hidden(self):
- """Attribute grouping respects view-level hidden nodes."""
- net = Network()
- net.add_node(Node("n1", attrs={"role": "edge"}))
- net.add_node(Node("n2", attrs={"role": "edge"}))
- net.add_node(Node("n3", attrs={"role": "core"}))
- # Hide n2
- view = NetworkView(_base=net, _excluded_nodes=frozenset(["n2"]))
-
- groups = view.select_node_groups_by_path("attr:role")
- assert set(groups.keys()) == {"edge", "core"}
- assert {n.name for n in groups["edge"]} == {"n1"}
- assert {n.name for n in groups["core"]} == {"n3"}
-
-
-class TestNetworkViewEdgeCases:
- """Test NetworkView edge cases and error conditions."""
-
- def test_view_of_empty_network(self):
- """Test NetworkView with empty base network."""
- net = Network()
- view = NetworkView(_base=net)
-
- assert len(view.nodes) == 0
- assert len(view.links) == 0
-
- # Should handle empty network gracefully
- graph = view.to_strict_multidigraph()
- assert len(graph.nodes) == 0
- assert len(graph.edges) == 0
-
- def test_view_excluding_all_nodes(self):
- """Test NetworkView that excludes all nodes."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
-
- view = NetworkView(_base=net, _excluded_nodes=frozenset(["A", "B"]))
-
- assert len(view.nodes) == 0
-
- graph = view.to_strict_multidigraph()
- assert len(graph.nodes) == 0
-
- def test_view_with_nonexistent_exclusions(self):
- """Test NetworkView with exclusions for nonexistent nodes/links."""
- net = Network()
- net.add_node(Node("A"))
-
- view = NetworkView(
- _base=net,
- _excluded_nodes=frozenset(["NONEXISTENT"]),
- _excluded_links=frozenset(["NONEXISTENT_LINK"]),
- )
-
- # Should work normally, ignoring nonexistent exclusions
- assert "A" in view.nodes
- assert len(view.nodes) == 1
-
- def test_multiple_cache_initialization_calls(self):
- """Test that multiple threads/calls don't break cache initialization."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- net.add_link(Link("A", "B"))
-
- view = NetworkView(_base=net)
-
- # Multiple calls should be safe
- graph1 = view.to_strict_multidigraph()
- graph2 = view.to_strict_multidigraph()
- graph3 = view.to_strict_multidigraph()
-
- assert graph1 is graph2 is graph3
-
-
-class TestNetworkViewIntegration:
- """Test NetworkView integration with Network workflows."""
-
- def test_view_after_network_modification(self):
- """Test NetworkView behavior after base network is modified."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- link = Link("A", "B")
- net.add_link(link)
-
- view = NetworkView(_base=net)
-
- # Cache a graph
- graph1 = view.to_strict_multidigraph()
- assert len(graph1.nodes) == 2
-
- # Modify base network
- net.disable_node("A")
-
- # Note: Cache is now stale, but this is documented behavior
- # In practice, views should be created after transforms complete
- cached_graph = view.to_strict_multidigraph()
- assert cached_graph is graph1 # Still returns cached version
-
- # Fresh view sees the change
- fresh_view = NetworkView(_base=net)
- fresh_graph = fresh_view.to_strict_multidigraph()
- assert len(fresh_graph.nodes) == 1 # Node A is disabled
-
- def test_view_with_risk_groups(self):
- """Test NetworkView with nodes in risk groups."""
- net = Network()
-
- # Add nodes with risk groups
- node_a = Node("A", risk_groups={"rg1"})
- node_b = Node("B", risk_groups={"rg1", "rg2"})
- node_c = Node("C")
-
- net.add_node(node_a)
- net.add_node(node_b)
- net.add_node(node_c)
-
- # Add risk group
- net.risk_groups["rg1"] = RiskGroup("rg1")
-
- view = NetworkView(_base=net)
-
- # Risk groups should be accessible through view
- assert "rg1" in view.risk_groups
- assert view.risk_groups["rg1"].name == "rg1"
-
- # Nodes should be visible normally
- assert len(view.nodes) == 3
-
- def test_from_excluded_sets_with_iterables(self):
- """Test from_excluded_sets with different iterable types."""
- net = Network()
- net.add_node(Node("A"))
- net.add_node(Node("B"))
- link = Link("A", "B")
- net.add_link(link)
-
- # Test with lists
- view1 = NetworkView.from_excluded_sets(
- net, excluded_nodes=["A"], excluded_links=[link.id]
- )
-
- # Test with sets
- view2 = NetworkView.from_excluded_sets(
- net, excluded_nodes={"A"}, excluded_links={link.id}
- )
-
- # Test with tuples
- view3 = NetworkView.from_excluded_sets(
- net, excluded_nodes=("A",), excluded_links=(link.id,)
- )
-
- # All should have same exclusion sets
- assert view1._excluded_nodes == view2._excluded_nodes == view3._excluded_nodes
- assert view1._excluded_links == view2._excluded_links == view3._excluded_links
diff --git a/tests/monte_carlo/test_functions.py b/tests/monte_carlo/test_functions.py
deleted file mode 100644
index bbc4e6b..0000000
--- a/tests/monte_carlo/test_functions.py
+++ /dev/null
@@ -1,330 +0,0 @@
-"""Tests for monte_carlo.functions module."""
-
-from unittest.mock import MagicMock, patch
-
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.monte_carlo.functions import (
- demand_placement_analysis,
- max_flow_analysis,
- sensitivity_analysis,
-)
-from ngraph.results.flow import FlowIterationResult
-
-
-class TestMaxFlowAnalysis:
- """Test max_flow_analysis function."""
-
- def test_max_flow_analysis_basic(self) -> None:
- """Test basic max_flow_analysis functionality."""
- # Mock NetworkView
- mock_network_view = MagicMock()
- # max_flow returns a dict, not a list
- mock_network_view.max_flow.return_value = {
- ("datacenter", "edge"): 100.0,
- ("edge", "datacenter"): 80.0,
- }
-
- result = max_flow_analysis(
- network_view=mock_network_view,
- source_regex="datacenter.*",
- sink_regex="edge.*",
- mode="combine",
- )
-
- # Verify function called NetworkView.max_flow with correct parameters
- mock_network_view.max_flow.assert_called_once_with(
- "datacenter.*",
- "edge.*",
- mode="combine",
- shortest_path=False,
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- # Verify return format
- assert isinstance(result, FlowIterationResult)
- pairs = {(e.source, e.destination, e.placed) for e in result.flows}
- assert ("datacenter", "edge", 100.0) in pairs
- assert ("edge", "datacenter", 80.0) in pairs
-
- def test_max_flow_analysis_with_summary(self) -> None:
- """Test include_flow_details and include_min_cut path and return shape."""
- mock_network_view = MagicMock()
- summary_obj_1 = MagicMock()
- summary_obj_1.cost_distribution = {3.0: 10.0}
- summary_obj_1.min_cut = [("A", "B", "k")]
- summary_obj_2 = MagicMock()
- summary_obj_2.cost_distribution = {4.0: 5.0}
- summary_obj_2.min_cut = [("B", "A", "k")]
- mock_network_view.max_flow_with_summary.return_value = {
- ("X", "Y"): (10.0, summary_obj_1),
- ("Y", "X"): (5.0, summary_obj_2),
- }
-
- result = max_flow_analysis(
- network_view=mock_network_view,
- source_regex="X.*",
- sink_regex="Y.*",
- include_flow_details=True,
- include_min_cut=True,
- )
-
- mock_network_view.max_flow_with_summary.assert_called_once_with(
- "X.*",
- "Y.*",
- mode="combine",
- shortest_path=False,
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- assert isinstance(result, FlowIterationResult)
- e_xy = next(e for e in result.flows if e.source == "X" and e.destination == "Y")
- assert e_xy.cost_distribution.get(3.0) == 10.0
- assert e_xy.data.get("edges_kind") == "min_cut"
-
- def test_max_flow_analysis_with_optional_params(self) -> None:
- """Test max_flow_analysis with optional parameters."""
- mock_network_view = MagicMock()
- mock_network_view.max_flow.return_value = {("A", "B"): 50.0}
-
- result = max_flow_analysis(
- network_view=mock_network_view,
- source_regex="A.*",
- sink_regex="B.*",
- mode="pairwise",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- extra_param="ignored",
- )
-
- mock_network_view.max_flow.assert_called_once_with(
- "A.*",
- "B.*",
- mode="pairwise",
- shortest_path=True,
- flow_placement=FlowPlacement.EQUAL_BALANCED,
- )
-
- assert isinstance(result, FlowIterationResult)
- assert len(result.flows) == 1
- assert result.flows[0].source == "A" and result.flows[0].destination == "B"
-
- def test_max_flow_analysis_empty_result(self) -> None:
- """Test max_flow_analysis with empty result."""
- mock_network_view = MagicMock()
- mock_network_view.max_flow.return_value = {}
-
- result = max_flow_analysis(
- network_view=mock_network_view,
- source_regex="nonexistent.*",
- sink_regex="also_nonexistent.*",
- )
-
- assert isinstance(result, FlowIterationResult)
- assert result.flows == []
- assert result.summary.total_demand == 0.0
- assert result.summary.total_placed == 0.0
- assert result.summary.overall_ratio == 1.0
-
-
-class TestDemandPlacementAnalysis:
- """Test demand_placement_analysis function."""
-
- def test_demand_placement_analysis_basic(self) -> None:
- """Test basic demand_placement_analysis functionality."""
- mock_network_view = MagicMock()
-
- # Mock TrafficManager and its behavior
- with (
- patch("ngraph.monte_carlo.functions.TrafficManager") as MockTrafficManager,
- patch(
- "ngraph.monte_carlo.functions.TrafficMatrixSet"
- ) as MockTrafficMatrixSet,
- patch("ngraph.monte_carlo.functions.TrafficDemand") as MockTrafficDemand,
- ):
- # Setup mock demands
- mock_demand1 = MagicMock()
- mock_demand1.volume = 100.0
- mock_demand1.placed_demand = 80.0
- mock_demand1.priority = 0
-
- mock_demand2 = MagicMock()
- mock_demand2.volume = 50.0
- mock_demand2.placed_demand = 50.0
- mock_demand2.priority = 1
-
- MockTrafficDemand.side_effect = [mock_demand1, mock_demand2]
-
- # Setup mock TrafficManager
- mock_tm = MockTrafficManager.return_value
- mock_tm.demands = [mock_demand1, mock_demand2]
- mock_tm.place_all_demands.return_value = 130.0
-
- # Setup mock TrafficMatrixSet
- mock_tms = MockTrafficMatrixSet.return_value
-
- demands_config = [
- {
- "source_path": "A",
- "sink_path": "B",
- "demand": 100.0,
- "mode": "pairwise",
- "priority": 0,
- },
- {
- "source_path": "C",
- "sink_path": "D",
- "demand": 50.0,
- "priority": 1,
- },
- ]
-
- result = demand_placement_analysis(
- network_view=mock_network_view,
- demands_config=demands_config,
- placement_rounds=25,
- )
-
- # Verify TrafficDemand creation
- assert MockTrafficDemand.call_count == 2
- MockTrafficDemand.assert_any_call(
- source_path="A",
- sink_path="B",
- demand=100.0,
- mode="pairwise",
- flow_policy_config=None,
- priority=0,
- )
-
- # Verify TrafficManager setup
- MockTrafficManager.assert_called_once_with(
- network=mock_network_view,
- traffic_matrix_set=mock_tms,
- matrix_name="main",
- )
-
- mock_tm.build_graph.assert_called_once()
- mock_tm.expand_demands.assert_called_once()
- mock_tm.place_all_demands.assert_called_once_with(placement_rounds=25)
-
- # Verify results structure
- assert isinstance(result, FlowIterationResult)
- assert len(result.flows) == 2
- # Check ordering by priority logic in test
- dr = sorted(result.flows, key=lambda x: x.priority)
- assert dr[0].placed == 80.0 and dr[0].priority == 0
- assert dr[1].placed == 50.0 and dr[1].priority == 1
- summary = result.summary
- assert summary.total_demand == 150.0
- assert summary.total_placed == 130.0
- from pytest import approx
-
- assert summary.overall_ratio == approx(130.0 / 150.0)
-
- def test_demand_placement_analysis_zero_total_demand(self) -> None:
- """Handles zero total demand without division by zero."""
- mock_network_view = MagicMock()
-
- with (
- patch("ngraph.monte_carlo.functions.TrafficManager") as MockTrafficManager,
- patch(
- "ngraph.monte_carlo.functions.TrafficMatrixSet"
- ) as MockTrafficMatrixSet,
- patch("ngraph.monte_carlo.functions.TrafficDemand") as MockTrafficDemand,
- ):
- # Create a single zero-volume demand
- mock_demand = MagicMock()
- mock_demand.volume = 0.0
- mock_demand.placed_demand = 0.0
- mock_demand.priority = 0
- MockTrafficDemand.return_value = mock_demand
-
- mock_tm = MockTrafficManager.return_value
- mock_tm.demands = [mock_demand]
- mock_tm.place_all_demands.return_value = 0.0
-
- _ = MockTrafficMatrixSet.return_value
-
- demands_config = [
- {
- "source_path": "A",
- "sink_path": "B",
- "demand": 0.0,
- }
- ]
-
- result = demand_placement_analysis(
- network_view=mock_network_view,
- demands_config=demands_config,
- placement_rounds=1,
- )
-
- assert isinstance(result, FlowIterationResult)
- assert len(result.flows) == 1
- assert result.flows[0].placed == 0.0
- summary = result.summary
- assert summary.total_demand == 0.0
- assert summary.total_placed == 0.0
- assert summary.overall_ratio == 1.0
-
-
-class TestSensitivityAnalysis:
- """Test sensitivity_analysis function."""
-
- def test_sensitivity_analysis_basic(self) -> None:
- """Test basic sensitivity_analysis functionality."""
- mock_network_view = MagicMock()
-
- # Mock sensitivity_analysis result with nested dict structure
- mock_sensitivity_result = {
- ("datacenter", "edge"): {
- ("node", "A", "type"): 0.15,
- ("link", "A", "B"): 0.08,
- },
- ("edge", "datacenter"): {
- ("node", "B", "type"): 0.12,
- ("link", "B", "C"): 0.05,
- },
- }
- mock_network_view.sensitivity_analysis.return_value = mock_sensitivity_result
-
- result = sensitivity_analysis(
- network_view=mock_network_view,
- source_regex="datacenter.*",
- sink_regex="edge.*",
- mode="combine",
- )
-
- # Verify function called NetworkView.sensitivity_analysis with correct parameters
- mock_network_view.sensitivity_analysis.assert_called_once_with(
- "datacenter.*",
- "edge.*",
- mode="combine",
- shortest_path=False,
- flow_placement=FlowPlacement.PROPORTIONAL,
- )
-
- # Verify result format conversion
- expected_result = {
- "datacenter->edge": {
- "('node', 'A', 'type')": 0.15,
- "('link', 'A', 'B')": 0.08,
- },
- "edge->datacenter": {
- "('node', 'B', 'type')": 0.12,
- "('link', 'B', 'C')": 0.05,
- },
- }
- assert result == expected_result
-
- def test_sensitivity_analysis_empty_result(self) -> None:
- """Test sensitivity_analysis with empty result."""
- mock_network_view = MagicMock()
- mock_network_view.sensitivity_analysis.return_value = {}
-
- result = sensitivity_analysis(
- network_view=mock_network_view,
- source_regex="nonexistent.*",
- sink_regex="also_nonexistent.*",
- )
-
- assert result == {}
diff --git a/tests/monte_carlo/test_functions_details.py b/tests/monte_carlo/test_functions_details.py
deleted file mode 100644
index fe3bc60..0000000
--- a/tests/monte_carlo/test_functions_details.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from __future__ import annotations
-
-from unittest.mock import MagicMock, patch
-
-from ngraph.monte_carlo.functions import demand_placement_analysis
-
-
-def test_demand_placement_analysis_includes_flow_details_costs_and_edges() -> None:
- mock_network_view = MagicMock()
-
- with (
- patch("ngraph.monte_carlo.functions.TrafficManager") as MockTrafficManager,
- patch("ngraph.monte_carlo.functions.TrafficMatrixSet"),
- patch("ngraph.monte_carlo.functions.TrafficDemand") as MockTrafficDemand,
- ):
- # Two demands with flow_policy flows including placed_flow and path_bundle
- demand1 = MagicMock()
- demand1.volume = 10.0
- demand1.placed_demand = 8.0
- demand1.priority = 0
- # Flow objects under policy
- flow_a = MagicMock()
- flow_a.placed_flow = 5.0
- flow_a.path_bundle.cost = 2.0
- flow_a.path_bundle.edges = {"e1"}
- flow_b = MagicMock()
- flow_b.placed_flow = 3.0
- flow_b.path_bundle.cost = 3.0
- flow_b.path_bundle.edges = {"e2"}
- demand1.flow_policy.flows = {1: flow_a, 2: flow_b}
-
- demand2 = MagicMock()
- demand2.volume = 4.0
- demand2.placed_demand = 4.0
- demand2.priority = 1
- demand2.flow_policy.flows = {}
-
- MockTrafficDemand.side_effect = [demand1, demand2]
- mock_tm = MockTrafficManager.return_value
- mock_tm.demands = [demand1, demand2]
-
- demands_config = [
- {
- "source_path": "A",
- "sink_path": "B",
- "demand": 10.0,
- "mode": "pairwise",
- "priority": 0,
- },
- {
- "source_path": "C",
- "sink_path": "D",
- "demand": 4.0,
- "priority": 1,
- },
- ]
-
- result = demand_placement_analysis(
- network_view=mock_network_view,
- demands_config=demands_config,
- placement_rounds=1,
- include_flow_details=True,
- include_used_edges=True,
- )
-
- # Validate cost_distribution aggregated per demand path_bundle cost
- entries = list(result.flows)
- e0 = entries[0]
- if e0.demand != 10.0:
- e0 = entries[1]
- cd = e0.cost_distribution
- assert cd.get(2.0) == 5.0 and cd.get(3.0) == 3.0
- assert e0.data.get("edges_kind") == "used"
- # Edges collected across flows
- assert set(e0.data.get("edges", [])) == {"e1", "e2"}
diff --git a/tests/monte_carlo/test_results.py b/tests/monte_carlo/test_results.py
deleted file mode 100644
index 987e8a7..0000000
--- a/tests/monte_carlo/test_results.py
+++ /dev/null
@@ -1,339 +0,0 @@
-"""Tests for monte_carlo.results module (SensitivityResults only after refactor)."""
-
-import pandas as pd
-import pytest
-
-from ngraph.monte_carlo.results import SensitivityResults
-
-
-class TestSensitivityResults:
- """Test SensitivityResults class."""
-
- def test_sensitivity_results_creation(self) -> None:
- """Test basic SensitivityResults creation."""
- raw_results = {"sensitivity_data": "test"}
-
- result = SensitivityResults(
- raw_results=raw_results,
- iterations=100,
- baseline={"baseline_value": 1.0},
- failure_patterns={"pattern1": "data"},
- metadata={"test": "value"},
- )
-
- assert result.raw_results == raw_results
- assert result.iterations == 100
- assert result.baseline == {"baseline_value": 1.0}
- assert result.failure_patterns == {"pattern1": "data"}
- assert result.metadata == {"test": "value"}
-
- def test_sensitivity_post_init_defaults(self) -> None:
- """Test post_init sets proper defaults."""
- raw_results = {"sensitivity_data": "test"}
-
- result = SensitivityResults(
- raw_results=raw_results,
- iterations=100,
- )
-
- assert result.baseline is None
- assert result.failure_patterns == {} # post_init sets empty dict, not None
- assert result.metadata == {} # post_init sets empty dict, not None
-
- def test_component_impact_distribution(self) -> None:
- """Test component_impact_distribution method."""
- component_scores = {
- "flow_1": {
- "component_a": {"mean": 0.8, "max": 1.0, "min": 0.6, "count": 10},
- "component_b": {"mean": 0.6, "max": 0.8, "min": 0.4, "count": 10},
- },
- "flow_2": {
- "component_a": {"mean": 0.9, "max": 1.0, "min": 0.8, "count": 5},
- },
- }
-
- result = SensitivityResults(
- raw_results={"test": "data"},
- iterations=100,
- component_scores=component_scores,
- )
-
- df = result.component_impact_distribution()
-
- assert isinstance(df, pd.DataFrame)
- assert len(df) == 3 # Two components in flow_1, one in flow_2
- assert "flow_key" in df.columns
- assert "component" in df.columns
- assert "mean_impact" in df.columns
- assert "max_impact" in df.columns
-
- # Check specific values
- comp_a_flow_1 = df[
- (df["flow_key"] == "flow_1") & (df["component"] == "component_a")
- ].iloc[0]
- assert comp_a_flow_1["mean_impact"] == 0.8
- assert comp_a_flow_1["max_impact"] == 1.0
-
- def test_component_impact_distribution_empty_scores(self) -> None:
- """Test component_impact_distribution with empty scores."""
- result = SensitivityResults(raw_results={"results": []}, iterations=100)
-
- df = result.component_impact_distribution()
-
- assert isinstance(df, pd.DataFrame)
- assert len(df) == 0
-
- def test_flow_keys(self) -> None:
- """Test flow_keys method."""
- component_scores = {
- "flow_1": {"component_a": {"mean": 0.8}},
- "flow_2": {"component_b": {"mean": 0.6}},
- }
-
- result = SensitivityResults(
- raw_results={"test": "data"},
- iterations=100,
- component_scores=component_scores,
- )
-
- keys = result.flow_keys()
- assert set(keys) == {"flow_1", "flow_2"}
-
- def test_get_flow_sensitivity(self) -> None:
- """Test get_flow_sensitivity method."""
- component_scores = {
- "flow_1": {
- "component_a": {"mean": 0.8, "max": 1.0, "min": 0.6},
- "component_b": {"mean": 0.6, "max": 0.8, "min": 0.4},
- }
- }
-
- result = SensitivityResults(
- raw_results={"test": "data"},
- iterations=100,
- component_scores=component_scores,
- )
-
- sensitivity = result.get_flow_sensitivity("flow_1")
- assert sensitivity == component_scores["flow_1"]
-
- def test_get_flow_sensitivity_missing_key(self) -> None:
- """Test get_flow_sensitivity with missing flow key."""
- result = SensitivityResults(raw_results={"test": "data"}, iterations=100)
-
- with pytest.raises(KeyError, match="Flow key 'missing_flow' not found"):
- result.get_flow_sensitivity("missing_flow")
-
- def test_summary_statistics(self) -> None:
- """Test summary_statistics method."""
- component_scores = {
- "flow_1": {
- "comp_a": {"mean": 0.8, "max": 1.0, "min": 0.6},
- "comp_b": {"mean": 0.6, "max": 0.8, "min": 0.4},
- },
- "flow_2": {
- "comp_a": {"mean": 0.9, "max": 1.0, "min": 0.8},
- "comp_b": {"mean": 0.7, "max": 0.9, "min": 0.5},
- },
- }
-
- result = SensitivityResults(
- raw_results={"test": "data"},
- iterations=100,
- component_scores=component_scores,
- )
-
- stats = result.summary_statistics()
-
- assert isinstance(stats, dict)
- # Should have aggregated stats for comp_a and comp_b
- assert "comp_a" in stats
- assert "comp_b" in stats
-
- # Check comp_a stats (aggregated from both flows: 0.8 and 0.9)
- comp_a_stats = stats["comp_a"]
- assert "mean_impact" in comp_a_stats
- assert "max_impact" in comp_a_stats
- assert "min_impact" in comp_a_stats
- assert "flow_count" in comp_a_stats
- assert comp_a_stats["flow_count"] == 2
- assert (
- abs(comp_a_stats["mean_impact"] - 0.85) < 0.001
- ) # Handle floating point precision
-
- def test_get_failure_pattern_summary_empty(self) -> None:
- """Test get_failure_pattern_summary with no patterns."""
- result = SensitivityResults(
- raw_results={"results": []}, iterations=100, failure_patterns={}
- )
-
- df = result.get_failure_pattern_summary()
-
- assert isinstance(df, pd.DataFrame)
- assert len(df) == 0
-
- def test_get_failure_pattern_summary_with_patterns(self) -> None:
- """Test get_failure_pattern_summary with actual patterns."""
- failure_patterns = {
- "pattern_1": {
- "count": 10,
- "is_baseline": False,
- "excluded_nodes": ["node_a", "node_b"],
- "excluded_links": ["link_x"],
- "sensitivity_result": {
- "flow_1": {"comp_a": 0.8, "comp_b": 0.6},
- "flow_2": {"comp_a": 0.7, "comp_b": 0.5},
- },
- },
- "pattern_2": {
- "count": 5,
- "is_baseline": True,
- "excluded_nodes": [],
- "excluded_links": [],
- "sensitivity_result": {
- "flow_1": {"comp_a": 0.9, "comp_b": 0.8},
- },
- },
- }
-
- result = SensitivityResults(
- raw_results={"results": []},
- iterations=100,
- failure_patterns=failure_patterns,
- )
-
- df = result.get_failure_pattern_summary()
-
- assert isinstance(df, pd.DataFrame)
- assert len(df) == 2
-
- # Check columns
- expected_cols = [
- "pattern_key",
- "count",
- "is_baseline",
- "failed_nodes",
- "failed_links",
- "total_failures",
- ]
- for col in expected_cols:
- assert col in df.columns
-
- # Check pattern 1 data
- row1 = df[df["pattern_key"] == "pattern_1"].iloc[0]
- assert row1["count"] == 10
- assert not row1["is_baseline"]
- assert row1["failed_nodes"] == 2
- assert row1["failed_links"] == 1
- assert row1["total_failures"] == 3
- assert "avg_sensitivity_flow_1" in df.columns
- assert row1["avg_sensitivity_flow_1"] == 0.7 # (0.8 + 0.6) / 2
-
- # Check pattern 2 data
- row2 = df[df["pattern_key"] == "pattern_2"].iloc[0]
- assert row2["count"] == 5
- assert row2["is_baseline"]
- assert row2["failed_nodes"] == 0
- assert row2["failed_links"] == 0
- assert row2["total_failures"] == 0
-
- def test_get_failure_pattern_summary_missing_fields(self) -> None:
- """Test get_failure_pattern_summary with missing optional fields."""
- failure_patterns = {
- "incomplete_pattern": {
- "count": 3,
- # Missing optional fields
- }
- }
-
- result = SensitivityResults(
- raw_results={"results": []},
- iterations=100,
- failure_patterns=failure_patterns,
- )
-
- df = result.get_failure_pattern_summary()
-
- assert isinstance(df, pd.DataFrame)
- assert len(df) == 1
-
- row = df.iloc[0]
- assert row["count"] == 3
- assert not row["is_baseline"] # Default value
- assert row["failed_nodes"] == 0 # Default for missing excluded_nodes
- assert row["failed_links"] == 0 # Default for missing excluded_links
- assert row["total_failures"] == 0
-
- def test_export_summary(self) -> None:
- """Test export_summary method."""
- result = SensitivityResults(
- raw_results={"test": "data"},
- iterations=100,
- source_pattern="datacenter.*",
- sink_pattern="edge.*",
- mode="combine",
- component_scores={
- "flow_1": {"comp_a": {"mean": 0.8, "max": 1.0, "min": 0.6}}
- },
- failure_patterns={"pattern_1": {"data": "test"}},
- metadata={"test": "value"},
- )
-
- summary = result.export_summary()
-
- assert isinstance(summary, dict)
- required_keys = [
- "source_pattern",
- "sink_pattern",
- "mode",
- "iterations",
- "metadata",
- "component_scores",
- "failure_patterns",
- "summary_statistics",
- ]
- for key in required_keys:
- assert key in summary
-
- assert summary["source_pattern"] == "datacenter.*"
- assert summary["sink_pattern"] == "edge.*"
- assert summary["mode"] == "combine"
- assert summary["iterations"] == 100
- assert summary["metadata"] == {"test": "value"}
- assert summary["component_scores"] == {
- "flow_1": {"comp_a": {"mean": 0.8, "max": 1.0, "min": 0.6}}
- }
- assert summary["failure_patterns"] == {"pattern_1": {"data": "test"}}
-
- def test_export_summary_defaults(self) -> None:
- """Test export_summary with default/None values."""
- result = SensitivityResults(
- raw_results={"test": "data"},
- iterations=50,
- )
-
- summary = result.export_summary()
-
- assert isinstance(summary, dict)
- assert summary["source_pattern"] is None
- assert summary["sink_pattern"] is None
- assert summary["mode"] is None
- assert summary["iterations"] == 50
- assert summary["metadata"] == {}
- assert summary["component_scores"] == {}
- assert summary["failure_patterns"] == {}
-
- def test_get_flow_sensitivity_keyerror_message(self) -> None:
- """KeyError message contains available keys or 'none'."""
- result = SensitivityResults(
- raw_results={"test": "data"},
- iterations=1,
- component_scores={},
- )
-
- with pytest.raises(KeyError) as exc:
- result.get_flow_sensitivity("x->y")
-
- msg = str(exc.value)
- assert "Flow key 'x->y' not found" in msg
- assert "Available:" in msg
diff --git a/tests/paths/__init__.py b/tests/paths/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/paths/test_bundle.py b/tests/paths/test_bundle.py
deleted file mode 100644
index 31059dc..0000000
--- a/tests/paths/test_bundle.py
+++ /dev/null
@@ -1,303 +0,0 @@
-from typing import Dict, List, Set
-
-import pytest
-
-from ngraph.algorithms.base import EdgeSelect
-from ngraph.graph.strict_multidigraph import EdgeID, NodeID, StrictMultiDiGraph
-from ngraph.paths.bundle import Path, PathBundle
-
-
-@pytest.fixture
-def triangle1():
- """A small triangle graph for testing basic path operations."""
- g = StrictMultiDiGraph()
- g.add_node("A")
- g.add_node("B")
- g.add_node("C")
- g.add_edge("A", "B", cost=1, capacity=15, key=0)
- g.add_edge("B", "A", cost=1, capacity=15, key=1)
- g.add_edge("B", "C", cost=1, capacity=15, key=2)
- g.add_edge("C", "B", cost=1, capacity=15, key=3)
- g.add_edge("A", "C", cost=1, capacity=5, key=4)
- g.add_edge("C", "A", cost=1, capacity=5, key=5)
- return g
-
-
-class TestPathBundle:
- def test_path_bundle_1(self):
- path_bundle = PathBundle(
- "A",
- "C",
- {
- "A": {},
- "C": {"B": [1], "D": [3]},
- "B": {"A": [0]},
- "D": {"A": [2]},
- "E": {"A": [4]},
- },
- 2,
- )
- assert path_bundle.pred == {
- "A": {},
- "C": {"B": [1], "D": [3]},
- "B": {"A": [0]},
- "D": {"A": [2]},
- }
-
- def test_path_bundle_2(self):
- path_bundle = PathBundle(
- "A",
- "C",
- {
- "A": {},
- "C": {"B": [1, 5], "D": [3, 6, 7]},
- "B": {"A": [0, 8]},
- "D": {"A": [2]},
- "E": {"A": [4]},
- },
- 2,
- )
-
- assert [path for path in path_bundle.resolve_to_paths()] == [
- Path((("A", (0, 8)), ("B", (1, 5)), ("C", ())), 2),
- Path((("A", (2,)), ("D", (3, 6, 7)), ("C", ())), 2),
- ]
-
- assert [
- path for path in path_bundle.resolve_to_paths(split_parallel_edges=True)
- ] == [
- Path((("A", (0,)), ("B", (1,)), ("C", ())), 2),
- Path((("A", (0,)), ("B", (5,)), ("C", ())), 2),
- Path((("A", (8,)), ("B", (1,)), ("C", ())), 2),
- Path((("A", (8,)), ("B", (5,)), ("C", ())), 2),
- Path((("A", (2,)), ("D", (3,)), ("C", ())), 2),
- Path((("A", (2,)), ("D", (6,)), ("C", ())), 2),
- Path((("A", (2,)), ("D", (7,)), ("C", ())), 2),
- ]
-
- def test_path_bundle_3(self):
- path_bundle = PathBundle(
- "A",
- "C",
- {
- "A": {},
- "C": {"B": [2, 3]},
- "B": {"A": [0, 1]},
- },
- 2,
- )
-
- paths: List[Path] = [
- path for path in path_bundle.resolve_to_paths(split_parallel_edges=False)
- ]
-
- assert len(paths) == 1
- assert paths[0].cost == 2
- assert paths[0].edges == {0, 1, 2, 3}
- assert paths[0].nodes == {"A", "B", "C"}
-
- def test_path_bundle_4(self):
- path_bundle = PathBundle.from_path(
- Path((("A", (0,)), ("B", (1,)), ("C", ())), 2)
- )
- assert path_bundle.pred == {
- "A": {},
- "C": {"B": [1]},
- "B": {"A": [0]},
- }
- assert path_bundle.cost == 2
-
- def test_path_bundle_5(self, triangle1):
- path_bundle = PathBundle.from_path(
- Path((("A", ()), ("B", ()), ("C", ())), 2),
- resolve_edges=True,
- graph=triangle1,
- edge_select=EdgeSelect.ALL_MIN_COST,
- )
- assert path_bundle.pred == {
- "A": {},
- "C": {"B": [2]},
- "B": {"A": [0]},
- }
- assert path_bundle.cost == 2
-
- def test_get_sub_bundle_1(self, triangle1):
- path_bundle = PathBundle.from_path(
- Path((("A", ()), ("B", ()), ("C", ())), 2),
- resolve_edges=True,
- graph=triangle1,
- edge_select=EdgeSelect.ALL_MIN_COST,
- )
- sub_bundle = path_bundle.get_sub_path_bundle("B", triangle1)
- assert sub_bundle.pred == {
- "A": {},
- "B": {"A": [0]},
- }
- assert sub_bundle.cost == 1
-
- def test_get_sub_bundle_2(self, triangle1):
- path_bundle = PathBundle.from_path(
- Path((("A", ()), ("B", ()), ("C", ())), 2),
- resolve_edges=True,
- graph=triangle1,
- edge_select=EdgeSelect.ALL_MIN_COST,
- )
- sub_bundle = path_bundle.get_sub_path_bundle("A", triangle1)
- assert sub_bundle.pred == {
- "A": {},
- }
- assert sub_bundle.cost == 0
-
- def test_get_sub_bundle_min_cost_across_alternatives(self):
- g = StrictMultiDiGraph()
- for n in ("A", "B", "C", "D"):
- g.add_node(n)
-
- # Two routes A->D: A->C->D cost 2 (1+1), A->B->D cost 20 (10+10)
- e_ac = g.add_edge("A", "C", cost=1)
- e_cd = g.add_edge("C", "D", cost=1)
- e_ab = g.add_edge("A", "B", cost=10)
- e_bd = g.add_edge("B", "D", cost=10)
-
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {
- "A": {},
- "C": {"A": [e_ac]},
- "B": {"A": [e_ab]},
- "D": {"C": [e_cd], "B": [e_bd]},
- }
-
- bundle = PathBundle("A", "D", pred, cost=2)
- sub = bundle.get_sub_path_bundle("D", g, cost_attr="cost")
- assert sub.cost == 2
-
- def test_get_sub_bundle_src_equals_dst_zero_cost(self):
- g = StrictMultiDiGraph()
- g.add_node("A")
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {"A": {}}
- bundle = PathBundle("A", "A", pred, cost=0)
- sub = bundle.get_sub_path_bundle("A", g)
- assert sub.cost == 0
-
- def test_get_sub_bundle_raises_when_src_unreachable_in_subgraph(self):
- g = StrictMultiDiGraph()
- for n in ("A", "B", "C"):
- g.add_node(n)
- # pred missing any chain from A to C (only B->C exists)
- e_bc = g.add_edge("B", "C", cost=1)
- pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {
- "A": {},
- "B": {},
- "C": {"B": [e_bc]},
- }
- bundle = PathBundle("A", "C", pred, cost=0)
- with pytest.raises(ValueError, match="No path from 'A' to 'C'"):
- _ = bundle.get_sub_path_bundle("C", g)
-
- def test_add_method(self):
- """Test concatenating two PathBundles with matching src/dst."""
- pb1 = PathBundle(
- "A",
- "B",
- {
- "A": {},
- "B": {"A": [0]},
- },
- cost=3,
- )
- pb2 = PathBundle(
- "B",
- "C",
- {
- "B": {},
- "C": {"B": [1]},
- },
- cost=4,
- )
- new_pb = pb1.add(pb2)
- # new_pb should be A->C with cost=7
- assert new_pb.src_node == "A"
- assert new_pb.dst_node == "C"
- assert new_pb.cost == 7
- assert new_pb.pred == {
- "A": {},
- "B": {"A": [0]},
- "C": {"B": [1]},
- }
-
- def test_contains_subset_disjoint(self):
- """Test contains, is_subset_of, and is_disjoint_from."""
- pb_base = PathBundle(
- "X",
- "Z",
- {
- "X": {},
- "Y": {"X": [1]},
- "Z": {"Y": [2]},
- },
- cost=10,
- )
- pb_small = PathBundle(
- "X",
- "Z",
- {
- "X": {},
- "Y": {"X": [1]},
- "Z": {"Y": [2]},
- },
- cost=10,
- )
- # They have the same edges
- assert pb_base.contains(pb_small) is True
- assert pb_small.contains(pb_base) is True
- assert pb_small.is_subset_of(pb_base) is True
- assert pb_base.is_subset_of(pb_small) is True
- assert pb_small.is_disjoint_from(pb_base) is False
-
- # Now create a partial subset
- pb_partial = PathBundle(
- "X",
- "Y",
- {
- "X": {},
- "Y": {"X": [1]},
- },
- cost=5,
- )
- # pb_partial's edge set is {1} while pb_base's is {1, 2}
- assert pb_base.contains(pb_partial) is True
- assert pb_partial.contains(pb_base) is False
- assert pb_partial.is_subset_of(pb_base) is True
- assert pb_base.is_subset_of(pb_partial) is False
-
- # Now a fully disjoint bundle
- pb_disjoint = PathBundle(
- "R",
- "S",
- {
- "R": {},
- "S": {"R": [9]},
- },
- cost=2,
- )
- assert pb_base.is_disjoint_from(pb_disjoint) is True
- assert pb_disjoint.is_disjoint_from(pb_base) is True
-
- def test_ordering_and_hash_contract(self):
- """Equality, ordering by cost, and hash set behavior for `PathBundle`."""
- pb1 = PathBundle("A", "B", {"A": {}, "B": {"A": [11]}}, cost=5)
- pb2 = PathBundle("A", "B", {"A": {}, "B": {"A": [11]}}, cost=5)
- pb3 = PathBundle("A", "B", {"A": {}, "B": {"A": [11, 12]}}, cost=5)
- pb4 = PathBundle("A", "B", {"A": {}, "B": {"A": [11]}}, cost=6)
-
- # Equality driven by (src, dst, cost, edges)
- assert pb1 == pb2
- assert pb1 != pb3
- assert pb1 != pb4
-
- # Ordering by cost only
- assert pb1 < pb4
- assert not (pb4 < pb1)
-
- # Hash respects equality
- unique: Set[PathBundle] = {pb1, pb2, pb3, pb4}
- assert len(unique) == 3
diff --git a/tests/paths/test_path.py b/tests/paths/test_path.py
deleted file mode 100644
index c349412..0000000
--- a/tests/paths/test_path.py
+++ /dev/null
@@ -1,182 +0,0 @@
-import pytest
-
-from ngraph.algorithms.base import PathTuple
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.paths.path import Path
-
-
-def test_path_init():
- """Test basic initialization of a Path and derived sets."""
- path_tuple: PathTuple = (
- ("A", ("edgeA-B",)),
- ("B", ("edgeB-C",)),
- ("C", ()),
- )
- p = Path(path_tuple, cost=10.0)
-
- assert p.path_tuple == path_tuple
- assert p.cost == 10.0
- assert p.nodes == {"A", "B", "C"}
- assert p.edges == {"edgeA-B", "edgeB-C"}
- # The last element contributes an empty edge tuple, so there are 3 entries
- assert len(p.edge_tuples) == 3 # Two real edge tuples plus the empty one for "C"
- assert ("edgeA-B",) in p.edge_tuples
- assert ("edgeB-C",) in p.edge_tuples
- assert () in p.edge_tuples
-
-
-def test_path_indexing_and_iteration():
- """Test __getitem__ and __iter__ for accessing path elements."""
- path_tuple: PathTuple = (
- ("N1", ("e1", "e2")),
- ("N2", ()),
- )
- p = Path(path_tuple, 3)
- assert p[0] == ("N1", ("e1", "e2"))
- assert p[1] == ("N2", ())
- # Test iteration
- items = list(p)
- assert len(items) == 2
- assert items[0][0] == "N1"
- assert items[1][0] == "N2"
-
-
-def test_path_len():
- """Test __len__ for number of elements in path."""
- p = Path((("A", ("eA-B",)), ("B", ("eB-C",)), ("C", ())), cost=4)
- assert len(p) == 3
-
-
-def test_path_src_node_and_dst_node():
- """Test src_node and dst_node properties."""
- p = Path((("X", ("e1",)), ("Y", ("e2",)), ("Z", ())), cost=2)
- assert p.src_node == "X"
- assert p.dst_node == "Z"
-
-
-def test_path_comparison():
- """Test __lt__ (less than) for cost-based comparison."""
- p1 = Path((("A", ("e1",)), ("B", ())), cost=10)
- p2 = Path((("A", ("e1",)), ("B", ())), cost=20)
- assert p1 < p2
- assert not (p2 < p1)
-
-
-def test_path_equality():
- """Test equality and hash usage for Path."""
- p1 = Path((("A", ("e1",)), ("B", ())), cost=5)
- p2 = Path((("A", ("e1",)), ("B", ())), cost=5)
- p3 = Path((("A", ("e1",)), ("C", ())), cost=5)
- p4 = Path((("A", ("e1",)), ("B", ())), cost=6)
-
- assert p1 == p2
- assert p1 != p3
- assert p1 != p4
-
- s = {p1, p2, p3}
- # p1 and p2 are the same, so set should have only two unique items
- assert len(s) == 2
-
-
-def test_path_edges_seq():
- """Test edges_seq cached_property."""
- p = Path((("A", ("eA-B",)), ("B", ("eB-C",)), ("C", ())), cost=7)
- # edges_seq should exclude the last element's parallel-edges (often empty)
- assert p.edges_seq == (("eA-B",), ("eB-C",))
-
- p_single = Path((("A", ()),), cost=0)
- # If length <= 1, it should return an empty tuple
- assert p_single.edges_seq == ()
-
-
-def test_path_nodes_seq():
- """Test nodes_seq cached_property."""
- p = Path((("X", ("eX-Y",)), ("Y", ())), cost=1)
- assert p.nodes_seq == ("X", "Y")
-
- p2 = Path((("N1", ("e1",)), ("N2", ("e2",)), ("N3", ())), cost=10)
- assert p2.nodes_seq == ("N1", "N2", "N3")
-
-
-def test_get_sub_path_success():
- """Test get_sub_path for a valid dst_node with edge cost summation."""
- # Build a small graph
- g = StrictMultiDiGraph()
- for node_id in ("A", "B", "C", "D"):
- g.add_node(node_id)
-
- # Add edges with 'cost' attributes
- eAB = g.add_edge("A", "B", cost=5)
- eBC = g.add_edge("B", "C", cost=7)
- eCD = g.add_edge("C", "D", cost=2)
-
- # Path is A->B->C->D
- path_tuple: PathTuple = (
- ("A", (eAB,)),
- ("B", (eBC,)),
- ("C", (eCD,)),
- ("D", ()),
- )
- p = Path(path_tuple, cost=14.0)
-
- # Subpath: A->B->C
- sub_p = p.get_sub_path("C", g, cost_attr="cost")
- assert sub_p.dst_node == "C"
- # Check that the cost is sum of edges (A->B=5) + (B->C=7) = 12
- assert sub_p.cost == 12
- # Check sub_path elements
- assert len(sub_p) == 3
- assert sub_p[2][0] == "C"
- # Ensure last node is C with empty edges
- assert sub_p.path_tuple[-1] == ("C", ())
-
-
-def test_get_sub_path_not_found():
- """Test get_sub_path raises ValueError if dst_node not in path."""
- g = StrictMultiDiGraph()
- g.add_node("X")
- g.add_node("Y")
-
- path_tuple: PathTuple = (("X", ()),)
- p = Path(path_tuple, cost=0)
- with pytest.raises(ValueError, match="Node 'Y' not found in path."):
- _ = p.get_sub_path("Y", g)
-
-
-def test_get_sub_path_empty_parallel_edges():
- """Test that get_sub_path cost calculation handles empty edge sets."""
- g = StrictMultiDiGraph()
- for n in ("N1", "N2"):
- g.add_node(n)
-
- # Add an edge between N1->N2
- e12 = g.add_edge("N1", "N2", cost=10)
-
- # A path whose final step has an empty parallel edge set,
- # just to confirm cost addition is skipped for that step
- path_tuple: PathTuple = (
- ("N1", (e12,)),
- ("N2", ()),
- )
- p = Path(path_tuple, cost=10.0)
-
- # get_sub_path("N2") should not raise an error,
- # and cost is 10 from the single edge
- sub = p.get_sub_path("N2", g)
- assert sub.cost == 10
- assert len(sub) == 2
-
-
-def test_get_sub_path_uses_min_cost_among_parallel_edges():
- g = StrictMultiDiGraph()
- for n in ("A", "B", "C"):
- g.add_node(n)
-
- e1 = g.add_edge("A", "B", cost=5)
- e2 = g.add_edge("A", "B", cost=7)
- e3 = g.add_edge("B", "C", cost=1)
-
- path_tuple: PathTuple = (("A", (e1, e2)), ("B", (e3,)), ("C", ()))
- p = Path(path_tuple, cost=0)
- sub = p.get_sub_path("B", g, cost_attr="cost")
- assert sub.cost == 5
diff --git a/tests/profiling/test_profiling.py b/tests/profiling/test_profiling.py
index 111d7f2..d114857 100644
--- a/tests/profiling/test_profiling.py
+++ b/tests/profiling/test_profiling.py
@@ -5,7 +5,7 @@
import pytest
-from ngraph.profiling import (
+from ngraph.profiling.profiler import (
PerformanceProfiler,
PerformanceReporter,
ProfileResults,
diff --git a/tests/profiling/test_reporter_smoke.py b/tests/profiling/test_reporter_smoke.py
deleted file mode 100644
index 0f5895c..0000000
--- a/tests/profiling/test_reporter_smoke.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from __future__ import annotations
-
-import importlib
-
-
-def test_profiling_reporter_module_importable() -> None:
- # Smoke: module provides docstring and is importable
- mod = importlib.import_module("ngraph.profiling.reporter")
- assert hasattr(mod, "__doc__")
diff --git a/tests/report/__init__.py b/tests/report/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/results/test_artifacts.py b/tests/results/test_artifacts.py
deleted file mode 100644
index f254a0e..0000000
--- a/tests/results/test_artifacts.py
+++ /dev/null
@@ -1,308 +0,0 @@
-import json
-
-import pytest
-
-from ngraph.demand.manager.manager import TrafficResult
-from ngraph.demand.matrix import TrafficMatrixSet
-from ngraph.demand.spec import TrafficDemand
-from ngraph.results.artifacts import (
- CapacityEnvelope,
- PlacementEnvelope,
- PlacementResultSet,
-)
-
-
-def test_capacity_envelope_percentile_and_expand():
- """Validate percentile computation and frequency expansion."""
- env = CapacityEnvelope.from_values("A", "B", "combine", [1, 1, 2, 3, 5, 8])
- # expand_to_values should reconstruct the multiset
- values = sorted(env.expand_to_values())
- assert values == [1, 1, 2, 3, 5, 8]
-
- # Percentiles on discrete frequency distribution
- assert env.get_percentile(0) == 1
- assert env.get_percentile(50) == 2
- assert env.get_percentile(100) == 8
- with pytest.raises(ValueError):
- env.get_percentile(-1)
- with pytest.raises(ValueError):
- env.get_percentile(101)
-
-
-def test_traffic_matrix_set_comprehensive():
- """Test TrafficMatrixSet with multiple complex scenarios."""
- from ngraph.demand.spec import TrafficDemand
-
- tms = TrafficMatrixSet()
-
- # Peak hour scenario with multiple demands
- peak_demands = [
- TrafficDemand(
- source_path="servers.*", sink_path="storage.*", demand=200.0, priority=1
- ),
- TrafficDemand(source_path="web.*", sink_path="db.*", demand=50.0, priority=0),
- TrafficDemand(
- source_path="cache.*", sink_path="origin.*", demand=75.0, priority=2
- ),
- ]
- tms.add("peak_hour", peak_demands)
-
- # Off-peak scenario
- off_peak_demands = [
- TrafficDemand(
- source_path="backup.*", sink_path="archive.*", demand=25.0, priority=3
- ),
- TrafficDemand(
- source_path="sync.*", sink_path="replica.*", demand=10.0, priority=2
- ),
- ]
- tms.add("off_peak", off_peak_demands)
-
- # Emergency scenario
- emergency_demands = [
- TrafficDemand(
- source_path="critical.*",
- sink_path="backup.*",
- demand=500.0,
- priority=0,
- mode="pairwise",
- )
- ]
- tms.add("emergency", emergency_demands)
-
- # Test serialization
- d = tms.to_dict()
- json.dumps(d) # Must be JSON-serializable
-
- # Verify structure
- assert len(d) == 3
- assert "peak_hour" in d
- assert "off_peak" in d
- assert "emergency" in d
-
- # Verify content
- assert len(d["peak_hour"]) == 3
- assert len(d["off_peak"]) == 2
- assert len(d["emergency"]) == 1
-
- # Verify demand details
- assert d["peak_hour"][0]["demand"] == 200.0
- assert d["peak_hour"][0]["priority"] == 1
- assert d["emergency"][0]["mode"] == "pairwise"
- assert d["off_peak"][1]["source_path"] == "sync.*"
-
-
-def test_capacity_envelope_comprehensive_stats():
- """Test CapacityEnvelope with various statistical scenarios."""
- # Test with normal distribution-like values
- env1 = CapacityEnvelope.from_values(
- "A", "B", "combine", [10, 12, 15, 18, 20, 22, 25]
- )
- assert env1.min_capacity == 10
- assert env1.max_capacity == 25
- assert abs(env1.mean_capacity - 17.428571428571427) < 0.001
- assert env1.stdev_capacity > 0
-
- # Test with identical values
- env2 = CapacityEnvelope.from_values("C", "D", "combine", [100, 100, 100, 100])
- assert env2.min_capacity == 100
- assert env2.max_capacity == 100
- assert env2.mean_capacity == 100
- assert env2.stdev_capacity == 0.0
-
- # Test with extreme outliers
- env3 = CapacityEnvelope.from_values("E", "F", "combine", [1, 1000])
- assert env3.min_capacity == 1
- assert env3.max_capacity == 1000
- assert env3.mean_capacity == 500.5
-
- # Test serialization of all variants
- for env in [env1, env2, env3]:
- d = env.to_dict()
- json.dumps(d)
- assert "source" in d
- assert "sink" in d
- assert "frequencies" in d
- assert "min" in d
- assert "max" in d
- assert "mean" in d
- assert "stdev" in d
-
-
-def test_placement_result_set_complex_scenarios():
- """Test PlacementResultSet with complex multi-case scenarios."""
- # Multiple test cases with different results
- results_by_case = {
- "baseline": [
- TrafficResult(0, 100, 95, 5, "A", "B"),
- TrafficResult(1, 50, 45, 5, "C", "D"),
- TrafficResult(0, 200, 180, 20, "E", "F"),
- ],
- "optimized": [
- TrafficResult(0, 100, 100, 0, "A", "B"),
- TrafficResult(1, 50, 50, 0, "C", "D"),
- TrafficResult(0, 200, 200, 0, "E", "F"),
- ],
- "degraded": [
- TrafficResult(0, 100, 80, 20, "A", "B"),
- TrafficResult(1, 50, 30, 20, "C", "D"),
- TrafficResult(0, 200, 150, 50, "E", "F"),
- ],
- }
-
- # Complex statistics
- overall_stats = {
- "total_improvement": 15.0,
- "avg_utilization": 0.92,
- "worst_case_loss": 0.25,
- }
-
- # Per-demand statistics
- demand_stats = {
- ("A", "B", 0): {"success_rate": 0.95, "avg_latency": 1.2},
- ("C", "D", 1): {"success_rate": 0.90, "avg_latency": 2.1},
- ("E", "F", 0): {"success_rate": 0.88, "avg_latency": 1.8},
- }
-
- prs = PlacementResultSet(
- results_by_case=results_by_case,
- overall_stats=overall_stats,
- demand_stats=demand_stats,
- )
-
- # Test serialization
- d = prs.to_dict()
- json.dumps(d) # Must be JSON-serializable
-
- # Verify structure
- assert len(d["cases"]) == 3
- assert "baseline" in d["cases"]
- assert "optimized" in d["cases"]
- assert "degraded" in d["cases"]
-
- # Verify case data
- assert len(d["cases"]["baseline"]) == 3
- assert d["cases"]["optimized"][0]["unplaced_volume"] == 0
- assert d["cases"]["degraded"][2]["placed_volume"] == 150
-
- # Verify statistics
- assert d["overall_stats"]["total_improvement"] == 15.0
- assert len(d["demand_stats"]) == 3
- assert "A->B|prio=0" in d["demand_stats"]
- assert d["demand_stats"]["A->B|prio=0"]["success_rate"] == 0.95
-
-
-def test_all_artifacts_json_roundtrip():
- """Test that all result artifacts can roundtrip through JSON."""
- from ngraph.demand.spec import TrafficDemand
- from ngraph.results.artifacts import PlacementResultSet
-
- # Create instances of all artifact types
- env = CapacityEnvelope.from_values("src", "dst", "combine", [100, 150, 200])
-
- tms = TrafficMatrixSet()
- td = TrafficDemand(source_path="^test.*", sink_path="^dest.*", demand=42.0)
- tms.add("test_matrix", [td])
-
- prs = PlacementResultSet(
- results_by_case={"test": [TrafficResult(0, 10, 8, 2, "A", "B")]},
- overall_stats={"efficiency": 0.8},
- demand_stats={("A", "B", 0): {"rate": 0.8}},
- )
-
- # Test individual serialization and JSON roundtrip
- artifacts = [env, tms, prs]
- for artifact in artifacts:
- # Serialize to dict
- d = artifact.to_dict()
-
- # Convert to JSON and back
- json_str = json.dumps(d)
- parsed = json.loads(json_str)
-
- # Verify structure is preserved
- assert isinstance(parsed, dict)
- assert len(parsed) > 0
-
- # Verify no objects remain (all primitives)
- def check_primitives(obj):
- if isinstance(obj, dict):
- for v in obj.values():
- check_primitives(v)
- elif isinstance(obj, list):
- for item in obj:
- check_primitives(item)
- else:
- # Should be a primitive type
- assert obj is None or isinstance(obj, (str, int, float, bool))
-
- check_primitives(parsed)
-
-
-def test_placement_envelope_from_values_basic():
- env = PlacementEnvelope.from_values(
- source="A",
- sink="B",
- mode="pairwise",
- priority=1,
- ratios=[1.0, 0.8, 0.8],
- )
- assert env.source == "A"
- assert env.sink == "B"
- assert env.mode == "pairwise"
- assert env.priority == 1
- assert env.frequencies.get(1.0) == 1
- assert env.frequencies.get(0.8) == 2
- assert env.total_samples == 3
- d = env.to_dict()
- json.dumps(d)
-
-
-def test_traffic_matrix_set_get_default_single_matrix():
- """Test get_default() with only one matrix."""
- matrix_set = TrafficMatrixSet()
- demand1 = TrafficDemand(source_path="A", sink_path="B", demand=100)
- matrix_set.add("single", [demand1])
-
- # Should return the single matrix even though it's not named 'default'
- result = matrix_set.get_default_matrix()
- assert result == [demand1]
-
-
-def test_traffic_matrix_set_get_default_multiple_matrices_no_default():
- """Test get_default_matrix() with multiple matrices but no 'default' matrix."""
- matrix_set = TrafficMatrixSet()
- demand1 = TrafficDemand(source_path="A", sink_path="B", demand=100)
- demand2 = TrafficDemand(source_path="C", sink_path="D", demand=200)
-
- matrix_set.add("matrix1", [demand1])
- matrix_set.add("matrix2", [demand2])
-
- # Should raise ValueError since multiple matrices exist but no 'default'
- with pytest.raises(ValueError, match="Multiple matrices exist"):
- matrix_set.get_default_matrix()
-
-
-def test_traffic_matrix_set_get_all_demands():
- """Test get_all_demands() method."""
- matrix_set = TrafficMatrixSet()
- demand1 = TrafficDemand(source_path="A", sink_path="B", demand=100)
- demand2 = TrafficDemand(source_path="C", sink_path="D", demand=200)
- demand3 = TrafficDemand(source_path="E", sink_path="F", demand=300)
-
- matrix_set.add("matrix1", [demand1, demand2])
- matrix_set.add("matrix2", [demand3])
-
- all_demands = matrix_set.get_all_demands()
- assert len(all_demands) == 3
- assert demand1 in all_demands
- assert demand2 in all_demands
- assert demand3 in all_demands
-
-
-def test_capacity_envelope_from_values_empty_list():
- """Test CapacityEnvelope.from_values() with empty values list."""
- with pytest.raises(
- ValueError, match="Cannot create envelope from empty values list"
- ):
- CapacityEnvelope.from_values("A", "B", "combine", [])
diff --git a/tests/results/test_artifacts_unit.py b/tests/results/test_artifacts_unit.py
deleted file mode 100644
index 53808b2..0000000
--- a/tests/results/test_artifacts_unit.py
+++ /dev/null
@@ -1,103 +0,0 @@
-from __future__ import annotations
-
-import math
-
-import pytest
-
-from ngraph.demand.manager.manager import TrafficResult
-from ngraph.results.artifacts import (
- CapacityEnvelope,
- FailurePatternResult,
- PlacementEnvelope,
- PlacementResultSet,
-)
-
-
-def test_placement_result_set_to_dict_shapes() -> None:
- res = PlacementResultSet(
- results_by_case={
- "case1": [
- TrafficResult(0, 10.0, 7.0, 3.0, "A", "B"),
- TrafficResult(0, 5.0, 5.0, 0.0, "A", "C"),
- ]
- },
- overall_stats={"mean": 0.7},
- demand_stats={("A", "B", 0): {"ratio": 0.7}},
- )
- d = res.to_dict()
- assert "cases" in d and "overall_stats" in d and "demand_stats" in d
- assert d["cases"]["case1"][0]["src"] == "A"
- # demand_stats keys are stringified
- assert any(key.startswith("A->B|prio=") for key in d["demand_stats"].keys())
-
-
-def test_capacity_envelope_from_values_and_percentile_roundtrip() -> None:
- values = [1.0, 1.0, 2.0, 10.0]
- env = CapacityEnvelope.from_values("A", "B", "combine", values)
- assert env.total_samples == 4
- assert env.frequencies[1.0] == 2
- assert math.isclose(env.get_percentile(50), 1.0)
- assert math.isclose(env.get_percentile(100), 10.0)
- # to_dict/from_dict
- env2 = CapacityEnvelope.from_dict(env.to_dict())
- assert env2.frequencies == env.frequencies
- assert math.isclose(env2.mean_capacity, env.mean_capacity)
-
-
-def test_capacity_envelope_expand_to_values() -> None:
- env = CapacityEnvelope(
- source_pattern="A",
- sink_pattern="B",
- mode="combine",
- frequencies={1.0: 2, 2.0: 1},
- min_capacity=1.0,
- max_capacity=2.0,
- mean_capacity=1.3333333,
- stdev_capacity=0.4714,
- total_samples=3,
- )
- vals = sorted(env.expand_to_values())
- assert vals == [1.0, 1.0, 2.0]
-
-
-def test_capacity_envelope_invalid_inputs() -> None:
- with pytest.raises(ValueError):
- CapacityEnvelope.from_values("A", "B", "pairwise", [])
- with pytest.raises(ValueError):
- # percentile outside range
- env = CapacityEnvelope.from_values("A", "B", "combine", [1.0])
- env.get_percentile(-1)
-
-
-def test_failure_pattern_result_key_and_dict() -> None:
- fpr = FailurePatternResult(
- excluded_nodes=["n1"],
- excluded_links=["e1"],
- capacity_matrix={"A->B": 1.0},
- count=2,
- is_baseline=False,
- )
- k1 = fpr.pattern_key
- k2 = fpr.pattern_key
- assert k1 == k2 and k1.startswith("pattern_")
- d = fpr.to_dict()
- assert d["count"] == 2 and d["capacity_matrix"]["A->B"] == 1.0
- # baseline has fixed key
- base = FailurePatternResult(["n1"], ["e1"], {}, 1, is_baseline=True)
- assert base.pattern_key == "baseline"
-
-
-def test_placement_envelope_roundtrip_and_stats() -> None:
- pe = PlacementEnvelope.from_values(
- source="A",
- sink="B",
- mode="pairwise",
- priority=0,
- ratios=[0.1, 0.1, 0.2, 1.0],
- rounding_decimals=2,
- )
- assert pe.total_samples == 4
- d = pe.to_dict()
- pe2 = PlacementEnvelope.from_dict(d)
- assert pe2.frequencies == pe.frequencies
- assert pe2.priority == 0 and pe2.mode == "pairwise"
diff --git a/tests/scenario/test_scenario.py b/tests/scenario/test_scenario.py
index 990715b..d69e995 100644
--- a/tests/scenario/test_scenario.py
+++ b/tests/scenario/test_scenario.py
@@ -3,7 +3,7 @@
import pytest
-from ngraph.failure.policy import FailurePolicy
+from ngraph.model.failure.policy import FailurePolicy
from ngraph.model.network import Network
from ngraph.results import Results
from ngraph.scenario import Scenario
@@ -348,10 +348,14 @@ def test_scenario_run(valid_scenario_yaml: str) -> None:
def test_scenario_from_yaml_missing_step_type(missing_step_type_yaml: str) -> None:
"""
- Tests that Scenario.from_yaml raises a ValueError if a workflow step
+ Tests that Scenario.from_yaml raises an error if a workflow step
is missing the 'step_type' field.
+
+ Schema validation catches this and raises ValidationError.
"""
- with pytest.raises(ValueError):
+ import jsonschema.exceptions
+
+ with pytest.raises(jsonschema.exceptions.ValidationError):
Scenario.from_yaml(missing_step_type_yaml)
diff --git a/tests/seed_manager/__init__.py b/tests/seed_manager/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/solver/test_flow_placement_semantics.py b/tests/solver/test_flow_placement_semantics.py
new file mode 100644
index 0000000..9fa2958
--- /dev/null
+++ b/tests/solver/test_flow_placement_semantics.py
@@ -0,0 +1,901 @@
+"""Comprehensive tests validating IP and TE flow placement semantics with ECMP and WCMP.
+
+This test suite validates that NetGraph correctly implements the distinct behavioral
+semantics of IP routing vs Traffic Engineering, and ECMP vs WCMP flow placement.
+
+Key distinctions tested:
+1. IP routing (require_capacity=False): Routes based on costs only, ignoring capacity
+2. TE routing (require_capacity=True): Routes adapt to residual capacity
+3. ECMP (EQUAL_BALANCED): Equal splitting across equal-cost paths
+4. WCMP (PROPORTIONAL): Capacity-proportional splitting across equal-cost paths
+
+Tests use a shared topology where different settings produce measurably different results,
+validating actual placement behavior (not just API correctness).
+"""
+
+from __future__ import annotations
+
+import pytest
+
+from ngraph.model.network import Link, Network, Node
+from ngraph.solver.maxflow import max_flow, max_flow_with_details
+from ngraph.types.base import FlowPlacement
+
+
+def _unbalanced_parallel_paths() -> Network:
+ """Create network with parallel paths of equal cost but different capacities.
+
+ This topology is specifically designed to expose differences between:
+ - ECMP vs WCMP: Different capacities mean WCMP can utilize more flow
+ - IP vs TE: Multiple augmentations will behave differently
+
+ Topology:
+ S -> A (cap 10, cost 1) -> T (cap 10, cost 1) [path 1: cost 2, cap 10]
+ S -> B (cap 30, cost 1) -> T (cap 30, cost 1) [path 2: cost 2, cap 30]
+ S -> C (cap 50, cost 1) -> T (cap 50, cost 1) [path 3: cost 2, cap 50]
+
+ All paths have equal cost (2), but different capacities (10, 30, 50).
+ Total capacity: 90
+ """
+ net = Network()
+ for name in ["S", "A", "B", "C", "T"]:
+ net.add_node(Node(name))
+
+ # Path 1: S -> A -> T (cap 10)
+ net.add_link(Link("S", "A", capacity=10.0, cost=1.0))
+ net.add_link(Link("A", "T", capacity=10.0, cost=1.0))
+
+ # Path 2: S -> B -> T (cap 30)
+ net.add_link(Link("S", "B", capacity=30.0, cost=1.0))
+ net.add_link(Link("B", "T", capacity=30.0, cost=1.0))
+
+ # Path 3: S -> C -> T (cap 50)
+ net.add_link(Link("S", "C", capacity=50.0, cost=1.0))
+ net.add_link(Link("C", "T", capacity=50.0, cost=1.0))
+
+ return net
+
+
+def _multi_tier_unbalanced() -> Network:
+ """Create network with multiple cost tiers and unbalanced capacities within each tier.
+
+ This topology tests:
+ - IP shortest_path mode: should only use tier 1
+ - TE progressive mode: should use multiple tiers when tier 1 saturates
+ - ECMP vs WCMP within each tier
+
+ Topology:
+ Tier 1 (cost 10):
+ S -> A1 (cap 20, cost 5) -> T (cap 20, cost 5)
+ S -> A2 (cap 40, cost 5) -> T (cap 40, cost 5)
+ Tier 2 (cost 20):
+ S -> B1 (cap 30, cost 10) -> T (cap 30, cost 10)
+ S -> B2 (cap 60, cost 10) -> T (cap 60, cost 10)
+ """
+ net = Network()
+ for name in ["S", "A1", "A2", "B1", "B2", "T"]:
+ net.add_node(Node(name))
+
+ # Tier 1: cost 10, total cap 60
+ net.add_link(Link("S", "A1", capacity=20.0, cost=5.0))
+ net.add_link(Link("A1", "T", capacity=20.0, cost=5.0))
+ net.add_link(Link("S", "A2", capacity=40.0, cost=5.0))
+ net.add_link(Link("A2", "T", capacity=40.0, cost=5.0))
+
+ # Tier 2: cost 20, total cap 90
+ net.add_link(Link("S", "B1", capacity=30.0, cost=10.0))
+ net.add_link(Link("B1", "T", capacity=30.0, cost=10.0))
+ net.add_link(Link("S", "B2", capacity=60.0, cost=10.0))
+ net.add_link(Link("B2", "T", capacity=60.0, cost=10.0))
+
+ return net
+
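+# Illustrative tier arithmetic for the fixture above: IP routing
+# (shortest_path=True) uses only tier 1 (cost 10), capped at 60 units, while
+# TE (shortest_path=False) also fills tier 2 (cost 20) once tier 1 saturates,
+# giving 60 + 90 = 150 units. The test classes below assert these values.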
+
+class TestECMPvsWCMPSemantics:
+ """Test ECMP vs WCMP placement on unbalanced parallel paths."""
+
+ def test_ecmp_equal_split_on_unbalanced_paths(self):
+ """ECMP should split flow equally across paths, leaving capacity unused on larger paths."""
+ net = _unbalanced_parallel_paths()
+
+ # ECMP: equal split across 3 paths
+ # With equal splitting, the smallest path (cap 10) becomes the bottleneck
+ # Each path can carry at most 10 units (limited by smallest path)
+ # Total: 3 * 10 = 30 units
+ result = max_flow(
+ net,
+ "S",
+ "T",
+ flow_placement=FlowPlacement.EQUAL_BALANCED,
+ shortest_path=True,
+ )
+
+ assert result[("S", "T")] == pytest.approx(30.0, abs=1e-6), (
+ "ECMP with equal splitting should be limited by smallest path capacity"
+ )
+
+ def test_wcmp_proportional_split_on_unbalanced_paths(self):
+ """WCMP should split flow proportionally to capacity, fully utilizing all paths."""
+ net = _unbalanced_parallel_paths()
+
+ # WCMP: proportional split based on capacity
+ # Path 1: 10 units (10 / 90 = 11.1%)
+ # Path 2: 30 units (30 / 90 = 33.3%)
+ # Path 3: 50 units (50 / 90 = 55.6%)
+ # Total: 90 units (full utilization)
+ result = max_flow(
+ net, "S", "T", flow_placement=FlowPlacement.PROPORTIONAL, shortest_path=True
+ )
+
+ assert result[("S", "T")] == pytest.approx(90.0, abs=1e-6), (
+ "WCMP with proportional splitting should fully utilize all paths"
+ )
+
+ def test_ecmp_vs_wcmp_utilization_gap(self):
+ """Verify that WCMP achieves higher utilization than ECMP on unbalanced paths."""
+ net = _unbalanced_parallel_paths()
+
+ ecmp_result = max_flow(
+ net,
+ "S",
+ "T",
+ flow_placement=FlowPlacement.EQUAL_BALANCED,
+ shortest_path=True,
+ )
+
+ wcmp_result = max_flow(
+ net, "S", "T", flow_placement=FlowPlacement.PROPORTIONAL, shortest_path=True
+ )
+
+ ecmp_flow = ecmp_result[("S", "T")]
+ wcmp_flow = wcmp_result[("S", "T")]
+
+ # WCMP should achieve 3x the flow of ECMP on this topology
+ assert wcmp_flow == pytest.approx(3.0 * ecmp_flow, abs=1e-6), (
+ f"Expected WCMP ({wcmp_flow}) to be 3x ECMP ({ecmp_flow})"
+ )
+
+ # Verify specific values
+ assert ecmp_flow == pytest.approx(30.0, abs=1e-6)
+ assert wcmp_flow == pytest.approx(90.0, abs=1e-6)
+
+
+class TestIPvsTE_Semantics:
+ """Test IP routing vs Traffic Engineering semantics."""
+
+ def test_ip_shortest_path_single_tier(self):
+ """IP shortest_path=True should only use lowest cost tier."""
+ net = _multi_tier_unbalanced()
+
+ # IP mode: shortest_path=True, uses only tier 1 (cost 10)
+ # Tier 1 capacity: 60 units
+ result_ip_ecmp = max_flow(
+ net,
+ "S",
+ "T",
+ flow_placement=FlowPlacement.EQUAL_BALANCED,
+ shortest_path=True,
+ )
+
+ result_ip_wcmp = max_flow(
+ net, "S", "T", flow_placement=FlowPlacement.PROPORTIONAL, shortest_path=True
+ )
+
+ # ECMP on tier 1: limited by smallest path (20)
+ # Equal split: 2 * 20 = 40 units
+ assert result_ip_ecmp[("S", "T")] == pytest.approx(40.0, abs=1e-6), (
+ "IP ECMP should use only tier 1 with equal splitting"
+ )
+
+ # WCMP on tier 1: proportional split
+ # 20 + 40 = 60 units (full tier 1 utilization)
+ assert result_ip_wcmp[("S", "T")] == pytest.approx(60.0, abs=1e-6), (
+ "IP WCMP should use only tier 1 with proportional splitting"
+ )
+
+ def test_te_progressive_multi_tier(self):
+ """TE shortest_path=False should use multiple tiers when lower tiers saturate."""
+ net = _multi_tier_unbalanced()
+
+ # TE mode: shortest_path=False, progressive fill across tiers
+ # Tier 1: 60 units (fills first)
+ # Tier 2: 90 units (fills next)
+ # Total: 150 units
+ result_te_wcmp = max_flow(
+ net,
+ "S",
+ "T",
+ flow_placement=FlowPlacement.PROPORTIONAL,
+ shortest_path=False,
+ )
+
+ assert result_te_wcmp[("S", "T")] == pytest.approx(150.0, abs=1e-6), (
+ "TE WCMP should progressively fill all cost tiers"
+ )
+
+ # Verify cost distribution shows both tiers were used
+ result_details = max_flow_with_details(
+ net,
+ "S",
+ "T",
+ mode="combine",
+ flow_placement=FlowPlacement.PROPORTIONAL,
+ shortest_path=False,
+ )
+
+ summary = result_details[("S", "T")]
+
+ # Should have flow at two different cost levels
+ assert len(summary.cost_distribution) == 2, "TE mode should use both cost tiers"
+
+ # Tier 1 (cost 10) should have 60 units
+ assert 10.0 in summary.cost_distribution
+ assert summary.cost_distribution[10.0] == pytest.approx(60.0, abs=1e-6)
+
+ # Tier 2 (cost 20) should have 90 units
+ assert 20.0 in summary.cost_distribution
+ assert summary.cost_distribution[20.0] == pytest.approx(90.0, abs=1e-6)
+
+ def test_te_ecmp_progressive_multi_tier(self):
+ """TE ECMP progressive mode achieves full utilization via multi-round equal splitting.
+
+ In progressive mode with EQUAL_BALANCED, each tier is filled independently in
+ separate augmentation rounds. Within each round, EQUAL_BALANCED constrains splitting,
+ but across rounds, full capacity is utilized.
+ """
+ net = _multi_tier_unbalanced()
+
+ result_te_ecmp = max_flow(
+ net,
+ "S",
+ "T",
+ flow_placement=FlowPlacement.EQUAL_BALANCED,
+ shortest_path=False,
+ )
+
+ # Progressive ECMP fills tiers sequentially with equal splitting per tier:
+ # Round 1 (Tier 1): 2 paths with equal splitting → fills both (20 + 40 = 60)
+ # Round 2 (Tier 2): 2 paths with equal splitting → fills both (30 + 60 = 90)
+ # Total: 150 units (full utilization achieved via multiple rounds)
+ assert result_te_ecmp[("S", "T")] == pytest.approx(150.0, abs=1e-6), (
+ "TE ECMP progressive mode should achieve full utilization via multi-round placement"
+ )
+
+
+class TestCombinedSemantics:
+ """Test combinations of IP/TE and ECMP/WCMP semantics."""
+
+ @pytest.mark.parametrize(
+ "shortest_path,flow_placement,expected_flow",
+ [
+ # IP ECMP: single tier, equal split → limited by smallest path
+ (True, FlowPlacement.EQUAL_BALANCED, 40.0),
+ # IP WCMP: single tier, proportional split → full tier utilization
+ (True, FlowPlacement.PROPORTIONAL, 60.0),
+ # TE ECMP: multi-tier, multi-round equal splitting → full utilization
+ (False, FlowPlacement.EQUAL_BALANCED, 150.0),
+ # TE WCMP: multi-tier, progressive proportional splitting → full utilization
+ (False, FlowPlacement.PROPORTIONAL, 150.0),
+ ],
+ )
+ def test_semantic_combinations_on_multi_tier(
+ self, shortest_path, flow_placement, expected_flow
+ ):
+ """Test all four combinations of IP/TE and ECMP/WCMP semantics."""
+ net = _multi_tier_unbalanced()
+
+ result = max_flow(
+ net, "S", "T", flow_placement=flow_placement, shortest_path=shortest_path
+ )
+
+ assert result[("S", "T")] == pytest.approx(expected_flow, abs=1e-6), (
+ f"Expected {expected_flow} for shortest_path={shortest_path}, "
+ f"flow_placement={flow_placement.name}"
+ )
+
+ @pytest.mark.parametrize(
+ "shortest_path,flow_placement,expected_flow",
+ [
+ # IP ECMP: single tier (all equal cost), equal split → limited by smallest
+ (True, FlowPlacement.EQUAL_BALANCED, 30.0),
+ # IP WCMP: single tier (all equal cost), proportional split → full utilization
+ (True, FlowPlacement.PROPORTIONAL, 90.0),
+ # TE ECMP: multi-round on single tier → achieves full utilization
+ (False, FlowPlacement.EQUAL_BALANCED, 90.0),
+ # TE WCMP: progressive on single tier → full utilization
+ (False, FlowPlacement.PROPORTIONAL, 90.0),
+ ],
+ )
+ def test_semantic_combinations_on_parallel_paths(
+ self, shortest_path, flow_placement, expected_flow
+ ):
+ """Test all four combinations on parallel paths topology."""
+ net = _unbalanced_parallel_paths()
+
+ result = max_flow(
+ net, "S", "T", flow_placement=flow_placement, shortest_path=shortest_path
+ )
+
+ assert result[("S", "T")] == pytest.approx(expected_flow, abs=1e-6), (
+ f"Expected {expected_flow} for shortest_path={shortest_path}, "
+ f"flow_placement={flow_placement.name}"
+ )
+
+
+class TestAccountingValidation:
+ """Validate that flow accounting is correct across all modes."""
+
+ @pytest.mark.parametrize("shortest_path", [True, False])
+ @pytest.mark.parametrize(
+ "flow_placement", [FlowPlacement.EQUAL_BALANCED, FlowPlacement.PROPORTIONAL]
+ )
+ def test_cost_distribution_sums_to_total_flow(self, shortest_path, flow_placement):
+ """Verify cost distribution values sum to total flow."""
+ net = _multi_tier_unbalanced()
+
+ result = max_flow_with_details(
+ net,
+ "S",
+ "T",
+ mode="combine",
+ flow_placement=flow_placement,
+ shortest_path=shortest_path,
+ )
+
+ summary = result[("S", "T")]
+
+ # Sum of cost distribution should equal total flow
+ cost_dist_sum = sum(summary.cost_distribution.values())
+ assert cost_dist_sum == pytest.approx(summary.total_flow, abs=1e-9), (
+ f"Cost distribution sum ({cost_dist_sum}) != total flow ({summary.total_flow})"
+ )
+
+ @pytest.mark.parametrize("shortest_path", [True, False])
+ @pytest.mark.parametrize(
+ "flow_placement", [FlowPlacement.EQUAL_BALANCED, FlowPlacement.PROPORTIONAL]
+ )
+ def test_flow_results_are_deterministic(self, shortest_path, flow_placement):
+ """Verify that flow results are deterministic across multiple runs."""
+ net = _unbalanced_parallel_paths()
+
+ results = []
+ for _ in range(3):
+ result = max_flow(
+ net,
+ "S",
+ "T",
+ flow_placement=flow_placement,
+ shortest_path=shortest_path,
+ )
+ results.append(result[("S", "T")])
+
+ # All runs should produce identical results
+ assert all(r == pytest.approx(results[0], abs=1e-9) for r in results), (
+ f"Non-deterministic results: {results}"
+ )
+
+
+class TestTELSPLimits:
+ """Test TE LSP scenarios with limited flow counts.
+
+ These tests validate the behavior when the number of TE LSPs (tunnels) is limited
+ while multiple diverse paths exist. Key semantic: with multipath=False and a max
+ flow count, each LSP is a distinct tunnel using a single path (MPLS LSP semantics).
+
+ Expected behavior: LSPs should be allocated to maximize throughput by selecting
+ the highest-capacity paths.
+ """
+
+ def test_4_lsps_on_8_diverse_paths(self):
+ """With 4 LSPs and 8 diverse paths, should use the 4 highest-capacity paths.
+
+ This tests the core TE LSP allocation strategy: when LSPs are limited,
+ they should be allocated to maximize total throughput by selecting the
+ highest-capacity paths.
+ """
+ import netgraph_core
+
+ from ngraph.adapters.core import build_graph
+
+ # Create 8 diverse paths with different capacities
+ # Capacities: 10, 15, 20, 25, 30, 35, 40, 45
+ net = Network()
+ nodes = ["S"] + [f"M{i}" for i in range(8)] + ["T"]
+ for node in nodes:
+ net.add_node(Node(node))
+
+ capacities = [10, 15, 20, 25, 30, 35, 40, 45]
+ for i, cap in enumerate(capacities):
+ # Create path S -> Mi -> T with given capacity
+ net.add_link(Link("S", f"M{i}", capacity=cap, cost=1.0))
+ net.add_link(Link(f"M{i}", "T", capacity=cap, cost=1.0))
+
+ # Use netgraph_core directly to create custom FlowPolicy with 4 LSPs
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+ graph_handle, multidigraph, _, node_mapper = build_graph(net)
+
+ # Create TE LSP config with custom max_flow_count
+ config = netgraph_core.FlowPolicyConfig()
+ config.path_alg = netgraph_core.PathAlg.SPF
+ config.flow_placement = netgraph_core.FlowPlacement.EQUAL_BALANCED
+ config.selection = netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config.multipath = False # Each LSP uses a single path
+ config.min_flow_count = 4
+ config.max_flow_count = 4 # Exactly 4 LSPs
+ config.reoptimize_flows_on_each_placement = True
+
+ policy = netgraph_core.FlowPolicy(algs, graph_handle, config)
+ fg = netgraph_core.FlowGraph(multidigraph)
+
+ src_id = node_mapper.to_id("S")
+ dst_id = node_mapper.to_id("T")
+
+ placed, remaining = policy.place_demand(
+ fg, src_id, dst_id, flowClass=0, volume=1000.0
+ )
+
+ # With 4 LSPs on 8 paths, should use the 4 highest-capacity paths
+ # Highest 4 capacities: 45, 40, 35, 30
+ # With ECMP constraint (EQUAL_BALANCED), all LSPs must carry equal volume
+ # Limited by smallest selected path: 4 × 30 = 120
+ assert placed == pytest.approx(120.0, abs=1e-3), (
+ f"Expected 4 LSPs with ECMP constraint (4×30=120), got {placed}"
+ )
+ assert policy.flow_count() == 4
+
+ def test_2_lsps_on_5_diverse_paths(self):
+ """With 2 LSPs and 5 diverse paths, should use the 2 highest-capacity paths."""
+ import netgraph_core
+
+ from ngraph.adapters.core import build_graph
+
+ net = Network()
+ nodes = ["S"] + [f"M{i}" for i in range(5)] + ["T"]
+ for node in nodes:
+ net.add_node(Node(node))
+
+ # Capacities: 10, 20, 30, 40, 50
+ capacities = [10, 20, 30, 40, 50]
+ for i, cap in enumerate(capacities):
+ net.add_link(Link("S", f"M{i}", capacity=cap, cost=1.0))
+ net.add_link(Link(f"M{i}", "T", capacity=cap, cost=1.0))
+
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+ graph_handle, multidigraph, _, node_mapper = build_graph(net)
+
+ config = netgraph_core.FlowPolicyConfig()
+ config.path_alg = netgraph_core.PathAlg.SPF
+ config.flow_placement = netgraph_core.FlowPlacement.EQUAL_BALANCED
+ config.selection = netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config.multipath = False
+ config.min_flow_count = 2
+ config.max_flow_count = 2
+ config.reoptimize_flows_on_each_placement = True
+
+ policy = netgraph_core.FlowPolicy(algs, graph_handle, config)
+ fg = netgraph_core.FlowGraph(multidigraph)
+
+ src_id = node_mapper.to_id("S")
+ dst_id = node_mapper.to_id("T")
+
+ placed, remaining = policy.place_demand(
+ fg, src_id, dst_id, flowClass=0, volume=500.0
+ )
+
+ # Should use 2 highest-capacity paths: 50, 40
+ # With ECMP constraint, all LSPs carry equal volume: 2 × 40 = 80
+ assert placed == pytest.approx(80.0, abs=1e-3), (
+ f"Expected 2 LSPs with ECMP constraint (2×40=80), got {placed}"
+ )
+ assert policy.flow_count() == 2
+
+ def test_lsps_equal_to_path_count(self):
+ """When LSP count equals path count, should utilize all paths."""
+ import netgraph_core
+
+ from ngraph.adapters.core import build_graph
+
+ net = Network()
+ nodes = ["S", "M1", "M2", "M3", "T"]
+ for node in nodes:
+ net.add_node(Node(node))
+
+ # 3 paths with capacities 20, 30, 40
+ capacities = [20, 30, 40]
+ for i, cap in enumerate(capacities, 1):
+ net.add_link(Link("S", f"M{i}", capacity=cap, cost=1.0))
+ net.add_link(Link(f"M{i}", "T", capacity=cap, cost=1.0))
+
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+ graph_handle, multidigraph, _, node_mapper = build_graph(net)
+
+ config = netgraph_core.FlowPolicyConfig()
+ config.path_alg = netgraph_core.PathAlg.SPF
+ config.flow_placement = netgraph_core.FlowPlacement.EQUAL_BALANCED
+ config.selection = netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config.multipath = False
+ config.min_flow_count = 3
+ config.max_flow_count = 3
+ config.reoptimize_flows_on_each_placement = True
+
+ policy = netgraph_core.FlowPolicy(algs, graph_handle, config)
+ fg = netgraph_core.FlowGraph(multidigraph)
+
+ src_id = node_mapper.to_id("S")
+ dst_id = node_mapper.to_id("T")
+
+ placed, remaining = policy.place_demand(
+ fg, src_id, dst_id, flowClass=0, volume=500.0
+ )
+
+ # Should utilize all 3 paths with capacities 20, 30, 40
+ # With ECMP constraint, all LSPs carry equal volume: 3 × 20 = 60
+ assert placed == pytest.approx(60.0, abs=1e-3), (
+ f"Expected 3 LSPs with ECMP constraint (3×20=60), got {placed}"
+ )
+ assert policy.flow_count() == 3
+
+ def test_lsp_vs_unlimited_te_comparison(self):
+ """Compare limited LSP allocation vs unlimited TE on same topology."""
+ import netgraph_core
+
+ from ngraph.adapters.core import build_graph
+
+ net = Network()
+ nodes = ["S"] + [f"M{i}" for i in range(6)] + ["T"]
+ for node in nodes:
+ net.add_node(Node(node))
+
+ # 6 paths: 10, 20, 30, 40, 50, 60
+ capacities = [10, 20, 30, 40, 50, 60]
+ for i, cap in enumerate(capacities):
+ net.add_link(Link("S", f"M{i}", capacity=cap, cost=1.0))
+ net.add_link(Link(f"M{i}", "T", capacity=cap, cost=1.0))
+
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+ graph_handle, multidigraph, _, node_mapper = build_graph(net)
+
+ src_id = node_mapper.to_id("S")
+ dst_id = node_mapper.to_id("T")
+
+ # Test with 3 LSPs
+ config_3lsp = netgraph_core.FlowPolicyConfig()
+ config_3lsp.path_alg = netgraph_core.PathAlg.SPF
+ config_3lsp.flow_placement = netgraph_core.FlowPlacement.EQUAL_BALANCED
+ config_3lsp.selection = netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config_3lsp.multipath = False
+ config_3lsp.min_flow_count = 3
+ config_3lsp.max_flow_count = 3
+ config_3lsp.reoptimize_flows_on_each_placement = True
+
+ policy_3lsp = netgraph_core.FlowPolicy(algs, graph_handle, config_3lsp)
+ fg_3lsp = netgraph_core.FlowGraph(multidigraph)
+ placed_3lsp, _ = policy_3lsp.place_demand(
+ fg_3lsp, src_id, dst_id, flowClass=0, volume=1000.0
+ )
+
+ # Test with unlimited TE
+ config_unlim = netgraph_core.FlowPolicyConfig()
+ config_unlim.path_alg = netgraph_core.PathAlg.SPF
+ config_unlim.flow_placement = netgraph_core.FlowPlacement.PROPORTIONAL
+ config_unlim.selection = netgraph_core.EdgeSelection(
+ multi_edge=True,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config_unlim.min_flow_count = 1
+ # max_flow_count defaults to None (unlimited)
+
+ policy_unlim = netgraph_core.FlowPolicy(algs, graph_handle, config_unlim)
+ fg_unlim = netgraph_core.FlowGraph(multidigraph)
+ placed_unlim, _ = policy_unlim.place_demand(
+ fg_unlim, src_id, dst_id, flowClass=0, volume=1000.0
+ )
+
+ # 3 LSPs with ECMP constraint on top 3 paths (60, 50, 40): 3 × 40 = 120
+ assert placed_3lsp == pytest.approx(120.0, abs=1e-3)
+ assert policy_3lsp.flow_count() == 3
+
+ # Unlimited TE: all paths = 10 + 20 + 30 + 40 + 50 + 60 = 210
+ assert placed_unlim == pytest.approx(210.0, abs=1e-3)
+
+ # Verify that limited LSPs achieve less than unlimited
+ assert placed_3lsp < placed_unlim
+
+
+class TestTELSPLimitsWCMP:
+ """Test TE LSP scenarios with WCMP (proportional splitting).
+
+ These tests validate WCMP behavior with limited LSPs. Unlike ECMP, WCMP allows
+ each LSP to carry different volumes proportional to path capacity, achieving
+ better utilization without the equal-splitting constraint.
+ """
+
+ def test_4_lsps_wcmp_on_8_diverse_paths(self):
+ """With 4 WCMP LSPs and 8 diverse paths, should fully utilize 4 highest-capacity paths.
+
+ Unlike ECMP, WCMP allows each LSP to carry volume proportional to its path capacity,
+ so total = sum of the 4 highest capacities.
+ """
+ import netgraph_core
+
+ from ngraph.adapters.core import build_graph
+
+ net = Network()
+ nodes = ["S"] + [f"M{i}" for i in range(8)] + ["T"]
+ for node in nodes:
+ net.add_node(Node(node))
+
+ capacities = [10, 15, 20, 25, 30, 35, 40, 45]
+ for i, cap in enumerate(capacities):
+ net.add_link(Link("S", f"M{i}", capacity=cap, cost=1.0))
+ net.add_link(Link(f"M{i}", "T", capacity=cap, cost=1.0))
+
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+ graph_handle, multidigraph, _, node_mapper = build_graph(net)
+
+ # WCMP TE LSP config
+ config = netgraph_core.FlowPolicyConfig()
+ config.path_alg = netgraph_core.PathAlg.SPF
+ config.flow_placement = netgraph_core.FlowPlacement.PROPORTIONAL # WCMP
+ config.selection = netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config.multipath = False
+ config.min_flow_count = 4
+ config.max_flow_count = 4
+ config.reoptimize_flows_on_each_placement = True
+
+ policy = netgraph_core.FlowPolicy(algs, graph_handle, config)
+ fg = netgraph_core.FlowGraph(multidigraph)
+
+ src_id = node_mapper.to_id("S")
+ dst_id = node_mapper.to_id("T")
+
+ placed, remaining = policy.place_demand(
+ fg, src_id, dst_id, flowClass=0, volume=1000.0
+ )
+
+ # WCMP: 4 LSPs on top 4 paths can each utilize full path capacity
+ # Top 4: 45, 40, 35, 30 → total = 150
+ assert placed == pytest.approx(150.0, abs=1e-3), (
+ f"Expected 4 WCMP LSPs to fully utilize 4 highest paths (45+40+35+30=150), "
+ f"got {placed}"
+ )
+ assert policy.flow_count() == 4
+
+ def test_wcmp_vs_ecmp_utilization_with_limited_lsps(self):
+ """Compare WCMP vs ECMP with limited LSPs to validate utilization difference."""
+ import netgraph_core
+
+ from ngraph.adapters.core import build_graph
+
+ net = Network()
+ nodes = ["S"] + [f"M{i}" for i in range(5)] + ["T"]
+ for node in nodes:
+ net.add_node(Node(node))
+
+ capacities = [10, 20, 30, 40, 50]
+ for i, cap in enumerate(capacities):
+ net.add_link(Link("S", f"M{i}", capacity=cap, cost=1.0))
+ net.add_link(Link(f"M{i}", "T", capacity=cap, cost=1.0))
+
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+ graph_handle, multidigraph, _, node_mapper = build_graph(net)
+
+ src_id = node_mapper.to_id("S")
+ dst_id = node_mapper.to_id("T")
+
+ # Test with ECMP (3 LSPs)
+ config_ecmp = netgraph_core.FlowPolicyConfig()
+ config_ecmp.path_alg = netgraph_core.PathAlg.SPF
+ config_ecmp.flow_placement = netgraph_core.FlowPlacement.EQUAL_BALANCED
+ config_ecmp.selection = netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config_ecmp.multipath = False
+ config_ecmp.min_flow_count = 3
+ config_ecmp.max_flow_count = 3
+ config_ecmp.reoptimize_flows_on_each_placement = True
+
+ policy_ecmp = netgraph_core.FlowPolicy(algs, graph_handle, config_ecmp)
+ fg_ecmp = netgraph_core.FlowGraph(multidigraph)
+ placed_ecmp, _ = policy_ecmp.place_demand(
+ fg_ecmp, src_id, dst_id, flowClass=0, volume=500.0
+ )
+
+ # Test with WCMP (3 LSPs)
+ config_wcmp = netgraph_core.FlowPolicyConfig()
+ config_wcmp.path_alg = netgraph_core.PathAlg.SPF
+ config_wcmp.flow_placement = netgraph_core.FlowPlacement.PROPORTIONAL
+ config_wcmp.selection = netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config_wcmp.multipath = False
+ config_wcmp.min_flow_count = 3
+ config_wcmp.max_flow_count = 3
+ config_wcmp.reoptimize_flows_on_each_placement = True
+
+ policy_wcmp = netgraph_core.FlowPolicy(algs, graph_handle, config_wcmp)
+ fg_wcmp = netgraph_core.FlowGraph(multidigraph)
+ placed_wcmp, _ = policy_wcmp.place_demand(
+ fg_wcmp, src_id, dst_id, flowClass=0, volume=500.0
+ )
+
+        # ECMP: 3 LSPs on top 3 paths (50, 40, 30) with the equal-split constraint: 3 × 30 = 90
+ assert placed_ecmp == pytest.approx(90.0, abs=1e-3)
+
+ # WCMP: 3 LSPs on top 3 paths with proportional splitting: 50 + 40 + 30 = 120
+ assert placed_wcmp == pytest.approx(120.0, abs=1e-3)
+
+ # WCMP should achieve better utilization than ECMP
+ assert placed_wcmp > placed_ecmp
+
+ # Verify the utilization ratio
+ utilization_ratio = placed_wcmp / placed_ecmp
+ assert utilization_ratio == pytest.approx(120.0 / 90.0, abs=0.01), (
+ f"Expected WCMP to achieve ~1.33x ECMP utilization, got {utilization_ratio}"
+ )
+
+ def test_2_wcmp_lsps_on_5_diverse_paths(self):
+ """With 2 WCMP LSPs, should fully utilize 2 highest-capacity paths."""
+ import netgraph_core
+
+ from ngraph.adapters.core import build_graph
+
+ net = Network()
+ nodes = ["S"] + [f"M{i}" for i in range(5)] + ["T"]
+ for node in nodes:
+ net.add_node(Node(node))
+
+ capacities = [10, 20, 30, 40, 50]
+ for i, cap in enumerate(capacities):
+ net.add_link(Link("S", f"M{i}", capacity=cap, cost=1.0))
+ net.add_link(Link(f"M{i}", "T", capacity=cap, cost=1.0))
+
+ backend = netgraph_core.Backend.cpu()
+ algs = netgraph_core.Algorithms(backend)
+ graph_handle, multidigraph, _, node_mapper = build_graph(net)
+
+ config = netgraph_core.FlowPolicyConfig()
+ config.path_alg = netgraph_core.PathAlg.SPF
+ config.flow_placement = netgraph_core.FlowPlacement.PROPORTIONAL
+ config.selection = netgraph_core.EdgeSelection(
+ multi_edge=False,
+ require_capacity=True,
+ tie_break=netgraph_core.EdgeTieBreak.PREFER_HIGHER_RESIDUAL,
+ )
+ config.multipath = False
+ config.min_flow_count = 2
+ config.max_flow_count = 2
+ config.reoptimize_flows_on_each_placement = True
+
+ policy = netgraph_core.FlowPolicy(algs, graph_handle, config)
+ fg = netgraph_core.FlowGraph(multidigraph)
+
+ src_id = node_mapper.to_id("S")
+ dst_id = node_mapper.to_id("T")
+
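+        # Demand (500) exceeds the top-2 path capacities (50 + 40 = 90), forcing saturation.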
+ placed, _ = policy.place_demand(fg, src_id, dst_id, flowClass=0, volume=500.0)
+
+ # WCMP: 2 LSPs on top 2 paths (50, 40) → 50 + 40 = 90
+ assert placed == pytest.approx(90.0, abs=1e-3), (
+ f"Expected 2 WCMP LSPs to fully utilize 2 highest paths (50+40=90), got {placed}"
+ )
+ assert policy.flow_count() == 2
+
+
+class TestEdgeCases:
+ """Test edge cases and boundary conditions."""
+
+ def test_single_path_ecmp_equals_wcmp(self):
+ """When there's only one path, ECMP and WCMP should behave identically."""
+ net = Network()
+ for name in ["A", "B", "C"]:
+ net.add_node(Node(name))
+
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "C", capacity=5.0, cost=1.0))
+
+ result_ecmp = max_flow(
+ net,
+ "A",
+ "C",
+ flow_placement=FlowPlacement.EQUAL_BALANCED,
+ shortest_path=True,
+ )
+
+ result_wcmp = max_flow(
+ net, "A", "C", flow_placement=FlowPlacement.PROPORTIONAL, shortest_path=True
+ )
+
+ # Both should be limited by bottleneck capacity (5.0)
+ assert result_ecmp[("A", "C")] == pytest.approx(5.0, abs=1e-9)
+ assert result_wcmp[("A", "C")] == pytest.approx(5.0, abs=1e-9)
+ assert result_ecmp[("A", "C")] == pytest.approx(
+ result_wcmp[("A", "C")], abs=1e-9
+ )
+
+ def test_balanced_parallel_paths_ecmp_equals_wcmp(self):
+ """When parallel paths have equal capacity, ECMP and WCMP should produce same results."""
+ net = Network()
+ for name in ["S", "A", "B", "T"]:
+ net.add_node(Node(name))
+
+ # Two paths with equal capacity
+ net.add_link(Link("S", "A", capacity=20.0, cost=1.0))
+ net.add_link(Link("A", "T", capacity=20.0, cost=1.0))
+ net.add_link(Link("S", "B", capacity=20.0, cost=1.0))
+ net.add_link(Link("B", "T", capacity=20.0, cost=1.0))
+
+ result_ecmp = max_flow(
+ net,
+ "S",
+ "T",
+ flow_placement=FlowPlacement.EQUAL_BALANCED,
+ shortest_path=True,
+ )
+
+ result_wcmp = max_flow(
+ net, "S", "T", flow_placement=FlowPlacement.PROPORTIONAL, shortest_path=True
+ )
+
+ # Both should achieve full utilization (40.0)
+ assert result_ecmp[("S", "T")] == pytest.approx(40.0, abs=1e-9)
+ assert result_wcmp[("S", "T")] == pytest.approx(40.0, abs=1e-9)
+
+ def test_zero_capacity_path_ignored(self):
+ """Paths with zero capacity should be ignored in both ECMP and WCMP."""
+ net = Network()
+ for name in ["S", "A", "B", "T"]:
+ net.add_node(Node(name))
+
+ # One path with capacity, one with zero capacity
+ net.add_link(Link("S", "A", capacity=10.0, cost=1.0))
+ net.add_link(Link("A", "T", capacity=10.0, cost=1.0))
+ net.add_link(Link("S", "B", capacity=0.0, cost=1.0))
+ net.add_link(Link("B", "T", capacity=0.0, cost=1.0))
+
+ result_ecmp = max_flow(
+ net,
+ "S",
+ "T",
+ flow_placement=FlowPlacement.EQUAL_BALANCED,
+ shortest_path=True,
+ )
+
+ result_wcmp = max_flow(
+ net, "S", "T", flow_placement=FlowPlacement.PROPORTIONAL, shortest_path=True
+ )
+
+ # Both should only use the path with capacity
+ assert result_ecmp[("S", "T")] == pytest.approx(10.0, abs=1e-9)
+ assert result_wcmp[("S", "T")] == pytest.approx(10.0, abs=1e-9)
diff --git a/tests/solver/test_helpers_smoke.py b/tests/solver/test_helpers_smoke.py
deleted file mode 100644
index 7b5e50b..0000000
--- a/tests/solver/test_helpers_smoke.py
+++ /dev/null
@@ -1,4 +0,0 @@
-def test_import_solver_helpers_module() -> None:
- import ngraph.solver.helpers as helpers
-
- assert hasattr(helpers, "__doc__")
diff --git a/tests/solver/test_maxflow_api.py b/tests/solver/test_maxflow_api.py
index 2b20fb4..2ad189c 100644
--- a/tests/solver/test_maxflow_api.py
+++ b/tests/solver/test_maxflow_api.py
@@ -12,6 +12,7 @@
import pytest
from ngraph.model.network import Link, Network, Node
+from ngraph.solver.maxflow import max_flow, max_flow_with_details, sensitivity_analysis
def _simple_network() -> Network:
@@ -61,7 +62,7 @@ def _triangle_network() -> Network:
def test_max_flow_combine_basic() -> None:
net = _triangle_network()
- result: Dict[Tuple[str, str], float] = net.max_flow("^A$", "^C$", mode="combine")
+ result: Dict[Tuple[str, str], float] = max_flow(net, "^A$", "^C$", mode="combine")
assert ("^A$", "^C$") in result
assert pytest.approx(result[("^A$", "^C$")], rel=0, abs=1e-9) == 2.0
@@ -78,7 +79,7 @@ def test_max_flow_pairwise_disjoint_groups() -> None:
net.add_link(Link("S2", "X", capacity=1.0))
net.add_link(Link("X", "T2", capacity=1.0))
- res = net.max_flow(r"^(S\d)$", r"^(T\d)$", mode="pairwise")
+ res = max_flow(net, r"^(S\d)$", r"^(T\d)$", mode="pairwise")
# All pairwise problems are solved independently on the same topology.
# Valid paths exist for all pairs with the following capacities.
@@ -91,89 +92,45 @@ def test_max_flow_pairwise_disjoint_groups() -> None:
def test_overlap_groups_yield_zero_flow() -> None:
net = _simple_network()
# Selecting the same node as both source and sink should yield zero
- res = net.max_flow("^S$", "^S$", mode="combine")
+ res = max_flow(net, "^S$", "^S$", mode="combine")
assert pytest.approx(res[("^S$", "^S$")], rel=0, abs=1e-9) == 0.0
def test_empty_selection_raises() -> None:
net = _simple_network()
with pytest.raises(ValueError):
- _ = net.max_flow("^Z$", "^T$")
+ _ = max_flow(net, "^Z$", "^T$")
with pytest.raises(ValueError):
- _ = net.max_flow("^S$", "^Z$")
+ _ = max_flow(net, "^S$", "^Z$")
def test_shortest_path_vs_full_max_flow() -> None:
- net = _simple_network()
- full = net.max_flow("^S$", "^T$", mode="combine", shortest_path=False)
- sp = net.max_flow("^S$", "^T$", mode="combine", shortest_path=True)
-
- # In this implementation, a single augmentation can place flow across all
- # equal-cost shortest paths; shortest_path matches full in this topology.
- assert pytest.approx(full[("^S$", "^T$")], rel=0, abs=1e-9) == 2.0
- assert pytest.approx(sp[("^S$", "^T$")], rel=0, abs=1e-9) == 2.0
+ """Test that shortest_path mode uses all equal-cost shortest paths.
+ This is a regression test for a critical bug that was fixed in NetGraph-Core.
+ The bug (flow_state.cpp line 233) caused shortest_path=True to break after
+ one DFS push, using only 1 of N parallel equal-cost paths instead of saturating
+ the entire equal-cost DAG.
-def test_max_flow_with_summary_total_matches() -> None:
+ This test ensures shortest_path=True correctly saturates all equal-cost paths
+ in the lowest-cost tier without going to higher-cost tiers.
+ """
net = _simple_network()
- res = net.max_flow_with_summary("^S$", "^T$", mode="combine")
- (flow, summary) = res[("^S$", "^T$")]
- assert pytest.approx(flow, rel=0, abs=1e-9) == 2.0
- assert pytest.approx(summary.total_flow, rel=0, abs=1e-9) == 2.0
- # Sanity on summary structure
- assert isinstance(summary.edge_flow, dict)
- assert isinstance(summary.residual_cap, dict)
+ full = max_flow(net, "^S$", "^T$", mode="combine", shortest_path=False)
+ sp = max_flow(net, "^S$", "^T$", mode="combine", shortest_path=True)
+ # Full max-flow should use both paths
+ assert pytest.approx(full[("^S$", "^T$")], rel=0, abs=1e-9) == 2.0
-def test_max_flow_with_graph_contains_pseudo_nodes() -> None:
- net = _simple_network()
- res = net.max_flow_with_graph("^S$", "^T$", mode="combine")
- flow, graph = res[("^S$", "^T$")]
- assert pytest.approx(flow, rel=0, abs=1e-9) == 2.0
- assert "source" in graph
- assert "sink" in graph
+ # shortest_path=True should use all equal-cost paths (both S->A->T and S->B->T)
+ assert pytest.approx(sp[("^S$", "^T$")], rel=0, abs=1e-9) == 2.0
-def test_max_flow_detailed_consistency() -> None:
+def test_max_flow_with_details_total_matches() -> None:
net = _simple_network()
- res = net.max_flow_detailed("^S$", "^T$", mode="combine")
- flow, summary, graph = res[("^S$", "^T$")]
- assert pytest.approx(flow, rel=0, abs=1e-9) == 2.0
+ res = max_flow_with_details(net, "^S$", "^T$", mode="combine")
+ summary = res[("^S$", "^T$")]
assert pytest.approx(summary.total_flow, rel=0, abs=1e-9) == 2.0
- assert "source" in graph and "sink" in graph
-
-
-def test_saturated_edges_identification() -> None:
- # Single path S->A->T with unit capacities: both edges are saturated at max flow 1
- net = Network()
- for name in ["S", "A", "T"]:
- net.add_node(Node(name))
- net.add_link(Link("S", "A", capacity=1.0))
- net.add_link(Link("A", "T", capacity=1.0))
-
- sat = net.saturated_edges("^S$", "^T$", mode="combine")
- edges = sat[("^S$", "^T$")]
- # Expect at least one saturated edge along S->A or A->T
- assert any(u == "S" and v == "A" for (u, v, _k) in edges) or any(
- u == "A" and v == "T" for (u, v, _k) in edges
- )
-
-
-def test_sensitivity_analysis_keys_align_with_saturated_edges() -> None:
- # Wrapper should report sensitivity for saturated edges; values are numeric.
- net = Network()
- for name in ["S", "A", "T"]:
- net.add_node(Node(name))
- net.add_link(Link("S", "A", capacity=2.0))
- net.add_link(Link("A", "T", capacity=1.0))
-
- sens = net.sensitivity_analysis("^S$", "^T$", mode="combine", change_amount=1.0)
- delta_by_edge = sens[("^S$", "^T$")]
- assert delta_by_edge, "Expected sensitivity results on saturated edges"
-
- sat = net.saturated_edges("^S$", "^T$", mode="combine")[("^S$", "^T$")]
- assert set((u, v, k) for (u, v, k) in sat) == set(delta_by_edge.keys())
- assert all(isinstance(delta, (int, float)) for delta in delta_by_edge.values())
def test_network_dc_to_dc_reverse_edge_first_hop() -> None:
@@ -196,6 +153,59 @@ def test_network_dc_to_dc_reverse_edge_first_hop() -> None:
net.add_link(Link("A/leaf", "B/leaf", capacity=10.0, cost=1.0))
net.add_link(Link("B/leaf", "B/dc", capacity=10.0, cost=1.0))
- res = net.max_flow(r"^A/dc$", r"^B/dc$", mode="combine")
+ res = max_flow(net, r"^A/dc$", r"^B/dc$", mode="combine")
assert (r"^A/dc$", r"^B/dc$") in res
assert res[(r"^A/dc$", r"^B/dc$")] == 10.0
+
+
+def _two_cost_tier_network() -> Network:
+ """Build a network with two cost tiers for shortest_path testing.
+
+ Topology: S -> A -> T (cap 10, cost 1+1=2)
+ S -> B -> T (cap 5, cost 2+2=4)
+
+ With shortest_path=False: uses both paths, total flow = 15
+ With shortest_path=True: uses only S->A->T path, total flow = 10
+ """
+ net = Network()
+ for name in ["S", "A", "B", "T"]:
+ net.add_node(Node(name))
+
+ net.add_link(Link("S", "A", capacity=10.0, cost=1.0))
+ net.add_link(Link("A", "T", capacity=10.0, cost=1.0))
+ net.add_link(Link("S", "B", capacity=5.0, cost=2.0))
+ net.add_link(Link("B", "T", capacity=5.0, cost=2.0))
+ return net
+
+
+def test_sensitivity_shortest_path_vs_full_max_flow() -> None:
+ """Test that shortest_path parameter is forwarded to sensitivity analysis.
+
+ This verifies the fix for an issue where shortest_path was accepted but
+ never forwarded to the C++ backend.
+
+ With full max-flow, all 4 edges are critical.
+ With shortest_path=True, only S->A->T path edges are critical because
+ the S->B->T path is unused under ECMP routing.
+ """
+ net = _two_cost_tier_network()
+
+ # Full max-flow mode: all 4 edges should be critical
+ res_full = sensitivity_analysis(
+ net, "^S$", "^T$", mode="combine", shortest_path=False
+ )
+ assert ("^S$", "^T$") in res_full
+ assert len(res_full[("^S$", "^T$")]) == 4, "Full max-flow should report all 4 edges"
+
+ # Shortest-path mode: only 2 edges (S->A, A->T) should be critical
+ res_sp = sensitivity_analysis(net, "^S$", "^T$", mode="combine", shortest_path=True)
+ assert ("^S$", "^T$") in res_sp
+ assert len(res_sp[("^S$", "^T$")]) == 2, (
+ "Shortest-path mode should only report 2 edges"
+ )
+
+ # Verify the delta values (removing S->A or A->T forces traffic to S->B->T)
+ for link_id, delta in res_sp[("^S$", "^T$")].items():
+ assert pytest.approx(delta, rel=0, abs=1e-9) == 5.0, (
+ f"Edge {link_id} should have delta 5.0 (baseline 10 -> 5 via alternate path)"
+ )
diff --git a/tests/solver/test_maxflow_cache.py b/tests/solver/test_maxflow_cache.py
new file mode 100644
index 0000000..f31ba6f
--- /dev/null
+++ b/tests/solver/test_maxflow_cache.py
@@ -0,0 +1,372 @@
+"""Tests for max_flow caching and masking functionality.
+
+This module tests that the cached max_flow path correctly handles:
+- Disabled nodes (pre-computed in cache, applied via masks)
+- Disabled links (pre-computed in cache, applied via masks)
+- Combination of disabled topology and explicit exclusions
+- Consistency between cached and non-cached code paths
+
+These tests validate the fix for a bug where disabled_node_ids and
+disabled_link_ids were pre-computed in the cache but never applied
+when no explicit exclusions were provided.
+"""
+
+from __future__ import annotations
+
+import pytest
+
+from ngraph.model.network import Link, Network, Node
+from ngraph.solver.maxflow import (
+ build_maxflow_cache,
+ max_flow,
+ max_flow_with_details,
+ sensitivity_analysis,
+)
+
+
+def _diamond_network(
+ *,
+ disable_node_b: bool = False,
+ disable_link_a_b: bool = False,
+) -> Network:
+ """Build a diamond network with optional disabled components.
+
+ Topology:
+ A -> B (cap 5) -> D (cap 5) [path 1, cost 2]
+ A -> C (cap 3) -> D (cap 3) [path 2, cost 4]
+
+ With both paths enabled: max flow = 8 (5 via B + 3 via C)
+ With B disabled: max flow = 3 (only via C)
+ With A->B link disabled: max flow = 3 (only via C)
+
+ Args:
+ disable_node_b: If True, disable node B.
+ disable_link_a_b: If True, disable the A->B link.
+
+ Returns:
+ Network with configured topology.
+ """
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B", disabled=disable_node_b))
+ net.add_node(Node("C"))
+ net.add_node(Node("D"))
+
+ net.add_link(Link("A", "B", capacity=5.0, cost=1.0, disabled=disable_link_a_b))
+ net.add_link(Link("B", "D", capacity=5.0, cost=1.0))
+ net.add_link(Link("A", "C", capacity=3.0, cost=2.0))
+ net.add_link(Link("C", "D", capacity=3.0, cost=2.0))
+
+ return net
+
+
+def _linear_network(*, disable_middle: bool = False) -> Network:
+ """Build a linear network A -> B -> C with optional disabled middle node.
+
+ Args:
+ disable_middle: If True, disable node B.
+
+ Returns:
+ Network with linear topology.
+ """
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B", disabled=disable_middle))
+ net.add_node(Node("C"))
+
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "C", capacity=10.0, cost=1.0))
+
+ return net
+
+
+class TestCachedMaxFlowDisabledNodes:
+ """Tests for disabled node masking in cached max_flow path."""
+
+ def test_disabled_node_blocks_path_cached(self) -> None:
+ """Disabled node should block flow through it when using cache."""
+ net = _diamond_network(disable_node_b=True)
+
+ # Build cache (disabled node B is pre-computed)
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+
+ # Verify cache captured the disabled node
+ assert len(cache.disabled_node_ids) == 1
+
+ # Call max_flow with cache (no explicit exclusions)
+ result = max_flow(net, "^A$", "^D$", mode="combine", _cache=cache)
+
+ # Flow should only go through C (capacity 3), not B
+ assert pytest.approx(result[("^A$", "^D$")], abs=1e-9) == 3.0
+
+ def test_disabled_node_cached_vs_uncached_consistency(self) -> None:
+ """Cached and non-cached paths should produce identical results."""
+ net = _diamond_network(disable_node_b=True)
+
+ # Non-cached path
+ result_uncached = max_flow(net, "^A$", "^D$", mode="combine")
+
+ # Cached path
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+ result_cached = max_flow(net, "^A$", "^D$", mode="combine", _cache=cache)
+
+ assert result_cached == result_uncached
+
+ def test_disabled_node_in_only_path_yields_zero_flow(self) -> None:
+ """Disabling the only path's middle node should yield zero flow."""
+ net = _linear_network(disable_middle=True)
+
+ cache = build_maxflow_cache(net, "^A$", "^C$", mode="combine")
+ result = max_flow(net, "^A$", "^C$", mode="combine", _cache=cache)
+
+ assert result[("^A$", "^C$")] == 0.0
+
+ def test_max_flow_with_details_disabled_node_cached(self) -> None:
+ """max_flow_with_details should respect disabled nodes via cache."""
+ net = _diamond_network(disable_node_b=True)
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+ result = max_flow_with_details(net, "^A$", "^D$", mode="combine", _cache=cache)
+
+ summary = result[("^A$", "^D$")]
+ assert pytest.approx(summary.total_flow, abs=1e-9) == 3.0
+
+ # Cost distribution should only show cost 4 path (via C)
+ assert len(summary.cost_distribution) == 1
+ assert 4.0 in summary.cost_distribution
+
+ def test_sensitivity_analysis_disabled_node_cached(self) -> None:
+ """sensitivity_analysis should respect disabled nodes via cache."""
+ net = _diamond_network(disable_node_b=True)
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+ result = sensitivity_analysis(net, "^A$", "^D$", mode="combine", _cache=cache)
+
+ sens = result[("^A$", "^D$")]
+
+ # Should only report edges on the C path (A->C, C->D)
+ # The B path edges should not appear as they're masked out
+ for link_id in sens:
+ assert "A[" not in link_id or "B]" not in link_id # No A->B link
+ assert "B[" not in link_id # No B->D link
+
+
+class TestCachedMaxFlowDisabledLinks:
+ """Tests for disabled link masking in cached max_flow path."""
+
+ def test_disabled_link_blocks_path_cached(self) -> None:
+ """Disabled link should block flow through it when using cache."""
+ net = _diamond_network(disable_link_a_b=True)
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+
+ # Verify cache captured the disabled link
+ assert len(cache.disabled_link_ids) == 1
+
+ result = max_flow(net, "^A$", "^D$", mode="combine", _cache=cache)
+
+ # Flow should only go through C (capacity 3)
+ assert pytest.approx(result[("^A$", "^D$")], abs=1e-9) == 3.0
+
+ def test_disabled_link_cached_vs_uncached_consistency(self) -> None:
+ """Cached and non-cached paths should produce identical results."""
+ net = _diamond_network(disable_link_a_b=True)
+
+ result_uncached = max_flow(net, "^A$", "^D$", mode="combine")
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+ result_cached = max_flow(net, "^A$", "^D$", mode="combine", _cache=cache)
+
+ assert result_cached == result_uncached
+
+ def test_disabled_link_in_only_path_yields_zero_flow(self) -> None:
+ """Disabling the only link should yield zero flow."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_link(Link("A", "B", capacity=10.0, disabled=True))
+
+ cache = build_maxflow_cache(net, "^A$", "^B$", mode="combine")
+ result = max_flow(net, "^A$", "^B$", mode="combine", _cache=cache)
+
+ assert result[("^A$", "^B$")] == 0.0
+
+ def test_max_flow_with_details_disabled_link_cached(self) -> None:
+ """max_flow_with_details should respect disabled links via cache."""
+ net = _diamond_network(disable_link_a_b=True)
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+ result = max_flow_with_details(net, "^A$", "^D$", mode="combine", _cache=cache)
+
+ summary = result[("^A$", "^D$")]
+ assert pytest.approx(summary.total_flow, abs=1e-9) == 3.0
+
+
+class TestCachedMaxFlowCombinedExclusions:
+ """Tests for combining disabled topology with explicit exclusions."""
+
+ def test_disabled_node_plus_explicit_node_exclusion(self) -> None:
+ """Both disabled and explicitly excluded nodes should be masked."""
+ net = _diamond_network(disable_node_b=True) # B disabled
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+
+ # Also exclude C explicitly - should result in zero flow
+ result = max_flow(
+ net, "^A$", "^D$", mode="combine", _cache=cache, excluded_nodes={"C"}
+ )
+
+ assert result[("^A$", "^D$")] == 0.0
+
+ def test_disabled_link_plus_explicit_link_exclusion(self) -> None:
+ """Both disabled and explicitly excluded links should be masked."""
+ net = _diamond_network(disable_link_a_b=True) # A->B disabled
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+
+ # Get the A->C link ID to exclude it explicitly
+ a_c_link_id = None
+ for link_id, link in net.links.items():
+ if link.source == "A" and link.target == "C":
+ a_c_link_id = link_id
+ break
+
+ assert a_c_link_id is not None
+
+ result = max_flow(
+ net,
+ "^A$",
+ "^D$",
+ mode="combine",
+ _cache=cache,
+ excluded_links={a_c_link_id},
+ )
+
+ assert result[("^A$", "^D$")] == 0.0
+
+ def test_explicit_exclusion_without_disabled_topology(self) -> None:
+ """Explicit exclusions should work even when no disabled topology."""
+ net = _diamond_network() # Nothing disabled
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+
+ # Verify no disabled components in cache
+ assert len(cache.disabled_node_ids) == 0
+ assert len(cache.disabled_link_ids) == 0
+
+ # Exclude node B explicitly
+ result = max_flow(
+ net, "^A$", "^D$", mode="combine", _cache=cache, excluded_nodes={"B"}
+ )
+
+ # Should only flow through C
+ assert pytest.approx(result[("^A$", "^D$")], abs=1e-9) == 3.0
+
+
+class TestCachedMaxFlowNoDisabledTopology:
+ """Tests for cache behavior when no topology is disabled."""
+
+ def test_no_disabled_topology_full_flow(self) -> None:
+ """With no disabled components, full flow should be achieved."""
+ net = _diamond_network() # Nothing disabled
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+
+ assert len(cache.disabled_node_ids) == 0
+ assert len(cache.disabled_link_ids) == 0
+
+ result = max_flow(net, "^A$", "^D$", mode="combine", _cache=cache)
+
+ # Full flow through both paths: 5 via B + 3 via C = 8
+ assert pytest.approx(result[("^A$", "^D$")], abs=1e-9) == 8.0
+
+ def test_cached_vs_uncached_no_disabled(self) -> None:
+ """Cached and non-cached should match when nothing is disabled."""
+ net = _diamond_network()
+
+ result_uncached = max_flow(net, "^A$", "^D$", mode="combine")
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+ result_cached = max_flow(net, "^A$", "^D$", mode="combine", _cache=cache)
+
+ assert result_cached == result_uncached
+
+
+class TestCachedMaxFlowPairwiseMode:
+ """Tests for cached max_flow in pairwise mode with disabled topology."""
+
+ def test_disabled_node_pairwise_mode_cached(self) -> None:
+ """Disabled node should be respected in pairwise mode with cache."""
+ net = Network()
+ net.add_node(Node("S1"))
+ net.add_node(Node("S2", disabled=True)) # Disabled source
+ net.add_node(Node("M"))
+ net.add_node(Node("T1"))
+ net.add_node(Node("T2"))
+
+ net.add_link(Link("S1", "M", capacity=5.0))
+ net.add_link(Link("S2", "M", capacity=5.0))
+ net.add_link(Link("M", "T1", capacity=5.0))
+ net.add_link(Link("M", "T2", capacity=5.0))
+
+ cache = build_maxflow_cache(net, r"^(S\d)$", r"^(T\d)$", mode="pairwise")
+ result = max_flow(net, r"^(S\d)$", r"^(T\d)$", mode="pairwise", _cache=cache)
+
+ # S1 -> T1 and S1 -> T2 should have flow
+ assert result[("S1", "T1")] == 5.0
+ assert result[("S1", "T2")] == 5.0
+
+ # S2 -> anything should be 0 (S2 is disabled)
+ assert result[("S2", "T1")] == 0.0
+ assert result[("S2", "T2")] == 0.0
+
+
+class TestBuildMaxflowCachePrecomputation:
+ """Tests for correct pre-computation of disabled IDs in cache."""
+
+ def test_cache_captures_disabled_node_ids(self) -> None:
+ """Cache should pre-compute disabled node IDs correctly."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B", disabled=True))
+ net.add_node(Node("C", disabled=True))
+ net.add_node(Node("D"))
+
+ net.add_link(Link("A", "B", capacity=1.0))
+ net.add_link(Link("B", "C", capacity=1.0))
+ net.add_link(Link("C", "D", capacity=1.0))
+ net.add_link(Link("A", "D", capacity=1.0))
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+
+ # Should have captured 2 disabled nodes (B and C)
+ assert len(cache.disabled_node_ids) == 2
+
+ def test_cache_captures_disabled_link_ids(self) -> None:
+ """Cache should pre-compute disabled link IDs correctly."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_node(Node("C"))
+
+ link1 = Link("A", "B", capacity=1.0, disabled=True)
+ link2 = Link("A", "C", capacity=1.0, disabled=True)
+ link3 = Link("B", "C", capacity=1.0)
+
+ net.add_link(link1)
+ net.add_link(link2)
+ net.add_link(link3)
+
+ cache = build_maxflow_cache(net, "^A$", "^C$", mode="combine")
+
+ # Should have captured 2 disabled links
+ assert len(cache.disabled_link_ids) == 2
+
+ def test_cache_empty_disabled_sets_when_nothing_disabled(self) -> None:
+ """Cache should have empty disabled sets when nothing is disabled."""
+ net = _diamond_network()
+
+ cache = build_maxflow_cache(net, "^A$", "^D$", mode="combine")
+
+ assert len(cache.disabled_node_ids) == 0
+ assert len(cache.disabled_link_ids) == 0
diff --git a/tests/solver/test_maxflow_cost_distribution.py b/tests/solver/test_maxflow_cost_distribution.py
new file mode 100644
index 0000000..9ff85bf
--- /dev/null
+++ b/tests/solver/test_maxflow_cost_distribution.py
@@ -0,0 +1,247 @@
+"""Tests for max_flow_with_details cost distribution validation.
+
+These tests ensure that cost_distribution values are correct, not just present.
+They validate the actual flow volumes at different path costs.
+"""
+
+from __future__ import annotations
+
+import pytest
+
+from ngraph.model.network import Link, Network, Node
+from ngraph.solver.maxflow import max_flow_with_details
+from ngraph.types.base import FlowPlacement
+
+
+def _diamond_network() -> Network:
+ """Build a diamond network with two paths of different costs.
+
+ Topology:
+ A -> B (cap 3, cost 1) -> D (cap 3, cost 1)
+ A -> C (cap 3, cost 2) -> D (cap 3, cost 2)
+
+ Total cost: path via B = 2, path via C = 4
+ Max flow = 6 (3 via B + 3 via C)
+ """
+ net = Network()
+ for name in ["A", "B", "C", "D"]:
+ net.add_node(Node(name))
+
+ net.add_link(Link("A", "B", capacity=3.0, cost=1.0))
+ net.add_link(Link("B", "D", capacity=3.0, cost=1.0))
+ net.add_link(Link("A", "C", capacity=3.0, cost=2.0))
+ net.add_link(Link("C", "D", capacity=3.0, cost=2.0))
+
+ return net
+
+
+def _parallel_paths_network() -> Network:
+ """Build network with multiple parallel paths at same cost.
+
+ Topology:
+ S -> A (cap 1, cost 1) -> T (cap 1, cost 1)
+ S -> B (cap 2, cost 1) -> T (cap 2, cost 1)
+
+ All paths have cost 2, max flow = 3
+ """
+ net = Network()
+ for name in ["S", "A", "B", "T"]:
+ net.add_node(Node(name))
+
+ net.add_link(Link("S", "A", capacity=1.0, cost=1.0))
+ net.add_link(Link("A", "T", capacity=1.0, cost=1.0))
+ net.add_link(Link("S", "B", capacity=2.0, cost=1.0))
+ net.add_link(Link("B", "T", capacity=2.0, cost=1.0))
+
+ return net
+
+
+def _three_tier_network() -> Network:
+ """Build network with three different cost tiers.
+
+ Topology:
+ S -> A (cap 1, cost 1) -> T (cap 1, cost 1) [total cost 2]
+ S -> B (cap 1, cost 2) -> T (cap 1, cost 2) [total cost 4]
+ S -> C (cap 1, cost 3) -> T (cap 1, cost 3) [total cost 6]
+
+ Max flow = 3 (1 at each cost tier)
+ """
+ net = Network()
+ for name in ["S", "A", "B", "C", "T"]:
+ net.add_node(Node(name))
+
+ net.add_link(Link("S", "A", capacity=1.0, cost=1.0))
+ net.add_link(Link("A", "T", capacity=1.0, cost=1.0))
+ net.add_link(Link("S", "B", capacity=1.0, cost=2.0))
+ net.add_link(Link("B", "T", capacity=1.0, cost=2.0))
+ net.add_link(Link("S", "C", capacity=1.0, cost=3.0))
+ net.add_link(Link("C", "T", capacity=1.0, cost=3.0))
+
+ return net
+
+
+def test_cost_distribution_two_paths_different_costs() -> None:
+ """Validate cost distribution with two paths of different costs."""
+ net = _diamond_network()
+ result = max_flow_with_details(net, "^A$", "^D$", mode="combine")
+
+ assert ("^A$", "^D$") in result
+ summary = result[("^A$", "^D$")]
+
+ # Total flow should be 6.0
+ assert pytest.approx(summary.total_flow, rel=0, abs=1e-9) == 6.0
+
+ # Cost distribution should show flow at two different costs
+ assert len(summary.cost_distribution) == 2
+
+ # 3 units at cost 2 (via B)
+ assert 2.0 in summary.cost_distribution
+ assert pytest.approx(summary.cost_distribution[2.0], rel=0, abs=1e-9) == 3.0
+
+ # 3 units at cost 4 (via C)
+ assert 4.0 in summary.cost_distribution
+ assert pytest.approx(summary.cost_distribution[4.0], rel=0, abs=1e-9) == 3.0
+
+ # Sum of cost distribution should equal total flow
+ total_from_dist = sum(summary.cost_distribution.values())
+ assert pytest.approx(total_from_dist, rel=0, abs=1e-9) == summary.total_flow
+
+
+def test_cost_distribution_parallel_paths_same_cost() -> None:
+ """Validate cost distribution when all paths have the same cost."""
+ net = _parallel_paths_network()
+ result = max_flow_with_details(net, "^S$", "^T$", mode="combine")
+
+ summary = result[("^S$", "^T$")]
+
+ # Total flow should be 3.0
+ assert pytest.approx(summary.total_flow, rel=0, abs=1e-9) == 3.0
+
+ # All flow at cost 2 (both paths have cost 1+1=2)
+ assert len(summary.cost_distribution) == 1
+ assert 2.0 in summary.cost_distribution
+ assert pytest.approx(summary.cost_distribution[2.0], rel=0, abs=1e-9) == 3.0
+
+
+def test_cost_distribution_three_tiers() -> None:
+ """Validate cost distribution with three different cost tiers."""
+ net = _three_tier_network()
+ result = max_flow_with_details(net, "^S$", "^T$", mode="combine")
+
+ summary = result[("^S$", "^T$")]
+
+ # Total flow should be 3.0
+ assert pytest.approx(summary.total_flow, rel=0, abs=1e-9) == 3.0
+
+ # Should have three different costs
+ assert len(summary.cost_distribution) == 3
+
+ # 1 unit at each cost tier
+ assert pytest.approx(summary.cost_distribution[2.0], rel=0, abs=1e-9) == 1.0
+ assert pytest.approx(summary.cost_distribution[4.0], rel=0, abs=1e-9) == 1.0
+ assert pytest.approx(summary.cost_distribution[6.0], rel=0, abs=1e-9) == 1.0
+
+
+def test_cost_distribution_shortest_path_mode() -> None:
+ """Validate cost distribution in shortest_path mode (only lowest cost tier)."""
+ net = _three_tier_network()
+ result = max_flow_with_details(
+ net, "^S$", "^T$", mode="combine", shortest_path=True
+ )
+
+ summary = result[("^S$", "^T$")]
+
+ # Should only use the lowest cost path
+ assert pytest.approx(summary.total_flow, rel=0, abs=1e-9) == 1.0
+
+ # Should have only one cost tier (the lowest)
+ assert len(summary.cost_distribution) == 1
+ assert 2.0 in summary.cost_distribution
+ assert pytest.approx(summary.cost_distribution[2.0], rel=0, abs=1e-9) == 1.0
+
+
+def test_cost_distribution_pairwise_mode() -> None:
+ """Validate cost distribution in pairwise mode."""
+ net = Network()
+ for name in ["S1", "S2", "M", "T1", "T2"]:
+ net.add_node(Node(name))
+
+ # S1 -> M -> T1: cost 2, capacity 2
+ net.add_link(Link("S1", "M", capacity=2.0, cost=1.0))
+ net.add_link(Link("M", "T1", capacity=2.0, cost=1.0))
+
+ # S2 -> M -> T2: cost 4, capacity 1
+ net.add_link(Link("S2", "M", capacity=1.0, cost=2.0))
+ net.add_link(Link("M", "T2", capacity=1.0, cost=2.0))
+
+    # Use a capture group in the regex to extract node names for the pairwise keys
+ result = max_flow_with_details(net, r"^(S\d)$", r"^(T\d)$", mode="pairwise")
+
+ # Check S1 -> T1
+ s1_t1 = result[("S1", "T1")]
+ assert pytest.approx(s1_t1.total_flow, rel=0, abs=1e-9) == 2.0
+ assert 2.0 in s1_t1.cost_distribution
+ assert pytest.approx(s1_t1.cost_distribution[2.0], rel=0, abs=1e-9) == 2.0
+
+ # Check S2 -> T2
+ s2_t2 = result[("S2", "T2")]
+ assert pytest.approx(s2_t2.total_flow, rel=0, abs=1e-9) == 1.0
+ assert 4.0 in s2_t2.cost_distribution
+ assert pytest.approx(s2_t2.cost_distribution[4.0], rel=0, abs=1e-9) == 1.0
+
+
+def test_cost_distribution_with_flow_placement_proportional() -> None:
+ """Validate cost distribution with proportional flow placement."""
+ net = _diamond_network()
+ result = max_flow_with_details(
+ net, "^A$", "^D$", mode="combine", flow_placement=FlowPlacement.PROPORTIONAL
+ )
+
+ summary = result[("^A$", "^D$")]
+
+ # Should still get correct total and distribution
+ assert pytest.approx(summary.total_flow, rel=0, abs=1e-9) == 6.0
+ assert len(summary.cost_distribution) == 2
+ assert pytest.approx(summary.cost_distribution[2.0], rel=0, abs=1e-9) == 3.0
+ assert pytest.approx(summary.cost_distribution[4.0], rel=0, abs=1e-9) == 3.0
+
+
+def test_cost_distribution_empty_when_no_flow() -> None:
+ """Validate cost distribution is empty when there's no flow."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ # No links - no flow possible
+
+ result = max_flow_with_details(net, "^A$", "^B$", mode="combine")
+ summary = result[("^A$", "^B$")]
+
+ assert summary.total_flow == 0.0
+ assert len(summary.cost_distribution) == 0
+
+
+def test_cost_distribution_weighted_average_latency() -> None:
+ """Validate cost distribution enables correct latency analysis."""
+ net = _diamond_network()
+ result = max_flow_with_details(net, "^A$", "^D$", mode="combine")
+ summary = result[("^A$", "^D$")]
+
+ # Calculate weighted average latency
+ total_flow = sum(summary.cost_distribution.values())
+ weighted_avg = (
+ sum(cost * flow for cost, flow in summary.cost_distribution.items())
+ / total_flow
+ )
+
+ # With equal flow on both paths (3 at cost 2, 3 at cost 4)
+ # weighted average = (2*3 + 4*3) / 6 = 18/6 = 3.0
+ assert pytest.approx(weighted_avg, rel=0, abs=1e-9) == 3.0
+
+ # Latency span
+ min_latency = min(summary.cost_distribution.keys())
+ max_latency = max(summary.cost_distribution.keys())
+ latency_span = max_latency - min_latency
+
+ assert min_latency == 2.0
+ assert max_latency == 4.0
+ assert latency_span == 2.0
diff --git a/tests/solver/test_maxflow_wrappers_edge_cases.py b/tests/solver/test_maxflow_wrappers_edge_cases.py
deleted file mode 100644
index c20c6ce..0000000
--- a/tests/solver/test_maxflow_wrappers_edge_cases.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""Edge-case coverage for solver-layer maxflow wrappers.
-
-Covers invalid modes, empty selections, overlapping groups, pairwise empties,
-and disabled-node handling using a minimal test context.
-"""
-
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Dict, List
-
-import pytest
-
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.solver import maxflow as sol
-
-
-@dataclass
-class _NodeStub:
- name: str
- disabled: bool = False
-
-
-class _Context:
- """Minimal context with controllable groups and graph.
-
- select_map: mapping from path -> dict[label -> list[_NodeStub]]
- edges: list of (u, v, capacity)
- nodes: iterable of node names to pre-create in graph
- """
-
- def __init__(
- self,
- select_map: Dict[str, Dict[str, List[_NodeStub]]],
- nodes: List[str] | None = None,
- edges: List[tuple[str, str, float]] | None = None,
- ) -> None:
- self._select_map = select_map
- self._nodes = nodes or []
- self._edges = edges or []
-
- def select_node_groups_by_path(self, path: str) -> Dict[str, List[_NodeStub]]:
- return self._select_map.get(path, {})
-
- def to_strict_multidigraph(
- self, add_reverse: bool = True, *, compact: bool = False
- ) -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- for n in self._nodes:
- if n not in g:
- g.add_node(n)
- for u, v, cap in self._edges:
- # forward
- g.add_edge(u, v, capacity=cap, cost=1)
- if add_reverse:
- # reverse with same capacity to match model defaults
- g.add_edge(v, u, capacity=cap, cost=1)
- return g
-
-
-def _base_graph(nodes: list[str], edges: list[tuple[str, str, float]]) -> _Context:
- return _Context(select_map={}, nodes=nodes, edges=edges)
-
-
-def test_invalid_mode_raises_all_wrappers() -> None:
- ctx = _base_graph(["A", "B"], [("A", "B", 1.0)])
- # Non-empty groups to pass initial validation
- s_groups = {"S": [_NodeStub("A")]}
- t_groups = {"T": [_NodeStub("B")]}
- ctx._select_map = {"S": s_groups, "T": t_groups}
-
- with pytest.raises(ValueError):
- sol.max_flow(ctx, "S", "T", mode="invalid")
- with pytest.raises(ValueError):
- sol.max_flow_with_summary(ctx, "S", "T", mode="invalid")
- with pytest.raises(ValueError):
- sol.max_flow_with_graph(ctx, "S", "T", mode="invalid")
- with pytest.raises(ValueError):
- sol.max_flow_detailed(ctx, "S", "T", mode="invalid")
- with pytest.raises(ValueError):
- sol.saturated_edges(ctx, "S", "T", mode="invalid")
- with pytest.raises(ValueError):
- sol.sensitivity_analysis(ctx, "S", "T", mode="invalid")
-
-
-def test_combine_empty_groups_return_zero_or_empty() -> None:
- # Provide labels with empty lists to avoid early ValueError and exercise empty-branch
- ctx = _base_graph(["A", "B"], [("A", "B", 1.0)])
- ctx._select_map = {"S": {"S": []}, "T": {"T": []}}
-
- assert sol.max_flow(ctx, "S", "T", mode="combine") == {("S", "T"): 0.0}
- flow, graph = sol.max_flow_with_graph(ctx, "S", "T", mode="combine")[("S", "T")]
- assert flow == 0.0 and isinstance(graph, StrictMultiDiGraph)
- flow, summary = sol.max_flow_with_summary(ctx, "S", "T", mode="combine")[("S", "T")]
- assert flow == 0.0 and summary.total_flow == 0.0
- flow, summary, graph = sol.max_flow_detailed(ctx, "S", "T", mode="combine")[
- ("S", "T")
- ]
- assert (
- flow == 0.0
- and summary.total_flow == 0.0
- and isinstance(graph, StrictMultiDiGraph)
- )
- assert sol.saturated_edges(ctx, "S", "T", mode="combine") == {("S", "T"): []}
- assert sol.sensitivity_analysis(ctx, "S", "T", mode="combine") == {("S", "T"): {}}
-
-
-def test_combine_overlap_groups_yield_zero_or_empty() -> None:
- # Overlap: same node in both groups
- n = _NodeStub("X")
- ctx = _base_graph(["X"], [])
- ctx._select_map = {"S": {"G1": [n]}, "T": {"G2": [n]}}
-
- assert sol.max_flow(ctx, "S", "T", mode="combine") == {("G1", "G2"): 0.0}
- flow, graph = sol.max_flow_with_graph(ctx, "S", "T", mode="combine")[("G1", "G2")]
- assert flow == 0.0 and isinstance(graph, StrictMultiDiGraph)
- flow, summary = sol.max_flow_with_summary(ctx, "S", "T", mode="combine")[
- ("G1", "G2")
- ]
- assert flow == 0.0 and summary.total_flow == 0.0
- flow, summary, graph = sol.max_flow_detailed(ctx, "S", "T", mode="combine")[
- ("G1", "G2")
- ]
- assert (
- flow == 0.0
- and summary.total_flow == 0.0
- and isinstance(graph, StrictMultiDiGraph)
- )
- assert sol.saturated_edges(ctx, "S", "T", mode="combine") == {("G1", "G2"): []}
- assert sol.sensitivity_analysis(ctx, "S", "T", mode="combine") == {("G1", "G2"): {}}
-
-
-def test_pairwise_with_empty_and_overlap_entries() -> None:
- # Setup nodes and a single usable edge S2->T1
- ctx = _base_graph(["S2", "T1", "X"], [("S2", "T1", 5.0)])
- s1_empty: list[_NodeStub] = []
- s2 = [_NodeStub("S2")]
- s3_overlap = [_NodeStub("X")]
- t1 = [_NodeStub("T1")]
- t2_empty: list[_NodeStub] = []
- t3_overlap = [_NodeStub("X")]
- ctx._select_map = {
- "S": {"S1": s1_empty, "S2": s2, "S3": s3_overlap},
- "T": {"T1": t1, "T2": t2_empty, "T3": t3_overlap},
- }
-
- res = sol.max_flow(ctx, "S", "T", mode="pairwise")
- # Empty entries -> zero
- assert res[("S1", "T1")] == 0.0
- assert res[("S2", "T2")] == 0.0
- # Overlap -> zero
- assert res[("S3", "T3")] == 0.0
- # Valid pair uses the only available path
- assert res[("S2", "T1")] == 5.0
-
-
-def test_disabled_nodes_become_inactive_and_yield_zero() -> None:
- # Both groups non-empty but all nodes disabled -> helper returns 0.0
- s = _NodeStub("S", disabled=True)
- t = _NodeStub("T", disabled=True)
- ctx = _base_graph(["S", "T"], [("S", "T", 10.0)])
- ctx._select_map = {"S": {"S": [s]}, "T": {"T": [t]}}
-
- assert sol.max_flow(ctx, "S", "T", mode="combine") == {("S", "T"): 0.0}
- # Saturated/sensitivity also reduce to empty because no active nodes
- assert sol.saturated_edges(ctx, "S", "T", mode="combine") == {("S", "T"): []}
- assert sol.sensitivity_analysis(ctx, "S", "T", mode="combine") == {("S", "T"): {}}
diff --git a/tests/solver/test_paths.py b/tests/solver/test_paths.py
new file mode 100644
index 0000000..68f6fb9
--- /dev/null
+++ b/tests/solver/test_paths.py
@@ -0,0 +1,398 @@
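+"""Tests for the solver path APIs.
+
+Covers shortest_paths, shortest_path_costs, and k_shortest_paths across
+combine/pairwise modes, node/link exclusions, and error handling.
+"""
+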
+import pytest
+
+from ngraph.model.network import Link, Network, Node
+from ngraph.solver.paths import (
+ k_shortest_paths,
+ shortest_path_costs,
+ shortest_paths,
+)
+from ngraph.types.base import EdgeSelect
+
+
+def test_shortest_paths_simple():
+ # Create a simple network
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+
+ # Test shortest_paths
+ results = shortest_paths(net, "A", "B")
+ assert ("A", "B") in results
+ paths = results[("A", "B")]
+ assert len(paths) == 1
+ p = paths[0]
+ assert p.cost == 1.0
+ assert p.nodes_seq == ("A", "B")
+ assert len(p.edges) == 1
+
+ # Test shortest_path_costs
+ costs = shortest_path_costs(net, "A", "B")
+ assert costs[("A", "B")] == 1.0
+
+
+def test_shortest_paths_no_path():
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ # No link
+
+ results = shortest_paths(net, "A", "B")
+ assert ("A", "B") in results
+ assert len(results[("A", "B")]) == 0
+
+ costs = shortest_path_costs(net, "A", "B")
+ assert costs[("A", "B")] == float("inf")
+
+
+def test_shortest_paths_mode_pairwise():
+ """Test pairwise mode with multiple source/sink groups."""
+ net = Network()
+ net.add_node(Node("A", attrs={"group": "src"}))
+ net.add_node(Node("B", attrs={"group": "src"}))
+ net.add_node(Node("C", attrs={"group": "dst"}))
+ net.add_node(Node("D", attrs={"group": "dst"}))
+ net.add_link(Link("A", "C", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "D", capacity=10.0, cost=2.0))
+
+ results = shortest_paths(net, "attr:group", "attr:group", mode="pairwise")
+    # In pairwise mode with attr:group, we get entries for src->src, src->dst,
+    # dst->src, and dst->dst, but only src->dst should have paths.
+ assert ("src", "dst") in results
+ paths = results[("src", "dst")]
+ assert len(paths) > 0
+ # Should return the shortest path (A->C with cost 1.0)
+ assert min(p.cost for p in paths) == 1.0
+
+
+def test_shortest_paths_mode_combine():
+ """Test combine mode aggregating all sources and sinks."""
+ net = Network()
+ net.add_node(Node("A", attrs={"type": "src"}))
+ net.add_node(Node("B", attrs={"type": "src"}))
+ net.add_node(Node("C", attrs={"type": "dst"}))
+ net.add_link(Link("A", "C", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "C", capacity=10.0, cost=3.0))
+
+ # Use regex to select src vs dst nodes
+ results = shortest_paths(net, "^[AB]$", "^C$", mode="combine")
+ # In combine mode, we get one aggregated label
+ assert len(results) == 1
+ label = ("^[AB]$", "^C$")
+ assert label in results
+ paths = results[label]
+ assert len(paths) > 0
+ assert min(p.cost for p in paths) == 1.0
+
+
+def test_shortest_path_costs_mode_pairwise():
+ """Test shortest_path_costs with pairwise mode."""
+ net = Network()
+ net.add_node(Node("A", attrs={"group": "src"}))
+ net.add_node(Node("B", attrs={"group": "src"}))
+ net.add_node(Node("C", attrs={"group": "dst"}))
+ net.add_link(Link("A", "C", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "C", capacity=10.0, cost=2.0))
+
+ costs = shortest_path_costs(net, "attr:group", "attr:group", mode="pairwise")
+ assert ("src", "dst") in costs
+ assert costs[("src", "dst")] == 1.0
+
+
+def test_shortest_path_costs_mode_combine():
+ """Test shortest_path_costs with combine mode."""
+ net = Network()
+ net.add_node(Node("A", attrs={"type": "src"}))
+ net.add_node(Node("B", attrs={"type": "src"}))
+ net.add_node(Node("C", attrs={"type": "dst"}))
+ net.add_link(Link("A", "C", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "C", capacity=10.0, cost=3.0))
+
+ # Use regex to select src vs dst nodes
+ costs = shortest_path_costs(net, "^[AB]$", "^C$", mode="combine")
+ assert len(costs) == 1
+ label = ("^[AB]$", "^C$")
+ assert label in costs
+ assert costs[label] == 1.0
+
+
+def test_shortest_paths_invalid_mode():
+ """Test error handling for invalid mode."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+
+ with pytest.raises(
+ ValueError, match="Invalid mode.*Must be 'combine' or 'pairwise'"
+ ):
+ shortest_paths(net, "A", "B", mode="invalid")
+
+ with pytest.raises(
+ ValueError, match="Invalid mode.*Must be 'combine' or 'pairwise'"
+ ):
+ shortest_path_costs(net, "A", "B", mode="invalid")
+
+
+def test_shortest_paths_no_source_match():
+ """Test error handling when no source nodes match."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+
+ with pytest.raises(ValueError, match="No source nodes found matching"):
+ shortest_paths(net, "nonexistent", "B")
+
+ with pytest.raises(ValueError, match="No source nodes found matching"):
+ shortest_path_costs(net, "nonexistent", "B")
+
+
+def test_shortest_paths_no_sink_match():
+ """Test error handling when no sink nodes match."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+
+ with pytest.raises(ValueError, match="No sink nodes found matching"):
+ shortest_paths(net, "A", "nonexistent")
+
+ with pytest.raises(ValueError, match="No sink nodes found matching"):
+ shortest_path_costs(net, "A", "nonexistent")
+
+
+def test_shortest_paths_excluded_nodes():
+ """Test shortest paths with excluded nodes."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_node(Node("C"))
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "C", capacity=10.0, cost=1.0))
+ net.add_link(Link("A", "C", capacity=10.0, cost=10.0))
+
+ # Without exclusion, should go through B
+ results = shortest_paths(net, "A", "C")
+ paths = results[("A", "C")]
+ assert len(paths) > 0
+ assert paths[0].cost == 2.0
+
+ # Exclude B, should take direct path
+ results_excluded = shortest_paths(net, "A", "C", excluded_nodes={"B"})
+ paths_excluded = results_excluded[("A", "C")]
+ assert len(paths_excluded) > 0
+ assert paths_excluded[0].cost == 10.0
+
+
+def test_shortest_paths_excluded_links():
+ """Test shortest paths with excluded links."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_node(Node("C"))
+ link1 = Link("A", "B", capacity=10.0, cost=1.0)
+ link2 = Link("B", "C", capacity=10.0, cost=1.0)
+ link3 = Link("A", "C", capacity=10.0, cost=10.0)
+ net.add_link(link1)
+ net.add_link(link2)
+ net.add_link(link3)
+
+ # Exclude link1, should take direct path
+ results = shortest_paths(net, "A", "C", excluded_links={link1.id})
+ paths = results[("A", "C")]
+ assert len(paths) > 0
+ assert paths[0].cost == 10.0
+
+
+def test_shortest_paths_edge_select_single():
+ """Test shortest paths with SINGLE_MIN_COST edge selection."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ # Add multiple parallel edges with same cost
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+ net.add_link(Link("A", "B", capacity=20.0, cost=1.0))
+
+ results = shortest_paths(net, "A", "B", edge_select=EdgeSelect.SINGLE_MIN_COST)
+ paths = results[("A", "B")]
+ assert len(paths) > 0
+ assert paths[0].cost == 1.0
+
+
+def test_shortest_paths_split_parallel_edges():
+ """Test shortest paths with split_parallel_edges=True."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_node(Node("C"))
+ # Create parallel edges
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+ net.add_link(Link("A", "B", capacity=20.0, cost=1.0))
+ net.add_link(Link("B", "C", capacity=10.0, cost=1.0))
+
+ # With split_parallel_edges, should expand parallel edges into distinct paths
+ results = shortest_paths(net, "A", "C", split_parallel_edges=True)
+ paths = results[("A", "C")]
+    # Parallel edges may expand into multiple paths; at least one must be found
+ assert len(paths) >= 1
+
+
+def test_shortest_paths_disabled_node():
+ """Test that disabled nodes are excluded from paths."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B", disabled=True))
+ net.add_node(Node("C"))
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "C", capacity=10.0, cost=1.0))
+ net.add_link(Link("A", "C", capacity=10.0, cost=10.0))
+
+ # Should take direct path, avoiding disabled node B
+ results = shortest_paths(net, "A", "C")
+ paths = results[("A", "C")]
+ assert len(paths) > 0
+ assert paths[0].cost == 10.0
+ assert "B" not in paths[0].nodes
+
+
+def test_shortest_paths_overlapping_src_sink():
+ """Test that overlapping source/sink membership returns no path."""
+ net = Network()
+ net.add_node(Node("A", attrs={"group": "both"}))
+ net.add_node(Node("B", attrs={"group": "both"}))
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+
+ # Should return empty path list due to overlap
+ results = shortest_paths(net, "attr:group", "attr:group")
+ paths = results[("both", "both")]
+ assert len(paths) == 0
+
+ costs = shortest_path_costs(net, "attr:group", "attr:group")
+ assert costs[("both", "both")] == float("inf")
+
+
+def test_k_shortest_paths_basic():
+ """Test k_shortest_paths with a simple network."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_node(Node("C"))
+ net.add_node(Node("D"))
+ # Create multiple paths from A to D
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "D", capacity=10.0, cost=1.0))
+ net.add_link(Link("A", "C", capacity=10.0, cost=2.0))
+ net.add_link(Link("C", "D", capacity=10.0, cost=2.0))
+
+ results = k_shortest_paths(net, "A", "D", max_k=2, mode="pairwise")
+ assert ("A", "D") in results
+ paths = results[("A", "D")]
+ assert len(paths) >= 1
+ # Paths should be sorted by cost
+ if len(paths) > 1:
+ assert paths[0].cost <= paths[1].cost
+
+
+def test_k_shortest_paths_combine_mode():
+ """Test k_shortest_paths with combine mode."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+
+ # Use regex to select source and destination
+ results = k_shortest_paths(net, "^A$", "^B$", max_k=3, mode="combine")
+ label = ("^A$", "^B$")
+ assert label in results
+ paths = results[label]
+ assert len(paths) >= 1
+
+
+def test_k_shortest_paths_with_exclusions():
+ """Test k_shortest_paths with excluded nodes."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_node(Node("C"))
+ net.add_node(Node("D"))
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "D", capacity=10.0, cost=1.0))
+ net.add_link(Link("A", "C", capacity=10.0, cost=2.0))
+ net.add_link(Link("C", "D", capacity=10.0, cost=2.0))
+
+ # Exclude B, should only find path through C
+ results = k_shortest_paths(
+ net, "A", "D", max_k=2, mode="pairwise", excluded_nodes={"B"}
+ )
+ paths = results[("A", "D")]
+ assert len(paths) >= 1
+ # Verify B is not in any path
+ for path in paths:
+ assert "B" not in path.nodes
+
+
+def test_k_shortest_paths_max_path_cost_factor():
+ """Test k_shortest_paths with max_path_cost_factor."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_node(Node("C"))
+ net.add_node(Node("D"))
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+ net.add_link(Link("B", "D", capacity=10.0, cost=1.0))
+ net.add_link(Link("A", "C", capacity=10.0, cost=5.0))
+ net.add_link(Link("C", "D", capacity=10.0, cost=5.0))
+
+ # Only paths within 1.5x of the shortest should be returned
+ results = k_shortest_paths(
+ net, "A", "D", max_k=5, mode="pairwise", max_path_cost_factor=1.5
+ )
+ paths = results[("A", "D")]
+ # Shortest path is 2.0, so max allowed is 3.0
+ # Path through C is 10.0, should be excluded
+ for path in paths:
+ assert path.cost <= 3.0
+
+
+def test_k_shortest_paths_no_path():
+ """Test k_shortest_paths when no path exists."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ # No link
+
+ results = k_shortest_paths(net, "A", "B", max_k=3, mode="pairwise")
+ assert ("A", "B") in results
+ assert len(results[("A", "B")]) == 0
+
+
+def test_k_shortest_paths_invalid_mode():
+ """Test k_shortest_paths error handling for invalid mode."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+ net.add_link(Link("A", "B", capacity=10.0, cost=1.0))
+
+ with pytest.raises(
+ ValueError, match="Invalid mode.*Must be 'combine' or 'pairwise'"
+ ):
+ k_shortest_paths(net, "A", "B", max_k=3, mode="invalid")
+
+
+def test_k_shortest_paths_no_source_match():
+ """Test k_shortest_paths error handling when no source nodes match."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+
+ with pytest.raises(ValueError, match="No source nodes found matching"):
+ k_shortest_paths(net, "nonexistent", "B", max_k=3)
+
+
+def test_k_shortest_paths_no_sink_match():
+ """Test k_shortest_paths error handling when no sink nodes match."""
+ net = Network()
+ net.add_node(Node("A"))
+ net.add_node(Node("B"))
+
+ with pytest.raises(ValueError, match="No sink nodes found matching"):
+ k_shortest_paths(net, "A", "nonexistent", max_k=3)
diff --git a/tests/solver/test_paths_behavior.py b/tests/solver/test_paths_behavior.py
deleted file mode 100644
index d7ba363..0000000
--- a/tests/solver/test_paths_behavior.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from __future__ import annotations
-
-import math
-from dataclasses import dataclass
-from typing import Dict, List
-
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.solver import paths as sol_paths
-
-
-@dataclass
-class _Node:
- name: str
- disabled: bool = False
-
-
-class _Ctx:
- def __init__(
- self,
- select_map: Dict[str, Dict[str, List[_Node]]],
- nodes: List[str],
- edges: List[tuple[str, str, float, float]],
- ) -> None:
- self._select_map = select_map
- self._nodes = nodes
- self._edges = edges
-
- def select_node_groups_by_path(self, path: str) -> Dict[str, List[_Node]]:
- return self._select_map.get(path, {})
-
- def to_strict_multidigraph(
- self, add_reverse: bool = True, *, compact: bool = False
- ) -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- for n in self._nodes:
- if n not in g:
- g.add_node(n)
- for u, v, cap, cost in self._edges:
- g.add_edge(u, v, capacity=cap, cost=cost)
- if add_reverse:
- g.add_edge(v, u, capacity=cap, cost=cost)
- return g
-
-
-def test_combine_mode_respects_overlap_semantics() -> None:
- # When overlap exists between active src and dst groups, result must be inf/empty
- nodes = ["X"]
- edges: List[tuple[str, str, float, float]] = []
- ctx = _Ctx({"S": {"G": [_Node("X")]}, "T": {"H": [_Node("X")]}}, nodes, edges)
- costs = sol_paths.shortest_path_costs(ctx, "S", "T", mode="combine")
- assert math.isinf(costs[("G", "H")])
- paths = sol_paths.shortest_paths(ctx, "S", "T", mode="combine")[("G", "H")]
- assert paths == []
diff --git a/tests/solver/test_paths_cost_thresholds.py b/tests/solver/test_paths_cost_thresholds.py
deleted file mode 100644
index d48eee1..0000000
--- a/tests/solver/test_paths_cost_thresholds.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Dict, List
-
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.solver import paths as sol_paths
-
-
-@dataclass
-class _Node:
- name: str
- disabled: bool = False
-
-
-class _Ctx:
- def __init__(
- self,
- select_map: Dict[str, Dict[str, List[_Node]]],
- nodes: List[str],
- edges: List[tuple[str, str, float, float]],
- ) -> None:
- self._select_map = select_map
- self._nodes = nodes
- self._edges = edges
-
- def select_node_groups_by_path(self, path: str) -> Dict[str, List[_Node]]:
- return self._select_map.get(path, {})
-
- def to_strict_multidigraph(
- self, add_reverse: bool = True, *, compact: bool = False
- ) -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- for n in self._nodes:
- if n not in g:
- g.add_node(n)
- for u, v, cap, cost in self._edges:
- g.add_edge(u, v, capacity=cap, cost=cost)
- if add_reverse:
- g.add_edge(v, u, capacity=cap, cost=cost)
- return g
-
-
-def test_k_shortest_paths_max_path_cost_factor_filters_worse_paths() -> None:
- nodes = ["S", "A", "B", "T"]
- edges = [
- ("S", "A", 10.0, 1.0),
- ("A", "T", 10.0, 1.0), # cost 2
- ("S", "B", 10.0, 1.0),
- ("B", "T", 10.0, 2.0), # cost 3 (filtered out)
- ]
- ctx = _Ctx(
- {
- "S": {"SRC": [_Node("S")]},
- "T": {"DST": [_Node("T")]},
- },
- nodes,
- edges,
- )
- res = sol_paths.k_shortest_paths(
- ctx,
- "S",
- "T",
- max_k=5,
- max_path_cost_factor=1.0, # keep only best-cost paths
- )
- paths = res[("SRC", "DST")]
- assert all(p.cost == 2.0 for p in paths)
diff --git a/tests/solver/test_paths_edge_cases.py b/tests/solver/test_paths_edge_cases.py
deleted file mode 100644
index e01ea66..0000000
--- a/tests/solver/test_paths_edge_cases.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from __future__ import annotations
-
-import math
-from dataclasses import dataclass
-from typing import Dict, List
-
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.solver import paths as sol_paths
-
-
-@dataclass
-class _Node:
- name: str
- disabled: bool = False
-
-
-class _Ctx:
- def __init__(
- self,
- select_map: Dict[str, Dict[str, List[_Node]]],
- nodes: List[str],
- edges: List[tuple[str, str, float, float]],
- ) -> None:
- self._select_map = select_map
- self._nodes = nodes
- self._edges = edges
-
- def select_node_groups_by_path(self, path: str) -> Dict[str, List[_Node]]:
- return self._select_map.get(path, {})
-
- def to_strict_multidigraph(
- self, add_reverse: bool = True, *, compact: bool = False
- ) -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- for n in self._nodes:
- if n not in g:
- g.add_node(n)
- for u, v, cap, cost in self._edges:
- g.add_edge(u, v, capacity=cap, cost=cost)
- if add_reverse:
- g.add_edge(v, u, capacity=cap, cost=cost)
- return g
-
-
-def test_shortest_path_costs_pairwise_disabled_and_overlap() -> None:
- # Disabled sink node -> inf cost for that pair
- nodes = ["S", "T"]
- ctx = _Ctx(
- {"S": {"G": [_Node("S")]}, "T": {"H": [_Node("T", disabled=True)]}},
- nodes,
- [],
- )
- res = sol_paths.shortest_path_costs(ctx, "S", "T", mode="pairwise")
- assert math.isinf(res[("G", "H")])
-
- # Overlap in pairwise mode -> inf cost
- ctx2 = _Ctx(
- {
- "S": {"A": [_Node("X")]},
- "T": {"B": [_Node("X")]},
- },
- ["X"],
- [],
- )
- res2 = sol_paths.shortest_path_costs(ctx2, "S", "T", mode="pairwise")
- assert math.isinf(res2[("A", "B")])
-
-
-def test_shortest_paths_combine_collects_equal_cost_paths_from_multiple_sources() -> (
- None
-):
- # S1->T cost 2, S2->T cost 2 => both should appear when combine mode picks best cost
- nodes = ["S1", "S2", "T"]
- edges = [("S1", "T", 10.0, 2.0), ("S2", "T", 10.0, 2.0)]
- ctx = _Ctx(
- {
- "S": {"SRC1": [_Node("S1")], "SRC2": [_Node("S2")]},
- "T": {"DST": [_Node("T")]},
- },
- nodes,
- edges,
- )
- paths_map = sol_paths.shortest_paths(ctx, "S", "T", mode="combine")
- label = ("SRC1|SRC2", "DST")
- paths = paths_map[label]
- node_seqs = {p.nodes_seq for p in paths}
- # Expect two single-hop paths S1->T and S2->T
- assert ("S1", "T") in node_seqs and ("S2", "T") in node_seqs
diff --git a/tests/solver/test_paths_wrappers.py b/tests/solver/test_paths_wrappers.py
deleted file mode 100644
index 2e71907..0000000
--- a/tests/solver/test_paths_wrappers.py
+++ /dev/null
@@ -1,228 +0,0 @@
-from __future__ import annotations
-
-import math
-from dataclasses import dataclass
-from typing import Dict, List
-
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.solver import paths as sol_paths
-
-
-@dataclass
-class _NodeStub:
- name: str
- disabled: bool = False
-
-
-class _Context:
- def __init__(
- self,
- select_map: Dict[str, Dict[str, List[_NodeStub]]],
- nodes: List[str] | None = None,
- edges: List[tuple[str, str, float, float]] | None = None,
- ) -> None:
- self._select_map = select_map
- self._nodes = nodes or []
- # edges: (u, v, capacity, cost)
- self._edges = edges or []
-
- def select_node_groups_by_path(self, path: str) -> Dict[str, List[_NodeStub]]:
- return self._select_map.get(path, {})
-
- def to_strict_multidigraph(
- self, add_reverse: bool = True, *, compact: bool = False
- ) -> StrictMultiDiGraph:
- g = StrictMultiDiGraph()
- for n in self._nodes:
- if n not in g:
- g.add_node(n)
- for u, v, cap, cost in self._edges:
- g.add_edge(u, v, capacity=cap, cost=cost)
- if add_reverse:
- g.add_edge(v, u, capacity=cap, cost=cost)
- return g
-
-
-def _ctx_simple() -> _Context:
- # Triangle: S -> X -> T, with costs 1 + 1, and direct S->T cost 5
- nodes = ["S", "X", "T"]
- edges = [("S", "X", 10.0, 1.0), ("X", "T", 10.0, 1.0), ("S", "T", 10.0, 5.0)]
- return _Context(select_map={}, nodes=nodes, edges=edges)
-
-
-def test_shortest_path_costs_combine_and_pairwise() -> None:
- ctx = _ctx_simple()
- ctx._select_map = {
- "S": {"SRC": [_NodeStub("S")]},
- "T": {"DST": [_NodeStub("T")]},
- }
- res_c = sol_paths.shortest_path_costs(ctx, "S", "T", mode="combine")
- assert res_c[("SRC", "DST")] == 2.0
-
- res_p = sol_paths.shortest_path_costs(ctx, "S", "T", mode="pairwise")
- assert res_p[("SRC", "DST")] == 2.0
-
-
-def test_shortest_paths_returns_paths_and_respects_parallel_edges() -> None:
- ctx = _ctx_simple()
- ctx._select_map = {
- "S": {"SRC": [_NodeStub("S")]},
- "T": {"DST": [_NodeStub("T")]},
- }
- res = sol_paths.shortest_paths(ctx, "S", "T", mode="combine")
- paths = res[("SRC", "DST")]
- assert paths, "expected at least one path"
- # Best cost is 2.0 via S->X->T
- assert math.isclose(paths[0].cost, 2.0, rel_tol=1e-9)
-
-
-def test_shortest_paths_split_parallel_edges_enumeration() -> None:
- # S->A has 2 parallel equal-cost edges, A->T has 2 parallel equal-cost edges
- # Without splitting, get a single abstract path; with splitting, expect 2*2=4 paths
- nodes = ["S", "A", "T"]
- edges = [
- ("S", "A", 10.0, 1.0),
- ("S", "A", 10.0, 1.0),
- ("A", "T", 10.0, 1.0),
- ("A", "T", 10.0, 1.0),
- ]
- ctx = _Context(select_map={}, nodes=nodes, edges=edges)
- ctx._select_map = {
- "S": {"SRC": [_NodeStub("S")]},
- "T": {"DST": [_NodeStub("T")]},
- }
- # No split: one grouped path, nodes S->A->T, each hop may have multiple parallel edges
- no_split = sol_paths.shortest_paths(ctx, "S", "T", split_parallel_edges=False)
- paths0 = no_split[("SRC", "DST")]
- assert len(paths0) == 1
- p0 = paths0[0]
- assert p0.nodes_seq == ("S", "A", "T")
- # edges_seq has 2 segments (S->A, A->T); each segment includes grouped parallel edges
- assert len(p0.edges_seq) == 2
- assert all(len(seg) == 2 for seg in p0.edges_seq)
-
- # Split: enumerate concrete permutations => 4 paths
- split = sol_paths.shortest_paths(ctx, "S", "T", split_parallel_edges=True)
- paths1 = split[("SRC", "DST")]
- assert len(paths1) == 4
- assert all(p.nodes_seq == ("S", "A", "T") for p in paths1)
- # Each concrete path has exactly one chosen edge per hop
- assert all(len(seg) == 1 for p in paths1 for seg in p.edges_seq)
-
-
-def test_unreachable_and_overlap_yield_inf_or_empty() -> None:
- # Disconnected graph: S and T not connected
- ctx = _Context(select_map={}, nodes=["S", "T"], edges=[])
- ctx._select_map = {"S": {"SRC": [_NodeStub("S")]}, "T": {"DST": [_NodeStub("T")]}}
- res = sol_paths.shortest_path_costs(ctx, "S", "T")
- assert math.isinf(res[("SRC", "DST")])
- resp = sol_paths.shortest_paths(ctx, "S", "T")
- assert resp[("SRC", "DST")] == []
-
- # Overlap
- n = _NodeStub("X")
- ctx2 = _Context(
- select_map={"S": {"A": [n]}, "T": {"B": [n]}}, nodes=["X"], edges=[]
- )
- res2 = sol_paths.shortest_path_costs(ctx2, "S", "T")
- assert math.isinf(res2[("A", "B")])
- resp2 = sol_paths.shortest_paths(ctx2, "S", "T")
- assert resp2[("A", "B")] == []
-
-
-def test_shortest_path_costs_pairwise_labels_and_values() -> None:
- # Two sources and two sinks; only some pairs connected
- nodes = ["S1", "S2", "T1", "T2"]
- edges = [
- ("S1", "T1", 10.0, 3.0), # cost 3
- ("S2", "T1", 10.0, 1.0), # cost 1 (best)
- # T2 unreachable
- ]
- ctx = _Context(select_map={}, nodes=nodes, edges=edges)
- ctx._select_map = {
- "S": {"G1": [_NodeStub("S1")], "G2": [_NodeStub("S2")]},
- "T": {"H1": [_NodeStub("T1")], "H2": [_NodeStub("T2")]},
- }
- res = sol_paths.shortest_path_costs(ctx, "S", "T", mode="pairwise")
- assert res[("G1", "H1")] == 3.0
- assert res[("G2", "H1")] == 1.0
- assert math.isinf(res[("G1", "H2")])
- assert math.isinf(res[("G2", "H2")])
-
-
-def test_k_shortest_paths_respects_cost_thresholds_and_order() -> None:
- # Two best paths of cost 2, one worse path of cost 4; factor=1.0 keeps only best-cost
- nodes = ["S", "A", "B", "C", "T"]
- edges = [
- ("S", "A", 10.0, 1.0),
- ("A", "T", 10.0, 1.0), # S-A-T cost 2
- ("S", "B", 10.0, 1.0),
- ("B", "T", 10.0, 1.0), # S-B-T cost 2
- ("S", "C", 10.0, 2.0),
- ("C", "T", 10.0, 2.0), # S-C-T cost 4
- ]
- ctx = _Context(select_map={}, nodes=nodes, edges=edges)
- ctx._select_map = {
- "S": {"SRC": [_NodeStub("S")]},
- "T": {"DST": [_NodeStub("T")]},
- }
- res = sol_paths.k_shortest_paths(ctx, "S", "T", max_k=5, max_path_cost_factor=1.0)
- paths = res[("SRC", "DST")]
- # Only the two best-cost paths should be present
- assert len(paths) <= 2
- assert all(math.isclose(p.cost, 2.0, rel_tol=1e-9) for p in paths)
- # Ensure none of the paths go via C
- assert all("C" not in p.nodes_seq for p in paths)
-
-
-def test_disabled_nodes_are_excluded() -> None:
- ctx = _Context(select_map={}, nodes=["S", "T"], edges=[("S", "T", 10.0, 1.0)])
- ctx._select_map = {
- "S": {"SRC": [_NodeStub("S")]},
- "T": {"DST": [_NodeStub("T", disabled=True)]},
- }
- res = sol_paths.shortest_path_costs(ctx, "S", "T")
- assert math.isinf(res[("SRC", "DST")])
- paths = sol_paths.shortest_paths(ctx, "S", "T")[("SRC", "DST")]
- assert paths == []
-
-
-def test_combine_mode_selects_best_pair_and_paths_are_correct() -> None:
- # S1->T2 cost 10, S2->T1 cost 1 (best overall). Combine should pick S2->T1.
- nodes = ["S1", "S2", "T1", "T2"]
- edges = [
- ("S1", "T2", 10.0, 10.0),
- ("S2", "T1", 10.0, 1.0),
- ]
- ctx = _Context(select_map={}, nodes=nodes, edges=edges)
- ctx._select_map = {
- "S": {"A": [_NodeStub("S1")], "B": [_NodeStub("S2")]},
- "T": {"C": [_NodeStub("T1")], "D": [_NodeStub("T2")]},
- }
- label = ("A|B", "C|D")
- costs = sol_paths.shortest_path_costs(ctx, "S", "T", mode="combine")
- assert costs[label] == 1.0
- paths = sol_paths.shortest_paths(ctx, "S", "T", mode="combine")[label]
- assert paths, "expected at least one best path"
- # Best path must be S2->T1
- assert any(p.nodes_seq == ("S2", "T1") for p in paths)
-
-
-def test_k_shortest_paths_limits_and_order() -> None:
- # Line with two equal-cost alternatives S->A->T and S->B->T
- nodes = ["S", "A", "B", "T"]
- edges = [
- ("S", "A", 10.0, 1.0),
- ("A", "T", 10.0, 1.0),
- ("S", "B", 10.0, 1.0),
- ("B", "T", 10.0, 1.0),
- ]
- ctx = _Context(select_map={}, nodes=nodes, edges=edges)
- ctx._select_map = {
- "S": {"SRC": [_NodeStub("S")]},
- "T": {"DST": [_NodeStub("T")]},
- }
- res = sol_paths.k_shortest_paths(ctx, "S", "T", max_k=2)
- paths = res[("SRC", "DST")]
- assert len(paths) <= 2
- assert all(math.isclose(p.cost, 2.0, rel_tol=1e-9) for p in paths)
diff --git a/tests/solver/test_paths_wrappers_additional.py b/tests/solver/test_paths_wrappers_additional.py
deleted file mode 100644
index ebe6153..0000000
--- a/tests/solver/test_paths_wrappers_additional.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from __future__ import annotations
-
-import pytest
-
-from ngraph.solver import paths as sol_paths
-
-
-class _Ctx:
- def __init__(self) -> None:
- self._map: dict[str, dict[str, list[object]]] = {}
-
- def select_node_groups_by_path(self, path: str) -> dict[str, list[object]]:
- return self._map.get(path, {})
-
- def to_strict_multidigraph(
- self, add_reverse: bool = True, *, compact: bool = False
- ): # pragma: no cover - type shape only
- from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-
- g = StrictMultiDiGraph()
- return g
-
-
-def test_shortest_path_costs_invalid_inputs() -> None:
- ctx = _Ctx()
- with pytest.raises(ValueError):
- sol_paths.shortest_path_costs(ctx, "S", "T")
- ctx._map["S"] = {"A": []}
- with pytest.raises(ValueError):
- sol_paths.shortest_path_costs(ctx, "S", "T")
- ctx._map["T"] = {"B": []}
- with pytest.raises(ValueError):
- sol_paths.shortest_path_costs(ctx, "S", "T", mode="bad")
-
-
-def test_k_shortest_paths_invalid_mode_raises() -> None:
- ctx = _Ctx()
- ctx._map = {"S": {"SRC": [object()]}, "T": {"DST": [object()]}}
- with pytest.raises(ValueError):
- sol_paths.k_shortest_paths(ctx, "S", "T", mode="invalid")
diff --git a/tests/monte_carlo/test_types_smoke.py b/tests/types/test_types.py
similarity index 90%
rename from tests/monte_carlo/test_types_smoke.py
rename to tests/types/test_types.py
index 2b150f1..fb20e5d 100644
--- a/tests/monte_carlo/test_types_smoke.py
+++ b/tests/types/test_types.py
@@ -2,7 +2,7 @@
from typing import get_type_hints
-from ngraph.monte_carlo import types as mc_types
+from ngraph.exec.analysis import types as mc_types
def test_monte_carlo_types_protocols_shape() -> None:
diff --git a/tests/demand/__init__.py b/tests/utils/__init__.py
similarity index 100%
rename from tests/demand/__init__.py
rename to tests/utils/__init__.py
diff --git a/tests/yaml_utils/test_boolean_keys.py b/tests/utils/test_boolean_keys.py
similarity index 98%
rename from tests/yaml_utils/test_boolean_keys.py
rename to tests/utils/test_boolean_keys.py
index 7964e92..839d284 100644
--- a/tests/yaml_utils/test_boolean_keys.py
+++ b/tests/utils/test_boolean_keys.py
@@ -3,7 +3,7 @@
import textwrap
from ngraph.scenario import Scenario
-from ngraph.yaml_utils import normalize_yaml_dict_keys
+from ngraph.utils.yaml_utils import normalize_yaml_dict_keys
# =============================================================================
# Unit Tests for normalize_yaml_dict_keys utility function
diff --git a/tests/seed_manager/test_seed_manager.py b/tests/utils/test_seed_manager.py
similarity index 98%
rename from tests/seed_manager/test_seed_manager.py
rename to tests/utils/test_seed_manager.py
index 7f279a2..9ba6671 100644
--- a/tests/seed_manager/test_seed_manager.py
+++ b/tests/utils/test_seed_manager.py
@@ -2,7 +2,7 @@
import random
-from ngraph.seed_manager import SeedManager
+from ngraph.utils.seed_manager import SeedManager
class TestSeedManager:
diff --git a/tests/workflow/test_build_graph.py b/tests/workflow/test_build_graph.py
deleted file mode 100644
index 3ce03c4..0000000
--- a/tests/workflow/test_build_graph.py
+++ /dev/null
@@ -1,92 +0,0 @@
-from unittest.mock import MagicMock
-
-import pytest
-
-from ngraph.graph.strict_multidigraph import StrictMultiDiGraph
-from ngraph.model.network import Link, Network, Node
-from ngraph.results.store import Results
-from ngraph.workflow.build_graph import BuildGraph
-
-
-@pytest.fixture
-def mock_scenario():
- """
- Provides a mock Scenario object for testing, including:
- - A Network object with two nodes (A, B).
- - Two links (L1, L2), each of which is auto-created via Link
- but we override their IDs to maintain the naming expected by the tests.
- - A real Results object for storage assertions.
- """
- scenario = MagicMock()
- scenario.network = Network()
- scenario.results = Results()
-
- # Create real Node objects and add them to the network
- node_a = Node(name="A", attrs={"type": "router", "location": "rack1"})
- node_b = Node(name="B", attrs={"type": "router", "location": "rack2"})
- scenario.network.add_node(node_a)
- scenario.network.add_node(node_b)
-
- # Create real Link objects, then override their ID to match the original test expectations.
- link1 = Link(source="A", target="B", capacity=100, cost=5, attrs={"fiber": True})
- link1.id = "L1" # Force the ID so the test can look up "L1"
- scenario.network.links[link1.id] = link1 # Insert directly
-
- link2 = Link(source="B", target="A", capacity=50, cost=2, attrs={"copper": True})
- link2.id = "L2"
- scenario.network.links[link2.id] = link2
-
- return scenario
-
-
-def test_build_graph_stores_multidigraph_in_results(mock_scenario):
- """
- Ensure BuildGraph creates a StrictMultiDiGraph, adds all nodes/edges,
- and stores it in scenario.results under steps[name]["data"]["graph"].
- """
- step = BuildGraph(name="MyBuildStep")
-
- step.execute(mock_scenario)
-
- exported = mock_scenario.results.to_dict()
- step_data = exported["steps"]["MyBuildStep"]["data"]
- created_graph = step_data.get("graph")
-
- # Allow either in-memory object or serialized dict (after to_dict conversion)
- if isinstance(created_graph, StrictMultiDiGraph):
- graph_obj = created_graph
- # Verify the correct nodes were added
- assert set(graph_obj.nodes()) == {"A", "B"}
- # Check node attributes remain present in full build
- assert graph_obj.nodes["A"]["type"] == "router"
- assert graph_obj.nodes["B"]["location"] == "rack2"
- # Verify edges (two edges per link: forward and reverse)
- assert graph_obj.number_of_edges() == 4
- # Count by direction (two edges each way)
- num_ab = sum(1 for _k in graph_obj.get_edge_data("A", "B").keys())
- num_ba = sum(1 for _k in graph_obj.get_edge_data("B", "A").keys())
- assert num_ab == 2
- assert num_ba == 2
- else:
- # Serialized representation: expect dict with nodes/links lists
- assert isinstance(created_graph, dict)
- nodes = created_graph.get("nodes", [])
- links = created_graph.get("links", [])
- # Basic shape checks
- assert isinstance(nodes, list) and isinstance(links, list)
- # Verify nodes content
- names = {n.get("id") for n in nodes}
- assert names == {"A", "B"}
- # Verify there are two edges per direction (A->B and B->A)
- # Build index mapping id -> idx
- idx_by_id = {node["id"]: i for i, node in enumerate(nodes)}
- a_idx = idx_by_id["A"]
- b_idx = idx_by_id["B"]
- ab = sum(
- 1 for lk in links if lk.get("source") == a_idx and lk.get("target") == b_idx
- )
- ba = sum(
- 1 for lk in links if lk.get("source") == b_idx and lk.get("target") == a_idx
- )
- assert ab == 2
- assert ba == 2
diff --git a/tests/workflow/test_capacity_envelope_analysis.py b/tests/workflow/test_capacity_envelope_analysis.py
index 9598410..19cc638 100644
--- a/tests/workflow/test_capacity_envelope_analysis.py
+++ b/tests/workflow/test_capacity_envelope_analysis.py
@@ -4,12 +4,12 @@
import pytest
-from ngraph.algorithms.base import FlowPlacement
-from ngraph.failure.policy import FailurePolicy, FailureRule
-from ngraph.failure.policy_set import FailurePolicySet
+from ngraph.model.failure.policy import FailureMode, FailurePolicy, FailureRule
+from ngraph.model.failure.policy_set import FailurePolicySet
from ngraph.model.network import Link, Network, Node
from ngraph.results import Results
from ngraph.scenario import Scenario
+from ngraph.types.base import FlowPlacement
from ngraph.workflow.max_flow_step import MaxFlow
@@ -35,9 +35,9 @@ def simple_failure_policy() -> FailurePolicy:
)
return FailurePolicy(
modes=[
- __import__("ngraph.failure.policy", fromlist=["FailureMode"]).FailureMode(
- weight=1.0, rules=[rule]
- )
+ FailureMode(weight=1.0, rules=[rule])
]
)
diff --git a/tests/workflow/test_cost_power.py b/tests/workflow/test_cost_power.py
index 42cc6e3..14baec4 100644
--- a/tests/workflow/test_cost_power.py
+++ b/tests/workflow/test_cost_power.py
@@ -4,7 +4,7 @@
import pytest
-from ngraph.components import Component, ComponentsLibrary
+from ngraph.model.components import Component, ComponentsLibrary
from ngraph.model.network import Link, Network, Node
from ngraph.results.store import Results
from ngraph.workflow.cost_power import CostPower
@@ -105,7 +105,8 @@ def test_cost_power_include_disabled_filters_active_view() -> None:
net.add_node(
Node(
"dc1/leaf/B",
- attrs={"hardware": {"component": "NodeHW", "count": 1}, "disabled": True},
+ disabled=True,
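+ # "disabled" is now a first-class Node field rather than an attrs entry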
+ attrs={"hardware": {"component": "NodeHW", "count": 1}},
)
)
diff --git a/tests/workflow/test_maximum_supported_demand.py b/tests/workflow/test_maximum_supported_demand.py
index 759da46..3ea4029 100644
--- a/tests/workflow/test_maximum_supported_demand.py
+++ b/tests/workflow/test_maximum_supported_demand.py
@@ -87,13 +87,14 @@ def _eval(*, alpha, scenario, matrix_name, placement_rounds, seeds): # type: ig
def test_msd_end_to_end_single_link() -> None:
- # Build a tiny deterministic scenario: A --(cap=10)--> B, demand base=5
- from ngraph.demand.manager.manager import TrafficManager
+ """Test MSD end-to-end with a simple single-link scenario."""
+ from ngraph.exec.analysis.flow import demand_placement_analysis
from ngraph.workflow.maximum_supported_demand_step import (
MaximumSupportedDemand as MSD,
)
from tests.integration.helpers import ScenarioDataBuilder
+ # Build a tiny deterministic scenario: A --(cap=10)--> B, demand base=5
scenario = (
ScenarioDataBuilder()
.with_simple_nodes(["A", "B"])
@@ -123,45 +124,66 @@ def test_msd_end_to_end_single_link() -> None:
base_demands = data.get("base_demands")
assert isinstance(base_demands, list) and base_demands
- # Verify feasibility at alpha*
- tmset = MSD._build_scaled_matrix(base_demands, float(alpha_star))
- tm = TrafficManager(
- network=scenario.network, traffic_matrix_set=tmset, matrix_name="temp"
+ # Verify feasibility at alpha* using the new Core-based API
+ scaled_demands = MSD._build_scaled_demands(base_demands, float(alpha_star))
+
+ def _to_config(demands):
+ # demand_placement_analysis consumes plain dict configs; convert the
+ # scaled demand objects once and reuse for the alpha_above check below
+ return [
+ {
+ "source_path": d.source_path,
+ "sink_path": d.sink_path,
+ "demand": d.demand,
+ "mode": d.mode,
+ "priority": d.priority,
+ "flow_policy_config": d.flow_policy_config,
+ }
+ for d in demands
+ ]
+
+ demands_config = _to_config(scaled_demands)
+
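+ # placement_rounds=1 is sufficient here: one demand over one link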
+ result = demand_placement_analysis(
+ network=scenario.network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ demands_config=demands_config,
+ placement_rounds=1,
)
- tm.build_graph(add_reverse=True)
- tm.expand_demands()
- tm.place_all_demands(placement_rounds="auto")
- res = tm.get_traffic_results(detailed=False)
- for r in res:
- total = float(r.total_volume)
- placed = float(r.placed_volume)
- assert pytest.approx(placed, rel=1e-9, abs=1e-9) == total
+
+ # At alpha*, all demands should be fully placed
+ assert result.summary.overall_ratio >= 1.0 - 1e-9
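+ # (overall_ratio == 1.0 means every demand was fully placed)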
# Verify infeasibility just above alpha*
alpha_above = float(alpha_star) + 0.05
- tmset2 = MSD._build_scaled_matrix(base_demands, alpha_above)
- tm2 = TrafficManager(
- network=scenario.network, traffic_matrix_set=tmset2, matrix_name="temp"
+ scaled_demands_above = MSD._build_scaled_demands(base_demands, alpha_above)
+ demands_config_above = _to_config(scaled_demands_above)
+
+ result_above = demand_placement_analysis(
+ network=scenario.network,
+ excluded_nodes=set(),
+ excluded_links=set(),
+ demands_config=demands_config_above,
+ placement_rounds=1,
)
- tm2.build_graph(add_reverse=True)
- tm2.expand_demands()
- tm2.place_all_demands(placement_rounds="auto")
- res2 = tm2.get_traffic_results(detailed=False)
- ratios = []
- for r in res2:
- total = float(r.total_volume)
- placed = float(r.placed_volume)
- ratios.append(1.0 if total == 0 else placed / total)
- assert any(x < 1.0 - 1e-9 for x in ratios)
+
+ # Above alpha*, placement should fail (ratio < 1.0)
+ assert result_above.summary.overall_ratio < 1.0 - 1e-9
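+ # The two checks bracket alpha*: feasible at alpha*, infeasible at alpha* + 0.05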
def test_msd_auto_vs_one_equivalence_single_link() -> None:
- # Same single-link scenario; compare auto vs 1 rounds
+ """Test that MSD with auto vs 1 placement rounds produces equivalent results."""
from ngraph.workflow.maximum_supported_demand_step import (
MaximumSupportedDemand as MSD,
)
from tests.integration.helpers import ScenarioDataBuilder
+ # Same single-link scenario; compare auto vs 1 rounds
scenario = (
ScenarioDataBuilder()
.with_simple_nodes(["A", "B"])
@@ -196,4 +218,5 @@ def test_msd_auto_vs_one_equivalence_single_link() -> None:
exported = scenario.results.to_dict()
alpha_auto = float(exported["steps"]["msd_auto"]["data"]["alpha_star"])
alpha_one = float(exported["steps"]["msd_one"]["data"]["alpha_star"])
+ # Both should find approximately the same alpha* for this simple case
assert abs(alpha_auto - alpha_one) <= 0.02
diff --git a/tests/workflow/test_msd_perf_safety.py b/tests/workflow/test_msd_perf_safety.py
index 6fe2312..92523c0 100644
--- a/tests/workflow/test_msd_perf_safety.py
+++ b/tests/workflow/test_msd_perf_safety.py
@@ -14,8 +14,8 @@ def __init__(self, network: Any, tmset: Any, results: Any) -> None:
def test_msd_reuse_tm_across_seeds_is_behaviorally_identical(monkeypatch):
# Build a tiny scenario
- from ngraph.demand.matrix import TrafficMatrixSet
- from ngraph.demand.spec import TrafficDemand
+ from ngraph.model.demand.matrix import TrafficMatrixSet
+ from ngraph.model.demand.spec import TrafficDemand
from ngraph.model.network import Link, Network, Node
net = Network()
diff --git a/tests/workflow/test_namespace_alignment.py b/tests/workflow/test_namespace_alignment.py
index 1fcb7d2..9a54039 100644
--- a/tests/workflow/test_namespace_alignment.py
+++ b/tests/workflow/test_namespace_alignment.py
@@ -42,7 +42,7 @@ def test_metadata_aligns_with_results_for_empty_name() -> None:
def test_cost_power_collects_levels_schema_smoke() -> None:
# Minimal scenario with components and one link; results is a real store
- from ngraph.components import Component, ComponentsLibrary
+ from ngraph.model.components import Component, ComponentsLibrary
from ngraph.model.network import Link, Network, Node
net = Network()
diff --git a/tests/workflow/test_tm_analysis_perf_safety.py b/tests/workflow/test_tm_analysis_perf_safety.py
index 877339f..61dae72 100644
--- a/tests/workflow/test_tm_analysis_perf_safety.py
+++ b/tests/workflow/test_tm_analysis_perf_safety.py
@@ -19,8 +19,8 @@ def __init__(
def test_tm_basic_behavior_unchanged(monkeypatch):
# Small sanity test that the step runs end-to-end and stores new outputs
- from ngraph.demand.matrix import TrafficMatrixSet
- from ngraph.demand.spec import TrafficDemand
+ from ngraph.model.demand.matrix import TrafficMatrixSet
+ from ngraph.model.demand.spec import TrafficDemand
from ngraph.model.network import Link, Network, Node
net = Network()
diff --git a/tests/yaml_utils/__init__.py b/tests/yaml_utils/__init__.py
deleted file mode 100644
index e69de29..0000000