diff --git a/.claude/skills/netgraph-dsl/SKILL.md b/.claude/skills/netgraph-dsl/SKILL.md new file mode 100644 index 0000000..8b40468 --- /dev/null +++ b/.claude/skills/netgraph-dsl/SKILL.md @@ -0,0 +1,348 @@ +--- +name: netgraph-dsl +description: > + NetGraph scenario DSL for defining network topologies, traffic demands, failure policies, + and analysis workflows in YAML. Use when: creating or editing .yaml/.yml network scenarios, + defining nodes/links/groups, writing adjacency rules, configuring selectors or blueprints, + setting up traffic matrices or failure policies, debugging DSL syntax or validation errors, + or asking about NetGraph scenario structure. +license: MIT +metadata: + author: "netgraph" + version: "1.0" + repo: "https://github.com/networmix/NetGraph" +--- + +# NetGraph DSL + +Define network simulation scenarios in YAML format. + +## Quick Reference + +| Section | Purpose | +|---------|---------| +| `network` | Topology: nodes, links, groups, adjacency (required) | +| `blueprints` | Reusable topology templates | +| `components` | Hardware library for cost/power modeling | +| `risk_groups` | Failure correlation groups | +| `vars` | YAML anchors for value reuse | +| `traffic_matrix_set` | Traffic demand definitions | +| `failure_policy_set` | Failure simulation rules | +| `workflow` | Analysis execution steps | +| `seed` | Master seed for reproducibility | + +## Minimal Example + +```yaml +network: + nodes: + A: {} + B: {} + links: + - source: A + target: B + link_params: + capacity: 100 + cost: 1 +``` + +## Core Patterns + +### Nodes and Links + +```yaml +network: + nodes: + Seattle: + attrs: # Custom attributes go here + role: core + risk_groups: ["RG1"] + disabled: false + Portland: + attrs: + role: edge + + links: + - source: Seattle + target: Portland + link_params: # Required wrapper for link parameters + capacity: 100 + cost: 10 + attrs: + distance_km: 280 + link_count: 2 # Parallel links +``` + +### Node Groups + +```yaml +network: + groups: + leaf: + node_count: 4 + name_template: "leaf-{node_num}" + attrs: + role: leaf +``` + +Creates: `leaf/leaf-1`, `leaf/leaf-2`, `leaf/leaf-3`, `leaf/leaf-4` + +### Template Syntaxes + +| Syntax | Example | Context | +|--------|---------|---------| +| `[1-3]` | `dc[1-3]/rack` | Group names, risk groups | +| `$var`/`${var}` | `pod${p}/leaf` | Adjacency & demand selectors | +| `{node_num}` | `srv-{node_num}` | `name_template` field | + +These are NOT interchangeable. See [REFERENCE.md](references/REFERENCE.md) for details. + +### Bracket Expansion + +```yaml +network: + groups: + dc[1-3]/rack[a,b]: # Cartesian product + node_count: 4 + name_template: "srv-{node_num}" +``` + +Creates: `dc1/racka`, `dc1/rackb`, `dc2/racka`, `dc2/rackb`, `dc3/racka`, `dc3/rackb` + +**Scope**: Bracket expansion works in group names, risk group definitions (including children), and risk group membership arrays. Component names and other fields treat brackets as literal characters. 
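+
+For example, risk group definitions and memberships expand with the same bracket syntax (this mirrors the risk group expansion examples in [REFERENCE.md](references/REFERENCE.md)):
+
+```yaml
+risk_groups:
+  - name: "DC[1-3]_Power"        # Creates DC1_Power, DC2_Power, DC3_Power
+
+network:
+  nodes:
+    Server:
+      risk_groups: ["RG[1-3]"]   # Membership in RG1, RG2, RG3
+```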
+ +### Adjacency Patterns + +```yaml +network: + adjacency: + - source: /leaf + target: /spine + pattern: mesh # Every source to every target + link_params: + capacity: 100 + + - source: /group_a # 4 nodes + target: /group_b # 2 nodes + pattern: one_to_one # Pairwise with modulo wrap (sizes must have multiple factor) +``` + +### Selectors with Conditions + +```yaml +adjacency: + - source: + path: "/datacenter" + match: + logic: and # "and" or "or" (default) + conditions: + - attr: role + operator: "==" + value: leaf + target: /spine + pattern: mesh +``` + +**Operators**: `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `in`, `not_in`, `any_value`, `no_value` + +### Capturing Groups for Grouping + +```yaml +# Single capture group creates groups by captured value +source: "^(dc[1-3])/.*" # Groups: dc1, dc2, dc3 + +# Multiple capture groups join with | +source: "^(dc\\d+)/(spine|leaf)/.*" # Groups: dc1|spine, dc1|leaf, etc. +``` + +### Variable Expansion + +```yaml +adjacency: + - source: "plane${p}/rack" + target: "spine${s}" + expand_vars: + p: [1, 2] + s: [1, 2, 3] + expansion_mode: cartesian # or "zip" (equal-length lists required) + pattern: mesh +``` + +### Blueprints + +```yaml +blueprints: + clos_pod: + groups: + leaf: + node_count: 4 + name_template: "leaf-{node_num}" + spine: + node_count: 2 + name_template: "spine-{node_num}" + adjacency: + - source: /leaf + target: /spine + pattern: mesh + link_params: + capacity: 100 + +network: + groups: + pod[1-2]: + use_blueprint: clos_pod + parameters: + leaf.node_count: 6 # Override defaults +``` + +### Traffic Demands + +```yaml +traffic_matrix_set: + production: + - source: "^dc1/.*" + sink: "^dc2/.*" + demand: 1000 + mode: pairwise # or "combine" + flow_policy_config: SHORTEST_PATHS_ECMP +``` + +**Flow policies**: `SHORTEST_PATHS_ECMP`, `SHORTEST_PATHS_WCMP`, `TE_WCMP_UNLIM`, `TE_ECMP_16_LSP`, `TE_ECMP_UP_TO_256_LSP` + +### Failure Policies + +```yaml +failure_policy_set: + single_link: + fail_risk_groups: false # Expand to shared-risk entities + modes: # Weighted modes (one selected per iteration) + - weight: 1.0 + rules: + - entity_scope: link # node, link, or risk_group + rule_type: choice # all, choice, or random + count: 1 + # Optional: weight_by: capacity # Weighted sampling by attribute +``` + +**Rule types**: `all` (select all matches), `choice` (sample `count`), `random` (each with `probability`) + +### Workflow + +```yaml +workflow: + - step_type: NetworkStats + name: stats + - step_type: MaximumSupportedDemand + name: msd + matrix_name: production + alpha_start: 1.0 + resolution: 0.05 + - step_type: TrafficMatrixPlacement + name: placement + matrix_name: production + failure_policy: single_link + iterations: 1000 + alpha_from_step: msd # Reference MSD result + alpha_from_field: data.alpha_star + - step_type: MaxFlow + source: "^(dc[1-3])$" + sink: "^(dc[1-3])$" + mode: pairwise + failure_policy: single_link + iterations: 1000 + baseline: true # Include no-failure baseline +``` + +**Step types**: `BuildGraph`, `NetworkStats`, `MaxFlow`, `TrafficMatrixPlacement`, `MaximumSupportedDemand`, `CostPower` + +## Common Pitfalls + +### 1. Custom fields must go in `attrs` + +```yaml +# WRONG +nodes: + A: + my_field: value # Error! + +# CORRECT +nodes: + A: + attrs: + my_field: value +``` + +### 2. Link parameters require `link_params` wrapper + +```yaml +# WRONG +links: + - source: A + target: B + capacity: 100 # Error! + +# CORRECT +links: + - source: A + target: B + link_params: + capacity: 100 +``` + +### 3. 
`one_to_one` requires compatible sizes + +Sizes must have a multiple factor (4-to-2 OK, 3-to-2 ERROR). + +### 4. Path patterns are anchored at start + +```yaml +path: "leaf" # Only matches names STARTING with "leaf" +path: ".*leaf.*" # Matches "leaf" anywhere +``` + +**Note**: Leading `/` is stripped and has no effect. `/leaf` and `leaf` are equivalent. All paths are relative to the current scope (blueprint instantiation path or network root). + +### 5. Variable syntax uses `$` prefix + +```yaml +# WRONG (conflicts with regex {m,n}) +source: "{dc}/leaf" + +# CORRECT +source: "${dc}/leaf" +``` + +### 6. `zip` requires equal-length lists + +```yaml +# WRONG +expand_vars: + a: [1, 2] + b: [x, y, z] # Length mismatch! +expansion_mode: zip +``` + +### 7. Processing order matters + +1. Groups and direct nodes created +2. Node overrides applied +3. Adjacency and blueprint adjacencies expanded +4. Direct links created +5. Link overrides applied + +Overrides only affect entities that exist at their processing stage. + +## Validation Checklist + +- [ ] Custom fields inside `attrs` +- [ ] Link parameters inside `link_params` +- [ ] Referenced blueprints exist +- [ ] Node names in direct links exist +- [ ] `one_to_one` sizes have multiple factor +- [ ] `zip` lists have equal length +- [ ] Selectors have at least one of: `path`, `group_by`, `match` + +## More Information + +- [Full DSL Reference](references/REFERENCE.md) - Complete field documentation, all operators, workflow steps +- [Working Examples](references/EXAMPLES.md) - 11 complete scenarios from simple to advanced diff --git a/.claude/skills/netgraph-dsl/references/EXAMPLES.md b/.claude/skills/netgraph-dsl/references/EXAMPLES.md new file mode 100644 index 0000000..dc142a7 --- /dev/null +++ b/.claude/skills/netgraph-dsl/references/EXAMPLES.md @@ -0,0 +1,906 @@ +# NetGraph DSL Examples + +Complete working examples for common use cases. + +> For quick patterns and pitfalls, see the main [SKILL.md](../SKILL.md). +> For detailed field reference, see [REFERENCE.md](REFERENCE.md). + +## Example 1: Simple Data Center + +A basic leaf-spine topology with traffic analysis. + +```yaml +network: + groups: + leaf: + node_count: 4 + name_template: "leaf-{node_num}" + attrs: + role: leaf + spine: + node_count: 2 + name_template: "spine-{node_num}" + attrs: + role: spine + adjacency: + - source: /leaf + target: /spine + pattern: mesh + link_params: + capacity: 100 + cost: 1 + +traffic_matrix_set: + default: + - source: "^leaf/.*" + sink: "^leaf/.*" + demand: 50 + mode: pairwise + +failure_policy_set: + single_link: + modes: + - weight: 1.0 + rules: + - entity_scope: link + rule_type: choice + count: 1 + +workflow: + - step_type: TrafficMatrixPlacement + name: placement + matrix_name: default + failure_policy: single_link + iterations: 100 +``` + +**Result**: 6 nodes (4 leaf + 2 spine), 8 links (4x2 mesh) + +## Example 2: Multi-Pod with Blueprint + +Two pods sharing a blueprint, connected via spine layer. 
+ +```yaml +blueprints: + clos_pod: + groups: + leaf: + node_count: 4 + name_template: "leaf-{node_num}" + attrs: + role: leaf + spine: + node_count: 2 + name_template: "spine-{node_num}" + attrs: + role: spine + adjacency: + - source: /leaf + target: /spine + pattern: mesh + link_params: + capacity: 100 + +network: + groups: + pod[1-2]: + use_blueprint: clos_pod + + adjacency: + - source: + path: "pod1/spine" + match: + conditions: + - attr: role + operator: "==" + value: spine + target: + path: "pod2/spine" + pattern: mesh + link_params: + capacity: 400 +``` + +**Result**: 12 nodes (2 pods x 6 nodes), 20 links (16 internal + 4 inter-pod) + +## Example 3: Backbone with Risk Groups + +Wide-area network with shared-risk link groups. + +```yaml +network: + nodes: + NewYork: {attrs: {site_type: core}} + Chicago: {attrs: {site_type: core}} + LosAngeles: {attrs: {site_type: core}} + + links: + # Parallel diverse paths + - source: NewYork + target: Chicago + link_params: + capacity: 100 + cost: 10 + risk_groups: [RG_NY_CHI] + - source: NewYork + target: Chicago + link_params: + capacity: 100 + cost: 10 + # Single path + - source: Chicago + target: LosAngeles + link_params: + capacity: 100 + cost: 15 + risk_groups: [RG_CHI_LA] + +risk_groups: + - name: RG_NY_CHI + attrs: + corridor: NYC-Chicago + distance_km: 1200 + - name: RG_CHI_LA + attrs: + corridor: Chicago-LA + distance_km: 2800 + +failure_policy_set: + srlg_failure: + modes: + - weight: 1.0 + rules: + - entity_scope: risk_group + rule_type: choice + count: 1 +``` + +**Result**: 3 nodes, 3 links, 2 risk groups + +## Example 4: Variable Expansion at Scale + +Large fabric using variable expansion. + +```yaml +network: + groups: + plane[1-4]/rack[1-8]: + node_count: 48 + name_template: "server-{node_num}" + attrs: + role: compute + + fabric/spine[1-4]: + node_count: 1 + name_template: "spine" + attrs: + role: spine + + adjacency: + - source: "plane${p}/rack${r}" + target: "fabric/spine${s}" + expand_vars: + p: [1, 2, 3, 4] + r: [1, 2, 3, 4, 5, 6, 7, 8] + s: [1, 2, 3, 4] + expansion_mode: cartesian + pattern: mesh + link_params: + capacity: 100 +``` + +**Result**: 1540 nodes (4x8x48 compute + 4 spine), 6144 links + +## Example 5: Full Mesh Topology + +Simple 4-node full mesh for testing. + +```yaml +seed: 42 + +network: + nodes: + N1: {} + N2: {} + N3: {} + N4: {} + + links: + - source: N1 + target: N2 + link_params: {capacity: 2.0, cost: 1.0} + - source: N1 + target: N3 + link_params: {capacity: 1.0, cost: 1.0} + - source: N1 + target: N4 + link_params: {capacity: 2.0, cost: 1.0} + - source: N2 + target: N3 + link_params: {capacity: 2.0, cost: 1.0} + - source: N2 + target: N4 + link_params: {capacity: 1.0, cost: 1.0} + - source: N3 + target: N4 + link_params: {capacity: 2.0, cost: 1.0} + +failure_policy_set: + single_link_failure: + modes: + - weight: 1.0 + rules: + - entity_scope: link + rule_type: choice + count: 1 + +traffic_matrix_set: + baseline: + - source: "^N([1-4])$" + sink: "^N([1-4])$" + demand: 12.0 + mode: pairwise + +workflow: + - step_type: MaximumSupportedDemand + name: msd + matrix_name: baseline + acceptance_rule: hard + alpha_start: 1.0 + resolution: 0.05 + + - step_type: MaxFlow + name: capacity_matrix + source: "^(N[1-4])$" + sink: "^(N[1-4])$" + mode: pairwise + failure_policy: single_link_failure + iterations: 1000 + baseline: true +``` + +## Example 6: Attribute-Based Selectors + +Using match conditions to filter nodes. 
+ +```yaml +network: + groups: + servers: + node_count: 4 + name_template: "srv-{node_num}" + attrs: + role: compute + rack: "rack-1" + servers_b: + node_count: 2 + name_template: "srvb-{node_num}" + attrs: + role: compute + rack: "rack-9" + switches: + node_count: 2 + name_template: "sw-{node_num}" + attrs: + tier: spine + + adjacency: + - source: + path: "/servers" + match: + logic: and + conditions: + - attr: role + operator: "==" + value: compute + - attr: rack + operator: "!=" + value: "rack-9" + target: + path: "/switches" + match: + conditions: + - attr: tier + operator: "==" + value: spine + pattern: mesh + link_params: + capacity: 10 + cost: 1 +``` + +**Result**: 8 nodes, 8 links (only rack-1 servers connect to switches) + +## Example 7: Blueprint with Parameter Overrides + +Customizing blueprint instances. + +```yaml +blueprints: + bp1: + groups: + leaf: + node_count: 1 + attrs: + some_field: + nested_key: 111 + +network: + groups: + Main: + use_blueprint: bp1 + parameters: + leaf.attrs.some_field.nested_key: 999 +``` + +**Result**: Node `Main/leaf/leaf-1` has `attrs.some_field.nested_key = 999` + +## Example 8: Node and Link Overrides + +Modifying topology after creation. + +```yaml +blueprints: + test_bp: + groups: + switches: + node_count: 3 + name_template: "switch-{node_num}" + +network: + groups: + group1: + node_count: 2 + name_template: "node-{node_num}" + group2: + node_count: 2 + name_template: "node-{node_num}" + my_clos1: + use_blueprint: test_bp + + adjacency: + - source: /group1 + target: /group2 + pattern: mesh + link_params: + capacity: 100 + cost: 10 + + node_overrides: + - path: "^my_clos1/switches/switch-(1|3)$" + disabled: true + attrs: + maintenance_mode: active + hw_type: newer_model + + link_overrides: + - source: "^group1/node-1$" + target: "^group2/node-1$" + link_params: + capacity: 200 + cost: 5 +``` + +**Result**: Switches 1 and 3 disabled, specific link upgraded to 200 capacity + +## Example 9: Complete Traffic Analysis + +Full workflow with MSD and placement analysis. + +```yaml +seed: 42 + +blueprints: + Clos_L16_S4: + groups: + spine: + node_count: 4 + name_template: spine{node_num} + attrs: + role: spine + leaf: + node_count: 16 + name_template: leaf{node_num} + attrs: + role: leaf + adjacency: + - source: /leaf + target: /spine + pattern: mesh + link_params: + capacity: 3200 + cost: 1 + +network: + groups: + metro1/pop[1-2]: + use_blueprint: Clos_L16_S4 + attrs: + metro_name: new-york + node_type: pop + +traffic_matrix_set: + baseline: + - source: "^metro1/pop1/.*" + sink: "^metro1/pop2/.*" + demand: 15000.0 + mode: pairwise + flow_policy_config: TE_WCMP_UNLIM + +failure_policy_set: + single_link: + modes: + - weight: 1.0 + rules: + - entity_scope: link + rule_type: choice + count: 1 + +workflow: + - step_type: NetworkStats + name: network_statistics + + - step_type: MaximumSupportedDemand + name: msd_baseline + matrix_name: baseline + acceptance_rule: hard + alpha_start: 1.0 + growth_factor: 2.0 + resolution: 0.05 + + - step_type: TrafficMatrixPlacement + name: tm_placement + seed: 42 + matrix_name: baseline + failure_policy: single_link + iterations: 1000 + parallelism: 7 + baseline: true + alpha_from_step: msd_baseline + alpha_from_field: data.alpha_star +``` + +## Example 10: Group-By Selectors + +Grouping nodes by attribute for demand generation. 
+ +```yaml +network: + nodes: + dc1_srv1: {attrs: {dc: dc1, role: server}} + dc1_srv2: {attrs: {dc: dc1, role: server}} + dc2_srv1: {attrs: {dc: dc2, role: server}} + dc2_srv2: {attrs: {dc: dc2, role: server}} + links: + - source: dc1_srv1 + target: dc2_srv1 + link_params: {capacity: 100} + - source: dc1_srv2 + target: dc2_srv2 + link_params: {capacity: 100} + +traffic_matrix_set: + inter_dc: + - source: + group_by: dc + sink: + group_by: dc + demand: 100 + mode: pairwise +``` + +**Result**: Traffic flows grouped by datacenter attribute + +## Example 11: Advanced Failure Policies + +Multiple weighted failure modes with conditions and weighted sampling. + +```yaml +network: + nodes: + core1: {attrs: {role: core, capacity_gbps: 1000}} + core2: {attrs: {role: core, capacity_gbps: 1000}} + edge1: {attrs: {role: edge, capacity_gbps: 400, region: west}} + edge2: {attrs: {role: edge, capacity_gbps: 400, region: east}} + edge3: {attrs: {role: edge, capacity_gbps: 200, region: west}} + links: + - source: core1 + target: core2 + link_params: {capacity: 1000, risk_groups: [RG_core]} + - source: core1 + target: edge1 + link_params: {capacity: 400, risk_groups: [RG_west]} + - source: core1 + target: edge3 + link_params: {capacity: 200, risk_groups: [RG_west]} + - source: core2 + target: edge2 + link_params: {capacity: 400, risk_groups: [RG_east]} + +risk_groups: + - name: RG_core + attrs: {tier: core, distance_km: 50} + - name: RG_west + attrs: {tier: edge, distance_km: 500} + - name: RG_east + attrs: {tier: edge, distance_km: 800} + +failure_policy_set: + mixed_failures: + fail_risk_groups: true # Expand to shared-risk entities + fail_risk_group_children: false + modes: + # 40% chance: fail 1 edge node weighted by capacity + - weight: 0.4 + attrs: {scenario: edge_failure} + rules: + - entity_scope: node + rule_type: choice + count: 1 + conditions: + - attr: role + operator: "==" + value: edge + logic: and + weight_by: capacity_gbps + + # 35% chance: fail 1 risk group weighted by distance + - weight: 0.35 + attrs: {scenario: srlg_failure} + rules: + - entity_scope: risk_group + rule_type: choice + count: 1 + weight_by: distance_km + + # 15% chance: fail all west-region nodes + - weight: 0.15 + attrs: {scenario: regional_outage} + rules: + - entity_scope: node + rule_type: all + conditions: + - attr: region + operator: "==" + value: west + + # 10% chance: random link failures (5% each) + - weight: 0.1 + attrs: {scenario: random_link} + rules: + - entity_scope: link + rule_type: random + probability: 0.05 + +workflow: + - step_type: MaxFlow + name: failure_analysis + source: "^(edge[1-3])$" + sink: "^(edge[1-3])$" + mode: pairwise + failure_policy: mixed_failures + iterations: 1000 + seed: 42 +``` + +**Result**: 5 nodes, 4 links, 3 risk groups, failure policy with 4 weighted modes + +## Example 12: Hardware Components and Cost Analysis + +Using the components library for cost/power modeling. 
+ +```yaml +components: + SpineRouter: + component_type: chassis + description: "64-port spine switch" + capex: 55000.0 + power_watts: 2000.0 + power_watts_max: 3000.0 + capacity: 102400.0 + ports: 64 + + LeafRouter: + component_type: chassis + description: "48-port leaf switch" + capex: 25000.0 + power_watts: 800.0 + power_watts_max: 1200.0 + capacity: 38400.0 + ports: 48 + + Optic400G: + component_type: optic + description: "400G DR4 pluggable" + capex: 3000.0 + power_watts: 16.0 + capacity: 400.0 + +network: + name: "datacenter-fabric" + version: "2.0" + + groups: + spine: + node_count: 2 + name_template: "spine-{node_num}" + attrs: + hardware: + component: SpineRouter + count: 1 + leaf: + node_count: 4 + name_template: "leaf-{node_num}" + attrs: + hardware: + component: LeafRouter + count: 1 + + adjacency: + - source: /leaf + target: /spine + pattern: mesh + link_count: 2 # 2 parallel links per pair + link_params: + capacity: 800 + cost: 1 + attrs: + hardware: + source: + component: Optic400G + count: 2 + target: + component: Optic400G + count: 2 + exclusive: true # Dedicated optics (rounds up count) + +workflow: + - step_type: NetworkStats + name: stats + + - step_type: CostPower + name: cost_analysis + include_disabled: false + aggregation_level: 1 # Aggregate by top-level group +``` + +**Result**: 6 nodes, 16 links (4x2x2), component-based cost/power analysis + +## Example 13: YAML Anchors for Reuse + +Using `vars` section for DRY configuration. + +```yaml +vars: + default_link: &link_cfg + capacity: 100 + cost: 1 + spine_attrs: &spine_attrs + role: spine + tier: 2 + leaf_attrs: &leaf_attrs + role: leaf + tier: 1 + +network: + groups: + spine: + node_count: 2 + name_template: "spine-{node_num}" + attrs: + <<: *spine_attrs # Merge anchor + region: east + + leaf: + node_count: 4 + name_template: "leaf-{node_num}" + attrs: + <<: *leaf_attrs + region: east + + adjacency: + - source: /leaf + target: /spine + pattern: mesh + link_params: + <<: *link_cfg # Reuse link config + attrs: + link_type: fabric +``` + +**Result**: Anchors resolved during YAML parsing; cleaner, less repetitive config + +## Example 14: One-to-One Adjacency and Zip Expansion + +Demonstrating pairwise connectivity patterns. + +```yaml +network: + groups: + # 4 servers, 2 switches - compatible for one_to_one (4 is multiple of 2) + server[1-4]: + node_count: 1 + name_template: "srv" + switch[1-2]: + node_count: 1 + name_template: "sw" + + adjacency: + # one_to_one: server1->switch1, server2->switch2, server3->switch1, server4->switch2 + - source: /server + target: /switch + pattern: one_to_one + link_params: + capacity: 100 + + # zip expansion: pairs variables by index (equal-length lists required) + - source: "server${idx}" + target: "switch${sw}" + expand_vars: + idx: [1, 2] + sw: [1, 2] + expansion_mode: zip # server1->switch1, server2->switch2 + pattern: one_to_one + link_params: + capacity: 50 + cost: 2 +``` + +**Result**: Demonstrates one_to_one modulo wrap and zip expansion mode + +## Example 15: Traffic Demands with Variable Expansion and Group Modes + +Advanced demand configuration. 
+ +```yaml +network: + nodes: + dc1_leaf1: {attrs: {dc: dc1, role: leaf}} + dc1_leaf2: {attrs: {dc: dc1, role: leaf}} + dc2_leaf1: {attrs: {dc: dc2, role: leaf}} + dc2_leaf2: {attrs: {dc: dc2, role: leaf}} + dc3_leaf1: {attrs: {dc: dc3, role: leaf}} + links: + - {source: dc1_leaf1, target: dc2_leaf1, link_params: {capacity: 100}} + - {source: dc1_leaf2, target: dc2_leaf2, link_params: {capacity: 100}} + - {source: dc2_leaf1, target: dc3_leaf1, link_params: {capacity: 100}} + +traffic_matrix_set: + # Variable expansion in demands + inter_dc: + - source: "^${src}/.*" + sink: "^${dst}/.*" + demand: 50 + expand_vars: + src: [dc1, dc2] + dst: [dc2, dc3] + expansion_mode: zip # dc1->dc2, dc2->dc3 + + # Group modes with group_by + grouped: + - source: + group_by: dc + sink: + group_by: dc + demand: 100 + mode: pairwise + group_mode: per_group # Separate demand per group pair + priority: 1 + demand_placed: 10.0 # 10 units pre-placed + flow_policy_config: SHORTEST_PATHS_WCMP +``` + +**Result**: Shows variable expansion in demands, group_mode, priority, demand_placed + +## Example 16: Hierarchical Risk Groups + +Nested risk group structure with children. + +```yaml +network: + nodes: + rack1_srv1: {risk_groups: [Rack1_Card1]} + rack1_srv2: {risk_groups: [Rack1_Card1]} + rack1_srv3: {risk_groups: [Rack1_Card2]} + rack2_srv1: {risk_groups: [Rack2]} + links: + - {source: rack1_srv1, target: rack2_srv1, link_params: {capacity: 100}} + - {source: rack1_srv2, target: rack2_srv1, link_params: {capacity: 100}} + - {source: rack1_srv3, target: rack2_srv1, link_params: {capacity: 100}} + +risk_groups: + - name: Rack1 + attrs: {location: "DC1-Row1"} + children: + - name: Rack1_Card1 + attrs: {slot: 1} + - name: Rack1_Card2 + attrs: {slot: 2} + - name: Rack2 + disabled: false + attrs: {location: "DC1-Row2"} + +failure_policy_set: + hierarchical: + fail_risk_groups: true + fail_risk_group_children: true # Failing Rack1 also fails Card1, Card2 + modes: + - weight: 1.0 + rules: + - entity_scope: risk_group + rule_type: choice + count: 1 + conditions: + - attr: location + operator: contains # String contains + value: "DC1" +``` + +**Result**: Hierarchical risk groups with recursive child failure expansion + +## Example 17: Additional Selector Operators + +Demonstrating all condition operators. 
+ +```yaml +network: + nodes: + srv1: {attrs: {tier: 1, tags: [prod, web], region: null}} + srv2: {attrs: {tier: 2, tags: [prod, db], region: east}} + srv3: {attrs: {tier: 3, tags: [dev], region: west}} + srv4: {attrs: {tier: 2}} + links: + - {source: srv1, target: srv2, link_params: {capacity: 100}} + - {source: srv2, target: srv3, link_params: {capacity: 100}} + - {source: srv3, target: srv4, link_params: {capacity: 100}} + +traffic_matrix_set: + filtered: + # Tier comparison operators + - source: + match: + conditions: + - attr: tier + operator: ">=" + value: 2 + sink: + match: + conditions: + - attr: tier + operator: "<" + value: 3 + demand: 50 + mode: pairwise + + # List membership operators + - source: + match: + conditions: + - attr: region + operator: in + value: [east, west] + sink: + match: + conditions: + - attr: tags + operator: contains # List contains value + value: prod + demand: 25 + mode: combine + + # Existence operators + - source: + match: + conditions: + - attr: region + operator: any_value # Attribute exists and not null + sink: + match: + conditions: + - attr: region + operator: no_value # Attribute missing or null + demand: 10 + mode: pairwise +``` + +**Result**: Demonstrates `>=`, `<`, `in`, `contains`, `any_value`, `no_value` operators diff --git a/.claude/skills/netgraph-dsl/references/REFERENCE.md b/.claude/skills/netgraph-dsl/references/REFERENCE.md new file mode 100644 index 0000000..b4cdeb4 --- /dev/null +++ b/.claude/skills/netgraph-dsl/references/REFERENCE.md @@ -0,0 +1,895 @@ +# NetGraph DSL Reference + +Complete reference documentation for the NetGraph scenario DSL. + +> For a quick start guide and common patterns, see the main [SKILL.md](../SKILL.md). +> For complete working examples, see [EXAMPLES.md](EXAMPLES.md). + +## Syntax Overview + +### Template and Expansion Syntaxes + +NetGraph DSL uses three distinct template syntaxes in different contexts: + +| Syntax | Example | Where | Purpose | +|--------|---------|-------|---------| +| **Brackets** `[1-3]` | `dc[1-3]/rack[a,b]` | Group names, risk groups | Generate multiple entities | +| **Variables** `$var` | `pod${p}/leaf` | Adjacency, demands | Template expansion | +| **Format** `{node_num}` | `srv-{node_num}` | `name_template` | Node naming | + +**Important**: These syntaxes are NOT interchangeable: + +- `[1-3]` works in group names and risk groups (definitions and memberships), not components +- `${var}` requires `expand_vars` dict; only works in adjacency `source`/`target` and demand `source`/`sink` +- `{node_num}` is the only placeholder available in `name_template` (Python format syntax) + +### Endpoint Naming Conventions + +| Context | Fields | Terminology | +|---------|--------|-------------| +| Links, adjacency, link_overrides | `source`, `target` | Graph edge | +| Traffic demands, workflow steps | `source`, `sink` | Max-flow | + +**Why different?** Links use graph terminology (`target` = edge destination). Traffic demands and analysis use max-flow terminology (`sink` = flow destination). + +### Expansion Controls in Traffic Demands + +Traffic demands have three expansion-related fields: + +| Field | Values | Default | Purpose | +|-------|--------|---------|---------| +| `mode` | `combine`, `pairwise` | `combine` | How source/sink nodes pair | +| `group_mode` | `flatten`, `per_group`, `group_pairwise` | `flatten` | How grouped nodes expand | +| `expansion_mode` | `cartesian`, `zip` | `cartesian` | How `expand_vars` combine | + +See detailed sections below for each mechanism. 
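+
+A compact sketch combining the three controls (matrix name `demo` is illustrative; the pattern mirrors Example 15 in [EXAMPLES.md](EXAMPLES.md)):
+
+```yaml
+traffic_matrix_set:
+  demo:
+    # expansion_mode: how expand_vars lists combine into concrete demands
+    - source: "^${src}/.*"
+      sink: "^${dst}/.*"
+      demand: 50
+      expand_vars:
+        src: [dc1, dc2]
+        dst: [dc2, dc3]
+      expansion_mode: zip      # pairs by index: dc1->dc2, dc2->dc3
+    # mode + group_mode: how selected and grouped nodes pair up
+    - source:
+        group_by: dc
+      sink:
+        group_by: dc
+      demand: 100
+      mode: pairwise           # individual flows between node pairs
+      group_mode: per_group    # separate demands for each group
+```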
+ +## Top-Level Keys + +| Key | Required | Purpose | +|-----|----------|---------| +| `network` | Yes | Network topology (nodes, links, groups, adjacency) | +| `blueprints` | No | Reusable topology templates | +| `components` | No | Hardware component library | +| `risk_groups` | No | Failure correlation groups | +| `vars` | No | YAML anchors for value reuse | +| `traffic_matrix_set` | No | Traffic demand definitions | +| `failure_policy_set` | No | Failure simulation policies | +| `workflow` | No | Analysis execution steps | +| `seed` | No | Master seed for reproducible random operations | + +## Network Metadata + +```yaml +network: + name: "My Network" # Optional: network name + version: "1.0" # Optional: version string or number + nodes: ... + links: ... +``` + +## Network Topology + +### Direct Node Definitions + +```yaml +network: + nodes: + Seattle: + disabled: false # Optional: disable node + risk_groups: ["RG1"] # Optional: failure correlation + attrs: # Optional: custom attributes + coords: [47.6062, -122.3321] + role: core + hardware: + component: "SpineRouter" + count: 1 +``` + +**Allowed node keys**: `disabled`, `attrs`, `risk_groups` + +### Direct Link Definitions + +```yaml +network: + links: + - source: Seattle + target: Portland + link_params: # Required wrapper + capacity: 100.0 + cost: 10 + disabled: false + risk_groups: ["RG_Seattle_Portland"] + attrs: + distance_km: 280 + media_type: fiber + hardware: + source: + component: "800G-ZR+" + count: 1 + exclusive: false # Optional: unsharable usage (rounds up) + target: + component: "800G-ZR+" + count: 1 + link_count: 2 # Optional: parallel links +``` + +**Allowed link keys**: `source`, `target`, `link_params`, `link_count` + +**Allowed link_params keys**: `capacity`, `cost`, `disabled`, `risk_groups`, `attrs` + +**Link hardware per-end fields**: `component`, `count`, `exclusive` + +### Node Groups + +Groups create multiple nodes from a template: + +```yaml +network: + groups: + servers: + node_count: 4 + name_template: "srv-{node_num}" + disabled: false + risk_groups: ["RG_Servers"] + attrs: + role: compute +``` + +Creates: `servers/srv-1`, `servers/srv-2`, `servers/srv-3`, `servers/srv-4` + +**Allowed group keys**: `node_count`, `name_template`, `attrs`, `disabled`, `risk_groups` + +### Bracket Expansion + +Create multiple similar groups using bracket notation: + +```yaml +network: + groups: + dc[1-3]/rack[a,b]: + node_count: 4 + name_template: "srv-{node_num}" +``` + +**Expansion types**: + +- Numeric ranges: `[1-4]` -> 1, 2, 3, 4 +- Explicit lists: `[a,b,c]` -> a, b, c +- Mixed: `[1,3,5-7]` -> 1, 3, 5, 6, 7 + +Multiple brackets create Cartesian product. + +**Scope**: Bracket expansion applies to: + +- **Group names** under `network.groups` and `blueprints.*.groups` +- **Risk group names** in top-level `risk_groups` definitions (including children) +- **Risk group membership arrays** on nodes, links, and groups + +It does NOT apply to: component names, direct node names (`network.nodes`), or other string fields. + +**Risk group expansion examples**: + +```yaml +# Definition expansion - creates DC1_Power, DC2_Power, DC3_Power +risk_groups: + - name: "DC[1-3]_Power" + +# Membership expansion - assigns to RG1, RG2, RG3 +network: + nodes: + Server: + risk_groups: ["RG[1-3]"] +``` + +### Path Patterns + +Path patterns in selectors and overrides are **regex patterns** matched against node names using `re.match()` (anchored at start). 
+ +**Key behaviors**: + +- Paths are matched from the **start** of the node name (no implicit `.*` prefix) +- Node names are hierarchical: `group/subgroup/node-1` +- Leading `/` is stripped before matching (has no functional effect) +- All paths are relative to the current scope + +**Examples**: + +| Pattern | Matches | Does NOT Match | +|---------|---------|----------------| +| `leaf` | `leaf/leaf-1`, `leaf/leaf-2` | `pod1/leaf/leaf-1` | +| `pod1/leaf` | `pod1/leaf/leaf-1` | `pod2/leaf/leaf-1` | +| `.*leaf` | `leaf/leaf-1`, `pod1/leaf/leaf-1` | (matches any path containing "leaf") | +| `pod[12]/leaf` | `pod1/leaf/leaf-1`, `pod2/leaf/leaf-1` | `pod3/leaf/leaf-1` | + +**Path scoping**: + +- **At top-level** (`network.adjacency`): Parent path is empty, so patterns match against full node names. `/leaf` and `leaf` are equivalent. +- **In blueprints**: Paths are relative to instantiation path. If `pod1` uses a blueprint with `source: /leaf`, the pattern becomes `pod1/leaf`. + +### Adjacency Rules + +```yaml +network: + adjacency: + - source: /leaf + target: /spine + pattern: mesh + link_params: + capacity: 100 + cost: 1 + link_count: 1 +``` + +**Patterns**: + +- `mesh`: Full connectivity (every source to every target) +- `one_to_one`: Pairwise with modulo wrap. Sizes must have multiple factor (4-to-2 OK, 3-to-2 ERROR) + +### Adjacency Selectors + +Filter nodes using attribute conditions: + +```yaml +network: + adjacency: + - source: + path: "/datacenter" + match: + logic: and # "and" or "or" (default: "or") + conditions: + - attr: role + operator: "==" + value: leaf + - attr: tier + operator: ">=" + value: 2 + target: + path: "/datacenter" + match: + conditions: + - attr: role + operator: "==" + value: spine + pattern: mesh + link_params: + capacity: 100 +``` + +**Condition operators**: + +| Operator | Description | +|----------|-------------| +| `==` | Equal | +| `!=` | Not equal | +| `<`, `<=`, `>`, `>=` | Numeric comparison | +| `contains` | String/list contains value | +| `not_contains` | String/list does not contain | +| `in` | Value in list | +| `not_in` | Value not in list | +| `any_value` | Attribute exists and is not None | +| `no_value` | Attribute missing or None | + +### Variable Expansion in Adjacency + +Use `$var` or `${var}` syntax in adjacency `source`/`target` fields: + +```yaml +network: + adjacency: + - source: "plane${p}/rack" + target: "spine${s}" + expand_vars: + p: [1, 2] + s: [1, 2, 3] + expansion_mode: cartesian + pattern: mesh + link_params: + capacity: 100 +``` + +**Expansion modes**: + +- `cartesian` (default): All combinations (2 * 3 = 6 expansions) +- `zip`: Pair by index (lists must have equal length) + +**Expansion limit**: Maximum 10,000 expansions per template. Exceeding this raises an error. + +## Blueprints + +Reusable topology templates: + +```yaml +blueprints: + clos_pod: + groups: + leaf: + node_count: 4 + name_template: "leaf-{node_num}" + attrs: + role: leaf + spine: + node_count: 2 + name_template: "spine-{node_num}" + attrs: + role: spine + adjacency: + - source: /leaf + target: /spine + pattern: mesh + link_params: + capacity: 100 + cost: 1 +``` + +### Blueprint Usage + +```yaml +network: + groups: + pod1: + use_blueprint: clos_pod + attrs: # Merged into all subgroup nodes + location: datacenter_east + parameters: # Override blueprint defaults + leaf.node_count: 6 + spine.name_template: "core-{node_num}" + leaf.attrs.priority: high +``` + +Creates: `pod1/leaf/leaf-1`, `pod1/spine/spine-1`, etc. 
+ +**Parameter override syntax**: `<group>.<field>` or `<group>.attrs.<key>` (dotted paths, e.g. `leaf.node_count: 6`, `leaf.attrs.priority: high`) + +### Blueprint Path Scoping + +All paths are relative to the current scope. In blueprints, paths resolve relative to the instantiation path: + +```yaml +blueprints: + my_bp: + adjacency: + - source: /leaf # Becomes pod1/leaf when instantiated as pod1 + target: spine # Also becomes pod1/spine (leading / is optional) + pattern: mesh +``` + +**Note**: Leading `/` is stripped and has no functional effect. Both `/leaf` and `leaf` produce the same result. The `/` serves as a visual convention indicating "from scope root". + +## Node and Link Overrides + +Modify nodes/links after initial creation: + +```yaml +network: + node_overrides: + - path: "^pod1/spine/.*$" # Regex pattern + disabled: true + risk_groups: ["Maintenance"] + attrs: + maintenance_mode: active + + link_overrides: + - source: "^pod1/leaf/.*$" + target: "^pod1/spine/.*$" + any_direction: true # Match both directions (default: true) + link_params: + capacity: 200 + attrs: + upgraded: true +``` + +**Link override fields**: + +- `source`, `target`: Regex patterns for matching link endpoints +- `any_direction`: If `true` (default), matches both A→B and B→A directions +- `link_params`: Parameters to override (`capacity`, `cost`, `disabled`, `risk_groups`, `attrs`) + +**Processing order**: + +1. Groups and direct nodes created +2. **Node overrides applied** +3. Adjacency and blueprint adjacencies expanded +4. Direct links created +5. **Link overrides applied** + +## Components Library + +Define hardware components for cost/power modeling: + +```yaml +components: + SpineRouter: + component_type: chassis + description: "64-port spine router" + capex: 50000.0 # Cost per instance + power_watts: 2500.0 # Typical power usage + power_watts_max: 3000.0 # Peak power usage + capacity: 64000.0 # Gbps + ports: 64 + attrs: + vendor: "Example Corp" + children: + LineCard400G: + component_type: linecard + capex: 8000.0 + power_watts: 400.0 + capacity: 12800.0 + ports: 32 + count: 4 + + Optic400G: + component_type: optic + description: "400G pluggable optic" + capex: 2500.0 + power_watts: 12.0 + capacity: 400.0 +``` + +**Component fields**: + +| Field | Description | +|-------|-------------| +| `component_type` | Category: `chassis`, `linecard`, `optic`, etc. 
| +| `description` | Human-readable description | +| `capex` | Cost per instance | +| `power_watts` | Typical power consumption (watts) | +| `power_watts_max` | Peak power consumption (watts) | +| `capacity` | Capacity in Gbps | +| `ports` | Number of ports | +| `count` | Instance count (for children) | +| `attrs` | Additional metadata | +| `children` | Nested child components | + +**Usage in nodes/links**: + +```yaml +network: + nodes: + spine-1: + attrs: + hardware: + component: "SpineRouter" + count: 2 +``` + +## Risk Groups + +Define hierarchical failure correlation: + +```yaml +risk_groups: + - name: "Rack1" + disabled: false # Optional: disable on load + attrs: + location: "DC1_Floor2" + children: + - name: "Card1.1" + children: + - name: "PortGroup1.1.1" + - name: "Card1.2" + - name: "PowerSupplyA" + attrs: + type: "power_infrastructure" +``` + +**Risk group fields**: `name` (required), `disabled`, `attrs`, `children` + +## Traffic Demands + +```yaml +traffic_matrix_set: + production: + - source: "^dc1/.*" + sink: "^dc2/.*" + demand: 1000 + demand_placed: 0.0 # Optional: pre-placed portion + mode: combine + group_mode: flatten # How to handle grouped nodes + priority: 1 + flow_policy_config: SHORTEST_PATHS_ECMP + attrs: + service: web + + - source: + path: "^datacenter/.*" + match: + conditions: + - attr: role + operator: "==" + value: leaf + sink: + group_by: metro + demand: 500 + mode: pairwise + priority: 2 +``` + +### Traffic Modes + +| Mode | Description | +|------|-------------| +| `combine` | Single aggregate flow between source/sink groups via pseudo nodes | +| `pairwise` | Individual flows between all source-sink node pairs | + +### Group Modes + +When selectors use `group_by`, `group_mode` controls how grouped nodes produce demands: + +| Group Mode | Description | +|------------|-------------| +| `flatten` | Flatten all groups into single source/sink sets (default) | +| `per_group` | Create separate demands for each group | +| `group_pairwise` | Create pairwise demands between groups | + +### Flow Policies + +| Policy | Description | +|--------|-------------| +| `SHORTEST_PATHS_ECMP` | IP/IGP routing with equal-split ECMP | +| `SHORTEST_PATHS_WCMP` | IP/IGP routing with weighted ECMP (by capacity) | +| `TE_WCMP_UNLIM` | MPLS-TE / SDN with unlimited tunnels | +| `TE_ECMP_16_LSP` | MPLS-TE with 16 ECMP LSPs per demand | +| `TE_ECMP_UP_TO_256_LSP` | MPLS-TE with up to 256 ECMP LSPs | + +### Variable Expansion in Demands + +```yaml +traffic_matrix_set: + inter_dc: + - source: "^${src_dc}/.*" + sink: "^${dst_dc}/.*" + demand: 100 + expand_vars: + src_dc: [dc1, dc2] + dst_dc: [dc2, dc3] + expansion_mode: cartesian +``` + +## Failure Policies + +Failure policies define how nodes, links, and risk groups fail during Monte Carlo simulations. + +### Structure + +```yaml +failure_policy_set: + policy_name: + attrs: {} # Optional metadata + fail_risk_groups: false # Expand to shared-risk entities + fail_risk_group_children: false # Fail child risk groups recursively + modes: # Required: weighted failure modes + - weight: 1.0 # Mode selection weight + attrs: {} # Optional mode metadata + rules: [] # Rules applied when mode is selected +``` + +### Mode Selection + +Exactly one mode is selected per failure iteration based on normalized weights: + +```yaml +modes: + - weight: 0.3 # 30% probability of selection + rules: [...] + - weight: 0.5 # 50% probability + rules: [...] + - weight: 0.2 # 20% probability + rules: [...] 
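+  # Weights are relative: after normalization, 3 / 5 / 2 would give the same 30/50/20 split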
+``` + +- Modes with zero or negative weight are never selected +- If all weights are non-positive, falls back to the first mode + +### Rule Structure + +```yaml +rules: + - entity_scope: link # Required: node, link, or risk_group + conditions: [] # Optional: filter conditions + logic: or # Condition logic: and | or (default: or) + rule_type: all # Selection: all | choice | random (default: all) + probability: 1.0 # For random: [0.0, 1.0] + count: 1 # For choice: number to select + weight_by: null # For choice: attribute for weighted sampling +``` + +### Rule Types + +| Type | Description | Parameters | +|------|-------------|------------| +| `all` | Select all matching entities | None | +| `choice` | Random sample from matches | `count`, optional `weight_by` | +| `random` | Each match selected with probability | `probability` in [0, 1] | + +### Condition Logic + +When multiple conditions are specified: + +| Logic | Behavior | +|-------|----------| +| `or` (default) | Entity matches if **any** condition is true | +| `and` | Entity matches if **all** conditions are true | + +If no conditions are specified, all entities of the given scope match. + +### Weighted Sampling (choice mode) + +When `weight_by` is set for `rule_type: choice`: + +```yaml +- entity_scope: link + rule_type: choice + count: 2 + weight_by: capacity # Sample proportional to capacity attribute +``` + +- Uses Efraimidis-Spirakis algorithm for weighted sampling without replacement +- Entities with non-positive or missing weights are sampled uniformly after positive-weight items +- Falls back to uniform sampling if all weights are non-positive + +### Risk Group Expansion + +```yaml +fail_risk_groups: true +``` + +When enabled, after initial failures are selected, expands to fail all entities that share a risk group with any failed entity (BFS traversal). + +```yaml +fail_risk_group_children: true +``` + +When enabled and a risk_group is marked as failed, recursively fails all child risk groups. 
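+
+A minimal sketch with both expansion flags enabled (policy name `srlg_cascade` is illustrative; Example 16 in [EXAMPLES.md](EXAMPLES.md) shows a complete scenario):
+
+```yaml
+failure_policy_set:
+  srlg_cascade:
+    fail_risk_groups: true          # expand failures across shared risk groups
+    fail_risk_group_children: true  # a failed risk group also fails its children
+    modes:
+      - weight: 1.0
+        rules:
+          - entity_scope: risk_group
+            rule_type: choice
+            count: 1
+```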
+ +### Complete Example + +```yaml +failure_policy_set: + weighted_modes: + attrs: + description: "Balanced failure simulation" + fail_risk_groups: true + fail_risk_group_children: false + modes: + # 30% chance: fail 1 risk group weighted by distance + - weight: 0.3 + rules: + - entity_scope: risk_group + rule_type: choice + count: 1 + weight_by: distance_km + + # 50% chance: fail 1 non-core node weighted by capacity + - weight: 0.5 + rules: + - entity_scope: node + rule_type: choice + count: 1 + conditions: + - attr: role + operator: "!=" + value: core + logic: and + weight_by: attached_capacity_gbps + + # 20% chance: random link failures with 1% probability each + - weight: 0.2 + rules: + - entity_scope: link + rule_type: random + probability: 0.01 +``` + +### Entity Scopes + +| Scope | Description | +|-------|-------------| +| `node` | Match against node attributes | +| `link` | Match against link attributes | +| `risk_group` | Match against risk group names/attributes | + +## Workflow Steps + +```yaml +workflow: + - step_type: NetworkStats + name: baseline_stats + + - step_type: MaximumSupportedDemand + name: msd_baseline + matrix_name: production + acceptance_rule: hard + alpha_start: 1.0 + growth_factor: 2.0 + resolution: 0.05 + + - step_type: TrafficMatrixPlacement + name: tm_placement + matrix_name: production + failure_policy: weighted_modes + iterations: 1000 + parallelism: 8 + alpha_from_step: msd_baseline + alpha_from_field: data.alpha_star + + - step_type: MaxFlow + name: capacity_matrix + source: "^(dc[1-3])$" + sink: "^(dc[1-3])$" + mode: pairwise + failure_policy: single_link + iterations: 500 + baseline: true + + - step_type: CostPower + name: cost_analysis + include_disabled: true + aggregation_level: 2 +``` + +### Step Types + +| Type | Description | +|------|-------------| +| `BuildGraph` | Export graph to JSON (node-link format) | +| `NetworkStats` | Compute basic statistics (node/link counts, degrees) | +| `MaxFlow` | Monte Carlo capacity analysis between node groups | +| `TrafficMatrixPlacement` | Monte Carlo demand placement for a named matrix | +| `MaximumSupportedDemand` | Search for maximum supportable demand scaling (`alpha_star`) | +| `CostPower` | Cost and power estimation from components | + +### BuildGraph Parameters + +```yaml +- step_type: BuildGraph + name: build_graph + add_reverse: true # Add reverse edges for bidirectional connectivity +``` + +### NetworkStats Parameters + +```yaml +- step_type: NetworkStats + name: stats + include_disabled: false # Include disabled nodes/links in stats +``` + +### MaxFlow Parameters + +```yaml +- step_type: MaxFlow + name: capacity_analysis + source: "^servers/.*" + sink: "^storage/.*" + mode: combine # combine | pairwise + failure_policy: policy_name + iterations: 1000 + parallelism: auto # or integer + baseline: true # Include baseline (no failures) iteration + shortest_path: false # Restrict to shortest paths only + require_capacity: true # Path selection considers capacity + flow_placement: PROPORTIONAL # PROPORTIONAL | EQUAL_BALANCED + store_failure_patterns: false + include_flow_details: false # Cost distribution per flow + include_min_cut: false # Min-cut edge list per flow +``` + +### TrafficMatrixPlacement Parameters + +```yaml +- step_type: TrafficMatrixPlacement + name: tm_placement + matrix_name: default + failure_policy: policy_name + iterations: 100 + parallelism: auto + placement_rounds: auto # or integer + baseline: false + include_flow_details: true + include_used_edges: false + 
store_failure_patterns: false + # Alpha scaling options + alpha: 1.0 # Explicit scaling factor + # Or reference another step's output: + alpha_from_step: msd_step_name + alpha_from_field: data.alpha_star +``` + +### MaximumSupportedDemand Parameters + +```yaml +- step_type: MaximumSupportedDemand + name: msd + matrix_name: default + acceptance_rule: hard # Currently only "hard" supported + alpha_start: 1.0 # Starting alpha for search + growth_factor: 2.0 # Growth factor for bracketing (> 1.0) + alpha_min: 0.000001 # Minimum alpha bound + alpha_max: 1000000000.0 # Maximum alpha bound + resolution: 0.01 # Convergence resolution + max_bracket_iters: 32 + max_bisect_iters: 32 + seeds_per_alpha: 1 # Seeds per alpha (majority vote) + placement_rounds: auto +``` + +### CostPower Parameters + +```yaml +- step_type: CostPower + name: cost_power + include_disabled: false # Include disabled nodes/links + aggregation_level: 2 # Hierarchy level for aggregation (split by /) +``` + +## Selector Reference + +Selectors work across adjacency, demands, and workflows. + +### String Pattern (Regex) + +```yaml +source: "^dc1/spine/.*$" +``` + +Patterns use Python `re.match()`, anchored at start. + +### Selector Object + +```yaml +source: + path: "^dc1/.*" # Regex on node.name + group_by: metro # Group by attribute value + match: # Filter by conditions + logic: and + conditions: + - attr: role + operator: "==" + value: spine + active_only: true # Exclude disabled nodes +``` + +At least one of `path`, `group_by`, or `match` must be specified. + +### Context-Aware Defaults for active_only + +The `active_only` field has context-dependent defaults: + +| Context | Default | Rationale | +|---------|---------|-----------| +| `adjacency` | `false` | Links to disabled nodes are created | +| `override` | `false` | Overrides can target disabled nodes | +| `demand` | `true` | Traffic only between active nodes | +| `workflow` | `true` | Analysis uses active nodes only | + +### Capture Groups for Labeling + +```yaml +# Single capture group +source: "^(dc[1-3])/.*" # Groups: dc1, dc2, dc3 + +# Multiple capture groups join with | +source: "^(dc\\d+)/(spine|leaf)/.*" # Groups: dc1|spine, dc1|leaf +``` + +## YAML Anchors + +Use `vars` section for reusable values: + +```yaml +vars: + default_cap: &cap 10000 + base_attrs: &attrs {cost: 100, region: "dc1"} + spine_config: &spine_cfg + hardware: + component: "SpineRouter" + count: 1 + +network: + nodes: + spine-1: {attrs: {<<: *attrs, <<: *spine_cfg, capacity: *cap}} + spine-2: {attrs: {<<: *attrs, <<: *spine_cfg, capacity: *cap, region: "dc2"}} +``` + +Anchors are resolved during YAML parsing, before schema validation. diff --git a/CHANGELOG.md b/CHANGELOG.md index d94e089..e0ea978 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,26 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [0.13.0] - 2025-12-19 + +### Changed + +- **BREAKING**: `TrafficDemand.source_path`/`sink_path` renamed to `source`/`sink`; now accept string patterns or selector dicts with `path`, `group_by`, and `match` fields +- **BREAKING**: Removed `attr:` magic string syntax; use `{"group_by": "<attr>"}` dict selectors instead +- **BREAKING**: Removed `ngraph.utils.nodes` module; use `ngraph.dsl.selectors` for node selection +- **Unified selector system**: `ngraph.dsl.selectors` provides `normalize_selector()` and `select_nodes()` for consistent node selection across demands, workflows, adjacency, and overrides +- **Variable expansion in demands**: `TrafficDemand` supports `expand_vars` with `$var`/`${var}` syntax and `expansion_mode` (cartesian/zip) +- **Match conditions**: Selector `match` field supports 12 operators: `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `in`, `not_in`, `any_value`, `no_value` +- **Context-aware defaults**: `active_only` defaults to `True` for demands/workflows, `False` for adjacency/overrides + +### Added + +- `ngraph.dsl.selectors` module: `NodeSelector`, `MatchSpec`, `Condition` schema classes +- `ngraph.dsl.expansion` module: `ExpansionSpec`, `expand_templates()`, `substitute_vars()`, `expand_name_patterns()`, `expand_risk_group_refs()` +- **Bracket expansion in risk groups**: `[1-3]` and `[a,b,c]` patterns now expand in risk group definitions (including children) and membership arrays on nodes, links, and groups +- `TrafficDemand.group_mode` field for node group handling (`flatten`, `per_group`, `group_pairwise`) +- `.claude/skills/netgraph-dsl/`: Claude skill with DSL syntax reference and examples + ## [0.12.3] - 2025-12-11 ### Changed diff --git a/README.md b/README.md index 322ca59..fe14050 100644 --- a/README.md +++ b/README.md @@ -130,8 +130,8 @@ network: # Define traffic matrix traffic_matrix_set: global_traffic: - - source_path: ^site1/leaf/ - sink_path: ^site2/leaf/ + - source: ^site1/leaf/ + sink: ^site2/leaf/ demand: 100.0 mode: combine flow_policy_config: SHORTEST_PATHS_ECMP @@ -142,8 +142,8 @@ workflow: name: stats - step_type: MaxFlow name: site_capacity - source_path: ^site1/leaf/ - sink_path: ^site2/leaf/ + source: ^site1/leaf/ + sink: ^site2/leaf/ mode: combine shortest_path: false - step_type: MaximumSupportedDemand diff --git a/docs/reference/api-full.md b/docs/reference/api-full.md index 8587d66..c12c37f 100644 --- a/docs/reference/api-full.md +++ b/docs/reference/api-full.md @@ -12,9 +12,9 @@ Quick links: - [CLI Reference](cli.md) - [DSL Reference](dsl.md) -Generated from source code on: December 11, 2025 at 23:43 UTC +Generated from source code on: December 20, 2025 at 00:19 UTC -Modules auto-discovered: 44 +Modules auto-discovered: 49 --- @@ -451,94 +451,42 @@ and placement. It can carry either a concrete `FlowPolicy` instance or a ### TrafficDemand -Single traffic demand input. +Traffic demand specification using unified selectors. Attributes: - source_path: Regex string selecting source nodes. - sink_path: Regex string selecting sink nodes. - priority: Priority class for this demand (lower value = higher priority). + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). demand: Total demand volume. demand_placed: Portion of this demand placed so far. - flow_policy_config: Policy preset (FlowPolicyPreset enum) used to build - a `FlowPolicy`` if ``flow_policy`` is not provided. - flow_policy: Concrete policy instance. If set, it overrides - ``flow_policy_config``. 
- mode: Expansion mode, ``"combine"`` or ``"pairwise"``. + priority: Priority class (lower = higher priority). + mode: Node pairing mode ("combine" or "pairwise"). + group_mode: How grouped nodes produce demands + ("flatten", "per_group", "group_pairwise"). + expand_vars: Variable substitutions using $var syntax. + expansion_mode: How to combine expand_vars ("cartesian" or "zip"). + flow_policy_config: Policy preset for routing. + flow_policy: Concrete policy instance (overrides flow_policy_config). attrs: Arbitrary user metadata. - id: Unique identifier. Auto-generated if empty or not provided. + id: Unique identifier. Auto-generated if empty. **Attributes:** -- `source_path` (str) -- `sink_path` (str) -- `priority` (int) = 0 +- `source` (Union) +- `sink` (Union) - `demand` (float) = 0.0 - `demand_placed` (float) = 0.0 +- `priority` (int) = 0 +- `mode` (str) = combine +- `group_mode` (str) = flatten +- `expand_vars` (Dict) = {} +- `expansion_mode` (str) = cartesian - `flow_policy_config` (Optional) - `flow_policy` (Optional) -- `mode` (str) = combine - `attrs` (Dict) = {} - `id` (str) --- -## ngraph.model.failure.conditions - -Shared condition primitives and evaluators. - -This module provides a small, dependency-free condition evaluation utility -that can be reused by failure policies and DSL selection filters. - -Operators supported: - -- ==, !=, <, <=, >, >= -- contains, not_contains -- any_value, no_value - -The evaluator operates on a flat attribute mapping for an entity. Callers are -responsible for constructing that mapping (e.g. merging top-level fields with -``attrs`` and ensuring appropriate precedence rules). - -### FailureCondition - -A single condition for matching an entity attribute. - -Args: - attr: Attribute name to inspect in the entity mapping. - operator: Comparison operator. See module docstring for the list. - value: Right-hand operand for the comparison (unused for any_value/no_value). - -**Attributes:** - -- `attr` (str) -- `operator` (str) -- `value` (Any | None) - -### evaluate_condition(entity_attrs: 'dict[str, Any]', cond: 'FailureCondition') -> 'bool' - -Evaluate a single condition against an entity attribute mapping. - -Args: - entity_attrs: Flat mapping of attributes for the entity. - cond: Condition to evaluate. - -Returns: - True if the condition passes, False otherwise. - -### evaluate_conditions(entity_attrs: 'dict[str, Any]', conditions: 'Iterable[FailureCondition]', logic: 'str') -> 'bool' - -Evaluate multiple conditions with AND/OR logic. - -Args: - entity_attrs: Flat mapping of attributes for the entity. - conditions: Iterable of conditions to evaluate. - logic: "and" or "or". - -Returns: - True if the combined predicate passes, False otherwise. - ---- - ## ngraph.model.failure.parser Parsers for FailurePolicySet and related failure modeling structures. @@ -563,7 +511,18 @@ Raises: ### build_risk_groups(rg_data: 'List[Dict[str, Any]]') -> 'List[RiskGroup]' -No documentation available. +Build RiskGroup objects from raw config data. + +Supports bracket expansion in risk group names. For example: + +- `{name: "DC[1-3]_Power"}` creates DC1_Power, DC2_Power, DC3_Power +- Children are also expanded recursively + +Args: + rg_data: List of risk group definition dicts. + +Returns: + List of RiskGroup objects with names expanded. --- @@ -578,18 +537,6 @@ top-level attributes with simple operators; rules select matches using (with `count`). Policies can optionally expand failures by shared risk groups or by risk-group children. 
-### FailureCondition - -Alias to the shared condition dataclass. - -This maintains a consistent import path within the failure policy module. - -**Attributes:** - -- `attr` (str) -- `operator` (str) -- `value` (Any | None) - ### FailureMode A weighted mode that encapsulates a set of rules applied together. @@ -884,7 +831,7 @@ Attributes: - `enable_risk_group(self, name: 'str', recursive: 'bool' = True) -> 'None'` - Enable all nodes/links that have 'name' in their risk_groups. - `find_links(self, source_regex: 'Optional[str]' = None, target_regex: 'Optional[str]' = None, any_direction: 'bool' = False) -> 'List[Link]'` - Search for links using optional regex patterns for source or target node names. - `get_links_between(self, source: 'str', target: 'str') -> 'List[str]'` - Retrieve all link IDs that connect the specified source node -- `select_node_groups_by_path(self, path: 'str') -> 'Dict[str, List[Node]]'` - Select and group nodes by regex on name or by attribute directive. +- `select_node_groups_by_path(self, path: 'str') -> 'Dict[str, List[Node]]'` - Select and group nodes by regex pattern on node name. ### Node @@ -1019,6 +966,16 @@ Args: Returns: A class decorator that adds the class to `WORKFLOW_STEP_REGISTRY`. +### resolve_parallelism(parallelism: 'Union[int, str]') -> 'int' + +Resolve parallelism setting to a concrete worker count. + +Args: + parallelism: Either an integer worker count or "auto" for CPU count. + +Returns: + Positive integer worker count (minimum 1). + --- ## ngraph.workflow.build_graph @@ -1058,7 +1015,7 @@ Actual Core graph building happens in analysis functions as needed. Attributes: add_reverse: If True, adds reverse edges for bidirectional connectivity. - Defaults to True for backward compatibility. + Defaults to True. **Attributes:** @@ -1164,8 +1121,8 @@ YAML Configuration Example: - step_type: MaxFlow name: "maxflow_dc_to_edge" - source_path: "^datacenter/.*" - sink_path: "^edge/.*" + source: "^datacenter/.*" + sink: "^edge/.*" mode: "combine" failure_policy: "random_failures" iterations: 100 @@ -1184,8 +1141,8 @@ YAML Configuration Example: Maximum flow Monte Carlo workflow step. Attributes: - source_path: Regex pattern for source node groups. - sink_path: Regex pattern for sink node groups. + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). failure_policy: Name of failure policy in scenario.failure_policy_set. iterations: Number of Monte Carlo trials. @@ -1205,8 +1162,8 @@ Attributes: - `name` (str) - `seed` (int | None) - `_seed_source` (str) -- `source_path` (str) -- `sink_path` (str) +- `source` (Union[str, Dict[str, Any]]) +- `sink` (Union[str, Dict[str, Any]]) - `mode` (str) = combine - `failure_policy` (str | None) - `iterations` (int) = 1 @@ -1514,19 +1471,161 @@ Args: allowed: Set of recognized keys. context: Short description used in error messages. +### join_paths(parent_path: 'str', rel_path: 'str') -> 'str' + +Join two path segments according to DSL conventions. + +The DSL has no concept of absolute paths. All paths are relative to the +current context (parent_path). A leading "/" on rel_path is stripped and +has no functional effect - it serves only as a visual indicator that the +path starts from the current scope's root. 
+ +Behavior: + +- Leading "/" on rel_path is stripped (not treated as filesystem root) +- Result is always: "{parent_path}/{stripped_rel_path}" if parent_path is non-empty +- Examples: + + join_paths("", "/leaf") -> "leaf" + join_paths("pod1", "/leaf") -> "pod1/leaf" + join_paths("pod1", "leaf") -> "pod1/leaf" (same result) + +Args: + parent_path: Parent path prefix (e.g., "pod1" when expanding a blueprint). + rel_path: Path to join. Leading "/" is stripped if present. + +Returns: + Combined path string. + +--- + +## ngraph.dsl.expansion.brackets + +Bracket expansion for name patterns. + +Provides expand_name_patterns() for expanding bracket expressions +like "fa[1-3]" into ["fa1", "fa2", "fa3"]. + ### expand_name_patterns(name: 'str') -> 'List[str]' Expand bracket expressions in a group name. +Supports: + +- Ranges: [1-3] -> 1, 2, 3 +- Lists: [a,b,c] -> a, b, c +- Mixed: [1,3,5-7] -> 1, 3, 5, 6, 7 +- Multiple brackets: Cartesian product + +Args: + name: Name pattern with optional bracket expressions. + +Returns: + List of expanded names. + Examples: + >>> expand_name_patterns("fa[1-3]") + ["fa1", "fa2", "fa3"] + >>> expand_name_patterns("dc[1,3,5-6]") + ["dc1", "dc3", "dc5", "dc6"] + >>> expand_name_patterns("fa[1-2]_plane[5-6]") + ["fa1_plane5", "fa1_plane6", "fa2_plane5", "fa2_plane6"] -- "fa[1-3]" -> ["fa1", "fa2", "fa3"] -- "dc[1,3,5-6]" -> ["dc1", "dc3", "dc5", "dc6"] -- "fa[1-2]_plane[5-6]" -> ["fa1_plane5", "fa1_plane6", "fa2_plane5", "fa2_plane6"] +### expand_risk_group_refs(rg_list: 'Iterable[str]') -> 'Set[str]' -### join_paths(parent_path: 'str', rel_path: 'str') -> 'str' +Expand bracket patterns in a list of risk group references. + +Takes an iterable of risk group names (possibly containing bracket +expressions) and returns a set of all expanded names. + +Args: + rg_list: Iterable of risk group name patterns. + +Returns: + Set of expanded risk group names. + +Examples: + >>> expand_risk_group_refs(["RG1"]) + {"RG1"} + >>> expand_risk_group_refs(["RG[1-3]"]) + {"RG1", "RG2", "RG3"} + >>> expand_risk_group_refs(["A[1-2]", "B[a,b]"]) + {"A1", "A2", "Ba", "Bb"} -Join two path segments according to the DSL conventions. +--- + +## ngraph.dsl.expansion.schema + +Schema definitions for variable expansion. + +Provides dataclasses for template expansion configuration. + +### ExpansionSpec + +Specification for variable-based expansion. + +Attributes: + expand_vars: Mapping of variable names to lists of values. + expansion_mode: How to combine variable values. + +- "cartesian": All combinations (default) +- "zip": Pair values by position + +**Attributes:** + +- `expand_vars` (Dict[str, List[Any]]) = {} +- `expansion_mode` (Literal['cartesian', 'zip']) = cartesian + +**Methods:** + +- `is_empty(self) -> 'bool'` - Check if no variables are defined. + +--- + +## ngraph.dsl.expansion.variables + +Variable expansion for templates. + +Provides expand_templates() function for substituting $var and ${var} +placeholders in template strings. + +### expand_templates(templates: 'Dict[str, str]', spec: "'ExpansionSpec'") -> 'Iterator[Dict[str, str]]' + +Expand template strings with variable substitution. + +Uses $var or ${var} syntax only. + +Args: + templates: Dict of template strings, e.g. {"source": "dc${dc}/...", "sink": "..."}. + spec: Expansion specification with variables and mode. + +Yields: + Dicts with same keys as templates, values substituted. + +Raises: + ValueError: If zip mode has mismatched list lengths or expansion exceeds limit. 
+ KeyError: If a template references an undefined variable. + +Example: + >>> spec = ExpansionSpec(expand_vars={"dc": [1, 2]}) + >>> list(expand_templates({"src": "dc${dc}"}, spec)) + [{"src": "dc1"}, {"src": "dc2"}] + +### substitute_vars(template: 'str', var_dict: 'Dict[str, Any]') -> 'str' + +Substitute $var and ${var} placeholders in a template string. + +Uses $ prefix to avoid collision with regex {m,n} quantifiers. + +Args: + template: String containing $var or ${var} placeholders. + var_dict: Mapping of variable names to values. + +Returns: + Template with variables substituted. + +Raises: + KeyError: If a referenced variable is not in var_dict. --- @@ -1548,6 +1647,192 @@ keys) and with schema shape already enforced. --- +## ngraph.dsl.selectors.conditions + +Condition evaluation for node/entity filtering. + +Provides evaluation logic for attribute conditions used in selectors +and failure policies. Supports operators: ==, !=, <, <=, >, >=, +contains, not_contains, in, not_in, any_value, no_value. + +### evaluate_condition(attrs: 'Dict[str, Any]', cond: "'Condition'") -> 'bool' + +Evaluate a single condition against an attribute dict. + +Args: + attrs: Flat mapping of entity attributes. + cond: Condition to evaluate. + +Returns: + True if condition passes, False otherwise. + +Raises: + ValueError: If operator is unknown or value type is invalid. + +### evaluate_conditions(attrs: 'Dict[str, Any]', conditions: "Iterable['Condition']", logic: 'str' = 'or') -> 'bool' + +Evaluate multiple conditions with AND/OR logic. + +Args: + attrs: Flat mapping of entity attributes. + conditions: Iterable of Condition objects. + logic: "and" (all must match) or "or" (any must match). + +Returns: + True if combined predicate passes. + +Raises: + ValueError: If logic is not "and" or "or". + +--- + +## ngraph.dsl.selectors.normalize + +Selector parsing and normalization. + +Provides the single entry point for converting raw selector values +(strings or dicts) into NodeSelector objects. + +### normalize_selector(raw: 'Union[str, Dict[str, Any], NodeSelector]', context: 'str') -> 'NodeSelector' + +Normalize a raw selector (string or dict) to a NodeSelector. + +This is the single entry point for all selector parsing. All downstream +code works with NodeSelector objects only. + +Args: + raw: Either a regex string, selector dict, or existing NodeSelector. + context: Usage context ("adjacency", "demand", "override", "workflow"). + Determines the default for active_only. + +Returns: + Normalized NodeSelector instance. + +Raises: + ValueError: If selector format is invalid or context is unknown. + +--- + +## ngraph.dsl.selectors.schema + +Schema definitions for unified node selection. + +Provides dataclasses for node selection configuration used across +adjacency, demands, overrides, and workflow steps. + +### Condition + +A single attribute condition for filtering. + +Attributes: + attr: Attribute name to match. + operator: Comparison operator. + value: Right-hand operand (unused for any_value/no_value). + +**Attributes:** + +- `attr` (str) +- `operator` (Literal['==', '!=', '<', '<=', '>', '>=', 'contains', 'not_contains', 'in', 'not_in', 'any_value', 'no_value']) +- `value` (Any) + +### MatchSpec + +Specification for filtering nodes by attribute conditions. + +Attributes: + conditions: List of conditions to evaluate. + logic: How to combine conditions ("and" = all, "or" = any). 
+ +**Attributes:** + +- `conditions` (List[Condition]) = [] +- `logic` (Literal['and', 'or']) = or + +### NodeSelector + +Unified node selection specification. + +Evaluation order: + +1. Select nodes matching `path` regex (default ".*" if omitted) +2. Filter by `match` conditions +3. Filter by `active_only` flag +4. Group by `group_by` attribute (if specified) + +At least one of path, group_by, or match must be specified. + +Attributes: + path: Regex pattern on node.name. + group_by: Attribute name to group nodes by. + match: Attribute-based filtering conditions. + active_only: Whether to exclude disabled nodes. None uses context default. + +**Attributes:** + +- `path` (Optional[str]) +- `group_by` (Optional[str]) +- `match` (Optional[MatchSpec]) +- `active_only` (Optional[bool]) + +--- + +## ngraph.dsl.selectors.select + +Node selection and evaluation. + +Provides the unified select_nodes() function that handles regex matching, +attribute filtering, active-only filtering, and grouping. + +### flatten_link_attrs(link: "'Link'", link_id: 'str') -> 'Dict[str, Any]' + +Build flat attribute dict for condition evaluation on links. + +Merges link's top-level fields with link.attrs. Top-level fields +take precedence on key conflicts. + +Args: + link: Link object to flatten. + link_id: The link's ID in the network. + +Returns: + Flat dict suitable for condition evaluation. + +### flatten_node_attrs(node: "'Node'") -> 'Dict[str, Any]' + +Build flat attribute dict for condition evaluation. + +Merges node's top-level fields (name, disabled, risk_groups) with +node.attrs. Top-level fields take precedence on key conflicts. + +Args: + node: Node object to flatten. + +Returns: + Flat dict suitable for condition evaluation. + +### select_nodes(network: "'Network'", selector: 'NodeSelector', default_active_only: 'bool', excluded_nodes: 'Optional[Set[str]]' = None) -> "Dict[str, List['Node']]" + +Unified entry point for node selection. + +Evaluation order: + +1. Select nodes matching `path` regex (or all nodes if path is None) +2. Filter by `match` conditions +3. Filter by `active_only` flag and excluded_nodes +4. Group by `group_by` attribute (overrides regex capture grouping) + +Args: + network: The network graph. + selector: Node selection specification. + default_active_only: Context-aware default for active_only flag. + Required parameter to prevent silent bugs. + excluded_nodes: Additional node names to exclude. + +Returns: + Dict mapping group labels to lists of nodes. + +--- + ## ngraph.results.artifacts Serializable result artifacts for analysis workflows. @@ -1560,7 +1845,6 @@ simulations in a JSON-serializable form: aggregated flow statistics - `FailurePatternResult`: capacity results for specific failure patterns -- `PlacementEnvelope`: per-demand placement envelopes ### CapacityEnvelope @@ -1628,44 +1912,6 @@ Attributes: - `from_dict(data: 'Dict[str, Any]') -> "'FailurePatternResult'"` - Construct FailurePatternResult from a dictionary. - `to_dict(self) -> 'Dict[str, Any]'` - Convert to dictionary for JSON serialization. -### PlacementEnvelope - -Per-demand placement envelope keyed like capacity envelopes. - -Each envelope captures frequency distribution of placement ratio for a -specific demand definition across Monte Carlo iterations. - -Attributes: - source: Source selection regex or node label. - sink: Sink selection regex or node label. - mode: Demand expansion mode ("combine" or "pairwise"). - priority: Demand priority class. - frequencies: Mapping of placement ratio to occurrence count. 
- min: Minimum observed placement ratio. - max: Maximum observed placement ratio. - mean: Mean placement ratio. - stdev: Standard deviation of placement ratio. - total_samples: Number of iterations represented. - -**Attributes:** - -- `source` (str) -- `sink` (str) -- `mode` (str) -- `priority` (int) -- `frequencies` (Dict[float, int]) -- `min` (float) -- `max` (float) -- `mean` (float) -- `stdev` (float) -- `total_samples` (int) - -**Methods:** - -- `from_dict(data: 'Dict[str, Any]') -> "'PlacementEnvelope'"` - Construct a PlacementEnvelope from a dictionary. -- `from_values(source: 'str', sink: 'str', mode: 'str', priority: 'int', ratios: 'List[float]', rounding_decimals: 'int' = 4) -> "'PlacementEnvelope'"` -- `to_dict(self) -> 'Dict[str, Any]'` - --- ## ngraph.results.flow @@ -2020,63 +2266,6 @@ Returns: --- -## ngraph.utils.nodes - -Node utility functions for filtering and selection. - -Provides centralized helpers for filtering active (non-disabled) nodes, -used across analysis, workflows, and demand expansion. - -### collect_active_node_names_from_groups(groups: "Dict[str, List['Node']]", excluded_nodes: 'Optional[Set[str]]' = None) -> 'List[str]' - -Extract active (non-disabled) node names from selection groups dict. - -Flattens all group values and filters to active nodes. - -Args: - groups: Dictionary mapping group labels to lists of Node objects. - excluded_nodes: Optional set of node names to exclude. - -Returns: - List of node names from all groups that are active. - -### collect_active_nodes_from_groups(groups: "Dict[str, List['Node']]", excluded_nodes: 'Optional[Set[str]]' = None) -> "List['Node']" - -Extract active (non-disabled) nodes from selection groups dict. - -Flattens all group values and filters to active nodes. - -Args: - groups: Dictionary mapping group labels to lists of Node objects. - excluded_nodes: Optional set of node names to exclude. - -Returns: - List of Node objects from all groups that are active. - -### get_active_node_names(nodes: "Iterable['Node']", excluded_nodes: 'Optional[Set[str]]' = None) -> 'List[str]' - -Extract names of active (non-disabled) nodes, optionally excluding some. - -Args: - nodes: Iterable of Node objects to filter. - excluded_nodes: Optional set of node names to exclude. - -Returns: - List of node names that are not disabled and not in excluded_nodes. - -### get_active_nodes(nodes: "Iterable['Node']", excluded_nodes: 'Optional[Set[str]]' = None) -> "List['Node']" - -Extract active (non-disabled) nodes, optionally excluding some. - -Args: - nodes: Iterable of Node objects to filter. - excluded_nodes: Optional set of node names to exclude. - -Returns: - List of Node objects that are not disabled and not in excluded_nodes. - ---- - ## ngraph.utils.output_paths Utilities for building CLI artifact output paths. @@ -2275,20 +2464,20 @@ Attributes: - `_disabled_node_ids` (FrozenSet[int]) - `_disabled_link_ids` (FrozenSet[str]) - `_link_id_to_edge_indices` (Mapping[str, Tuple[int, ...]]) -- `_source_path` (Optional[str]) -- `_sink_path` (Optional[str]) +- `_source` (Optional[Union[str, Dict[str, Any]]]) +- `_sink` (Optional[Union[str, Dict[str, Any]]]) - `_mode` (Optional[Mode]) - `_pseudo_context` (Optional[_PseudoNodeContext]) **Methods:** -- `from_network(network: "'Network'", *, source: 'Optional[str]' = None, sink: 'Optional[str]' = None, mode: 'Mode' = , augmentations: 'Optional[List[AugmentationEdge]]' = None) -> "'AnalysisContext'"` - Create analysis context from network. 
-- `k_shortest_paths(self, source: 'Optional[str]' = None, sink: 'Optional[str]' = None, *, mode: 'Mode' = , max_k: 'int' = 3, edge_select: 'EdgeSelect' = , max_path_cost: 'float' = inf, max_path_cost_factor: 'Optional[float]' = None, split_parallel_edges: 'bool' = False, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], List[Path]]'` - Compute up to K shortest paths per group pair. -- `max_flow(self, source: 'Optional[str]' = None, sink: 'Optional[str]' = None, *, mode: 'Mode' = , shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement' = , excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], float]'` - Compute maximum flow between node groups. -- `max_flow_detailed(self, source: 'Optional[str]' = None, sink: 'Optional[str]' = None, *, mode: 'Mode' = , shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement' = , excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None, include_min_cut: 'bool' = False) -> 'Dict[Tuple[str, str], MaxFlowResult]'` - Compute max flow with detailed results including cost distribution. -- `sensitivity(self, source: 'Optional[str]' = None, sink: 'Optional[str]' = None, *, mode: 'Mode' = , shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement' = , excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], Dict[str, float]]'` - Analyze sensitivity of max flow to edge failures. -- `shortest_path_cost(self, source: 'Optional[str]' = None, sink: 'Optional[str]' = None, *, mode: 'Mode' = , edge_select: 'EdgeSelect' = , excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], float]'` - Compute shortest path costs between node groups. -- `shortest_paths(self, source: 'Optional[str]' = None, sink: 'Optional[str]' = None, *, mode: 'Mode' = , edge_select: 'EdgeSelect' = , split_parallel_edges: 'bool' = False, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], List[Path]]'` - Compute concrete shortest paths between node groups. +- `from_network(network: "'Network'", *, source: 'Optional[Union[str, Dict[str, Any]]]' = None, sink: 'Optional[Union[str, Dict[str, Any]]]' = None, mode: 'Mode' = , augmentations: 'Optional[List[AugmentationEdge]]' = None) -> "'AnalysisContext'"` - Create analysis context from network. +- `k_shortest_paths(self, source: 'Optional[Union[str, Dict[str, Any]]]' = None, sink: 'Optional[Union[str, Dict[str, Any]]]' = None, *, mode: 'Mode' = , max_k: 'int' = 3, edge_select: 'EdgeSelect' = , max_path_cost: 'float' = inf, max_path_cost_factor: 'Optional[float]' = None, split_parallel_edges: 'bool' = False, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], List[Path]]'` - Compute up to K shortest paths per group pair. +- `max_flow(self, source: 'Optional[Union[str, Dict[str, Any]]]' = None, sink: 'Optional[Union[str, Dict[str, Any]]]' = None, *, mode: 'Mode' = , shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement' = , excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], float]'` - Compute maximum flow between node groups. 
+- `max_flow_detailed(self, source: 'Optional[Union[str, Dict[str, Any]]]' = None, sink: 'Optional[Union[str, Dict[str, Any]]]' = None, *, mode: 'Mode' = , shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement' = , excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None, include_min_cut: 'bool' = False) -> 'Dict[Tuple[str, str], MaxFlowResult]'` - Compute max flow with detailed results including cost distribution. +- `sensitivity(self, source: 'Optional[Union[str, Dict[str, Any]]]' = None, sink: 'Optional[Union[str, Dict[str, Any]]]' = None, *, mode: 'Mode' = , shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement' = , excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], Dict[str, float]]'` - Analyze sensitivity of max flow to edge failures. +- `shortest_path_cost(self, source: 'Optional[Union[str, Dict[str, Any]]]' = None, sink: 'Optional[Union[str, Dict[str, Any]]]' = None, *, mode: 'Mode' = , edge_select: 'EdgeSelect' = , excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], float]'` - Compute shortest path costs between node groups. +- `shortest_paths(self, source: 'Optional[Union[str, Dict[str, Any]]]' = None, sink: 'Optional[Union[str, Dict[str, Any]]]' = None, *, mode: 'Mode' = , edge_select: 'EdgeSelect' = , split_parallel_edges: 'bool' = False, excluded_nodes: 'Optional[Set[str]]' = None, excluded_links: 'Optional[Set[str]]' = None) -> 'Dict[Tuple[str, str], List[Path]]'` - Compute concrete shortest paths between node groups. ### AugmentationEdge @@ -2366,7 +2555,7 @@ Args: Returns: AnalysisContext ready for use with demand_placement_analysis. -### build_maxflow_context(network: "'Network'", source_path: 'str', sink_path: 'str', mode: 'str' = 'combine') -> 'AnalysisContext' +### build_maxflow_context(network: "'Network'", source: 'str | dict[str, Any]', sink: 'str | dict[str, Any]', mode: 'str' = 'combine') -> 'AnalysisContext' Build an AnalysisContext for repeated max-flow analysis. @@ -2375,8 +2564,8 @@ pairs, enabling O(|excluded|) mask building per iteration. Args: network: Network instance. - source_path: Selection expression for source node groups. - sink_path: Selection expression for sink node groups. + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). Returns: @@ -2414,7 +2603,7 @@ Args: Returns: FlowIterationResult describing this iteration. 
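A usage sketch for the selector-based `build_maxflow_context` signature above. The import path is an assumption; the bound-context behavior (source/sink pre-configured, per-iteration exclusions passed to `max_flow`) follows the `AnalysisContext` docs:

```python
# Sketch: build a bound context once, then reuse it across iterations.
# Import path is assumed; source/sink accept a string path or selector
# dict per the documented signature. The Network instance and link ID
# are illustrative.
from ngraph.analysis import build_maxflow_context

ctx = build_maxflow_context(
    network,                      # an existing Network instance
    source={"path": "^dc1/.*"},   # selector dict form
    sink="^dc2/.*",               # plain string pattern form
    mode="combine",
)

# Bound context: source/sink already configured, so max_flow() takes
# only per-iteration arguments such as excluded_nodes/excluded_links.
flows = ctx.max_flow(excluded_links={"link-123"})  # hypothetical link ID
```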
-### max_flow_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement' = , include_flow_details: 'bool' = False, include_min_cut: 'bool' = False, context: 'Optional[AnalysisContext]' = None, **kwargs) -> 'FlowIterationResult' +### max_flow_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', source: 'str | dict[str, Any]', sink: 'str | dict[str, Any]', mode: 'str' = 'combine', shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement' = , include_flow_details: 'bool' = False, include_min_cut: 'bool' = False, context: 'Optional[AnalysisContext]' = None, **kwargs) -> 'FlowIterationResult' Analyze maximum flow capacity between node groups. @@ -2422,8 +2611,8 @@ Args: network: Network instance. excluded_nodes: Set of node names to exclude temporarily. excluded_links: Set of link IDs to exclude temporarily. - source_path: Selection expression for source node groups. - sink_path: Selection expression for sink node groups. + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). shortest_path: Whether to use shortest paths only. require_capacity: If True (default), path selection considers available @@ -2437,7 +2626,7 @@ Args: Returns: FlowIterationResult describing this iteration. -### sensitivity_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = , context: 'Optional[AnalysisContext]' = None, **kwargs) -> 'FlowIterationResult' +### sensitivity_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', source: 'str | dict[str, Any]', sink: 'str | dict[str, Any]', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = , context: 'Optional[AnalysisContext]' = None, **kwargs) -> 'FlowIterationResult' Analyze component sensitivity to failures. @@ -2453,8 +2642,8 @@ Args: network: Network instance. excluded_nodes: Set of node names to exclude temporarily. excluded_links: Set of link IDs to exclude temporarily. - source_path: Selection expression for source node groups. - sink_path: Selection expression for sink node groups. + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). shortest_path: If True, use single-tier shortest-path flow (IP/IGP mode). Reports only edges used under ECMP routing. If False (default), use @@ -2473,7 +2662,6 @@ Returns: Builders for traffic matrices. Construct `TrafficMatrixSet` from raw dictionaries (e.g. parsed YAML). -This logic was previously embedded in `Scenario.from_yaml`. ### build_traffic_matrix_set(raw: 'Dict[str, List[dict]]') -> 'TrafficMatrixSet' @@ -2487,7 +2675,8 @@ Returns: Initialized `TrafficMatrixSet` with constructed `TrafficDemand` objects. Raises: - ValueError: If ``raw`` is not a mapping of name -> list[dict]. + ValueError: If ``raw`` is not a mapping of name -> list[dict], + or if required fields are missing. --- @@ -2496,6 +2685,7 @@ Raises: Demand expansion: converts TrafficDemand specs into concrete placement demands. 
Supports both pairwise and combine modes through augmentation-based pseudo nodes. +Uses unified selectors for node selection. ### DemandExpansion @@ -2540,10 +2730,11 @@ Expand TrafficDemand specifications into concrete demands with augmentations. Pure function that: -1. Selects node groups using Network's selection API -2. Distributes volume based on mode (combine/pairwise) -3. Generates augmentation edges for combine mode (pseudo nodes) -4. Returns demands (node names) + augmentations +1. Expands variables in selectors using expand_vars +2. Normalizes and evaluates selectors to get node groups +3. Distributes volume based on mode (combine/pairwise) and group_mode +4. Generates augmentation edges for combine mode (pseudo nodes) +5. Returns demands (node names) + augmentations Node names are used (not IDs) so expansion happens BEFORE graph building. IDs are resolved after graph is built with augmentations. @@ -2613,9 +2804,9 @@ Attributes: - `compute_exclusions(self, policy: "'FailurePolicy | None'" = None, seed_offset: 'int | None' = None) -> 'tuple[set[str], set[str]]'` - Compute set of nodes and links to exclude for a failure iteration. - `get_failure_policy(self) -> "'FailurePolicy | None'"` - Get failure policy for analysis. - `run_demand_placement_monte_carlo(self, demands_config: 'list[dict[str, Any]] | Any', iterations: 'int' = 100, parallelism: 'int' = 1, placement_rounds: 'int | str' = 'auto', baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_details: 'bool' = False, include_used_edges: 'bool' = False, **kwargs) -> 'Any'` - Analyze traffic demand placement success under failures. -- `run_max_flow_monte_carlo(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement | str' = , baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_summary: 'bool' = False, **kwargs) -> 'Any'` - Analyze maximum flow capacity envelopes between node groups under failures. +- `run_max_flow_monte_carlo(self, source: 'str | dict[str, Any]', sink: 'str | dict[str, Any]', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement | str' = , baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_summary: 'bool' = False, **kwargs) -> 'Any'` - Analyze maximum flow capacity envelopes between node groups under failures. - `run_monte_carlo_analysis(self, analysis_func: 'AnalysisFunction', iterations: 'int' = 1, parallelism: 'int' = 1, baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, **analysis_kwargs) -> 'dict[str, Any]'` - Run Monte Carlo failure analysis with any analysis function. -- `run_sensitivity_monte_carlo(self, source_path: 'str', sink_path: 'str', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement | str' = , baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, **kwargs) -> 'dict[str, Any]'` - Analyze component criticality for flow capacity under failures. 
+- `run_sensitivity_monte_carlo(self, source: 'str | dict[str, Any]', sink: 'str | dict[str, Any]', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement | str' = , baseline: 'bool' = False, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, **kwargs) -> 'dict[str, Any]'` - Analyze component criticality for flow capacity under failures.
 - `run_single_failure_scenario(self, analysis_func: 'AnalysisFunction', **kwargs) -> 'Any'` - Run a single failure scenario for convenience.
 
 ---
diff --git a/docs/reference/design.md b/docs/reference/design.md
index c38e4c7..145d98c 100644
--- a/docs/reference/design.md
+++ b/docs/reference/design.md
@@ -151,11 +151,43 @@ Network is the container for scenario topology. It enforces invariants during co
 ### Node and Link Selection
 
-The model supports selecting groups of nodes by pattern, which is used by algorithms to choose source/sink sets matching on their structured names or attributes. Network.select_node_groups_by_path(pattern) accepts either a regex or an attribute query:
+The model supports selecting groups of nodes via a unified selector system used by algorithms to choose source/sink sets matching on structured names or attributes.
 
-If the pattern is of the form `attr:<name>`, it groups nodes by the value of the given attribute name. For example, `attr:role` might group nodes by their role attribute (like "core", "leaf", etc.), returning a dict mapping each distinct value to the list of nodes with that value. Nodes missing the attribute are excluded.
+**Selector Forms:**
 
-Otherwise, the pattern is treated as an anchored regular expression on the node's name. If the regex contains capturing groups, the concatenated capture groups form the group label; otherwise, the entire pattern string is used as the label. For instance, the pattern `r"(\w+)-(\d+)"` on node names could produce group labels like "metroA-1" etc. If no nodes match, an empty mapping is returned (with a debug log) instead of an error, so higher-level logic can handle it.
+Selectors can be specified as:
+
+1. **String pattern**: A regex matched against node names (anchored at start via `re.match()`)
+2. **Selector object**: A dict with `path`, `group_by`, and/or `match` fields
+
+**String Pattern Behavior:**
+
+When using a regex pattern, if the regex contains capturing groups, the concatenated capture groups form the group label; otherwise, the entire pattern string is used as the label. For instance, the pattern `r"(\w+)-(\d+)"` on node names could produce group labels like "metroA-1" etc.
+
+**Attribute-based Grouping:**
+
+Use `group_by` in a selector object to group nodes by an attribute value:
+
+```yaml
+source:
+  group_by: "role"
+```
+
+This groups nodes by the value of `node.attrs["role"]` (e.g., "core", "leaf"), returning a dict mapping each distinct value to the list of nodes with that value. Nodes missing the attribute are excluded.
+
+**Attribute-based Filtering:**
+
+Use `match` in a selector object to filter nodes by attribute conditions:
+
+```yaml
+source:
+  path: "^dc1/.*"
+  match:
+    conditions:
+      - attr: "tier"
+        operator: "=="
+        value: "leaf"
+```
 
 This selection mechanism allows workflow steps and API calls to refer to nodes flexibly (using human-readable patterns instead of explicit lists), which is particularly useful in large topologies.
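A short sketch of how the selector above resolves in code, using the `normalize_selector` and `select_nodes` entry points documented in `ngraph.dsl.selectors` (the imports match the source changes in this diff; the network object and attribute values are illustrative):

```python
# Sketch: programmatic equivalent of the YAML match selector above.
# The attribute values are illustrative; network is an existing
# Network instance.
from ngraph.dsl.selectors import normalize_selector, select_nodes

raw = {
    "path": "^dc1/.*",
    "match": {
        "conditions": [{"attr": "tier", "operator": "==", "value": "leaf"}],
    },
}
selector = normalize_selector(raw, "workflow")  # context sets active_only default
groups = select_nodes(network, selector, default_active_only=True)
# -> dict mapping group labels to lists of matching Node objects
```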
@@ -576,16 +608,16 @@ For traffic matrix placement, NetGraph provides `FlowPolicyPreset` values that b # IP network with traditional ECMP (e.g., data center leaf-spine) traffic_matrix_set: dc_traffic: - - source_path: ^rack1/ - sink_path: ^rack2/ + - source: ^rack1/ + sink: ^rack2/ demand: 1000.0 flow_policy_config: SHORTEST_PATHS_ECMP # MPLS-TE network with capacity-aware tunnel placement traffic_matrix_set: backbone_traffic: - - source_path: ^metro1/ - sink_path: ^metro2/ + - source: ^metro1/ + sink: ^metro2/ demand: 5000.0 flow_policy_config: TE_WCMP_UNLIM ``` diff --git a/docs/reference/dsl.md b/docs/reference/dsl.md index 95ecb71..b264a16 100644 --- a/docs/reference/dsl.md +++ b/docs/reference/dsl.md @@ -20,6 +20,18 @@ A scenario file defines a complete network simulation including: The DSL enables both simple direct definitions and complex hierarchical structures with templates and parameters. +## Template Syntaxes + +The DSL uses three distinct template syntaxes in different contexts: + +| Syntax | Example | Context | Purpose | +|--------|---------|---------|---------| +| `[1-3]` | `dc[1-3]/rack[a,b]` | Group names | Generate multiple groups | +| `$var` / `${var}` | `pod${p}/leaf` | Adjacency, demands | Template expansion with `expand_vars` | +| `{node_num}` | `srv-{node_num}` | `name_template` | Node naming (1-indexed) | + +**These syntaxes are not interchangeable.** Each works only in its designated context. + ## Top-Level Keys ```yaml @@ -136,7 +148,6 @@ network: hardware: source: {component: "800G-DR4", count: 2} target: {component: "800G-DR4", count: 2} - # Tip: In selector objects, 'path' also supports 'attr:' (see Node Selection) ``` ### Attribute-filtered Adjacency (selector objects) @@ -169,17 +180,20 @@ network: Notes: -- `path` uses the same semantics as runtime: regex on node name or `attr:` directive grouping (see Node Selection). -- `match.conditions` uses the shared condition operators implemented in code: `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `any_value`, `no_value`. +- `path` is a regex pattern matched against node names (anchored at start via Python `re.match`). +- `match.conditions` uses the shared condition operators: `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `any_value`, `no_value`. - Conditions evaluate over a flat view of node attributes combining top-level fields (`name`, `disabled`, `risk_groups`) and `node.attrs`. - `logic` in the `match` block accepts "and" or "or" (default "or"). - Selectors filter node candidates before the adjacency `pattern` is applied. - Cross-endpoint predicates (e.g., comparing a source attribute to a target attribute) are not supported. - Node overrides run before adjacency expansion; link overrides run after adjacency expansion. -Path semantics inside blueprints: +Path semantics: -- Within a blueprint's `adjacency`, a leading `/` is treated as relative to the blueprint instantiation path, not a global root. For example, if a blueprint is used under group `pod1`, then `source: /leaf` resolves to `pod1/leaf`. +- All paths are relative to the current scope. There is no concept of absolute paths. +- Leading `/` is stripped and has no functional effect - `/leaf` and `leaf` are equivalent. +- Within a blueprint, paths resolve relative to the instantiation path. For example, if a blueprint is used under group `pod1`, then `source: /leaf` resolves to `pod1/leaf`. +- At top-level `network.adjacency`, the parent path is empty, so patterns match against full node names. 
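The joining rule is small enough to sketch directly. This mirrors the `join_paths` helper documented in the API reference above; the function body is an illustrative re-statement of the documented behavior, not the library source:

```python
# Sketch of the documented path-joining rule: a leading "/" is visual
# only, and results are always relative to the parent scope.
def join_paths_sketch(parent_path: str, rel_path: str) -> str:
    rel = rel_path.lstrip("/")  # leading "/" stripped, no functional effect
    return f"{parent_path}/{rel}" if parent_path else rel

assert join_paths_sketch("", "/leaf") == "leaf"          # top-level scope
assert join_paths_sketch("pod1", "/leaf") == "pod1/leaf"
assert join_paths_sketch("pod1", "leaf") == "pod1/leaf"  # same result
```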
Example with OR logic to match multiple roles: @@ -229,12 +243,36 @@ network: - Numeric ranges: `[1-4]` → 1, 2, 3, 4 - Explicit lists: `[red,blue,green]` → red, blue, green +**Scope:** Bracket expansion applies to: + +- **Group names** under `network.groups` and `blueprints.*.groups` +- **Risk group names** in top-level `risk_groups` definitions (including children) +- **Risk group membership arrays** on nodes, links, and groups + +Component names, direct node names (`network.nodes`), and other string fields treat brackets as literal characters. + +**Risk Group Expansion Examples:** + +```yaml +# Definition expansion - creates DC1_Power, DC2_Power, DC3_Power +risk_groups: + - name: "DC[1-3]_Power" + +# Membership expansion - assigns to RG1, RG2, RG3 +network: + nodes: + Server: + risk_groups: ["RG[1-3]"] +``` + ### Variable Expansion in Adjacency +Use `$var` or `${var}` syntax for template substitution: + ```yaml adjacency: - - source: "plane{p}/rack{r}" - target: "spine{s}" + - source: "plane${p}/rack${r}" + target: "spine${s}" expand_vars: p: [1, 2] r: ["a", "b"] @@ -242,8 +280,8 @@ adjacency: expansion_mode: "cartesian" # All combinations pattern: "mesh" - - source: "server{idx}" - target: "switch{idx}" + - source: "server${idx}" + target: "switch${idx}" expand_vars: idx: [1, 2, 3, 4] expansion_mode: "zip" # Paired by index @@ -332,8 +370,10 @@ Define hardware components with attributes for cost and power modeling: components: SpineRouter: component_type: "chassis" - cost: 50000.0 + description: "64-port spine router" + capex: 50000.0 power_watts: 2500.0 + power_watts_max: 3000.0 capacity: 64000.0 # Gbps ports: 64 attrs: @@ -342,7 +382,7 @@ components: children: LineCard400G: component_type: "linecard" - cost: 8000.0 + capex: 8000.0 power_watts: 400.0 capacity: 12800.0 ports: 32 @@ -350,7 +390,8 @@ components: Optic400G: component_type: "optic" - cost: 2500.0 + description: "400G pluggable optic" + capex: 2500.0 power_watts: 12.0 capacity: 400.0 attrs: @@ -449,28 +490,85 @@ Define traffic demand patterns for capacity analysis: ```yaml traffic_matrix_set: production: - - name: "server_to_storage" - source_path: "^servers/.*" - sink_path: "^storage/.*" - demand: 1000 # Traffic volume - mode: "combine" # Aggregate demand + # Simple string pattern selectors + - source: "^servers/.*" + sink: "^storage/.*" + demand: 1000 + mode: "combine" priority: 1 flow_policy_config: "SHORTEST_PATHS_ECMP" - - name: "inter_dc_backup" - source_path: "^dc1/.*" - sink_path: "^dc2/.*" + # Dict selectors with attribute-based grouping + - source: + group_by: "dc" # Group nodes by datacenter attribute + sink: + group_by: "dc" demand: 500 - mode: "pairwise" # Distributed demand + mode: "pairwise" priority: 2 + + # Dict selectors with filtering + - source: + path: "^dc1/.*" + match: + conditions: + - attr: "role" + operator: "==" + value: "leaf" + sink: + path: "^dc2/.*" + match: + conditions: + - attr: "role" + operator: "==" + value: "spine" + demand: 200 + mode: "combine" ``` -**Traffic Modes:** +### Variable Expansion in Demands + +Use `expand_vars` to generate multiple demands from a template: + +```yaml +traffic_matrix_set: + inter_dc: + - source: "^${src_dc}/.*" + sink: "^${dst_dc}/.*" + demand: 100 + mode: "combine" + expand_vars: + src_dc: ["dc1", "dc2"] + dst_dc: ["dc2", "dc3"] + expansion_mode: "cartesian" # All combinations (default) + + - source: "^${dc}/leaf/.*" + sink: "^${dc}/spine/.*" + demand: 50 + mode: "pairwise" + expand_vars: + dc: ["dc1", "dc2", "dc3"] + expansion_mode: "zip" # Paired by index 
+```
+
+**Expansion Modes:**
+
+- `cartesian`: All combinations of variable values (default)
+- `zip`: Pair values by index (lists must have equal length)
+
+### Selector Fields
+
+The `source` and `sink` fields accept either:
+
+- A string regex pattern matched against node names
+- A selector object with `path`, `group_by`, and/or `match` fields
+
+### Traffic Modes
 
 - `combine`: Single aggregate flow between source and sink groups
 - `pairwise`: Individual flows between all source-sink node pairs
 
-**Flow Policies:**
+### Flow Policies
 
 - `SHORTEST_PATHS_ECMP`: IP/IGP routing with hash-based ECMP; equal split across equal-cost paths
 - `SHORTEST_PATHS_WCMP`: IP/IGP routing with weighted ECMP; proportional split by link capacity
@@ -575,8 +673,6 @@ workflow:
     baseline: true
 ```
 
-Note: Workflow `source_path` and `sink_path` accept either regex on node names or `attr:<name>` directive to group by node attributes (see Node Selection).
-
 **Common Steps:**
 
 - `BuildGraph`: Export graph to JSON (node-link) for external analysis
@@ -589,38 +685,37 @@ See [Workflow Reference](workflow.md) for detailed configuration.
 
 ## Node Selection
 
-NetGraph supports two ways to select and group nodes:
+NetGraph provides a unified selector system for selecting and grouping nodes across adjacency, demands, and workflow steps.
 
-1. Regex on node name (anchored at the start using `re.match()`)
-2. Attribute directive `attr:<name>` to group by a node attribute
+### Selector Forms
 
-Note: The attribute directive is node-only. It applies only in contexts that select nodes:
+Selectors can be specified as:
 
-- Workflow paths: `source_path` and `sink_path`
-- Adjacency selectors: the `path` field in `source`/`target` selector objects
+1. **String pattern**: A regex matched against node names (anchored at start via `re.match()`)
+2. **Selector object**: A dict with `path`, `group_by`, and/or `match` fields
 
-For links, risk groups, and failure policies, use `conditions` with an `attr` field in rules (see Failure Simulation) rather than `attr:<name>`.
+At least one of `path`, `group_by`, or `match` must be specified in a selector object.
 
-**Regex Examples:**
+### String Pattern Examples
 
 ```yaml
 # Exact match
-path: "spine-1"
+source: "spine-1"
 
 # Prefix match
-path: "dc1/spine/"
+source: "dc1/spine/"
 
 # Wildcard patterns
-path: "dc1/leaf.*"
+source: "dc1/leaf.*"
 
 # Anchored patterns
-path: "^dc1/spine/switch-[1-3]$"
+source: "^dc1/spine/switch-[1-3]$"
 
 # Alternation
-path: "^dc1/(spine|leaf)/.*$"
+source: "^dc1/(spine|leaf)/.*$"
 ```
 
-**Regex Capturing Groups:**
+### Capturing Groups for Node Grouping
 
 Regex capturing groups create node groupings for analysis:
 
@@ -638,43 +733,71 @@ Regex capturing groups create node groupings for analysis:
 
 - Multiple capturing groups: Join with `|` separator
 - No capturing groups: Group by original pattern string
 
-### Attribute Directive For Node Selection
+### Attribute-based Grouping
 
-Write `attr:<name>` to group nodes by the value of `node.attrs[<name>]`.
-Supported contexts in the DSL:
+Use the `group_by` field to group nodes by an attribute value:
 
-- Workflow: `source_path` and `sink_path`
-- Adjacency selectors: the `path` in `source`/`target` selector objects
+```yaml
+# Group by metro attribute
+source:
+  group_by: "metro"
+
+# Combine with path filtering
+source:
+  path: "^dc1/.*"
+  group_by: "role"
+```
 
 Notes:
 
-- Blueprint scoping: In blueprints, `attr:<name>` paths are global and are not
-  prefixed by the parent blueprint path.
-- Attribute name: `<name>` must be a simple identifier (`[A-Za-z_]\w*`), and it
-  refers to a key in `node.attrs`. Nested keys are not supported here.
+- `group_by` refers to a key in `node.attrs`. Nested keys are not supported.
+- Nodes without the specified attribute are omitted.
+- Group labels are the string form of the attribute value.
 
-- Strict detection: Only a full match of `attr:<name>` (where `<name>` matches `[A-Za-z_]\w*`) triggers attribute grouping. Everything else is treated as a normal regex.
-- Missing attributes: Nodes without the attribute are omitted.
-- Labels: Group labels are the string form of the attribute value.
 
-Examples:
+### Workflow Examples
 
 ```yaml
 workflow:
   - step_type: MaxFlow
-    source_path: "attr:metro"  # groups by metro attribute
-    sink_path: "^metro2/.*"
+    source:
+      group_by: "metro"       # Group by metro attribute
+    sink: "^metro2/.*"        # String pattern
     mode: "pairwise"
 ```
 
-Adjacency example using `attr:<name>`:
+### Adjacency Examples
 
 ```yaml
 network:
   adjacency:
-    - source: { path: "attr:role" }
-      target: { path: "^dc2/leaf/.*" }
+    - source:
+        group_by: "role"
+      target:
+        path: "^dc2/leaf/.*"
       pattern: mesh
 ```
+
+### Notes
+
+- For links, risk groups, and failure policies, use `conditions` with an `attr` field in rules (see Failure Simulation).
+- Blueprint scoping: In blueprints, paths are relative to the blueprint instantiation path.
diff --git a/docs/reference/workflow.md b/docs/reference/workflow.md
index eea8337..4e7720e 100644
--- a/docs/reference/workflow.md
+++ b/docs/reference/workflow.md
@@ -76,8 +76,8 @@ Monte Carlo maximum flow analysis between node groups.
 
 ```yaml
 - step_type: MaxFlow
   name: capacity_analysis
-  source_path: "^servers/.*"
-  sink_path: "^storage/.*"
+  source: "^servers/.*"
+  sink: "^storage/.*"
   mode: "combine"  # combine | pairwise
   failure_policy: random_failures
   iterations: 1000
@@ -180,19 +180,41 @@ Outputs:
 
 ## Node Selection Mechanism
 
-Select nodes by regex on `node.name` (anchored at start via Python `re.match`) or by attribute directive `attr:<name>` which groups nodes by `node.attrs[<name>]`.
+Workflow steps use a unified selector system for node selection. Selectors can be specified as string patterns or selector objects.
 
-### Basic Pattern Matching
+### String Pattern Matching
 
 ```yaml
 # Exact match
-source_path: "spine-1"
+source: "spine-1"
 
 # Prefix match
-source_path: "datacenter/servers/"
+source: "datacenter/servers/"
 
 # Pattern match
-source_path: "^pod[1-3]/leaf/.*$"
+source: "^pod[1-3]/leaf/.*$"
+```
+
+### Selector Objects
+
+```yaml
+# Attribute-based grouping
+source:
+  group_by: "dc"
+
+# Combined path and grouping
+source:
+  path: "^datacenter/.*"
+  group_by: "role"
+
+# With attribute filtering
+source:
+  path: "^pod[1-3]/.*"
+  match:
+    conditions:
+      - attr: "tier"
+        operator: "=="
+        value: "leaf"
 ```
 
 ### Capturing Groups for Node Grouping
 
 **No Capturing Groups**: All matching nodes form one group labeled by the pattern.
```yaml -source_path: "edge/.*" +source: "edge/.*" # Creates one group: "edge/.*" containing all matching nodes ``` **Single Capturing Group**: Each unique captured value creates a separate group. ```yaml -source_path: "(dc[1-3])/servers/.*" +source: "(dc[1-3])/servers/.*" # Creates groups: "dc1", "dc2", "dc3" # Each group contains servers from that datacenter ``` @@ -215,15 +237,16 @@ source_path: "(dc[1-3])/servers/.*" **Multiple Capturing Groups**: Group labels join captured values with `|`. ```yaml -source_path: "(dc[1-3])/(spine|leaf)/switch-(\d+)" +source: "(dc[1-3])/(spine|leaf)/switch-(\d+)" # Creates groups: "dc1|spine|1", "dc1|leaf|2", "dc2|spine|1", etc. ``` ### Attribute-based Grouping ```yaml -# Group by node attribute value (e.g., node.attrs["dc"]) — groups labeled by attribute value -source_path: "attr:dc" +# Group by node attribute value (e.g., node.attrs["dc"]) +source: + group_by: "dc" ``` ### Flow Analysis Modes @@ -236,8 +259,8 @@ source_path: "attr:dc" ### Required Parameters -- `source_path`: Regex pattern for source node selection -- `sink_path`: Regex pattern for sink node selection +- `source`: Node selector for source nodes (string pattern or selector object) +- `sink`: Node selector for sink nodes (string pattern or selector object) ### Analysis Configuration diff --git a/ngraph/_version.py b/ngraph/_version.py index 538d531..ac27bf2 100644 --- a/ngraph/_version.py +++ b/ngraph/_version.py @@ -2,4 +2,4 @@ __all__ = ["__version__"] -__version__ = "0.12.3" +__version__ = "0.13.0" diff --git a/ngraph/analysis/__init__.py b/ngraph/analysis/__init__.py index 3b05b43..9a41988 100644 --- a/ngraph/analysis/__init__.py +++ b/ngraph/analysis/__init__.py @@ -16,7 +16,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union # Internal helpers - importable but not part of public API. # Redundant aliases silence F401 while keeping them accessible. @@ -36,8 +36,8 @@ def analyze( network: "Network", *, - source: Optional[str] = None, - sink: Optional[str] = None, + source: Optional[Union[str, Dict[str, Any]]] = None, + sink: Optional[Union[str, Dict[str, Any]]] = None, mode: Mode = Mode.COMBINE, augmentations: Optional[List[AugmentationEdge]] = None, ) -> AnalysisContext: @@ -47,10 +47,10 @@ def analyze( Args: network: Network topology to analyze. - source: Optional source group pattern. If provided with sink, - creates bound context with pre-built pseudo-nodes for - efficient repeated flow analysis. - sink: Optional sink group pattern. + source: Optional source node selector (string path or selector dict). + If provided with sink, creates bound context with pre-built + pseudo-nodes for efficient repeated flow analysis. + sink: Optional sink node selector (string path or selector dict). mode: Group mode (COMBINE or PAIRWISE). Only used if bound. augmentations: Optional custom augmentation edges. 
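A usage sketch for the updated `analyze()` signature above. The `Mode` import location and the selector values are assumptions for illustration; the bound-context semantics follow the docstring:

```python
# Sketch: creating a bound AnalysisContext via analyze(). Selector
# values are illustrative, the Network instance is assumed to exist,
# and the Mode import location is an assumption.
from ngraph.analysis import analyze
from ngraph.analysis.context import Mode  # import path assumed

ctx = analyze(
    network,                      # an existing Network instance
    source="^servers/.*",         # string selector
    sink={"group_by": "dc"},      # selector dict
    mode=Mode.PAIRWISE,
)
results = ctx.max_flow()          # keyed by (source_label, sink_label)
```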
diff --git a/ngraph/analysis/context.py b/ngraph/analysis/context.py index e423d83..0c30792 100644 --- a/ngraph/analysis/context.py +++ b/ngraph/analysis/context.py @@ -18,7 +18,18 @@ from __future__ import annotations from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Dict, FrozenSet, List, Mapping, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Dict, + FrozenSet, + List, + Mapping, + Optional, + Set, + Tuple, + Union, +) import netgraph_core import numpy as np @@ -58,6 +69,21 @@ def __init__(self, source: str, target: str, capacity: float, cost: float): self.cost = cost +def _get_active_node_names( + nodes: List[Any], + excluded_nodes: Optional[Set[str]] = None, +) -> List[str]: + """Extract names of active (non-disabled) nodes, optionally excluding some. + + This is a local helper to replace utils.nodes.get_active_node_names. + """ + if excluded_nodes: + return [ + n.name for n in nodes if not n.disabled and n.name not in excluded_nodes + ] + return [n.name for n in nodes if not n.disabled] + + class _NodeMapper: """Bidirectional mapping between node names (str) and Core NodeId (int).""" @@ -111,8 +137,8 @@ def to_name(self, ext_id: int) -> Optional[str]: class _PseudoNodeContext: """Context for pseudo nodes created during graph construction.""" - source_path: str - sink_path: str + source: Union[str, Dict[str, Any]] + sink: Union[str, Dict[str, Any]] mode: Mode pairs: Dict[Tuple[str, str], Tuple[int, int]] @@ -162,8 +188,8 @@ class AnalysisContext: _link_id_to_edge_indices: Mapping[str, Tuple[int, ...]] = field(repr=False) # Binding state (None if unbound) - _source_path: Optional[str] = None - _sink_path: Optional[str] = None + _source: Optional[Union[str, Dict[str, Any]]] = None + _sink: Optional[Union[str, Dict[str, Any]]] = None _mode: Optional[Mode] = None _pseudo_context: Optional[_PseudoNodeContext] = field(default=None, repr=False) @@ -175,17 +201,17 @@ def network(self) -> "Network": @property def is_bound(self) -> bool: """True if source/sink groups are pre-configured.""" - return self._source_path is not None + return self._source is not None @property - def bound_source(self) -> Optional[str]: - """Source pattern if bound, None otherwise.""" - return self._source_path + def bound_source(self) -> Optional[Union[str, Dict[str, Any]]]: + """Source selector if bound, None otherwise.""" + return self._source @property - def bound_sink(self) -> Optional[str]: - """Sink pattern if bound, None otherwise.""" - return self._sink_path + def bound_sink(self) -> Optional[Union[str, Dict[str, Any]]]: + """Sink selector if bound, None otherwise.""" + return self._sink @property def bound_mode(self) -> Optional[Mode]: @@ -259,8 +285,8 @@ def from_network( cls, network: "Network", *, - source: Optional[str] = None, - sink: Optional[str] = None, + source: Optional[Union[str, Dict[str, Any]]] = None, + sink: Optional[Union[str, Dict[str, Any]]] = None, mode: Mode = Mode.COMBINE, augmentations: Optional[List[AugmentationEdge]] = None, ) -> "AnalysisContext": @@ -268,9 +294,9 @@ def from_network( Args: network: Network topology to analyze. - source: Optional source group pattern. If provided with sink, - creates bound context with pre-built pseudo-nodes. - sink: Optional sink group pattern. + source: Optional source node selector (string path or selector dict). + If provided with sink, creates bound context with pre-built pseudo-nodes. + sink: Optional sink node selector (string path or selector dict). mode: Group mode (COMBINE or PAIRWISE). 
Only used if bound. augmentations: Optional custom augmentation edges. @@ -318,8 +344,8 @@ def from_network( resolved_pairs[pair_key] = (pseudo_src_id, pseudo_snk_id) pseudo_context = _PseudoNodeContext( - source_path=source, - sink_path=sink, + source=source, + sink=sink, mode=mode, pairs=resolved_pairs, ) @@ -334,8 +360,8 @@ def from_network( _disabled_node_ids=ctx._disabled_node_ids, _disabled_link_ids=ctx._disabled_link_ids, _link_id_to_edge_indices=ctx._link_id_to_edge_indices, - _source_path=source, - _sink_path=sink, + _source=source, + _sink=sink, _mode=mode if source is not None else None, _pseudo_context=pseudo_context, ) @@ -346,8 +372,8 @@ def from_network( def max_flow( self, - source: Optional[str] = None, - sink: Optional[str] = None, + source: Optional[Union[str, Dict[str, Any]]] = None, + sink: Optional[Union[str, Dict[str, Any]]] = None, *, mode: Mode = Mode.COMBINE, shortest_path: bool = False, @@ -362,8 +388,10 @@ def max_flow( pseudo-nodes for efficiency. Otherwise builds them per-call. Args: - source: Source group pattern (required if unbound). - sink: Sink group pattern (required if unbound). + source: Source node selector (required if unbound). Can be a + string pattern or a selector dict with path/group_by/match. + sink: Sink node selector (required if unbound). Can be a string + pattern or a selector dict with path/group_by/match. mode: COMBINE or PAIRWISE (ignored if bound). shortest_path: If True, use only shortest paths (IP/IGP mode). require_capacity: If True (default), path selection considers @@ -411,8 +439,8 @@ def max_flow( def max_flow_detailed( self, - source: Optional[str] = None, - sink: Optional[str] = None, + source: Optional[Union[str, Dict[str, Any]]] = None, + sink: Optional[Union[str, Dict[str, Any]]] = None, *, mode: Mode = Mode.COMBINE, shortest_path: bool = False, @@ -425,8 +453,10 @@ def max_flow_detailed( """Compute max flow with detailed results including cost distribution. Args: - source: Source group pattern (required if unbound). - sink: Sink group pattern (required if unbound). + source: Source node selector (required if unbound). Can be a + string pattern or a selector dict with path/group_by/match. + sink: Sink node selector (required if unbound). Can be a string + pattern or a selector dict with path/group_by/match. mode: COMBINE or PAIRWISE (ignored if bound). shortest_path: If True, restricts flow to shortest paths. require_capacity: If True (default), path selection considers @@ -467,8 +497,8 @@ def max_flow_detailed( def sensitivity( self, - source: Optional[str] = None, - sink: Optional[str] = None, + source: Optional[Union[str, Dict[str, Any]]] = None, + sink: Optional[Union[str, Dict[str, Any]]] = None, *, mode: Mode = Mode.COMBINE, shortest_path: bool = False, @@ -483,8 +513,10 @@ def sensitivity( removing each one. Args: - source: Source group pattern (required if unbound). - sink: Sink group pattern (required if unbound). + source: Source node selector (required if unbound). Can be a + string pattern or a selector dict with path/group_by/match. + sink: Sink node selector (required if unbound). Can be a string + pattern or a selector dict with path/group_by/match. mode: COMBINE or PAIRWISE (ignored if bound). shortest_path: If True, use shortest-path-only flow (IP/IGP mode). 
require_capacity: If True (default), path selection considers @@ -526,8 +558,8 @@ def sensitivity( def shortest_path_cost( self, - source: Optional[str] = None, - sink: Optional[str] = None, + source: Optional[Union[str, Dict[str, Any]]] = None, + sink: Optional[Union[str, Dict[str, Any]]] = None, *, mode: Mode = Mode.COMBINE, edge_select: EdgeSelect = EdgeSelect.ALL_MIN_COST, @@ -540,8 +572,10 @@ def shortest_path_cost( groups. Otherwise source and sink arguments are required. Args: - source: Source group pattern (required if unbound). - sink: Sink group pattern (required if unbound). + source: Source node selector (required if unbound). Can be a + string pattern or a selector dict with path/group_by/match. + sink: Sink node selector (required if unbound). Can be a string + pattern or a selector dict with path/group_by/match. mode: COMBINE or PAIRWISE (ignored if bound). edge_select: SPF edge selection strategy. excluded_nodes: Nodes to exclude from this analysis. @@ -570,8 +604,8 @@ def shortest_path_cost( def shortest_paths( self, - source: Optional[str] = None, - sink: Optional[str] = None, + source: Optional[Union[str, Dict[str, Any]]] = None, + sink: Optional[Union[str, Dict[str, Any]]] = None, *, mode: Mode = Mode.COMBINE, edge_select: EdgeSelect = EdgeSelect.ALL_MIN_COST, @@ -585,8 +619,10 @@ def shortest_paths( groups. Otherwise source and sink arguments are required. Args: - source: Source group pattern (required if unbound). - sink: Sink group pattern (required if unbound). + source: Source node selector (required if unbound). Can be a + string pattern or a selector dict with path/group_by/match. + sink: Sink node selector (required if unbound). Can be a string + pattern or a selector dict with path/group_by/match. mode: COMBINE or PAIRWISE (ignored if bound). edge_select: SPF edge selection strategy. split_parallel_edges: Expand parallel edges into distinct paths. @@ -615,8 +651,8 @@ def shortest_paths( def k_shortest_paths( self, - source: Optional[str] = None, - sink: Optional[str] = None, + source: Optional[Union[str, Dict[str, Any]]] = None, + sink: Optional[Union[str, Dict[str, Any]]] = None, *, mode: Mode = Mode.PAIRWISE, max_k: int = 3, @@ -633,8 +669,10 @@ def k_shortest_paths( groups. Otherwise source and sink arguments are required. Args: - source: Source group pattern (required if unbound). - sink: Sink group pattern (required if unbound). + source: Source node selector (required if unbound). Can be a + string pattern or a selector dict with path/group_by/match. + sink: Sink node selector (required if unbound). Can be a string + pattern or a selector dict with path/group_by/match. mode: PAIRWISE (default) or COMBINE (ignored if bound). max_k: Maximum paths per pair. edge_select: SPF/KSP edge selection strategy. @@ -673,19 +711,20 @@ def k_shortest_paths( def _resolve_source_sink( self, - source: Optional[str], - sink: Optional[str], + source: Optional[Union[str, Dict[str, Any]]], + sink: Optional[Union[str, Dict[str, Any]]], mode: Mode, - ) -> Tuple[str, str, Mode]: + ) -> Tuple[Union[str, Dict[str, Any]], Union[str, Dict[str, Any]], Mode]: """Resolve source/sink from arguments or bound context. Args: - source: Source pattern from method call (or None). - sink: Sink pattern from method call (or None). + source: Source selector from method call (or None). + sink: Sink selector from method call (or None). mode: Mode from method call. Returns: Tuple of (resolved_source, resolved_sink, resolved_mode). + Selectors can be string patterns or dict selectors. 
Raises: ValueError: If unbound and source/sink not provided. @@ -697,8 +736,8 @@ def _resolve_source_sink( "Bound context: source/sink already configured. " "Create new context for different groups." ) - # Use bound values - return self._source_path, self._sink_path, self._mode # type: ignore[return-value] + # Use bound values (can be str or dict) + return self._source, self._sink, self._mode # type: ignore[return-value] else: if source is None or sink is None: raise ValueError("Unbound context: source and sink are required.") @@ -800,8 +839,8 @@ def _max_flow_bound( def _max_flow_unbound( self, *, - source: str, - sink: str, + source: Union[str, Dict[str, Any]], + sink: Union[str, Dict[str, Any]], mode: Mode, shortest_path: bool, require_capacity: bool, @@ -884,8 +923,8 @@ def _max_flow_detailed_bound( def _max_flow_detailed_unbound( self, *, - source: str, - sink: str, + source: Union[str, Dict[str, Any]], + sink: Union[str, Dict[str, Any]], mode: Mode, shortest_path: bool, require_capacity: bool, @@ -953,8 +992,8 @@ def _sensitivity_bound( def _sensitivity_unbound( self, *, - source: str, - sink: str, + source: Union[str, Dict[str, Any]], + sink: Union[str, Dict[str, Any]], mode: Mode, shortest_path: bool, require_capacity: bool, @@ -979,8 +1018,13 @@ def _fill_missing_pairs_bound(self, results: Dict, default_value) -> None: if not self._pseudo_context: return - src_groups = self._network.select_node_groups_by_path(self._source_path or "") - snk_groups = self._network.select_node_groups_by_path(self._sink_path or "") + from ngraph.dsl.selectors import normalize_selector, select_nodes + + src_selector = normalize_selector(self._source or "", "workflow") + snk_selector = normalize_selector(self._sink or "", "workflow") + + src_groups = select_nodes(self._network, src_selector, default_active_only=True) + snk_groups = select_nodes(self._network, snk_selector, default_active_only=True) if self._mode == Mode.COMBINE: combined_src_label = "|".join(sorted(src_groups.keys())) @@ -996,18 +1040,20 @@ def _fill_missing_pairs_bound(self, results: Dict, default_value) -> None: def _shortest_path_costs_impl( self, *, - source: str, - sink: str, + source: Union[str, Dict[str, Any]], + sink: Union[str, Dict[str, Any]], mode: Mode, edge_select: EdgeSelect, excluded_nodes: Optional[Set[str]], excluded_links: Optional[Set[str]], ) -> Dict[Tuple[str, str], float]: """Implementation of shortest_path_cost.""" - from ngraph.utils.nodes import get_active_node_names + from ngraph.dsl.selectors import normalize_selector, select_nodes - src_groups = self._network.select_node_groups_by_path(source) - snk_groups = self._network.select_node_groups_by_path(sink) + src_selector = normalize_selector(source, "workflow") + snk_selector = normalize_selector(sink, "workflow") + src_groups = select_nodes(self._network, src_selector, default_active_only=True) + snk_groups = select_nodes(self._network, snk_selector, default_active_only=True) if not src_groups: raise ValueError(f"No source nodes found matching '{source}'.") @@ -1025,12 +1071,12 @@ def _shortest_path_costs_impl( combined_src_names = [] for group_nodes in src_groups.values(): combined_src_names.extend( - get_active_node_names(group_nodes, excluded_nodes) + _get_active_node_names(group_nodes, excluded_nodes) ) combined_snk_names = [] for group_nodes in snk_groups.values(): combined_snk_names.extend( - get_active_node_names(group_nodes, excluded_nodes) + _get_active_node_names(group_nodes, excluded_nodes) ) if not combined_src_names or not combined_snk_names: @@ 
-1059,8 +1105,8 @@ def _shortest_path_costs_impl( results: Dict[Tuple[str, str], float] = {} for src_label, src_nodes in src_groups.items(): for snk_label, snk_nodes in snk_groups.items(): - active_src_names = get_active_node_names(src_nodes, excluded_nodes) - active_snk_names = get_active_node_names(snk_nodes, excluded_nodes) + active_src_names = _get_active_node_names(src_nodes, excluded_nodes) + active_snk_names = _get_active_node_names(snk_nodes, excluded_nodes) if not active_src_names or not active_snk_names: results[(src_label, snk_label)] = float("inf") continue @@ -1091,8 +1137,8 @@ def _shortest_path_costs_impl( def _shortest_paths_impl( self, *, - source: str, - sink: str, + source: Union[str, Dict[str, Any]], + sink: Union[str, Dict[str, Any]], mode: Mode, edge_select: EdgeSelect, split_parallel_edges: bool, @@ -1100,10 +1146,12 @@ def _shortest_paths_impl( excluded_links: Optional[Set[str]], ) -> Dict[Tuple[str, str], List[Path]]: """Implementation of shortest_paths.""" - from ngraph.utils.nodes import get_active_node_names + from ngraph.dsl.selectors import normalize_selector, select_nodes - src_groups = self._network.select_node_groups_by_path(source) - snk_groups = self._network.select_node_groups_by_path(sink) + src_selector = normalize_selector(source, "workflow") + snk_selector = normalize_selector(sink, "workflow") + src_groups = select_nodes(self._network, src_selector, default_active_only=True) + snk_groups = select_nodes(self._network, snk_selector, default_active_only=True) if not src_groups: raise ValueError(f"No source nodes found matching '{source}'.") @@ -1176,12 +1224,12 @@ def _best_paths_for_groups( combined_src_names = [] for group_nodes in src_groups.values(): combined_src_names.extend( - get_active_node_names(group_nodes, excluded_nodes) + _get_active_node_names(group_nodes, excluded_nodes) ) combined_snk_names = [] for group_nodes in snk_groups.values(): combined_snk_names.extend( - get_active_node_names(group_nodes, excluded_nodes) + _get_active_node_names(group_nodes, excluded_nodes) ) paths_list = _best_paths_for_groups(combined_src_names, combined_snk_names) @@ -1191,8 +1239,8 @@ def _best_paths_for_groups( results: Dict[Tuple[str, str], List[Path]] = {} for src_label, src_nodes in src_groups.items(): for snk_label, snk_nodes in snk_groups.items(): - active_src_names = get_active_node_names(src_nodes, excluded_nodes) - active_snk_names = get_active_node_names(snk_nodes, excluded_nodes) + active_src_names = _get_active_node_names(src_nodes, excluded_nodes) + active_snk_names = _get_active_node_names(snk_nodes, excluded_nodes) results[(src_label, snk_label)] = _best_paths_for_groups( active_src_names, active_snk_names ) @@ -1203,8 +1251,8 @@ def _best_paths_for_groups( def _k_shortest_paths_impl( self, *, - source: str, - sink: str, + source: Union[str, Dict[str, Any]], + sink: Union[str, Dict[str, Any]], mode: Mode, max_k: int, edge_select: EdgeSelect, @@ -1215,10 +1263,12 @@ def _k_shortest_paths_impl( excluded_links: Optional[Set[str]], ) -> Dict[Tuple[str, str], List[Path]]: """Implementation of k_shortest_paths.""" - from ngraph.utils.nodes import get_active_node_names + from ngraph.dsl.selectors import normalize_selector, select_nodes - src_groups = self._network.select_node_groups_by_path(source) - snk_groups = self._network.select_node_groups_by_path(sink) + src_selector = normalize_selector(source, "workflow") + snk_selector = normalize_selector(sink, "workflow") + src_groups = select_nodes(self._network, src_selector, default_active_only=True) 
+ snk_groups = select_nodes(self._network, snk_selector, default_active_only=True) if not src_groups: raise ValueError(f"No source nodes found matching '{source}'.") @@ -1304,12 +1354,12 @@ def _ksp_for_groups(src_names: List[str], snk_names: List[str]) -> List[Path]: combined_src_names = [] for group_nodes in src_groups.values(): combined_src_names.extend( - get_active_node_names(group_nodes, excluded_nodes) + _get_active_node_names(group_nodes, excluded_nodes) ) combined_snk_names = [] for group_nodes in snk_groups.values(): combined_snk_names.extend( - get_active_node_names(group_nodes, excluded_nodes) + _get_active_node_names(group_nodes, excluded_nodes) ) return { @@ -1322,8 +1372,8 @@ def _ksp_for_groups(src_names: List[str], snk_names: List[str]) -> List[Path]: results: Dict[Tuple[str, str], List[Path]] = {} for src_label, src_nodes in src_groups.items(): for snk_label, snk_nodes in snk_groups.items(): - active_src_names = get_active_node_names(src_nodes, excluded_nodes) - active_snk_names = get_active_node_names(snk_nodes, excluded_nodes) + active_src_names = _get_active_node_names(src_nodes, excluded_nodes) + active_snk_names = _get_active_node_names(snk_nodes, excluded_nodes) results[(src_label, snk_label)] = _ksp_for_groups( active_src_names, active_snk_names ) @@ -1339,23 +1389,33 @@ def _ksp_for_groups(src_names: List[str], snk_names: List[str]) -> List[Path]: def _build_pseudo_node_augmentations( network: "Network", - source_path: str, - sink_path: str, + source: Union[str, Dict[str, Any]], + sink: Union[str, Dict[str, Any]], mode: Mode, ) -> Tuple[List[AugmentationEdge], Dict[Tuple[str, str], Tuple[str, str]]]: """Build augmentation edges for pseudo source/sink nodes.""" - from ngraph.utils.nodes import ( - collect_active_node_names_from_groups, - get_active_node_names, - ) + from ngraph.dsl.selectors import normalize_selector, select_nodes + + # Normalize selectors and select nodes + src_selector = normalize_selector(source, "workflow") + snk_selector = normalize_selector(sink, "workflow") - src_groups = network.select_node_groups_by_path(source_path) - snk_groups = network.select_node_groups_by_path(sink_path) + # select_nodes returns Dict[str, List[Node]] with active_only=True by context + src_groups = select_nodes(network, src_selector, default_active_only=True) + snk_groups = select_nodes(network, snk_selector, default_active_only=True) if not src_groups: - raise ValueError(f"No source nodes found matching '{source_path}'.") + raise ValueError(f"No source nodes found matching '{source}'.") if not snk_groups: - raise ValueError(f"No sink nodes found matching '{sink_path}'.") + raise ValueError(f"No sink nodes found matching '{sink}'.") + + # Helper to get node names from groups + def _get_names(groups: Dict[str, List[Any]]) -> List[str]: + names: List[str] = [] + for nodes in groups.values(): + for node in nodes: + names.append(node.name) + return names augmentations: List[AugmentationEdge] = [] pair_to_pseudo_names: Dict[Tuple[str, str], Tuple[str, str]] = {} @@ -1364,8 +1424,8 @@ def _build_pseudo_node_augmentations( combined_src_label = "|".join(sorted(src_groups.keys())) combined_snk_label = "|".join(sorted(snk_groups.keys())) - combined_src_names = collect_active_node_names_from_groups(src_groups) - combined_snk_names = collect_active_node_names_from_groups(snk_groups) + combined_src_names = _get_names(src_groups) + combined_snk_names = _get_names(snk_groups) has_overlap = bool(set(combined_src_names) & set(combined_snk_names)) @@ -1390,8 +1450,8 @@ def 
_build_pseudo_node_augmentations( elif mode == Mode.PAIRWISE: for src_label, src_nodes in src_groups.items(): for snk_label, snk_nodes in snk_groups.items(): - active_src_names = get_active_node_names(src_nodes) - active_snk_names = get_active_node_names(snk_nodes) + active_src_names = [n.name for n in src_nodes] + active_snk_names = [n.name for n in snk_nodes] if set(active_src_names) & set(active_snk_names): continue diff --git a/ngraph/cli.py b/ngraph/cli.py index 969456f..0c04fe0 100644 --- a/ngraph/cli.py +++ b/ngraph/cli.py @@ -133,9 +133,11 @@ def _plural(n: int, singular: str, plural: Optional[str] = None) -> str: def _collect_step_path_fields(step: Any) -> list[tuple[str, str]]: - """Return (field, pattern) pairs for string fields that look like node patterns. + """Return (field, pattern) pairs for fields that represent node selectors. - Fields considered: names ending with "_path" or "_regex" with non-empty string values. + Fields considered: + - `source` and `sink` selector fields with string values + - names ending with "_path" or "_regex" with non-empty string values """ fields: list[tuple[str, str]] = [] for key, value in step.__dict__.items(): @@ -145,7 +147,8 @@ def _collect_step_path_fields(step: Any) -> list[tuple[str, str]]: continue if not value.strip(): continue - if key.endswith("_path") or key.endswith("_regex"): + # Selector fields or pattern fields + if key in ("source", "sink") or key.endswith("_path") or key.endswith("_regex"): fields.append((key, value)) return fields @@ -614,9 +617,12 @@ def _print_traffic_matrices( snk_counts: Dict[str, int] = {} pair_counts: Dict[tuple[str, str], Dict[str, float | int]] = {} for d in demands: - src_counts[d.source_path] = src_counts.get(d.source_path, 0) + 1 - snk_counts[d.sink_path] = snk_counts.get(d.sink_path, 0) + 1 - key = (d.source_path, d.sink_path) + # Handle both string and dict selectors + src_key = d.source if isinstance(d.source, str) else str(d.source) + snk_key = d.sink if isinstance(d.sink, str) else str(d.sink) + src_counts[src_key] = src_counts.get(src_key, 0) + 1 + snk_counts[snk_key] = snk_counts.get(snk_key, 0) + 1 + key = (src_key, snk_key) stats = pair_counts.setdefault(key, {"count": 0, "volume": 0.0}) stats["count"] = int(stats["count"]) + 1 stats["volume"] = float(stats["volume"]) + float(getattr(d, "demand", 0.0)) @@ -681,16 +687,18 @@ def _print_traffic_matrices( print(" Top demands (by offered volume):") top_rows: list[list[str]] = [] for d in sorted_demands: + src = d.source if isinstance(d.source, str) else str(d.source) + snk = d.sink if isinstance(d.sink, str) else str(d.sink) top_rows.append( [ - getattr(d, "source_path", ""), - getattr(d, "sink_path", ""), + src, + snk, f"{float(getattr(d, 'demand', 0.0)):,.1f}", str(getattr(d, "priority", 0)), ] ) top_table = _format_table( - ["Source Pattern", "Sink Pattern", "Offered", "Priority"], + ["Source", "Sink", "Offered", "Priority"], top_rows, ) print( @@ -701,9 +709,17 @@ def _print_traffic_matrices( if demands: for i, demand in enumerate(demands[:3]): # Show first 3 demands - print( - f" {i + 1}. {demand.source_path} → {demand.sink_path} ({demand.demand})" + src = ( + demand.source + if isinstance(demand.source, str) + else str(demand.source) + ) + snk = ( + demand.sink + if isinstance(demand.sink, str) + else str(demand.sink) ) + print(f" {i + 1}. {src} -> {snk} ({demand.demand})") if demand_count > 3: print(f" ... 
and {demand_count - 3} more demands") else: diff --git a/ngraph/dsl/blueprints/expand.py b/ngraph/dsl/blueprints/expand.py index da24d66..55d073e 100644 --- a/ngraph/dsl/blueprints/expand.py +++ b/ngraph/dsl/blueprints/expand.py @@ -4,12 +4,11 @@ import copy from dataclasses import dataclass, field -from itertools import product, zip_longest from typing import Any, Dict, List, Set from ngraph.dsl.blueprints import parser as _bp_parse -from ngraph.model.failure.conditions import FailureCondition as _Cond -from ngraph.model.failure.conditions import evaluate_conditions as _eval_conditions +from ngraph.dsl.expansion import ExpansionSpec, expand_risk_group_refs, expand_templates +from ngraph.dsl.selectors import normalize_selector, select_nodes from ngraph.model.network import Link, Network, Node @@ -244,7 +243,7 @@ def _expand_group( raise ValueError( f"'risk_groups' must be list or set in group '{group_name}'." ) - parent_risk_groups |= set(rg_val) + parent_risk_groups |= expand_risk_group_refs(rg_val) param_overrides: Dict[str, Any] = group_def.get("parameters", {}) if not isinstance(param_overrides, dict): @@ -269,7 +268,7 @@ def _expand_group( merged_def["attrs"] = {**parent_attrs, **child_attrs} # Merge parent's risk_groups with child's - child_rgs = set(merged_def.get("risk_groups", [])) + child_rgs = expand_risk_group_refs(merged_def.get("risk_groups", [])) merged_def["risk_groups"] = parent_risk_groups | child_rgs # Recursively expand @@ -306,7 +305,7 @@ def _expand_group( # Merge parent's risk groups parent_risk_groups = set(inherited_risk_groups) - child_rgs = set(group_def.get("risk_groups", [])) + child_rgs = expand_risk_group_refs(group_def.get("risk_groups", [])) final_risk_groups = parent_risk_groups | child_rgs for i in range(1, node_count + 1): @@ -323,6 +322,41 @@ def _expand_group( ctx.network.add_node(node) +def _normalize_adjacency_selector(sel: Any, base: str) -> Dict[str, Any]: + """Normalize a source/target selector for adjacency expansion. + + Args: + sel: String path or dict with 'path', 'group_by', and/or 'match'. + base: Parent path to prepend. + + Returns: + Normalized selector dict. + """ + if isinstance(sel, str): + return {"path": _bp_parse.join_paths(base, sel)} + if isinstance(sel, dict): + path = sel.get("path") + group_by = sel.get("group_by") + match = sel.get("match") + + # Validate: must have path, group_by, or match + if path is None and group_by is None and match is None: + raise ValueError( + "Selector object must contain 'path', 'group_by', or 'match'." + ) + + out = dict(sel) + if path is not None: + if not isinstance(path, str): + raise ValueError("Selector 'path' must be a string.") + out["path"] = _bp_parse.join_paths(base, path) + return out + raise ValueError( + "Adjacency 'source'/'target' must be string or object with " + "'path', 'group_by', or 'match'." 
+ ) + + def _expand_blueprint_adjacency( ctx: DSLExpansionContext, adj_def: Dict[str, Any], @@ -353,28 +387,8 @@ def _expand_blueprint_adjacency( _bp_parse.check_link_params(link_params, context="blueprint adjacency") link_count = adj_def.get("link_count", 1) - def _normalize_selector(sel: Any, base: str) -> Dict[str, Any]: - if isinstance(sel, str): - # Attribute directives must not be prefixed by parent_path - if sel.startswith("attr:"): - return {"path": sel} - return {"path": _bp_parse.join_paths(base, sel)} - if isinstance(sel, dict): - path = sel.get("path") - if not isinstance(path, str): - raise ValueError("Selector object must contain string 'path'.") - out = dict(sel) - if path.startswith("attr:"): - out["path"] = path - else: - out["path"] = _bp_parse.join_paths(base, path) - return out - raise ValueError( - "Adjacency 'source'/'target' must be string or object with 'path'." - ) - - src_sel = _normalize_selector(source_rel, parent_path) - tgt_sel = _normalize_selector(target_rel, parent_path) + src_sel = _normalize_adjacency_selector(source_rel, parent_path) + tgt_sel = _normalize_adjacency_selector(target_rel, parent_path) _expand_adjacency_pattern(ctx, src_sel, tgt_sel, pattern, link_params, link_count) @@ -397,29 +411,15 @@ def _expand_adjacency(ctx: DSLExpansionContext, adj_def: Dict[str, Any]) -> None _expand_adjacency_with_variables(ctx, adj_def, parent_path="") return - source_path_raw = adj_def["source"] - target_path_raw = adj_def["target"] + source_raw = adj_def["source"] + target_raw = adj_def["target"] pattern = adj_def.get("pattern", "mesh") link_count = adj_def.get("link_count", 1) link_params = adj_def.get("link_params", {}) _bp_parse.check_link_params(link_params, context="top-level adjacency") - def _normalize_selector(sel: Any, base: str) -> Dict[str, Any]: - if isinstance(sel, str): - return {"path": _bp_parse.join_paths(base, sel)} - if isinstance(sel, dict): - path = sel.get("path") - if not isinstance(path, str): - raise ValueError("Selector object must contain string 'path'.") - out = dict(sel) - out["path"] = _bp_parse.join_paths(base, path) - return out - raise ValueError( - "Adjacency 'source'/'target' must be string or object with 'path'." - ) - - src_sel = _normalize_selector(source_path_raw, "") - tgt_sel = _normalize_selector(target_path_raw, "") + src_sel = _normalize_adjacency_selector(source_raw, "") + tgt_sel = _normalize_adjacency_selector(target_raw, "") _expand_adjacency_pattern(ctx, src_sel, tgt_sel, pattern, link_params, link_count) @@ -428,13 +428,15 @@ def _expand_adjacency_with_variables( ctx: DSLExpansionContext, adj_def: Dict[str, Any], parent_path: str ) -> None: """Handles adjacency expansions when 'expand_vars' is provided. - We substitute variables into the 'source' and 'target' templates to produce - multiple adjacency expansions. Then each expansion is passed to _expand_adjacency_pattern. + + Substitutes variables into 'source' and 'target' templates using $var or ${var} + syntax to produce multiple adjacency expansions. Supports both string paths + and dict selectors (with path/group_by). Args: - ctx (DSLExpansionContext): The DSL expansion context. - adj_def (Dict[str, Any]): The adjacency definition including expand_vars, source, target, etc. - parent_path (str): Prepended to source/target if they do not start with '/'. + ctx: The DSL expansion context. + adj_def: The adjacency definition including expand_vars, source, target, etc. + parent_path: Prepended to source/target paths. 
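+
+    Illustrative YAML handled by this function (names and values are
+    examples only):
+
+        adjacency:
+          - source: "pod${p}/leaf"
+            target: "core${c}"
+            expand_vars:
+              p: [1, 2]
+              c: [1, 2, 3]
+            expansion_mode: cartesian
+            pattern: mesh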
""" source_template = adj_def["source"] target_template = adj_def["target"] @@ -445,50 +447,65 @@ def _expand_adjacency_with_variables( expand_vars = adj_def["expand_vars"] expansion_mode = adj_def.get("expansion_mode", "cartesian") - var_names = sorted(expand_vars.keys()) - lists_of_values = [expand_vars[var] for var in var_names] + # Build expansion spec + spec = ExpansionSpec(expand_vars=expand_vars, expansion_mode=expansion_mode) - if expansion_mode == "zip": - lengths = [len(lst) for lst in lists_of_values] - if len(set(lengths)) != 1: - raise ValueError( - f"zip expansion requires all lists be the same length; got {lengths}" - ) + # Collect all string fields that need variable substitution + templates = _extract_selector_templates(source_template, "source") + templates.update(_extract_selector_templates(target_template, "target")) - for combo_tuple in zip_longest(*lists_of_values, fillvalue=None): - combo_dict = dict(zip(var_names, combo_tuple, strict=False)) - expanded_src = _bp_parse.join_paths( - parent_path, source_template.format(**combo_dict) - ) - expanded_tgt = _bp_parse.join_paths( - parent_path, target_template.format(**combo_dict) - ) - _expand_adjacency_pattern( - ctx, - {"path": expanded_src}, - {"path": expanded_tgt}, - pattern, - link_params, - link_count, - ) - else: - # "cartesian" default - for combo_tuple in product(*lists_of_values): - combo_dict = dict(zip(var_names, combo_tuple, strict=False)) - expanded_src = _bp_parse.join_paths( - parent_path, source_template.format(**combo_dict) - ) - expanded_tgt = _bp_parse.join_paths( - parent_path, target_template.format(**combo_dict) - ) - _expand_adjacency_pattern( - ctx, - {"path": expanded_src}, - {"path": expanded_tgt}, - pattern, - link_params, - link_count, + if not templates: + # No variables to expand - just process once + src_sel = _normalize_adjacency_selector(source_template, parent_path) + tgt_sel = _normalize_adjacency_selector(target_template, parent_path) + _expand_adjacency_pattern( + ctx, src_sel, tgt_sel, pattern, link_params, link_count + ) + return + + # Expand templates and rebuild selectors + for substituted in expand_templates(templates, spec): + src_sel = _rebuild_selector(source_template, substituted, "source", parent_path) + tgt_sel = _rebuild_selector(target_template, substituted, "target", parent_path) + _expand_adjacency_pattern( + ctx, src_sel, tgt_sel, pattern, link_params, link_count + ) + + +def _extract_selector_templates(selector: Any, prefix: str) -> Dict[str, str]: + """Extract string fields from a selector that may contain variables.""" + templates: Dict[str, str] = {} + if isinstance(selector, str): + templates[prefix] = selector + elif isinstance(selector, dict): + if "path" in selector and isinstance(selector["path"], str): + templates[f"{prefix}.path"] = selector["path"] + if "group_by" in selector and isinstance(selector["group_by"], str): + templates[f"{prefix}.group_by"] = selector["group_by"] + return templates + + +def _rebuild_selector( + original: Any, substituted: Dict[str, str], prefix: str, parent_path: str +) -> Dict[str, Any]: + """Rebuild a selector with substituted values.""" + if isinstance(original, str): + path = substituted.get(prefix, original) + return {"path": _bp_parse.join_paths(parent_path, path)} + + if isinstance(original, dict): + result = dict(original) + if f"{prefix}.path" in substituted: + result["path"] = _bp_parse.join_paths( + parent_path, substituted[f"{prefix}.path"] ) + elif "path" in result: + result["path"] = 
_bp_parse.join_paths(parent_path, result["path"]) + if f"{prefix}.group_by" in substituted: + result["group_by"] = substituted[f"{prefix}.group_by"] + return result + + raise ValueError(f"Selector must be string or dict, got {type(original)}") def _expand_adjacency_pattern( @@ -512,88 +529,27 @@ def _expand_adjacency_pattern( risk_groups, attrs. Args: - ctx (DSLExpansionContext): The context with the target network. - source_selector (str|dict): Path string or selector object {path, match}. - target_selector (str|dict): Path string or selector object {path, match}. - pattern (str): "mesh" or "one_to_one". - link_params (Dict[str, Any]): Additional link parameters (capacity, cost, disabled, risk_groups, attrs). - link_count (int): Number of parallel links to create for each adjacency. + ctx: The context with the target network. + source_selector: Path string or selector object {path, group_by, match}. + target_selector: Path string or selector object {path, group_by, match}. + pattern: "mesh" or "one_to_one". + link_params: Additional link parameters. + link_count: Number of parallel links to create for each adjacency. """ - - def _normalize(sel: Any) -> tuple[str, Dict[str, Any] | None]: - if isinstance(sel, str): - return sel, None - if isinstance(sel, dict): - path = sel.get("path") - if not isinstance(path, str): - raise ValueError("Selector object must contain string 'path'.") - match = sel.get("match") - if match is not None and not isinstance(match, dict): - raise ValueError("'match' must be a dictionary if provided.") - return path, match - raise ValueError("source/target must be string or selector object with 'path'.") - - source_path, source_match = _normalize(source_selector) - target_path, target_match = _normalize(target_selector) - - source_node_groups = ctx.network.select_node_groups_by_path(source_path) - target_node_groups = ctx.network.select_node_groups_by_path(target_path) - - def _flatten_node(node: Node) -> Dict[str, Any]: - return { - "name": node.name, - "disabled": node.disabled, - "risk_groups": node.risk_groups, - **{ - k: v - for k, v in node.attrs.items() - if k not in {"name", "disabled", "risk_groups"} - }, - } - - def _apply_match(nodes: List[Node], match: Dict[str, Any] | None) -> List[Node]: - if not match: - return nodes - logic = match.get("logic", "or") - cond_dicts = match.get("conditions", []) - if not isinstance(cond_dicts, list): - raise ValueError("match.conditions must be a list if provided.") - conditions: List[_Cond] = [] - for cd in cond_dicts: - if not isinstance(cd, dict) or "attr" not in cd or "operator" not in cd: - raise ValueError( - "Each condition must be a dict with 'attr' and 'operator'." 
- ) - conditions.append( - _Cond(attr=cd["attr"], operator=cd["operator"], value=cd.get("value")) - ) - filtered: List[Node] = [] - for n in nodes: - attrs = _flatten_node(n) - if _eval_conditions(attrs, conditions, logic): - filtered.append(n) - return filtered - - source_nodes = _apply_match( - [node for _, nodes in source_node_groups.items() for node in nodes], - source_match, - ) - target_nodes = _apply_match( - [node for _, nodes in target_node_groups.items() for node in nodes], - target_match, - ) + source_nodes = _select_adjacency_nodes(ctx.network, source_selector) + target_nodes = _select_adjacency_nodes(ctx.network, target_selector) if not source_nodes or not target_nodes: return - dedup_pairs = set() + dedup_pairs: Set[tuple[str, str]] = set() if pattern == "mesh": for sn in source_nodes: for tn in target_nodes: if sn.name == tn.name: continue - pair = tuple(sorted((sn.name, tn.name))) + pair = (min(sn.name, tn.name), max(sn.name, tn.name)) if pair not in dedup_pairs: dedup_pairs.add(pair) _create_link(ctx.network, sn.name, tn.name, link_params, link_count) @@ -612,23 +568,41 @@ def _apply_match(nodes: List[Node], match: Dict[str, Any] | None) -> List[Node]: for i in range(bigger_count): if s_count >= t_count: - sn = source_nodes[i].name - tn = target_nodes[i % t_count].name + src_name = source_nodes[i].name + tgt_name = target_nodes[i % t_count].name else: - sn = source_nodes[i % s_count].name - tn = target_nodes[i].name + src_name = source_nodes[i % s_count].name + tgt_name = target_nodes[i].name - if sn == tn: + if src_name == tgt_name: continue - pair = tuple(sorted((sn, tn))) + pair = (min(src_name, tgt_name), max(src_name, tgt_name)) if pair not in dedup_pairs: dedup_pairs.add(pair) - _create_link(ctx.network, sn, tn, link_params, link_count) + _create_link(ctx.network, src_name, tgt_name, link_params, link_count) else: raise ValueError(f"Unknown adjacency pattern: {pattern}") +def _select_adjacency_nodes(network: Network, selector: Any) -> List[Node]: + """Select nodes for adjacency based on selector. + + Uses the unified selector system. For adjacency, active_only defaults + to False (links to disabled nodes are created). + + Args: + network: The network to select from. + selector: String path or dict with path/group_by/match. + + Returns: + List of matching nodes (flattened from all groups). + """ + normalized = normalize_selector(selector, context="adjacency") + groups = select_nodes(network, normalized, default_active_only=False) + return [node for nodes in groups.values() for node in nodes] + + def _create_link( net: Network, source: str, @@ -657,7 +631,7 @@ def _create_link( attrs = copy.deepcopy(link_params.get("attrs", {})) disabled_flag = bool(link_params.get("disabled", False)) # If link_params has risk_groups, we set them (replace). 
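+    # Bracket patterns in risk_groups expand here as well; illustratively,
+    # ["SRLG[1-2]"] becomes {"SRLG1", "SRLG2"} on the created link.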
- link_rgs = set(link_params.get("risk_groups", [])) + link_rgs = expand_risk_group_refs(link_params.get("risk_groups", [])) link = Link( source=source, @@ -700,8 +674,8 @@ def _process_direct_nodes(net: Network, network_data: Dict[str, Any]) -> None: attrs_dict = raw_def.get("attrs", {}) if not isinstance(attrs_dict, dict): raise ValueError(f"'attrs' must be a dict in node '{node_name}'.") - # risk_groups => set them if provided - rgs = set(raw_def.get("risk_groups", [])) + # risk_groups => set them if provided (with bracket expansion) + rgs = expand_risk_group_refs(raw_def.get("risk_groups", [])) new_node = Node( name=node_name, @@ -894,7 +868,7 @@ def _update_links( if new_disabled_val is not None: link.disabled = bool(new_disabled_val) if new_risk_groups is not None: - link.risk_groups = set(new_risk_groups) + link.risk_groups = expand_risk_group_refs(new_risk_groups) if new_attrs: link.attrs.update(new_attrs) @@ -929,7 +903,7 @@ def _update_nodes( raise ValueError( f"risk_groups override must be list or set, got {type(risk_groups_val)}." ) - node.risk_groups = set(risk_groups_val) + node.risk_groups = expand_risk_group_refs(risk_groups_val) node.attrs.update(attrs) diff --git a/ngraph/dsl/blueprints/parser.py b/ngraph/dsl/blueprints/parser.py index edfd8ee..5c271c9 100644 --- a/ngraph/dsl/blueprints/parser.py +++ b/ngraph/dsl/blueprints/parser.py @@ -6,9 +6,18 @@ from __future__ import annotations -import re -from itertools import product -from typing import Any, Dict, List +from typing import Any, Dict + +# Re-export expand_name_patterns from its canonical location +from ngraph.dsl.expansion import expand_name_patterns + +__all__ = [ + "check_no_extra_keys", + "check_adjacency_keys", + "check_link_params", + "expand_name_patterns", + "join_paths", +] def check_no_extra_keys( @@ -63,62 +72,29 @@ def check_link_params(link_params: Dict[str, Any], context: str) -> None: ) -_RANGE_REGEX = re.compile(r"\[([^\]]+)\]") - +def join_paths(parent_path: str, rel_path: str) -> str: + """Join two path segments according to DSL conventions. -def expand_name_patterns(name: str) -> List[str]: - """Expand bracket expressions in a group name. + The DSL has no concept of absolute paths. All paths are relative to the + current context (parent_path). A leading "/" on rel_path is stripped and + has no functional effect - it serves only as a visual indicator that the + path starts from the current scope's root. 
- Examples: - - "fa[1-3]" -> ["fa1", "fa2", "fa3"] - - "dc[1,3,5-6]" -> ["dc1", "dc3", "dc5", "dc6"] - - "fa[1-2]_plane[5-6]" -> ["fa1_plane5", "fa1_plane6", "fa2_plane5", "fa2_plane6"] - """ - matches = list(_RANGE_REGEX.finditer(name)) - if not matches: - return [name] - - expansions_list = [] - for match in matches: - range_expr = match.group(1) - expansions_list.append(_parse_range_expr(range_expr)) - - expanded_names = [] - for combo in product(*expansions_list): - result_str = "" - last_end = 0 - for m_idx, match in enumerate(matches): - start, end = match.span() - result_str += name[last_end:start] - result_str += combo[m_idx] - last_end = end - result_str += name[last_end:] - expanded_names.append(result_str) - - return expanded_names - - -def _parse_range_expr(expr: str) -> List[str]: - values: List[str] = [] - parts = [x.strip() for x in expr.split(",")] - for part in parts: - if "-" in part: - start_str, end_str = part.split("-", 1) - start = int(start_str) - end = int(end_str) - for val in range(start, end + 1): - values.append(str(val)) - else: - values.append(part) - return values + Behavior: + - Leading "/" on rel_path is stripped (not treated as filesystem root) + - Result is always: "{parent_path}/{stripped_rel_path}" if parent_path is non-empty + - Examples: + join_paths("", "/leaf") -> "leaf" + join_paths("pod1", "/leaf") -> "pod1/leaf" + join_paths("pod1", "leaf") -> "pod1/leaf" (same result) + Args: + parent_path: Parent path prefix (e.g., "pod1" when expanding a blueprint). + rel_path: Path to join. Leading "/" is stripped if present. -def join_paths(parent_path: str, rel_path: str) -> str: - """Join two path segments according to the DSL conventions.""" - # Attribute directive paths are global selectors and must not be prefixed - # by any parent blueprint path. - if rel_path.startswith("attr:"): - return rel_path + Returns: + Combined path string. + """ if rel_path.startswith("/"): rel_path = rel_path[1:] if parent_path: diff --git a/ngraph/dsl/expansion/__init__.py b/ngraph/dsl/expansion/__init__.py new file mode 100644 index 0000000..b6238c6 --- /dev/null +++ b/ngraph/dsl/expansion/__init__.py @@ -0,0 +1,31 @@ +"""Variable and pattern expansion for NetGraph DSL. + +This module provides template expansion with $var syntax and +bracket pattern expansion for name generation. + +Usage: + from ngraph.dsl.expansion import expand_templates, expand_name_patterns, ExpansionSpec + + # Variable expansion + spec = ExpansionSpec(expand_vars={"dc": [1, 2, 3]}) + for result in expand_templates({"path": "dc${dc}/leaf"}, spec): + print(result) # {"path": "dc1/leaf"}, {"path": "dc2/leaf"}, ... + + # Bracket expansion + names = expand_name_patterns("leaf[1-4]") # ["leaf1", "leaf2", "leaf3", "leaf4"] +""" + +from .brackets import expand_name_patterns, expand_risk_group_refs +from .schema import ExpansionSpec +from .variables import expand_templates, substitute_vars + +__all__ = [ + # Schema + "ExpansionSpec", + # Variable expansion + "expand_templates", + "substitute_vars", + # Bracket expansion + "expand_name_patterns", + "expand_risk_group_refs", +] diff --git a/ngraph/dsl/expansion/brackets.py b/ngraph/dsl/expansion/brackets.py new file mode 100644 index 0000000..9292982 --- /dev/null +++ b/ngraph/dsl/expansion/brackets.py @@ -0,0 +1,107 @@ +"""Bracket expansion for name patterns. + +Provides expand_name_patterns() for expanding bracket expressions +like "fa[1-3]" into ["fa1", "fa2", "fa3"]. 
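+
+Also provides expand_risk_group_refs() for expanding the same patterns
+inside risk-group reference lists, e.g. ["RG[1-2]"] -> {"RG1", "RG2"}.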
+""" + +from __future__ import annotations + +import re +from itertools import product +from typing import Iterable, List, Set + +__all__ = [ + "expand_name_patterns", + "expand_risk_group_refs", +] + +_RANGE_REGEX = re.compile(r"\[([^\]]+)\]") + + +def expand_name_patterns(name: str) -> List[str]: + """Expand bracket expressions in a group name. + + Supports: + - Ranges: [1-3] -> 1, 2, 3 + - Lists: [a,b,c] -> a, b, c + - Mixed: [1,3,5-7] -> 1, 3, 5, 6, 7 + - Multiple brackets: Cartesian product + + Args: + name: Name pattern with optional bracket expressions. + + Returns: + List of expanded names. + + Examples: + >>> expand_name_patterns("fa[1-3]") + ["fa1", "fa2", "fa3"] + >>> expand_name_patterns("dc[1,3,5-6]") + ["dc1", "dc3", "dc5", "dc6"] + >>> expand_name_patterns("fa[1-2]_plane[5-6]") + ["fa1_plane5", "fa1_plane6", "fa2_plane5", "fa2_plane6"] + """ + matches = list(_RANGE_REGEX.finditer(name)) + if not matches: + return [name] + + expansions_list = [] + for match in matches: + range_expr = match.group(1) + expansions_list.append(_parse_range_expr(range_expr)) + + expanded_names = [] + for combo in product(*expansions_list): + result_str = "" + last_end = 0 + for m_idx, match in enumerate(matches): + start, end = match.span() + result_str += name[last_end:start] + result_str += combo[m_idx] + last_end = end + result_str += name[last_end:] + expanded_names.append(result_str) + + return expanded_names + + +def expand_risk_group_refs(rg_list: Iterable[str]) -> Set[str]: + """Expand bracket patterns in a list of risk group references. + + Takes an iterable of risk group names (possibly containing bracket + expressions) and returns a set of all expanded names. + + Args: + rg_list: Iterable of risk group name patterns. + + Returns: + Set of expanded risk group names. + + Examples: + >>> expand_risk_group_refs(["RG1"]) + {"RG1"} + >>> expand_risk_group_refs(["RG[1-3]"]) + {"RG1", "RG2", "RG3"} + >>> expand_risk_group_refs(["A[1-2]", "B[a,b]"]) + {"A1", "A2", "Ba", "Bb"} + """ + result: Set[str] = set() + for rg in rg_list: + result.update(expand_name_patterns(rg)) + return result + + +def _parse_range_expr(expr: str) -> List[str]: + """Parse a bracket range expression like '1-3' or 'a,b,1-2'.""" + values: List[str] = [] + parts = [x.strip() for x in expr.split(",")] + for part in parts: + if "-" in part: + start_str, end_str = part.split("-", 1) + start = int(start_str) + end = int(end_str) + for val in range(start, end + 1): + values.append(str(val)) + else: + values.append(part) + return values diff --git a/ngraph/dsl/expansion/schema.py b/ngraph/dsl/expansion/schema.py new file mode 100644 index 0000000..28bb684 --- /dev/null +++ b/ngraph/dsl/expansion/schema.py @@ -0,0 +1,28 @@ +"""Schema definitions for variable expansion. + +Provides dataclasses for template expansion configuration. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Any, Dict, List, Literal + + +@dataclass +class ExpansionSpec: + """Specification for variable-based expansion. + + Attributes: + expand_vars: Mapping of variable names to lists of values. + expansion_mode: How to combine variable values. 
+ - "cartesian": All combinations (default) + - "zip": Pair values by position + """ + + expand_vars: Dict[str, List[Any]] = field(default_factory=dict) + expansion_mode: Literal["cartesian", "zip"] = "cartesian" + + def is_empty(self) -> bool: + """Check if no variables are defined.""" + return not self.expand_vars diff --git a/ngraph/dsl/expansion/variables.py b/ngraph/dsl/expansion/variables.py new file mode 100644 index 0000000..d47ed36 --- /dev/null +++ b/ngraph/dsl/expansion/variables.py @@ -0,0 +1,108 @@ +"""Variable expansion for templates. + +Provides expand_templates() function for substituting $var and ${var} +placeholders in template strings. +""" + +from __future__ import annotations + +import re +from itertools import product +from typing import TYPE_CHECKING, Any, Dict, Iterator + +if TYPE_CHECKING: + from .schema import ExpansionSpec + +__all__ = [ + "expand_templates", + "substitute_vars", +] + +# Pattern to match $var or ${var} placeholders +_VAR_PATTERN = re.compile(r"\$\{([a-zA-Z_][a-zA-Z0-9_]*)\}|\$([a-zA-Z_][a-zA-Z0-9_]*)") + +# Expansion limits +MAX_TEMPLATE_EXPANSIONS = 10_000 + + +def substitute_vars(template: str, var_dict: Dict[str, Any]) -> str: + """Substitute $var and ${var} placeholders in a template string. + + Uses $ prefix to avoid collision with regex {m,n} quantifiers. + + Args: + template: String containing $var or ${var} placeholders. + var_dict: Mapping of variable names to values. + + Returns: + Template with variables substituted. + + Raises: + KeyError: If a referenced variable is not in var_dict. + """ + + def replace(match: re.Match[str]) -> str: + var_name = match.group(1) or match.group(2) + if var_name not in var_dict: + raise KeyError(f"Variable '${var_name}' not found in expand_vars") + return str(var_dict[var_name]) + + return _VAR_PATTERN.sub(replace, template) + + +def expand_templates( + templates: Dict[str, str], + spec: "ExpansionSpec", +) -> Iterator[Dict[str, str]]: + """Expand template strings with variable substitution. + + Uses $var or ${var} syntax only. + + Args: + templates: Dict of template strings, e.g. {"source": "dc${dc}/...", "sink": "..."}. + spec: Expansion specification with variables and mode. + + Yields: + Dicts with same keys as templates, values substituted. + + Raises: + ValueError: If zip mode has mismatched list lengths or expansion exceeds limit. + KeyError: If a template references an undefined variable. + + Example: + >>> spec = ExpansionSpec(expand_vars={"dc": [1, 2]}) + >>> list(expand_templates({"src": "dc${dc}"}, spec)) + [{"src": "dc1"}, {"src": "dc2"}] + """ + if spec.is_empty(): + yield templates + return + + var_names = sorted(spec.expand_vars.keys()) + var_values = [spec.expand_vars[k] for k in var_names] + + if spec.expansion_mode == "zip": + lengths = [len(v) for v in var_values] + if len(set(lengths)) != 1: + raise ValueError( + f"zip expansion requires equal-length lists; got lengths {lengths}" + ) + combos: Iterator[tuple[Any, ...]] = zip(*var_values, strict=True) + expansion_size = lengths[0] if lengths else 0 + else: + # Cartesian product + expansion_size = 1 + for v in var_values: + expansion_size *= len(v) + combos = product(*var_values) + + if expansion_size > MAX_TEMPLATE_EXPANSIONS: + raise ValueError( + f"Template expansion would create {expansion_size} items " + f"(limit: {MAX_TEMPLATE_EXPANSIONS}). " + f"Consider using fewer variables or splitting into multiple entries." 
+ ) + + for combo in combos: + var_dict = dict(zip(var_names, combo, strict=True)) + yield {k: substitute_vars(v, var_dict) for k, v in templates.items()} diff --git a/ngraph/dsl/selectors/__init__.py b/ngraph/dsl/selectors/__init__.py new file mode 100644 index 0000000..65dee4e --- /dev/null +++ b/ngraph/dsl/selectors/__init__.py @@ -0,0 +1,35 @@ +"""Unified node selection for NetGraph DSL. + +This module provides a single abstraction for node selection used across +adjacency, demands, overrides, and workflow steps. + +Usage: + from ngraph.dsl.selectors import normalize_selector, select_nodes, NodeSelector + + # From YAML config (string or dict) + selector = normalize_selector(raw_config["source"], "demand") + + # Evaluate against network + groups = select_nodes(network, selector, default_active_only=True) +""" + +from .conditions import evaluate_condition, evaluate_conditions +from .normalize import normalize_selector +from .schema import Condition, MatchSpec, NodeSelector +from .select import flatten_link_attrs, flatten_node_attrs, select_nodes + +__all__ = [ + # Schema + "Condition", + "MatchSpec", + "NodeSelector", + # Parsing + "normalize_selector", + # Evaluation + "select_nodes", + "evaluate_condition", + "evaluate_conditions", + # Attribute flattening + "flatten_node_attrs", + "flatten_link_attrs", +] diff --git a/ngraph/dsl/selectors/conditions.py b/ngraph/dsl/selectors/conditions.py new file mode 100644 index 0000000..ce8ff5d --- /dev/null +++ b/ngraph/dsl/selectors/conditions.py @@ -0,0 +1,129 @@ +"""Condition evaluation for node/entity filtering. + +Provides evaluation logic for attribute conditions used in selectors +and failure policies. Supports operators: ==, !=, <, <=, >, >=, +contains, not_contains, in, not_in, any_value, no_value. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Dict, Iterable + +if TYPE_CHECKING: + from .schema import Condition + +__all__ = [ + "evaluate_condition", + "evaluate_conditions", +] + + +def evaluate_condition(attrs: Dict[str, Any], cond: "Condition") -> bool: + """Evaluate a single condition against an attribute dict. + + Args: + attrs: Flat mapping of entity attributes. + cond: Condition to evaluate. + + Returns: + True if condition passes, False otherwise. + + Raises: + ValueError: If operator is unknown or value type is invalid. 
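+
+    Example (illustrative):
+        evaluate_condition({"role": "leaf"}, Condition("role", "==", "leaf"))
+        returns True. A missing attribute fails every operator except
+        "no_value".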
+ """ + has_attr = cond.attr in attrs + attr_value = attrs.get(cond.attr) + op = cond.operator + expected = cond.value + + # Existence operators + if op == "any_value": + return has_attr and attr_value is not None + if op == "no_value": + return (not has_attr) or (attr_value is None) + + # For all other operators, missing/None attribute means no match + if attr_value is None: + return False + + # Equality operators + if op == "==": + return attr_value == expected + if op == "!=": + return attr_value != expected + + # Numeric comparisons + if op in ("<", "<=", ">", ">="): + try: + left = float(attr_value) + right = float(expected) + except (TypeError, ValueError): + return False + if op == "<": + return left < right + if op == "<=": + return left <= right + if op == ">": + return left > right + if op == ">=": + return left >= right + + # String/collection containment + if op == "contains": + if isinstance(attr_value, str): + return str(expected) in attr_value + if isinstance(attr_value, (list, tuple, set)): + return expected in attr_value + return False + + if op == "not_contains": + if isinstance(attr_value, str): + return str(expected) not in attr_value + if isinstance(attr_value, (list, tuple, set)): + return expected not in attr_value + return True + + # List membership operators + if op == "in": + if not isinstance(expected, (list, tuple, set)): + raise ValueError(f"'in' operator requires list value, got {type(expected)}") + return attr_value in expected + + if op == "not_in": + if not isinstance(expected, (list, tuple, set)): + raise ValueError( + f"'not_in' operator requires list value, got {type(expected)}" + ) + return attr_value not in expected + + raise ValueError(f"Unknown operator: {op}") + + +def evaluate_conditions( + attrs: Dict[str, Any], + conditions: Iterable["Condition"], + logic: str = "or", +) -> bool: + """Evaluate multiple conditions with AND/OR logic. + + Args: + attrs: Flat mapping of entity attributes. + conditions: Iterable of Condition objects. + logic: "and" (all must match) or "or" (any must match). + + Returns: + True if combined predicate passes. + + Raises: + ValueError: If logic is not "and" or "or". + """ + cond_list = list(conditions) + if not cond_list: + return True + + if logic == "and": + return all(evaluate_condition(attrs, c) for c in cond_list) + if logic == "or": + return any(evaluate_condition(attrs, c) for c in cond_list) + + raise ValueError(f"Unsupported logic: {logic}") diff --git a/ngraph/dsl/selectors/normalize.py b/ngraph/dsl/selectors/normalize.py new file mode 100644 index 0000000..d46ffab --- /dev/null +++ b/ngraph/dsl/selectors/normalize.py @@ -0,0 +1,114 @@ +"""Selector parsing and normalization. + +Provides the single entry point for converting raw selector values +(strings or dicts) into NodeSelector objects. +""" + +from __future__ import annotations + +from dataclasses import replace +from typing import Any, Dict, Union + +from .schema import Condition, MatchSpec, NodeSelector + +__all__ = [ + "normalize_selector", +] + +# Context-aware defaults for active_only +_ACTIVE_ONLY_DEFAULTS: Dict[str, bool] = { + "adjacency": False, + "override": False, + "demand": True, + "workflow": True, +} + + +def normalize_selector( + raw: Union[str, Dict[str, Any], NodeSelector], + context: str, +) -> NodeSelector: + """Normalize a raw selector (string or dict) to a NodeSelector. + + This is the single entry point for all selector parsing. All downstream + code works with NodeSelector objects only. 
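+
+    Illustrative: normalize_selector("^dc1/.*", "demand") yields
+    NodeSelector(path="^dc1/.*", active_only=True), because the "demand"
+    context defaults active_only to True.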
+ + Args: + raw: Either a regex string, selector dict, or existing NodeSelector. + context: Usage context ("adjacency", "demand", "override", "workflow"). + Determines the default for active_only. + + Returns: + Normalized NodeSelector instance. + + Raises: + ValueError: If selector format is invalid or context is unknown. + """ + default_active_only = _ACTIVE_ONLY_DEFAULTS.get(context) + if default_active_only is None: + raise ValueError( + f"Unknown context '{context}'. " + f"Expected one of: {list(_ACTIVE_ONLY_DEFAULTS.keys())}" + ) + + if isinstance(raw, NodeSelector): + if raw.active_only is None: + return replace(raw, active_only=default_active_only) + return raw + + if isinstance(raw, str): + return NodeSelector(path=raw, active_only=default_active_only) + + if isinstance(raw, dict): + return _parse_dict(raw, default_active_only) + + raise ValueError(f"Selector must be string or dict, got {type(raw).__name__}") + + +def _parse_dict(raw: Dict[str, Any], default_active_only: bool) -> NodeSelector: + """Parse a selector dictionary into a NodeSelector.""" + match_spec = None + if "match" in raw: + match_spec = _parse_match(raw["match"]) + + path = raw.get("path") + group_by = raw.get("group_by") + active_only = raw.get("active_only", default_active_only) + + # Validate at least one selection mechanism + if path is None and group_by is None and match_spec is None: + raise ValueError( + "Selector dict requires at least one of: path, group_by, or match" + ) + + return NodeSelector( + path=path, + group_by=group_by, + match=match_spec, + active_only=active_only, + ) + + +def _parse_match(raw: Dict[str, Any]) -> MatchSpec: + """Parse a match specification dict.""" + conditions = [] + for cond_dict in raw.get("conditions", []): + if not isinstance(cond_dict, dict): + raise ValueError( + f"Each condition must be a dict, got {type(cond_dict).__name__}" + ) + if "attr" not in cond_dict or "operator" not in cond_dict: + raise ValueError("Each condition must have 'attr' and 'operator'") + + conditions.append( + Condition( + attr=cond_dict["attr"], + operator=cond_dict["operator"], + value=cond_dict.get("value"), + ) + ) + + return MatchSpec( + conditions=conditions, + logic=raw.get("logic", "or"), + ) diff --git a/ngraph/dsl/selectors/schema.py b/ngraph/dsl/selectors/schema.py new file mode 100644 index 0000000..551eec2 --- /dev/null +++ b/ngraph/dsl/selectors/schema.py @@ -0,0 +1,82 @@ +"""Schema definitions for unified node selection. + +Provides dataclasses for node selection configuration used across +adjacency, demands, overrides, and workflow steps. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Any, List, Literal, Optional + + +@dataclass +class Condition: + """A single attribute condition for filtering. + + Attributes: + attr: Attribute name to match. + operator: Comparison operator. + value: Right-hand operand (unused for any_value/no_value). + """ + + attr: str + operator: Literal[ + "==", + "!=", + "<", + "<=", + ">", + ">=", + "contains", + "not_contains", + "in", + "not_in", + "any_value", + "no_value", + ] + value: Any = None + + +@dataclass +class MatchSpec: + """Specification for filtering nodes by attribute conditions. + + Attributes: + conditions: List of conditions to evaluate. + logic: How to combine conditions ("and" = all, "or" = any). + """ + + conditions: List[Condition] = field(default_factory=list) + logic: Literal["and", "or"] = "or" + + +@dataclass +class NodeSelector: + """Unified node selection specification. 
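+
+    Illustrative: NodeSelector(path="^dc1/.*", group_by="role") selects
+    nodes whose names match the pattern and groups them by their "role"
+    attribute.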
+
+    Evaluation order:
+    1. Select nodes matching `path` regex (all nodes if omitted)
+    2. Filter by `match` conditions
+    3. Filter by `active_only` flag
+    4. Group by `group_by` attribute (if specified)
+
+    At least one of path, group_by, or match must be specified.
+
+    Attributes:
+        path: Regex pattern on node.name.
+        group_by: Attribute name to group nodes by.
+        match: Attribute-based filtering conditions.
+        active_only: Whether to exclude disabled nodes. None uses context default.
+    """
+
+    path: Optional[str] = None
+    group_by: Optional[str] = None
+    match: Optional[MatchSpec] = None
+    active_only: Optional[bool] = None
+
+    def __post_init__(self) -> None:
+        if self.path is None and self.group_by is None and self.match is None:
+            raise ValueError(
+                "NodeSelector requires at least one of: path, group_by, or match"
+            )
diff --git a/ngraph/dsl/selectors/select.py b/ngraph/dsl/selectors/select.py
new file mode 100644
index 0000000..757e186
--- /dev/null
+++ b/ngraph/dsl/selectors/select.py
@@ -0,0 +1,187 @@
+"""Node selection and evaluation.
+
+Provides the unified select_nodes() function that handles regex matching,
+attribute filtering, active-only filtering, and grouping.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set
+
+from .conditions import evaluate_conditions
+from .schema import MatchSpec, NodeSelector
+
+if TYPE_CHECKING:
+    from ngraph.model.network import Link, Network, Node
+
+__all__ = [
+    "select_nodes",
+    "flatten_node_attrs",
+    "flatten_link_attrs",
+]
+
+
+def select_nodes(
+    network: "Network",
+    selector: NodeSelector,
+    default_active_only: bool,
+    excluded_nodes: Optional[Set[str]] = None,
+) -> Dict[str, List["Node"]]:
+    """Unified entry point for node selection.
+
+    Evaluation order:
+    1. Select nodes matching `path` regex (or all nodes if path is None)
+    2. Filter by `match` conditions
+    3. Filter by `active_only` flag and excluded_nodes
+    4. Group by `group_by` attribute (overrides regex capture grouping)
+
+    Args:
+        network: The network graph.
+        selector: Node selection specification.
+        default_active_only: Context-aware default for active_only flag.
+            Required parameter to prevent silent bugs.
+        excluded_nodes: Additional node names to exclude.
+
+    Returns:
+        Dict mapping group labels to lists of nodes.
+    """
+    excluded = excluded_nodes or set()
+
+    # Resolve effective active_only flag
+    active_only = (
+        selector.active_only
+        if selector.active_only is not None
+        else default_active_only
+    )
+
+    # Step 1: Select by path regex (or all nodes)
+    if selector.path is not None:
+        candidates = _select_by_regex(network, selector.path)
+    else:
+        candidates = {"_all_": list(network.nodes.values())}
+
+    # Step 2: Apply match conditions
+    if selector.match is not None:
+        candidates = _filter_by_match(candidates, selector.match)
+
+    # Step 3: Filter active only + excluded
+    if active_only or excluded:
+        candidates = _filter_active_and_excluded(candidates, active_only, excluded)
+
+    # Step 4: Apply grouping (overrides regex capture grouping)
+    if selector.group_by is not None:
+        return _group_by_attribute(candidates, selector.group_by)
+
+    return candidates
+
+
+def _select_by_regex(network: "Network", pattern: str) -> Dict[str, List["Node"]]:
+    """Select nodes by regex pattern with capture group handling.
+
+    Delegates to Network.select_node_groups_by_path() which provides caching.
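+
+    Illustrative: one capture group, e.g. "^(dc\\d+)/leaf.*", yields one
+    group per captured value, such as {"dc1": [...], "dc2": [...]}.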
+ """ + return network.select_node_groups_by_path(pattern) + + +def _filter_by_match( + groups: Dict[str, List["Node"]], + match: MatchSpec, +) -> Dict[str, List["Node"]]: + """Filter nodes in each group by match conditions.""" + result: Dict[str, List["Node"]] = {} + for label, nodes in groups.items(): + filtered = [n for n in nodes if _node_matches(n, match)] + if filtered: + result[label] = filtered + return result + + +def _node_matches(node: "Node", match: MatchSpec) -> bool: + """Check if a node matches the match specification.""" + attrs = flatten_node_attrs(node) + return evaluate_conditions(attrs, match.conditions, match.logic) + + +def flatten_node_attrs(node: "Node") -> Dict[str, Any]: + """Build flat attribute dict for condition evaluation. + + Merges node's top-level fields (name, disabled, risk_groups) with + node.attrs. Top-level fields take precedence on key conflicts. + + Args: + node: Node object to flatten. + + Returns: + Flat dict suitable for condition evaluation. + """ + attrs: Dict[str, Any] = { + "name": node.name, + "disabled": node.disabled, + "risk_groups": list(node.risk_groups), + } + # Add user attrs, but don't overwrite top-level fields + attrs.update({k: v for k, v in node.attrs.items() if k not in attrs}) + return attrs + + +def flatten_link_attrs(link: "Link", link_id: str) -> Dict[str, Any]: + """Build flat attribute dict for condition evaluation on links. + + Merges link's top-level fields with link.attrs. Top-level fields + take precedence on key conflicts. + + Args: + link: Link object to flatten. + link_id: The link's ID in the network. + + Returns: + Flat dict suitable for condition evaluation. + """ + attrs: Dict[str, Any] = { + "id": link_id, + "source": link.source, + "target": link.target, + "capacity": link.capacity, + "cost": link.cost, + "disabled": link.disabled, + "risk_groups": list(link.risk_groups), + } + attrs.update({k: v for k, v in link.attrs.items() if k not in attrs}) + return attrs + + +def _filter_active_and_excluded( + groups: Dict[str, List["Node"]], + active_only: bool, + excluded: Set[str], +) -> Dict[str, List["Node"]]: + """Remove disabled and/or explicitly excluded nodes.""" + result: Dict[str, List["Node"]] = {} + for label, nodes in groups.items(): + filtered = [] + for n in nodes: + if n.name in excluded: + continue + if active_only and n.disabled: + continue + filtered.append(n) + if filtered: + result[label] = filtered + return result + + +def _group_by_attribute( + groups: Dict[str, List["Node"]], + attr_name: str, +) -> Dict[str, List["Node"]]: + """Re-group nodes by attribute value. + + Note: This discards any existing grouping (including regex captures). 
+ """ + result: Dict[str, List["Node"]] = {} + for nodes in groups.values(): + for node in nodes: + if attr_name in node.attrs: + key = str(node.attrs[attr_name]) + result.setdefault(key, []).append(node) + return result diff --git a/ngraph/exec/analysis/flow.py b/ngraph/exec/analysis/flow.py index 02f9ca7..3ecec1e 100644 --- a/ngraph/exec/analysis/flow.py +++ b/ngraph/exec/analysis/flow.py @@ -45,10 +45,13 @@ def _reconstruct_traffic_demands( return [ TrafficDemand( id=config.get("id") or "", - source_path=config["source_path"], - sink_path=config["sink_path"], + source=config["source"], + sink=config["sink"], demand=config["demand"], mode=config.get("mode", "pairwise"), + group_mode=config.get("group_mode", "flatten"), + expand_vars=config.get("expand_vars", {}), + expansion_mode=config.get("expansion_mode", "cartesian"), flow_policy_config=config.get("flow_policy_config"), priority=config.get("priority", 0), ) @@ -315,8 +318,8 @@ def max_flow_analysis( network: "Network", excluded_nodes: Set[str], excluded_links: Set[str], - source_path: str, - sink_path: str, + source: str | dict[str, Any], + sink: str | dict[str, Any], mode: str = "combine", shortest_path: bool = False, require_capacity: bool = True, @@ -332,8 +335,8 @@ def max_flow_analysis( network: Network instance. excluded_nodes: Set of node names to exclude temporarily. excluded_links: Set of link IDs to exclude temporarily. - source_path: Selection expression for source node groups. - sink_path: Selection expression for sink node groups. + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). shortest_path: Whether to use shortest paths only. require_capacity: If True (default), path selection considers available @@ -354,7 +357,7 @@ def max_flow_analysis( if context is not None: ctx = context else: - ctx = analyze(network, source=source_path, sink=sink_path, mode=mode_enum) + ctx = analyze(network, source=source, sink=sink, mode=mode_enum) flow_entries: list[FlowEntry] = [] total_demand = 0.0 @@ -624,8 +627,8 @@ def sensitivity_analysis( network: "Network", excluded_nodes: Set[str], excluded_links: Set[str], - source_path: str, - sink_path: str, + source: str | dict[str, Any], + sink: str | dict[str, Any], mode: str = "combine", shortest_path: bool = False, flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL, @@ -645,8 +648,8 @@ def sensitivity_analysis( network: Network instance. excluded_nodes: Set of node names to exclude temporarily. excluded_links: Set of link IDs to exclude temporarily. - source_path: Selection expression for source node groups. - sink_path: Selection expression for sink node groups. + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). shortest_path: If True, use single-tier shortest-path flow (IP/IGP mode). Reports only edges used under ECMP routing. 
If False (default), use @@ -665,7 +668,7 @@ def sensitivity_analysis( if context is not None: ctx = context else: - ctx = analyze(network, source=source_path, sink=sink_path, mode=mode_enum) + ctx = analyze(network, source=source, sink=sink, mode=mode_enum) # Get max flow values for each pair flow_values = ctx.max_flow( @@ -744,8 +747,8 @@ def build_demand_context( def build_maxflow_context( network: "Network", - source_path: str, - sink_path: str, + source: str | dict[str, Any], + sink: str | dict[str, Any], mode: str = "combine", ) -> AnalysisContext: """Build an AnalysisContext for repeated max-flow analysis. @@ -755,12 +758,12 @@ def build_maxflow_context( Args: network: Network instance. - source_path: Selection expression for source node groups. - sink_path: Selection expression for sink node groups. + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). Returns: AnalysisContext ready for use with max_flow_analysis or sensitivity_analysis. """ mode_enum = Mode.COMBINE if mode == "combine" else Mode.PAIRWISE - return analyze(network, source=source_path, sink=sink_path, mode=mode_enum) + return analyze(network, source=source, sink=sink, mode=mode_enum) diff --git a/ngraph/exec/demand/builder.py b/ngraph/exec/demand/builder.py index 65f738e..4e01645 100644 --- a/ngraph/exec/demand/builder.py +++ b/ngraph/exec/demand/builder.py @@ -1,7 +1,6 @@ """Builders for traffic matrices. Construct `TrafficMatrixSet` from raw dictionaries (e.g. parsed YAML). -This logic was previously embedded in `Scenario.from_yaml`. """ from __future__ import annotations @@ -25,7 +24,8 @@ def build_traffic_matrix_set(raw: Dict[str, List[dict]]) -> TrafficMatrixSet: Initialized `TrafficMatrixSet` with constructed `TrafficDemand` objects. Raises: - ValueError: If ``raw`` is not a mapping of name -> list[dict]. + ValueError: If ``raw`` is not a mapping of name -> list[dict], + or if required fields are missing. 
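
A sketch of the reuse pattern implied by the `context` parameter: build the analysis context once, then share it across repeated calls. `failure_scenarios` is a hypothetical iterable of per-iteration link exclusions, named here only for illustration.

```python
# Hedged sketch: amortize context construction over many max-flow calls.
from ngraph.exec.analysis.flow import build_maxflow_context, max_flow_analysis

ctx = build_maxflow_context(network, source="^dc1/.*", sink="^dc2/.*", mode="combine")
for excluded in failure_scenarios:  # hypothetical: sets of link IDs per iteration
    max_flow_analysis(
        network, excluded_nodes=set(), excluded_links=excluded,
        source="^dc1/.*", sink="^dc2/.*", mode="combine", context=ctx,
    )
```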
""" if not isinstance(raw, dict): raise ValueError( @@ -46,14 +46,36 @@ def build_traffic_matrix_set(raw: Dict[str, List[dict]]) -> TrafficMatrixSet: f"Entries in matrix '{name}' must be dicts, got {type(d).__name__}" ) + # Validate required fields + if "source" not in d or "sink" not in d: + raise ValueError( + f"Each demand in matrix '{name}' requires 'source' and 'sink' fields" + ) + + # Build normalized dict for TrafficDemand constructor + td_kwargs: Dict[str, Any] = { + "source": d["source"], + "sink": d["sink"], + "demand": d.get("demand", 0.0), + "priority": d.get("priority", 0), + "mode": d.get("mode", "combine"), + "group_mode": d.get("group_mode", "flatten"), + "expand_vars": d.get("expand_vars", {}), + "expansion_mode": d.get("expansion_mode", "cartesian"), + "attrs": d.get("attrs", {}), + } + + # Optional id + if "id" in d: + td_kwargs["id"] = d["id"] + # Coerce flow_policy_config into FlowPolicyPreset enum when provided if "flow_policy_config" in d: - d = dict(d) # shallow copy to avoid mutating caller data - d["flow_policy_config"] = _coerce_flow_policy_config( - d.get("flow_policy_config") + td_kwargs["flow_policy_config"] = _coerce_flow_policy_config( + d["flow_policy_config"] ) - coerced.append(TrafficDemand(**d)) + coerced.append(TrafficDemand(**td_kwargs)) tms.add(name, coerced) @@ -69,8 +91,8 @@ def _coerce_flow_policy_config(value: Any) -> Optional[FlowPolicyPreset]: - int: mapped by value (e.g., 1 -> SHORTEST_PATHS_ECMP) - str: name of enum (case-insensitive); numeric strings are allowed - Any other type is returned unchanged to preserve backwards-compat behavior - for advanced usages (e.g., dict configs handled elsewhere). + Any other type is returned unchanged for advanced usages + (e.g., dict configs handled elsewhere). """ if value is None: return None diff --git a/ngraph/exec/demand/expand.py b/ngraph/exec/demand/expand.py index cb3f015..3bf869d 100644 --- a/ngraph/exec/demand/expand.py +++ b/ngraph/exec/demand/expand.py @@ -1,21 +1,20 @@ """Demand expansion: converts TrafficDemand specs into concrete placement demands. Supports both pairwise and combine modes through augmentation-based pseudo nodes. +Uses unified selectors for node selection. 
""" from __future__ import annotations -from dataclasses import dataclass -from typing import List +from dataclasses import dataclass, replace +from typing import Any, Dict, Iterator, List from ngraph.analysis.context import LARGE_CAPACITY, AugmentationEdge +from ngraph.dsl.expansion import ExpansionSpec, expand_templates +from ngraph.dsl.selectors import normalize_selector, select_nodes from ngraph.model.demand.spec import TrafficDemand from ngraph.model.flow.policy_config import FlowPolicyPreset -from ngraph.model.network import Network -from ngraph.utils.nodes import ( - collect_active_node_names_from_groups, - collect_active_nodes_from_groups, -) +from ngraph.model.network import Network, Node @dataclass @@ -55,29 +54,42 @@ class DemandExpansion: augmentations: List[AugmentationEdge] +def _flatten_groups(groups: Dict[str, List[Node]]) -> List[Node]: + """Flatten grouped nodes into a single list.""" + result: List[Node] = [] + for nodes in groups.values(): + result.extend(nodes) + return result + + +def _flatten_group_names(groups: Dict[str, List[Node]]) -> List[str]: + """Flatten grouped nodes into a list of names.""" + return [node.name for node in _flatten_groups(groups)] + + def _expand_combine( td: TrafficDemand, - src_groups, - dst_groups, + src_groups: Dict[str, List[Node]], + dst_groups: Dict[str, List[Node]], policy_preset: FlowPolicyPreset, ) -> tuple[list[ExpandedDemand], list[AugmentationEdge]]: """Expand combine mode: aggregate sources/sinks through pseudo nodes.""" pseudo_src = f"_src_{td.id}" pseudo_snk = f"_snk_{td.id}" - src_names = collect_active_node_names_from_groups(src_groups) - dst_names = collect_active_node_names_from_groups(dst_groups) + src_names = _flatten_group_names(src_groups) + dst_names = _flatten_group_names(dst_groups) if not src_names or not dst_names: return [], [] augmentations = [] - # Pseudo-source → real sources (unidirectional OUT) + # Pseudo-source -> real sources (unidirectional OUT) for src_name in src_names: augmentations.append(AugmentationEdge(pseudo_src, src_name, LARGE_CAPACITY, 0)) - # Real sinks → pseudo-sink (unidirectional IN) + # Real sinks -> pseudo-sink (unidirectional IN) for dst_name in dst_names: augmentations.append(AugmentationEdge(dst_name, pseudo_snk, LARGE_CAPACITY, 0)) @@ -96,13 +108,13 @@ def _expand_combine( def _expand_pairwise( td: TrafficDemand, - src_groups, - dst_groups, + src_groups: Dict[str, List[Node]], + dst_groups: Dict[str, List[Node]], policy_preset: FlowPolicyPreset, ) -> tuple[list[ExpandedDemand], list[AugmentationEdge]]: """Expand pairwise mode: create demand for each (src, dst) pair.""" - src_nodes = collect_active_nodes_from_groups(src_groups) - dst_nodes = collect_active_nodes_from_groups(dst_groups) + src_nodes = _flatten_groups(src_groups) + dst_nodes = _flatten_groups(dst_groups) # Filter self-pairs pairs = [ @@ -130,6 +142,181 @@ def _expand_pairwise( return demands, [] # No augmentations for pairwise +def _extract_selector_templates(selector: Any, prefix: str) -> Dict[str, str]: + """Extract string fields from a selector that may contain variables. + + Args: + selector: String path or dict selector. + prefix: Key prefix for the returned template dict. + + Returns: + Dict mapping template keys to string values that may contain $var. 
+ """ + templates: Dict[str, str] = {} + if isinstance(selector, str): + templates[prefix] = selector + elif isinstance(selector, dict): + if "path" in selector and isinstance(selector["path"], str): + templates[f"{prefix}.path"] = selector["path"] + if "group_by" in selector and isinstance(selector["group_by"], str): + templates[f"{prefix}.group_by"] = selector["group_by"] + return templates + + +def _rebuild_selector(original: Any, substituted: Dict[str, str], prefix: str) -> Any: + """Rebuild a selector with substituted values. + + Args: + original: Original selector (string or dict). + substituted: Dict of substituted template values. + prefix: Key prefix used in substituted dict. + + Returns: + Selector with variables substituted. + """ + if isinstance(original, str): + return substituted.get(prefix, original) + + if isinstance(original, dict): + result = dict(original) + if f"{prefix}.path" in substituted: + result["path"] = substituted[f"{prefix}.path"] + if f"{prefix}.group_by" in substituted: + result["group_by"] = substituted[f"{prefix}.group_by"] + return result + + return original + + +def _expand_with_variables(td: TrafficDemand) -> Iterator[TrafficDemand]: + """Expand a TrafficDemand using its expand_vars specification. + + Yields one or more TrafficDemand instances with variables substituted. + Handles both string and dict selectors correctly. + """ + if not td.expand_vars: + yield td + return + + spec = ExpansionSpec( + expand_vars=td.expand_vars, + expansion_mode=td.expansion_mode, # type: ignore[arg-type] + ) + + # Extract string templates from selectors (handles both str and dict) + templates = _extract_selector_templates(td.source, "source") + templates.update(_extract_selector_templates(td.sink, "sink")) + + if not templates: + # No expandable string fields - yield as-is + yield td + return + + # Expand templates and rebuild selectors + for substituted in expand_templates(templates, spec): + yield replace( + td, + source=_rebuild_selector(td.source, substituted, "source"), + sink=_rebuild_selector(td.sink, substituted, "sink"), + expand_vars={}, # Clear to prevent re-expansion + ) + + +def _expand_by_group_mode( + td: TrafficDemand, + src_groups: Dict[str, List[Node]], + dst_groups: Dict[str, List[Node]], + policy_preset: FlowPolicyPreset, +) -> tuple[list[ExpandedDemand], list[AugmentationEdge]]: + """Expand demands based on group_mode. 
+ + group_mode semantics: + - flatten: All nodes combined (default, current behavior) + - per_group: One demand per (src_group, dst_group) pair + - group_pairwise: Pairwise expansion within each group pair + """ + if td.group_mode == "flatten": + # Standard behavior: flatten all groups, then apply mode + if td.mode == "combine": + return _expand_combine(td, src_groups, dst_groups, policy_preset) + elif td.mode == "pairwise": + return _expand_pairwise(td, src_groups, dst_groups, policy_preset) + else: + raise ValueError(f"Unknown demand mode: {td.mode}") + + elif td.group_mode == "per_group": + # One demand per (src_group, dst_group) pair + all_demands: List[ExpandedDemand] = [] + all_augmentations: List[AugmentationEdge] = [] + + for src_label, src_nodes in src_groups.items(): + for dst_label, dst_nodes in dst_groups.items(): + if src_label == dst_label: + continue # Skip same-group pairs + + group_td = replace(td, id=f"{td.id}|{src_label}|{dst_label}") + single_src = {src_label: src_nodes} + single_dst = {dst_label: dst_nodes} + + if td.mode == "combine": + demands, augs = _expand_combine( + group_td, single_src, single_dst, policy_preset + ) + else: + demands, augs = _expand_pairwise( + group_td, single_src, single_dst, policy_preset + ) + + all_demands.extend(demands) + all_augmentations.extend(augs) + + return all_demands, all_augmentations + + elif td.group_mode == "group_pairwise": + # Pairwise between groups: each src group to each dst group + all_demands: List[ExpandedDemand] = [] + all_augmentations: List[AugmentationEdge] = [] + + group_pairs = [ + (src_label, dst_label) + for src_label in src_groups + for dst_label in dst_groups + if src_label != dst_label + ] + + if not group_pairs: + return [], [] + + # Divide volume among group pairs + volume_per_group_pair = td.demand / len(group_pairs) + + for src_label, dst_label in group_pairs: + group_td = replace( + td, + id=f"{td.id}|{src_label}|{dst_label}", + demand=volume_per_group_pair, + ) + single_src = {src_label: src_groups[src_label]} + single_dst = {dst_label: dst_groups[dst_label]} + + if td.mode == "combine": + demands, augs = _expand_combine( + group_td, single_src, single_dst, policy_preset + ) + else: + demands, augs = _expand_pairwise( + group_td, single_src, single_dst, policy_preset + ) + + all_demands.extend(demands) + all_augmentations.extend(augs) + + return all_demands, all_augmentations + + else: + raise ValueError(f"Unknown group_mode: {td.group_mode}") + + def expand_demands( network: Network, traffic_demands: List[TrafficDemand], @@ -138,10 +325,11 @@ def expand_demands( """Expand TrafficDemand specifications into concrete demands with augmentations. Pure function that: - 1. Selects node groups using Network's selection API - 2. Distributes volume based on mode (combine/pairwise) - 3. Generates augmentation edges for combine mode (pseudo nodes) - 4. Returns demands (node names) + augmentations + 1. Expands variables in selectors using expand_vars + 2. Normalizes and evaluates selectors to get node groups + 3. Distributes volume based on mode (combine/pairwise) and group_mode + 4. Generates augmentation edges for combine mode (pseudo nodes) + 5. Returns demands (node names) + augmentations Node names are used (not IDs) so expansion happens BEFORE graph building. IDs are resolved after graph is built with augmentations. 
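
A toy sketch of the `group_pairwise` volume split implemented above: total demand divides evenly across distinct (src_group, dst_group) label pairs, with same-label pairs skipped.

```python
src_groups = {"dc1": ["dc1/a"], "dc2": ["dc2/a"]}
dst_groups = {"dc2": ["dc2/b"], "dc3": ["dc3/a"]}
total_demand = 300.0

pairs = [(s, d) for s in src_groups for d in dst_groups if s != d]
per_pair = total_demand / len(pairs)
print(pairs, per_pair)  # 3 pairs -> 100.0 each
```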
@@ -161,34 +349,33 @@ def expand_demands( all_augmentations: List[AugmentationEdge] = [] for td in traffic_demands: - # Select node groups - src_groups = network.select_node_groups_by_path(td.source_path) - dst_groups = network.select_node_groups_by_path(td.sink_path) + # Step 1: Variable expansion (if expand_vars present) + for expanded_td in _expand_with_variables(td): + # Step 2: Normalize selectors + src_sel = normalize_selector(expanded_td.source, "demand") + sink_sel = normalize_selector(expanded_td.sink, "demand") - if not src_groups or not dst_groups: - continue + # Step 3: Select nodes (active_only=True for demands by context default) + src_groups = select_nodes(network, src_sel, default_active_only=True) + dst_groups = select_nodes(network, sink_sel, default_active_only=True) - policy_preset = td.flow_policy_config or default_policy_preset + if not src_groups or not dst_groups: + continue - # Expand based on mode - if td.mode == "combine": - demands, augmentations = _expand_combine( - td, src_groups, dst_groups, policy_preset - ) - elif td.mode == "pairwise": - demands, augmentations = _expand_pairwise( - td, src_groups, dst_groups, policy_preset + policy_preset = expanded_td.flow_policy_config or default_policy_preset + + # Step 4: Expand by group_mode + demands, augmentations = _expand_by_group_mode( + expanded_td, src_groups, dst_groups, policy_preset ) - else: - raise ValueError(f"Unknown demand mode: {td.mode}") - all_demands.extend(demands) - all_augmentations.extend(augmentations) + all_demands.extend(demands) + all_augmentations.extend(augmentations) if not all_demands: raise ValueError( "No demands could be expanded. Possible causes:\n" - " - Source/sink paths don't match any nodes\n" + " - Source/sink selectors don't match any nodes\n" " - All matching nodes are disabled\n" " - Source and sink are identical (self-loops not allowed)" ) diff --git a/ngraph/exec/failure/manager.py b/ngraph/exec/failure/manager.py index 866c713..cba033c 100644 --- a/ngraph/exec/failure/manager.py +++ b/ngraph/exec/failure/manager.py @@ -28,6 +28,7 @@ from concurrent.futures import ThreadPoolExecutor from typing import TYPE_CHECKING, Any, Dict, Protocol, Set, TypeVar +from ngraph.dsl.selectors import flatten_link_attrs, flatten_node_attrs from ngraph.logging import get_logger from ngraph.model.failure.policy_set import FailurePolicySet from ngraph.types.base import FlowPlacement @@ -340,49 +341,20 @@ def compute_exclusions( return excluded_nodes, excluded_links # Build merged views of nodes and links including top-level fields required by - # policy matching and risk-group expansion. This ensures attributes like - # 'risk_groups' are available to the policy engine. 
- def _merge_node_attrs() -> dict[str, dict[str, Any]]: - if self._merged_node_attrs is not None: - return self._merged_node_attrs - - merged: dict[str, dict[str, Any]] = {} - for node_name, node in self.network.nodes.items(): - attrs: dict[str, Any] = { - "name": node.name, - "disabled": node.disabled, - "risk_groups": node.risk_groups, - } - # Top-level fields take precedence over attrs on conflict - attrs.update({k: v for k, v in node.attrs.items() if k not in attrs}) - merged[node_name] = attrs - - self._merged_node_attrs = merged - return merged - - def _merge_link_attrs() -> dict[str, dict[str, Any]]: - if self._merged_link_attrs is not None: - return self._merged_link_attrs - - merged: dict[str, dict[str, Any]] = {} - for link_id, link in self.network.links.items(): - attrs: dict[str, Any] = { - "id": link_id, - "source": link.source, - "target": link.target, - "capacity": link.capacity, - "cost": link.cost, - "disabled": link.disabled, - "risk_groups": link.risk_groups, - } - attrs.update({k: v for k, v in link.attrs.items() if k not in attrs}) - merged[link_id] = attrs - - self._merged_link_attrs = merged - return merged - - node_map = _merge_node_attrs() - link_map = _merge_link_attrs() + # policy matching and risk-group expansion. Results are cached for reuse. + if self._merged_node_attrs is None: + self._merged_node_attrs = { + node_name: flatten_node_attrs(node) + for node_name, node in self.network.nodes.items() + } + if self._merged_link_attrs is None: + self._merged_link_attrs = { + link_id: flatten_link_attrs(link, link_id) + for link_id, link in self.network.links.items() + } + + node_map = self._merged_node_attrs + link_map = self._merged_link_attrs # Apply failure policy with optional deterministic seed override failed_ids = policy.apply_failures( @@ -494,15 +466,15 @@ def run_monte_carlo_analysis( ) logger.debug(f"Context built in {time.time() - cache_start:.3f}s") - elif "source_path" in analysis_kwargs and "sink_path" in analysis_kwargs: + elif "source" in analysis_kwargs and "sink" in analysis_kwargs: # Max-flow analysis or sensitivity analysis from ngraph.exec.analysis.flow import build_maxflow_context logger.debug("Pre-building context for max-flow analysis") analysis_kwargs["context"] = build_maxflow_context( self.network, - analysis_kwargs["source_path"], - analysis_kwargs["sink_path"], + analysis_kwargs["source"], + analysis_kwargs["sink"], mode=analysis_kwargs.get("mode", "combine"), ) logger.debug(f"Context built in {time.time() - cache_start:.3f}s") @@ -897,8 +869,8 @@ def run_single_failure_scenario( def run_max_flow_monte_carlo( self, - source_path: str, - sink_path: str, + source: str | dict[str, Any], + sink: str | dict[str, Any], mode: str = "combine", iterations: int = 100, parallelism: int = 1, @@ -918,8 +890,8 @@ def run_max_flow_monte_carlo( frequency-based capacity envelopes and optional failure pattern analysis. Args: - source_path: Regex pattern for source node groups. - sink_path: Regex pattern for sink node groups. + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). mode: "combine" (aggregate) or "pairwise" (individual flows). iterations: Number of failure scenarios to simulate. parallelism: Number of parallel workers (auto-adjusted if needed). 
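
A hedged sketch of invoking the Monte Carlo entry point with the renamed selector parameters. `fm` is a `FailureManager` instance; the keyword names come from this diff, while the remaining arguments are assumed from the docstring.

```python
raw = fm.run_max_flow_monte_carlo(
    source={"path": "^dc(1|2)/.*"},   # dict selector form
    sink="^edge/.*",                  # plain regex form
    mode="pairwise",
    iterations=1000,
    parallelism=4,
)
```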
@@ -952,8 +924,8 @@ def run_max_flow_monte_carlo( baseline=baseline, seed=seed, store_failure_patterns=store_failure_patterns, - source_path=source_path, - sink_path=sink_path, + source=source, + sink=sink, mode=mode, shortest_path=shortest_path, require_capacity=require_capacity, @@ -1063,10 +1035,15 @@ def run_demand_placement_monte_carlo( serializable_demands.append( { "id": getattr(demand, "id", None), - "source_path": getattr(demand, "source_path", ""), - "sink_path": getattr(demand, "sink_path", ""), + "source": getattr(demand, "source", ""), + "sink": getattr(demand, "sink", ""), "demand": float(getattr(demand, "demand", 0.0)), "mode": getattr(demand, "mode", "pairwise"), + "group_mode": getattr(demand, "group_mode", "flatten"), + "expand_vars": getattr(demand, "expand_vars", {}), + "expansion_mode": getattr( + demand, "expansion_mode", "cartesian" + ), "flow_policy_config": getattr( demand, "flow_policy_config", None ), @@ -1093,8 +1070,8 @@ def run_demand_placement_monte_carlo( def run_sensitivity_monte_carlo( self, - source_path: str, - sink_path: str, + source: str | dict[str, Any], + sink: str | dict[str, Any], mode: str = "combine", iterations: int = 100, parallelism: int = 1, @@ -1112,8 +1089,8 @@ def run_sensitivity_monte_carlo( scores showing which components have the greatest effect on network capacity. Args: - source_path: Regex pattern for source node groups. - sink_path: Regex pattern for sink node groups. + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). mode: "combine" (aggregate) or "pairwise" (individual flows). iterations: Number of failure scenarios to simulate. parallelism: Number of parallel workers (auto-adjusted if needed). @@ -1143,8 +1120,8 @@ def run_sensitivity_monte_carlo( baseline=baseline, seed=seed, store_failure_patterns=store_failure_patterns, - source_path=source_path, - sink_path=sink_path, + source=source, + sink=sink, mode=mode, shortest_path=shortest_path, flow_placement=flow_placement, @@ -1157,8 +1134,8 @@ def run_sensitivity_monte_carlo( ) # Augment metadata with analysis-specific context - raw_results["metadata"]["source_pattern"] = source_path - raw_results["metadata"]["sink_pattern"] = sink_path + raw_results["metadata"]["source"] = source + raw_results["metadata"]["sink"] = sink raw_results["metadata"]["mode"] = mode return raw_results diff --git a/ngraph/model/demand/spec.py b/ngraph/model/demand/spec.py index 2c1ef65..0840b9d 100644 --- a/ngraph/model/demand/spec.py +++ b/ngraph/model/demand/spec.py @@ -6,7 +6,7 @@ """ from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Dict, Optional +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from ngraph.model.flow.policy_config import FlowPolicyPreset from ngraph.utils.ids import new_base64_uuid @@ -21,35 +21,43 @@ @dataclass class TrafficDemand: - """Single traffic demand input. + """Traffic demand specification using unified selectors. Attributes: - source_path: Regex string selecting source nodes. - sink_path: Regex string selecting sink nodes. - priority: Priority class for this demand (lower value = higher priority). + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). demand: Total demand volume. demand_placed: Portion of this demand placed so far. - flow_policy_config: Policy preset (FlowPolicyPreset enum) used to build - a `FlowPolicy`` if ``flow_policy`` is not provided. 
- flow_policy: Concrete policy instance. If set, it overrides - ``flow_policy_config``. - mode: Expansion mode, ``"combine"`` or ``"pairwise"``. + priority: Priority class (lower = higher priority). + mode: Node pairing mode ("combine" or "pairwise"). + group_mode: How grouped nodes produce demands + ("flatten", "per_group", "group_pairwise"). + expand_vars: Variable substitutions using $var syntax. + expansion_mode: How to combine expand_vars ("cartesian" or "zip"). + flow_policy_config: Policy preset for routing. + flow_policy: Concrete policy instance (overrides flow_policy_config). attrs: Arbitrary user metadata. - id: Unique identifier. Auto-generated if empty or not provided. + id: Unique identifier. Auto-generated if empty. """ - source_path: str = "" - sink_path: str = "" - priority: int = 0 + source: Union[str, Dict[str, Any]] = "" + sink: Union[str, Dict[str, Any]] = "" demand: float = 0.0 demand_placed: float = 0.0 + priority: int = 0 + mode: str = "combine" + group_mode: str = "flatten" + expand_vars: Dict[str, List[Any]] = field(default_factory=dict) + expansion_mode: str = "cartesian" flow_policy_config: Optional[FlowPolicyPreset] = None flow_policy: Optional["FlowPolicy"] = None # type: ignore[valid-type] - mode: str = "combine" attrs: Dict[str, Any] = field(default_factory=dict) id: str = "" def __post_init__(self) -> None: """Generate id if not provided.""" if not self.id: - self.id = f"{self.source_path}|{self.sink_path}|{new_base64_uuid()}" + # Build a stable identifier from source/sink + src_key = self.source if isinstance(self.source, str) else str(self.source) + sink_key = self.sink if isinstance(self.sink, str) else str(self.sink) + self.id = f"{src_key}|{sink_key}|{new_base64_uuid()}" diff --git a/ngraph/model/failure/conditions.py b/ngraph/model/failure/conditions.py deleted file mode 100644 index d7f68e2..0000000 --- a/ngraph/model/failure/conditions.py +++ /dev/null @@ -1,110 +0,0 @@ -"""Shared condition primitives and evaluators. - -This module provides a small, dependency-free condition evaluation utility -that can be reused by failure policies and DSL selection filters. - -Operators supported: -- ==, !=, <, <=, >, >= -- contains, not_contains -- any_value, no_value - -The evaluator operates on a flat attribute mapping for an entity. Callers are -responsible for constructing that mapping (e.g. merging top-level fields with -``attrs`` and ensuring appropriate precedence rules). -""" - -from __future__ import annotations - -from dataclasses import dataclass -from typing import Any, Iterable - -__all__ = [ - "FailureCondition", - "evaluate_condition", - "evaluate_conditions", -] - - -@dataclass -class FailureCondition: - """A single condition for matching an entity attribute. - - Args: - attr: Attribute name to inspect in the entity mapping. - operator: Comparison operator. See module docstring for the list. - value: Right-hand operand for the comparison (unused for any_value/no_value). - """ - - attr: str - operator: str - value: Any | None = None - - -def evaluate_condition(entity_attrs: dict[str, Any], cond: FailureCondition) -> bool: - """Evaluate a single condition against an entity attribute mapping. - - Args: - entity_attrs: Flat mapping of attributes for the entity. - cond: Condition to evaluate. - - Returns: - True if the condition passes, False otherwise. 
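
A sketch of a `TrafficDemand` spec exercising the new fields; field names come from the dataclass above, values are illustrative.

```python
from ngraph.model.demand.spec import TrafficDemand

td = TrafficDemand(
    source="dc${d}/leaf/.*",                              # string selector
    sink={"path": "dc${d}/spine/.*", "group_by": "pod"},  # dict selector
    demand=400.0,
    mode="combine",
    group_mode="per_group",
    expand_vars={"d": [1, 2]},
    expansion_mode="cartesian",
)
print(td.id)  # auto-generated as "<src>|<sink>|<uuid>"
```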
- """ - has_attr = cond.attr in entity_attrs - derived_value = entity_attrs.get(cond.attr, None) - op = cond.operator - - if op == "==": - return derived_value == cond.value - elif op == "!=": - return derived_value != cond.value - elif op == "<": - return (derived_value is not None) and (derived_value < cond.value) - elif op == "<=": - return (derived_value is not None) and (derived_value <= cond.value) - elif op == ">": - return (derived_value is not None) and (derived_value > cond.value) - elif op == ">=": - return (derived_value is not None) and (derived_value >= cond.value) - elif op == "contains": - if derived_value is None: - return False - try: - return cond.value in derived_value # type: ignore[operator] - except TypeError: - return False - elif op == "not_contains": - if derived_value is None: - return True - try: - return cond.value not in derived_value # type: ignore[operator] - except TypeError: - return True - elif op == "any_value": - return has_attr - elif op == "no_value": - return (not has_attr) or (derived_value is None) - else: - raise ValueError(f"Unsupported operator: {op}") - - -def evaluate_conditions( - entity_attrs: dict[str, Any], - conditions: Iterable[FailureCondition], - logic: str, -) -> bool: - """Evaluate multiple conditions with AND/OR logic. - - Args: - entity_attrs: Flat mapping of attributes for the entity. - conditions: Iterable of conditions to evaluate. - logic: "and" or "or". - - Returns: - True if the combined predicate passes, False otherwise. - """ - if logic == "and": - return all(evaluate_condition(entity_attrs, c) for c in conditions) - if logic == "or": - return any(evaluate_condition(entity_attrs, c) for c in conditions) - raise ValueError(f"Unsupported logic: {logic}") diff --git a/ngraph/model/failure/parser.py b/ngraph/model/failure/parser.py index 6d76cfb..a8bdb0a 100644 --- a/ngraph/model/failure/parser.py +++ b/ngraph/model/failure/parser.py @@ -19,17 +19,47 @@ def build_risk_groups(rg_data: List[Dict[str, Any]]) -> List[RiskGroup]: + """Build RiskGroup objects from raw config data. + + Supports bracket expansion in risk group names. For example: + - `{name: "DC[1-3]_Power"}` creates DC1_Power, DC2_Power, DC3_Power + - Children are also expanded recursively + + Args: + rg_data: List of risk group definition dicts. + + Returns: + List of RiskGroup objects with names expanded. 
+ """ + from ngraph.dsl.expansion import expand_name_patterns + def build_one(d: Dict[str, Any]) -> RiskGroup: + """Build a single RiskGroup (name already expanded).""" name = d.get("name") if not name: raise ValueError("RiskGroup entry missing 'name' field.") disabled = d.get("disabled", False) + # Recursively expand and build children children_list = d.get("children", []) - child_objs = [build_one(cd) for cd in children_list] + child_objs = expand_and_build(children_list) attrs = normalize_yaml_dict_keys(d.get("attrs", {})) return RiskGroup(name=name, disabled=disabled, children=child_objs, attrs=attrs) - return [build_one(entry) for entry in rg_data] + def expand_and_build(entries: List[Dict[str, Any]]) -> List[RiskGroup]: + """Expand names and build RiskGroups for a list of entries.""" + result: List[RiskGroup] = [] + for entry in entries: + name = entry.get("name", "") + if not name: + raise ValueError("RiskGroup entry missing 'name' field.") + expanded_names = expand_name_patterns(name) + for exp_name in expanded_names: + modified = dict(entry) + modified["name"] = exp_name + result.append(build_one(modified)) + return result + + return expand_and_build(rg_data) def build_failure_policy( diff --git a/ngraph/model/failure/policy.py b/ngraph/model/failure/policy.py index ecb140d..6a53755 100644 --- a/ngraph/model/failure/policy.py +++ b/ngraph/model/failure/policy.py @@ -15,17 +15,11 @@ from dataclasses import dataclass, field from typing import Any, Dict, List, Literal, Optional, Sequence, Set, Tuple -from .conditions import FailureCondition as EvalCondition -from .conditions import evaluate_conditions as _shared_evaluate_conditions - - -@dataclass -class FailureCondition(EvalCondition): - """Alias to the shared condition dataclass. - - This maintains a consistent import path within the failure policy module. - """ +from ngraph.dsl.selectors import Condition +from ngraph.dsl.selectors import evaluate_conditions as _shared_evaluate_conditions +# Alias for clarity in failure policy context +FailureCondition = Condition # Supported entity scopes for a rule EntityScope = Literal["node", "link", "risk_group"] diff --git a/ngraph/model/network.py b/ngraph/model/network.py index 9587a2d..8bab079 100644 --- a/ngraph/model/network.py +++ b/ngraph/model/network.py @@ -142,22 +142,17 @@ def add_link(self, link: Link) -> None: self.links[link.id] = link def select_node_groups_by_path(self, path: str) -> Dict[str, List[Node]]: - r"""Select and group nodes by regex on name or by attribute directive. + r"""Select and group nodes by regex pattern on node name. - There are two selection modes: + Uses re.match() (anchored at start of string). Grouping behavior: + - With capturing groups: label is "|"-joined non-None captures. + - Without captures: label is the original pattern string. - 1) Regex on node.name (default): Uses re.match(), anchored at start. - - With capturing groups: label is "|"-joined non-None captures. - - Without captures: label is the original pattern string. - - 2) Attribute directive: If ``path`` fully matches ``attr:`` where - ```` matches ``[A-Za-z_]\w*``, nodes are grouped by the value of - ``node.attrs[]``. Nodes missing the attribute are omitted. Group - labels are ``str(value)`` for readability. If no nodes have the - attribute, an empty mapping is returned and a debug log entry is made. + Note: For attribute-based grouping, use the unified selector system + with ``{"group_by": "attr_name"}`` dict selectors. 
Args: - path: Regex for node name, or strict attribute directive ``attr:``. + path: Regex pattern for node name. Returns: Mapping from group label to list of nodes. @@ -166,25 +161,6 @@ def select_node_groups_by_path(self, path: str) -> Dict[str, List[Node]]: if path in self._selection_cache: return self._selection_cache[path] - # Strict attribute directive detection: attr: - attr_match = re.fullmatch(r"attr:([A-Za-z_]\w*)", path) - if attr_match: - attr_name = attr_match.group(1) - groups_by_attr: Dict[str, List[Node]] = {} - for node in self.nodes.values(): - if attr_name in node.attrs: - value = node.attrs[attr_name] - label = str(value) - groups_by_attr.setdefault(label, []).append(node) - if not groups_by_attr: - LOGGER.debug( - "Attribute directive '%s' matched no nodes (attribute missing)", - path, - ) - self._selection_cache[path] = groups_by_attr - return groups_by_attr - - # Fallback: regex over node.name pattern = re.compile(path) groups_map: Dict[str, List[Node]] = {} diff --git a/ngraph/results/snapshot.py b/ngraph/results/snapshot.py index 52dfa01..85ebfdf 100644 --- a/ngraph/results/snapshot.py +++ b/ngraph/results/snapshot.py @@ -55,11 +55,12 @@ def build_scenario_snapshot( entries.append( { "id": getattr(d, "id", None), - "source_path": getattr(d, "source_path", ""), - "sink_path": getattr(d, "sink_path", ""), + "source": getattr(d, "source", ""), + "sink": getattr(d, "sink", ""), "demand": float(getattr(d, "demand", 0.0)), "priority": int(getattr(d, "priority", 0)), "mode": getattr(d, "mode", "pairwise"), + "group_mode": getattr(d, "group_mode", "flatten"), "flow_policy_config": getattr(d, "flow_policy_config", None), "attrs": dict(getattr(d, "attrs", {}) or {}), } diff --git a/ngraph/schemas/scenario.json b/ngraph/schemas/scenario.json index dd61df6..5d166fc 100644 --- a/ngraph/schemas/scenario.json +++ b/ngraph/schemas/scenario.json @@ -4,6 +4,90 @@ "title": "NetGraph Scenario Schema", "description": "JSON Schema for NetGraph network scenario YAML files", "type": "object", + "$defs": { + "condition": { + "type": "object", + "properties": { + "attr": { + "type": "string" + }, + "operator": { + "type": "string", + "enum": [ + "==", + "!=", + ">", + "<", + ">=", + "<=", + "contains", + "not_contains", + "in", + "not_in", + "any_value", + "no_value" + ] + }, + "value": {} + }, + "required": [ + "attr", + "operator" + ], + "additionalProperties": false + }, + "matchSpec": { + "type": "object", + "properties": { + "logic": { + "type": "string", + "enum": [ + "and", + "or" + ] + }, + "conditions": { + "type": "array", + "items": { + "$ref": "#/$defs/condition" + } + } + }, + "additionalProperties": false + }, + "nodeSelector": { + "type": "object", + "description": "Node selector with path, group_by, match, and active_only", + "properties": { + "path": { + "type": "string", + "description": "Regex pattern on node.name" + }, + "group_by": { + "type": "string", + "description": "Attribute name to group nodes by" + }, + "match": { + "$ref": "#/$defs/matchSpec" + }, + "active_only": { + "type": "boolean", + "description": "Whether to exclude disabled nodes" + } + }, + "additionalProperties": false + }, + "selectorOrString": { + "oneOf": [ + { + "type": "string" + }, + { + "$ref": "#/$defs/nodeSelector" + } + ] + } + }, "properties": { "vars": { "type": "object", @@ -23,8 +107,12 @@ }, "version": { "oneOf": [ - {"type": "string"}, - {"type": "number"} + { + "type": "string" + }, + { + "type": "number" + } ], "description": "Network version" }, @@ -42,8 +130,13 @@ "hardware": { 
"type": "object", "properties": { - "component": {"type": "string"}, - "count": {"type": "number", "minimum": 0} + "component": { + "type": "string" + }, + "count": { + "type": "number", + "minimum": 0 + } }, "additionalProperties": false } @@ -56,7 +149,9 @@ }, "risk_groups": { "type": "array", - "items": {"type": "string"}, + "items": { + "type": "string" + }, "description": "Risk groups this node belongs to" } }, @@ -96,12 +191,14 @@ }, "risk_groups": { "type": "array", - "items": {"type": "string"}, + "items": { + "type": "string" + }, "description": "Risk groups this link belongs to" }, "attrs": { "type": "object", - "description": "Additional link attributes. Supports per-end hardware under 'hardware': {src|lhs|source: {component, count}, dst|rhs|target: {component, count}}", + "description": "Additional link attributes. Supports per-end hardware under 'hardware': {source: {component, count, exclusive}, target: {component, count, exclusive}}", "properties": { "hardware": { "type": "object", @@ -109,18 +206,32 @@ "source": { "type": "object", "properties": { - "component": {"type": "string"}, - "count": {"type": "number", "minimum": 0}, - "exclusive": {"type": "boolean"} + "component": { + "type": "string" + }, + "count": { + "type": "number", + "minimum": 0 + }, + "exclusive": { + "type": "boolean" + } }, "additionalProperties": false }, "target": { "type": "object", "properties": { - "component": {"type": "string"}, - "count": {"type": "number", "minimum": 0}, - "exclusive": {"type": "boolean"} + "component": { + "type": "string" + }, + "count": { + "type": "number", + "minimum": 0 + }, + "exclusive": { + "type": "boolean" + } }, "additionalProperties": false } @@ -139,7 +250,10 @@ "description": "Number of parallel links to create" } }, - "required": ["source", "target"], + "required": [ + "source", + "target" + ], "additionalProperties": false } }, @@ -150,15 +264,30 @@ "^[a-zA-Z0-9_\\[\\]-]+$": { "type": "object", "properties": { - "use_blueprint": {"type": "string"}, - "parameters": {"type": "object"}, - "node_count": {"type": "integer", "minimum": 1}, - "name_template": {"type": "string"}, - "attrs": {"type": "object"}, - "disabled": {"type": "boolean"}, + "use_blueprint": { + "type": "string" + }, + "parameters": { + "type": "object" + }, + "node_count": { + "type": "integer", + "minimum": 1 + }, + "name_template": { + "type": "string" + }, + "attrs": { + "type": "object" + }, + "disabled": { + "type": "boolean" + }, "risk_groups": { "type": "array", - "items": {"type": "string"} + "items": { + "type": "string" + } } }, "additionalProperties": false @@ -172,117 +301,45 @@ "type": "object", "properties": { "source": { - "oneOf": [ - {"type": "string"}, - { - "type": "object", - "properties": { - "path": {"type": "string"}, - "match": { - "type": "object", - "properties": { - "logic": {"type": "string", "enum": ["and", "or"]}, - "conditions": { - "type": "array", - "items": { - "type": "object", - "properties": { - "attr": {"type": "string"}, - "operator": { - "type": "string", - "enum": [ - "==", - "!=", - ">", - "<", - ">=", - "<=", - "contains", - "not_contains", - "any_value", - "no_value" - ] - }, - "value": {} - }, - "required": ["attr", "operator"], - "additionalProperties": false - } - } - }, - "additionalProperties": false - } - }, - "required": ["path"], - "additionalProperties": false - } - ] + "$ref": "#/$defs/selectorOrString" }, "target": { - "oneOf": [ - {"type": "string"}, - { - "type": "object", - "properties": { - "path": {"type": "string"}, - "match": { - 
"type": "object", - "properties": { - "logic": {"type": "string", "enum": ["and", "or"]}, - "conditions": { - "type": "array", - "items": { - "type": "object", - "properties": { - "attr": {"type": "string"}, - "operator": { - "type": "string", - "enum": [ - "==", - "!=", - ">", - "<", - ">=", - "<=", - "contains", - "not_contains", - "any_value", - "no_value" - ] - }, - "value": {} - }, - "required": ["attr", "operator"], - "additionalProperties": false - } - } - }, - "additionalProperties": false - } - }, - "required": ["path"], - "additionalProperties": false - } - ] + "$ref": "#/$defs/selectorOrString" }, "pattern": { "type": "string" }, - "link_count": {"type": "integer", "minimum": 1}, + "link_count": { + "type": "integer", + "minimum": 1 + }, "link_params": { "type": "object", "properties": { - "capacity": {"type": "number"}, - "cost": {"type": "number"}, - "disabled": {"type": "boolean"}, - "risk_groups": {"type": "array", "items": {"type": "string"}}, - "attrs": {"type": "object"} + "capacity": { + "type": "number" + }, + "cost": { + "type": "number" + }, + "disabled": { + "type": "boolean" + }, + "risk_groups": { + "type": "array", + "items": { + "type": "string" + } + }, + "attrs": { + "type": "object" + } }, "additionalProperties": false }, "expand_vars": { "type": "object", - "description": "Variable substitutions for adjacency expansion", + "description": "Variable substitutions using $var or ${var} syntax", "additionalProperties": { "type": "array", "items": {} @@ -290,11 +347,17 @@ }, "expansion_mode": { "type": "string", - "enum": ["cartesian", "zip"], + "enum": [ + "cartesian", + "zip" + ], "description": "How to combine expand_vars lists" } }, - "required": ["source", "target"], + "required": [ + "source", + "target" + ], "additionalProperties": false } }, @@ -304,15 +367,25 @@ "items": { "type": "object", "properties": { - "path": {"type": "string"}, - "attrs": {"type": "object"}, - "disabled": {"type": "boolean"}, + "path": { + "type": "string" + }, + "match": { + "$ref": "#/$defs/matchSpec" + }, + "attrs": { + "type": "object" + }, + "disabled": { + "type": "boolean" + }, "risk_groups": { "type": "array", - "items": {"type": "string"} + "items": { + "type": "string" + } } }, - "required": ["path"], "additionalProperties": false } }, @@ -322,20 +395,36 @@ "items": { "type": "object", "properties": { - "source": {"type": "string"}, - "target": {"type": "string"}, - "any_direction": {"type": "boolean"}, + "source": { + "$ref": "#/$defs/selectorOrString" + }, + "target": { + "$ref": "#/$defs/selectorOrString" + }, + "any_direction": { + "type": "boolean" + }, "link_params": { "type": "object", "properties": { - "capacity": {"type": "number"}, - "cost": {"type": "number"}, - "disabled": {"type": "boolean"}, + "capacity": { + "type": "number" + }, + "cost": { + "type": "number" + }, + "disabled": { + "type": "boolean" + }, "risk_groups": { "type": "array", - "items": {"type": "string"} + "items": { + "type": "string" + } }, - "attrs": {"type": "object"} + "attrs": { + "type": "object" + } }, "additionalProperties": false } @@ -355,20 +444,35 @@ "properties": { "groups": { "type": "object", - "description": "Node group definitions for blueprint expansion. 
NOTE: Runtime validation enforces that groups with 'use_blueprint' can only have {use_blueprint, parameters, attrs, disabled, risk_groups}, while groups without 'use_blueprint' can only have {node_count, name_template, attrs, disabled, risk_groups}.", + "description": "Node group definitions for blueprint expansion.", "patternProperties": { "^[a-zA-Z0-9_\\[\\]-]+$": { "type": "object", "properties": { - "use_blueprint": {"type": "string"}, - "parameters": {"type": "object"}, - "node_count": {"type": "integer", "minimum": 1}, - "name_template": {"type": "string"}, - "attrs": {"type": "object"}, - "disabled": {"type": "boolean"}, + "use_blueprint": { + "type": "string" + }, + "parameters": { + "type": "object" + }, + "node_count": { + "type": "integer", + "minimum": 1 + }, + "name_template": { + "type": "string" + }, + "attrs": { + "type": "object" + }, + "disabled": { + "type": "boolean" + }, "risk_groups": { "type": "array", - "items": {"type": "string"} + "items": { + "type": "string" + } } }, "additionalProperties": false @@ -382,121 +486,49 @@ "type": "object", "properties": { "source": { - "oneOf": [ - {"type": "string"}, - { - "type": "object", - "properties": { - "path": {"type": "string"}, - "match": { - "type": "object", - "properties": { - "logic": {"type": "string", "enum": ["and", "or"]}, - "conditions": { - "type": "array", - "items": { - "type": "object", - "properties": { - "attr": {"type": "string"}, - "operator": { - "type": "string", - "enum": [ - "==", - "!=", - ">", - "<", - ">=", - "<=", - "contains", - "not_contains", - "any_value", - "no_value" - ] - }, - "value": {} - }, - "required": ["attr", "operator"], - "additionalProperties": false - } - } - }, - "additionalProperties": false - } - }, - "required": ["path"], - "additionalProperties": false - } - ] + "$ref": "#/$defs/selectorOrString" }, "target": { - "oneOf": [ - {"type": "string"}, - { - "type": "object", - "properties": { - "path": {"type": "string"}, - "match": { - "type": "object", - "properties": { - "logic": {"type": "string", "enum": ["and", "or"]}, - "conditions": { - "type": "array", - "items": { - "type": "object", - "properties": { - "attr": {"type": "string"}, - "operator": { - "type": "string", - "enum": [ - "==", - "!=", - ">", - "<", - ">=", - "<=", - "contains", - "not_contains", - "any_value", - "no_value" - ] - }, - "value": {} - }, - "required": ["attr", "operator"], - "additionalProperties": false - } - } - }, - "additionalProperties": false - } - }, - "required": ["path"], - "additionalProperties": false - } - ] + "$ref": "#/$defs/selectorOrString" }, "pattern": { "type": "string", - "enum": ["mesh", "one_to_one"] + "enum": [ + "mesh", + "one_to_one" + ] + }, + "link_count": { + "type": "integer", + "minimum": 1 }, - "link_count": {"type": "integer", "minimum": 1}, "link_params": { "type": "object", "properties": { - "capacity": {"type": "number"}, - "cost": {"type": "number"}, - "disabled": {"type": "boolean"}, + "capacity": { + "type": "number" + }, + "cost": { + "type": "number" + }, + "disabled": { + "type": "boolean" + }, "risk_groups": { "type": "array", - "items": {"type": "string"} + "items": { + "type": "string" + } }, - "attrs": {"type": "object"} + "attrs": { + "type": "object" + } }, "additionalProperties": false }, "expand_vars": { "type": "object", - "description": "Variable substitutions for adjacency expansion", + "description": "Variable substitutions using $var or ${var} syntax", "additionalProperties": { "type": "array", "items": {} @@ -504,11 +536,17 @@ }, 
"expansion_mode": { "type": "string", - "enum": ["cartesian", "zip"], + "enum": [ + "cartesian", + "zip" + ], "description": "How to combine expand_vars lists" } }, - "required": ["source", "target"], + "required": [ + "source", + "target" + ], "additionalProperties": false } } @@ -544,7 +582,9 @@ } } }, - "required": ["name"], + "required": [ + "name" + ], "additionalProperties": false } }, @@ -573,8 +613,13 @@ "items": { "type": "object", "properties": { - "weight": {"type": "number", "minimum": 0}, - "attrs": {"type": "object"}, + "weight": { + "type": "number", + "minimum": 0 + }, + "attrs": { + "type": "object" + }, "rules": { "type": "array", "items": { @@ -582,34 +627,35 @@ "properties": { "entity_scope": { "type": "string", - "enum": ["node", "link", "risk_group"], + "enum": [ + "node", + "link", + "risk_group" + ], "description": "What entities this rule applies to" }, "conditions": { "type": "array", "description": "Conditions that must be met", "items": { - "type": "object", - "properties": { - "attr": {"type": "string"}, - "operator": { - "type": "string", - "enum": ["==", "!=", ">", "<", ">=", "<=", "in", "not_in"] - }, - "value": {} - }, - "required": ["attr", "operator", "value"], - "additionalProperties": false + "$ref": "#/$defs/condition" } }, "logic": { "type": "string", - "enum": ["and", "or"], + "enum": [ + "and", + "or" + ], "description": "Logic for combining conditions" }, "rule_type": { "type": "string", - "enum": ["all", "choice", "random"], + "enum": [ + "all", + "choice", + "random" + ], "description": "How to apply the rule" }, "probability": { @@ -632,7 +678,10 @@ } } }, - "required": ["weight", "rules"], + "required": [ + "weight", + "rules" + ], "additionalProperties": false } } @@ -652,26 +701,83 @@ "items": { "type": "object", "properties": { - "source_path": { - "type": "string", - "description": "Source node pattern" + "source": { + "$ref": "#/$defs/selectorOrString", + "description": "Source node selector" }, - "sink_path": { - "type": "string", - "description": "Sink node pattern" + "sink": { + "$ref": "#/$defs/selectorOrString", + "description": "Sink node selector" }, "demand": { "type": "number", "description": "Traffic demand amount" }, - "priority": {"type": "integer", "description": "Priority class"}, - "demand_placed": {"type": "number", "description": "Pre-placed demand amount"}, - "mode": {"type": "string", "description": "Expansion mode for sub-demands"}, - "flow_policy_config": {"description": "Routing policy config. 
Accepts enum name (string), enum value (integer), object, or null.", "oneOf": [{"type": "string"}, {"type": "integer"}, {"type": "object"}, {"type": "null"}]}, - "flow_policy": {"type": "object", "description": "Inline FlowPolicy definition"}, - "attrs": {"type": "object", "description": "Additional demand attributes"} + "priority": { + "type": "integer", + "description": "Priority class" + }, + "demand_placed": { + "type": "number", + "description": "Pre-placed demand amount" + }, + "mode": { + "type": "string", + "enum": [ + "combine", + "pairwise" + ], + "description": "Expansion mode" + }, + "group_mode": { + "type": "string", + "enum": [ + "flatten", + "per_group", + "group_pairwise" + ], + "description": "How grouped nodes produce demands" + }, + "expand_vars": { + "type": "object", + "description": "Variable substitutions using $var or ${var} syntax", + "additionalProperties": { + "type": "array" + } + }, + "expansion_mode": { + "type": "string", + "enum": [ + "cartesian", + "zip" + ] + }, + "flow_policy_config": { + "description": "Routing policy config", + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "object" + }, + { + "type": "null" + } + ] + }, + "flow_policy": { + "type": "object", + "description": "Inline FlowPolicy definition" + }, + "attrs": { + "type": "object", + "description": "Additional demand attributes" + } }, - "required": ["source_path", "sink_path", "demand"], "additionalProperties": false } } @@ -685,16 +791,42 @@ "^[a-zA-Z0-9_\\-\\+]+$": { "type": "object", "properties": { - "component_type": {"type": "string"}, - "description": {"type": "string"}, - "capex": {"type": "number"}, - "power_watts": {"type": "number"}, - "power_watts_max": {"type": "number"}, - "capacity": {"type": "number"}, - "ports": {"type": "integer"}, - "count": {"type": "integer"}, - "attrs": {"type": "object"}, - "children": {"type": "object", "patternProperties": {"^[a-zA-Z0-9_\\-\\+]+$": {"type": "object", "additionalProperties": true}}} + "component_type": { + "type": "string" + }, + "description": { + "type": "string" + }, + "capex": { + "type": "number" + }, + "power_watts": { + "type": "number" + }, + "power_watts_max": { + "type": "number" + }, + "capacity": { + "type": "number" + }, + "ports": { + "type": "integer" + }, + "count": { + "type": "integer" + }, + "attrs": { + "type": "object" + }, + "children": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9_\\-\\+]+$": { + "type": "object", + "additionalProperties": true + } + } + } }, "additionalProperties": false } @@ -707,10 +839,26 @@ "items": { "type": "object", "properties": { - "step_type": {"type": "string", "description": "Type of workflow step"}, - "name": {"type": "string", "description": "Step name"} + "step_type": { + "type": "string", + "description": "Type of workflow step" + }, + "name": { + "type": "string", + "description": "Step name" + }, + "source": { + "$ref": "#/$defs/selectorOrString", + "description": "Source node selector" + }, + "sink": { + "$ref": "#/$defs/selectorOrString", + "description": "Sink node selector" + } }, - "required": ["step_type"], + "required": [ + "step_type" + ], "additionalProperties": true } } diff --git a/ngraph/utils/__init__.py b/ngraph/utils/__init__.py index 0bbb287..9723b04 100644 --- a/ngraph/utils/__init__.py +++ b/ngraph/utils/__init__.py @@ -4,17 +4,4 @@ project internals. Keep modules minimal and focused. 
""" -from ngraph.utils.nodes import ( - collect_active_node_names_from_groups, - collect_active_nodes_from_groups, - get_active_node_names, - get_active_nodes, -) - -__all__ = [ - # Node filtering utilities - "get_active_node_names", - "get_active_nodes", - "collect_active_node_names_from_groups", - "collect_active_nodes_from_groups", -] +__all__: list[str] = [] diff --git a/ngraph/utils/nodes.py b/ngraph/utils/nodes.py deleted file mode 100644 index 4b3e143..0000000 --- a/ngraph/utils/nodes.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Node utility functions for filtering and selection. - -Provides centralized helpers for filtering active (non-disabled) nodes, -used across analysis, workflows, and demand expansion. -""" - -from __future__ import annotations - -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set - -if TYPE_CHECKING: - from ngraph.model.network import Node - - -def get_active_node_names( - nodes: Iterable["Node"], - excluded_nodes: Optional[Set[str]] = None, -) -> List[str]: - """Extract names of active (non-disabled) nodes, optionally excluding some. - - Args: - nodes: Iterable of Node objects to filter. - excluded_nodes: Optional set of node names to exclude. - - Returns: - List of node names that are not disabled and not in excluded_nodes. - """ - if excluded_nodes: - return [ - n.name for n in nodes if not n.disabled and n.name not in excluded_nodes - ] - return [n.name for n in nodes if not n.disabled] - - -def get_active_nodes( - nodes: Iterable["Node"], - excluded_nodes: Optional[Set[str]] = None, -) -> List["Node"]: - """Extract active (non-disabled) nodes, optionally excluding some. - - Args: - nodes: Iterable of Node objects to filter. - excluded_nodes: Optional set of node names to exclude. - - Returns: - List of Node objects that are not disabled and not in excluded_nodes. - """ - if excluded_nodes: - return [n for n in nodes if not n.disabled and n.name not in excluded_nodes] - return [n for n in nodes if not n.disabled] - - -def collect_active_node_names_from_groups( - groups: Dict[str, List["Node"]], - excluded_nodes: Optional[Set[str]] = None, -) -> List[str]: - """Extract active (non-disabled) node names from selection groups dict. - - Flattens all group values and filters to active nodes. - - Args: - groups: Dictionary mapping group labels to lists of Node objects. - excluded_nodes: Optional set of node names to exclude. - - Returns: - List of node names from all groups that are active. - """ - result: List[str] = [] - for nodes in groups.values(): - result.extend(get_active_node_names(nodes, excluded_nodes)) - return result - - -def collect_active_nodes_from_groups( - groups: Dict[str, List["Node"]], - excluded_nodes: Optional[Set[str]] = None, -) -> List["Node"]: - """Extract active (non-disabled) nodes from selection groups dict. - - Flattens all group values and filters to active nodes. - - Args: - groups: Dictionary mapping group labels to lists of Node objects. - excluded_nodes: Optional set of node names to exclude. - - Returns: - List of Node objects from all groups that are active. - """ - result: List["Node"] = [] - for nodes in groups.values(): - result.extend(get_active_nodes(nodes, excluded_nodes)) - return result diff --git a/ngraph/workflow/build_graph.py b/ngraph/workflow/build_graph.py index c7b1eb1..9313948 100644 --- a/ngraph/workflow/build_graph.py +++ b/ngraph/workflow/build_graph.py @@ -45,7 +45,7 @@ class BuildGraph(WorkflowStep): Attributes: add_reverse: If True, adds reverse edges for bidirectional connectivity. 
- Defaults to True for backward compatibility. + Defaults to True. """ add_reverse: bool = True diff --git a/ngraph/workflow/max_flow_step.py b/ngraph/workflow/max_flow_step.py index 2554ab6..922d1b8 100644 --- a/ngraph/workflow/max_flow_step.py +++ b/ngraph/workflow/max_flow_step.py @@ -8,8 +8,8 @@ workflow: - step_type: MaxFlow name: "maxflow_dc_to_edge" - source_path: "^datacenter/.*" - sink_path: "^edge/.*" + source: "^datacenter/.*" + sink: "^edge/.*" mode: "combine" failure_policy: "random_failures" iterations: 100 @@ -28,7 +28,7 @@ import time from dataclasses import dataclass -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Dict, Union from ngraph.exec.failure.manager import FailureManager from ngraph.logging import get_logger @@ -51,8 +51,8 @@ class MaxFlow(WorkflowStep): """Maximum flow Monte Carlo workflow step. Attributes: - source_path: Regex pattern for source node groups. - sink_path: Regex pattern for sink node groups. + source: Source node selector (string path or selector dict). + sink: Sink node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). failure_policy: Name of failure policy in scenario.failure_policy_set. iterations: Number of Monte Carlo trials. @@ -68,8 +68,8 @@ class MaxFlow(WorkflowStep): include_min_cut: Whether to include min-cut edges per flow. """ - source_path: str = "" - sink_path: str = "" + source: Union[str, Dict[str, Any]] = "" + sink: Union[str, Dict[str, Any]] = "" mode: str = "combine" failure_policy: str | None = None iterations: int = 1 @@ -106,10 +106,10 @@ def run(self, scenario: "Scenario") -> None: t0 = time.perf_counter() logger.info(f"Starting max-flow: {self.name}") logger.debug( - "Parameters: source_path=%s, sink_path=%s, mode=%s, iterations=%s, parallelism=%s, " + "Parameters: source=%s, sink=%s, mode=%s, iterations=%s, parallelism=%s, " "failure_policy=%s, baseline=%s, include_flow_details=%s, include_min_cut=%s", - self.source_path, - self.sink_path, + self.source, + self.sink, self.mode, str(self.iterations), str(self.parallelism), @@ -126,8 +126,8 @@ def run(self, scenario: "Scenario") -> None: ) effective_parallelism = resolve_parallelism(self.parallelism) raw = fm.run_max_flow_monte_carlo( - source_path=self.source_path, - sink_path=self.sink_path, + source=self.source, + sink=self.sink, mode=self.mode, iterations=self.iterations, parallelism=effective_parallelism, @@ -152,8 +152,8 @@ def run(self, scenario: "Scenario") -> None: flow_results.append(item) context = { - "source_path": self.source_path, - "sink_path": self.sink_path, + "source": self.source, + "sink": self.sink, "mode": self.mode, "shortest_path": bool(self.shortest_path), "require_capacity": bool(self.require_capacity), diff --git a/ngraph/workflow/maximum_supported_demand_step.py b/ngraph/workflow/maximum_supported_demand_step.py index 47bb379..ee5a69e 100644 --- a/ngraph/workflow/maximum_supported_demand_step.py +++ b/ngraph/workflow/maximum_supported_demand_step.py @@ -105,8 +105,8 @@ def run(self, scenario: "Any") -> None: base_demands: list[dict[str, Any]] = [ { "id": getattr(td, "id", None), - "source_path": getattr(td, "source_path", ""), - "sink_path": getattr(td, "sink_path", ""), + "source": getattr(td, "source", ""), + "sink": getattr(td, "sink", ""), "demand": float(getattr(td, "demand", 0.0)), "mode": getattr(td, "mode", "pairwise"), "priority": int(getattr(td, "priority", 0)), @@ -241,12 +241,15 @@ def _build_cache(scenario: Any, matrix_name: str) -> _MSDCache: stable_demands: 
list[TrafficDemand] = [ TrafficDemand( id=getattr(td, "id", "") or "", - source_path=str(getattr(td, "source_path", "")), - sink_path=str(getattr(td, "sink_path", "")), + source=getattr(td, "source", ""), + sink=getattr(td, "sink", ""), priority=int(getattr(td, "priority", 0)), demand=float(getattr(td, "demand", 0.0)), flow_policy_config=getattr(td, "flow_policy_config", None), mode=str(getattr(td, "mode", "pairwise")), + group_mode=str(getattr(td, "group_mode", "flatten")), + expand_vars=getattr(td, "expand_vars", None) or {}, + expansion_mode=str(getattr(td, "expansion_mode", "cartesian")), ) for td in base_tds ] @@ -354,12 +357,15 @@ def _build_scaled_demands( return [ TrafficDemand( id=d.get("id") or "", - source_path=str(d["source_path"]), - sink_path=str(d["sink_path"]), + source=d["source"], + sink=d["sink"], priority=int(d["priority"]), demand=float(d["demand"]) * alpha, flow_policy_config=d.get("flow_policy_config"), mode=str(d.get("mode", "pairwise")), + group_mode=str(d.get("group_mode", "flatten")), + expand_vars=d.get("expand_vars") or {}, + expansion_mode=str(d.get("expansion_mode", "cartesian")), ) for d in base_demands ] diff --git a/ngraph/workflow/traffic_matrix_placement_step.py b/ngraph/workflow/traffic_matrix_placement_step.py index 7ea2a58..2e9287e 100644 --- a/ngraph/workflow/traffic_matrix_placement_step.py +++ b/ngraph/workflow/traffic_matrix_placement_step.py @@ -119,25 +119,31 @@ def run(self, scenario: "Scenario") -> None: demands_config.append( { "id": td.id, - "source_path": td.source_path, - "sink_path": td.sink_path, + "source": td.source, + "sink": td.sink, "demand": float(td.demand) * float(effective_alpha), "mode": getattr(td, "mode", "pairwise"), "flow_policy_config": getattr(td, "flow_policy_config", None), "priority": getattr(td, "priority", 0), + "group_mode": getattr(td, "group_mode", "flatten"), + "expand_vars": getattr(td, "expand_vars", None) or {}, + "expansion_mode": getattr(td, "expansion_mode", "cartesian"), } ) base_demands.append( { "id": td.id, - "source_path": getattr(td, "source_path", ""), - "sink_path": getattr(td, "sink_path", ""), + "source": getattr(td, "source", ""), + "sink": getattr(td, "sink", ""), "demand": float(getattr(td, "demand", 0.0)), "mode": getattr(td, "mode", "pairwise"), "priority": int(getattr(td, "priority", 0)), "flow_policy_config": serialize_policy_preset( getattr(td, "flow_policy_config", None) ), + "group_mode": getattr(td, "group_mode", "flatten"), + "expand_vars": getattr(td, "expand_vars", None) or {}, + "expansion_mode": getattr(td, "expansion_mode", "cartesian"), } ) diff --git a/pyproject.toml b/pyproject.toml index 4fe281e..4359b83 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" # --------------------------------------------------------------------- [project] name = "ngraph" -version = "0.12.3" +version = "0.13.0" description = "A tool and a library for network modeling and analysis." 
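The renames above ripple into scenario YAML: every demand key moves from `source_path`/`sink_path` to `source`/`sink`, and `TrafficDemand` carries `group_mode`, `expand_vars`, and `expansion_mode` with the defaults shown in this diff (`flatten`, `{}`, `cartesian`). A test-style sketch of a migrated scenario; the `Scenario` import path is assumed for illustration:

```python
from ngraph.scenario import Scenario  # import path assumed

migrated = """
network:
  nodes:
    A: {}
    B: {}
  links:
    - source: A
      target: B
      link_params:
        capacity: 100
traffic_matrix_set:
  default:
    - source: "^A$"   # was: source_path
      sink: "^B$"     # was: sink_path
      demand: 50
"""

scenario = Scenario.from_yaml(migrated)
td = scenario.traffic_matrix_set.get_default_matrix()[0]
assert td.source == "^A$" and td.sink == "^B$"
# Assumes the new fields default as in this diff's constructor calls:
assert (td.group_mode, td.expansion_mode) == ("flatten", "cartesian")
```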
readme = "README.md" authors = [{ name = "Andrey Golovanov" }] diff --git a/scenarios/backbone_clos.yml b/scenarios/backbone_clos.yml index 8c787a3..ad8a31f 100644 --- a/scenarios/backbone_clos.yml +++ b/scenarios/backbone_clos.yml @@ -21,21 +21,21 @@ blueprints: component: LeafRouter count: 1 adjacency: - - source: /leaf - target: /spine - pattern: mesh - link_params: - capacity: 3200 - cost: 1 - attrs: - link_type: leaf_spine - hardware: - source: - component: 800G-DR4 - count: 4.0 - target: - component: 1600G-2xDR4 - count: 2.0 + - source: /leaf + target: /spine + pattern: mesh + link_params: + capacity: 3200 + cost: 1 + attrs: + link_type: leaf_spine + hardware: + source: + component: 800G-DR4 + count: 4.0 + target: + component: 1600G-2xDR4 + count: 2.0 DCRegion: groups: dc: @@ -120,7 +120,7 @@ network: attrs: metro_name: new-york-jersey-city-newark metro_name_orig: New York--Jersey City--Newark, NY--NJ - metro_id: '63217' + metro_id: "63217" location_x: 1815314.3 location_y: 2173703.3 radius_km: 51.75 @@ -130,7 +130,7 @@ network: attrs: metro_name: new-york-jersey-city-newark metro_name_orig: New York--Jersey City--Newark, NY--NJ - metro_id: '63217' + metro_id: "63217" location_x: 1815314.3 location_y: 2173703.3 radius_km: 51.75 @@ -142,7 +142,7 @@ network: attrs: metro_name: washington-arlington metro_name_orig: Washington--Arlington, DC--VA--MD - metro_id: '92242' + metro_id: "92242" location_x: 1602747.8 location_y: 1921533.9 radius_km: 32.67 @@ -152,7 +152,7 @@ network: attrs: metro_name: washington-arlington metro_name_orig: Washington--Arlington, DC--VA--MD - metro_id: '92242' + metro_id: "92242" location_x: 1602747.8 location_y: 1921533.9 radius_km: 32.67 @@ -164,7 +164,7 @@ network: attrs: metro_name: chicago metro_name_orig: Chicago, IL--IN - metro_id: '16264' + metro_id: "16264" location_x: 664412.8 location_y: 2133983.6 radius_km: 43.9 @@ -184,7 +184,7 @@ network: attrs: metro_name: columbus metro_name_orig: Columbus, OH - metro_id: '19234' + metro_id: "19234" location_x: 1097168.9 location_y: 1967599.1 radius_km: 20.63 @@ -194,7 +194,7 @@ network: attrs: metro_name: columbus metro_name_orig: Columbus, OH - metro_id: '19234' + metro_id: "19234" location_x: 1097168.9 location_y: 1967599.1 radius_km: 20.63 @@ -202,1016 +202,1016 @@ network: mw_per_dc_region: 150.0 gbps_per_mw: 200.0 adjacency: - - source: - path: metro1/pop1 - match: &id001 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - target: - path: metro1/pop2 - match: *id001 - pattern: one_to_one - link_params: - capacity: 1400.0 - cost: 163 - attrs: - link_type: intra_metro - source_metro: new-york-jersey-city-newark - target_metro: new-york-jersey-city-newark - target_capacity: 22400.0 - site_edge: metro1/pop1|metro1/pop2|intra_metro:new-york-jersey-city-newark:1-2 - adjacency_id: intra_metro:new-york-jersey-city-newark - distance_km: 163 - hardware: - source: - component: 800G-ZR+ - count: 2.0 - target: - component: 800G-ZR+ - count: 2.0 - - source: - path: metro1/pop1 - match: &id002 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: dc - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro1/dc1 - match: *id002 - pattern: one_to_one - link_params: - capacity: 2000.0 - cost: 1 - attrs: - link_type: dc_to_pop - source_metro: new-york-jersey-city-newark - target_metro: new-york-jersey-city-newark - target_capacity: 32000.0 - site_edge: 
metro1/pop1|metro1/dc1|dc_to_pop:new-york-jersey-city-newark:1-1 - adjacency_id: dc_to_pop:new-york-jersey-city-newark - distance_km: 1 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 - - source: - path: metro1/pop1 - match: &id003 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro2/pop1 - match: *id003 - pattern: one_to_one - link_params: - capacity: 800.0 - cost: 412 - attrs: - link_type: inter_metro_corridor - source_metro: new-york-jersey-city-newark - target_metro: washington-arlington - target_capacity: 12800.0 - site_edge: metro1/pop1|metro2/pop1|inter_metro:1-2:1-1 - adjacency_id: inter_metro:1-2 - distance_km: 412 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: &id005 - - corridor_risk_columbus_new-york-jersey-city-newark - - corridor_risk_new-york-jersey-city-newark_washington-arlington - - source: - path: metro1/pop1 - match: &id004 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro5/pop1 - match: *id004 - pattern: one_to_one - link_params: - capacity: 800.0 - cost: 977 - attrs: - link_type: inter_metro_corridor - source_metro: new-york-jersey-city-newark - target_metro: columbus - target_capacity: 12800.0 - site_edge: metro1/pop1|metro5/pop1|inter_metro:1-5:1-1 - adjacency_id: inter_metro:1-5 - distance_km: 977 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: &id006 - - corridor_risk_chicago_washington-arlington - - corridor_risk_columbus_new-york-jersey-city-newark - - corridor_risk_columbus_washington-arlington - - corridor_risk_new-york-jersey-city-newark_washington-arlington - - source: - path: metro1/pop2 - match: *id002 - target: - path: metro1/dc1 - match: *id002 - pattern: one_to_one - link_params: - capacity: 2000.0 - cost: 163 - attrs: - link_type: dc_to_pop - source_metro: new-york-jersey-city-newark - target_metro: new-york-jersey-city-newark - target_capacity: 32000.0 - site_edge: metro1/pop2|metro1/dc1|dc_to_pop:new-york-jersey-city-newark:1-2 - adjacency_id: dc_to_pop:new-york-jersey-city-newark - distance_km: 163 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 - # Inter-metro corridor connectivity (backbone links between metros) - - source: - path: metro1/pop2 - match: *id003 - target: - path: metro2/pop2 - match: *id003 - pattern: one_to_one - link_params: - capacity: 800.0 - cost: 412 - attrs: - link_type: inter_metro_corridor - source_metro: new-york-jersey-city-newark - target_metro: washington-arlington - target_capacity: 12800.0 - site_edge: metro1/pop2|metro2/pop2|inter_metro:1-2:2-2 - adjacency_id: inter_metro:1-2 - distance_km: 412 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: *id005 - - source: - path: metro1/pop2 - match: *id004 - target: - path: metro5/pop2 - match: *id004 - pattern: one_to_one - link_params: - capacity: 800.0 - cost: 977 - attrs: - link_type: inter_metro_corridor - source_metro: new-york-jersey-city-newark - target_metro: columbus - target_capacity: 12800.0 - site_edge: metro1/pop2|metro5/pop2|inter_metro:1-5:2-2 - adjacency_id: inter_metro:1-5 - distance_km: 977 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - 
count: 1.0 - risk_groups: *id006 - - source: - path: metro2/pop1 - match: &id007 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - target: - path: metro2/pop2 - match: *id007 - pattern: one_to_one - link_params: - capacity: 1600.0 - cost: 103 - attrs: - link_type: intra_metro - source_metro: washington-arlington - target_metro: washington-arlington - target_capacity: 25600.0 - site_edge: metro2/pop1|metro2/pop2|intra_metro:washington-arlington:1-2 - adjacency_id: intra_metro:washington-arlington - distance_km: 103 - hardware: - source: - component: 800G-ZR+ - count: 2.0 - target: - component: 800G-ZR+ - count: 2.0 - - source: - path: metro2/pop1 - match: &id008 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: dc - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro2/dc1 - match: *id008 - pattern: one_to_one - link_params: - capacity: 2400.0 - cost: 1 - attrs: - link_type: dc_to_pop - source_metro: washington-arlington - target_metro: washington-arlington - target_capacity: 38400.0 - site_edge: metro2/pop1|metro2/dc1|dc_to_pop:washington-arlington:1-1 - adjacency_id: dc_to_pop:washington-arlington - distance_km: 1 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 - - source: - path: metro2/pop1 - match: &id009 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro3/pop1 - match: *id009 - pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1261 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: chicago - target_capacity: 3200.0 - site_edge: metro2/pop1|metro3/pop1|inter_metro:2-3:1-1 - adjacency_id: inter_metro:2-3 - distance_km: 1261 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: &id012 - - corridor_risk_chicago_washington-arlington - - corridor_risk_columbus_new-york-jersey-city-newark - - corridor_risk_columbus_washington-arlington - - source: - path: metro2/pop1 - match: &id010 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro4/pop1 - match: *id010 - pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1216 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: atlanta - target_capacity: 3200.0 - site_edge: metro2/pop1|metro4/pop1|inter_metro:2-4:1-1 - adjacency_id: inter_metro:2-4 - distance_km: 1216 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: &id013 - - corridor_risk_atlanta_washington-arlington - - source: - path: metro2/pop1 - match: &id011 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro5/pop1 - match: *id011 - pattern: one_to_one - link_params: - capacity: 800.0 - cost: 733 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: columbus - target_capacity: 12800.0 - site_edge: metro2/pop1|metro5/pop1|inter_metro:2-5:1-1 - adjacency_id: inter_metro:2-5 - distance_km: 733 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: &id014 - - corridor_risk_chicago_washington-arlington - - 
corridor_risk_columbus_new-york-jersey-city-newark - - corridor_risk_columbus_washington-arlington - - source: - path: metro2/pop2 - match: *id008 - target: - path: metro2/dc1 - match: *id008 - pattern: one_to_one - link_params: - capacity: 2400.0 - cost: 103 - attrs: - link_type: dc_to_pop - source_metro: washington-arlington - target_metro: washington-arlington - target_capacity: 38400.0 - site_edge: metro2/pop2|metro2/dc1|dc_to_pop:washington-arlington:1-2 - adjacency_id: dc_to_pop:washington-arlington - distance_km: 103 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 - - source: - path: metro2/pop2 - match: *id009 - target: - path: metro3/pop2 - match: *id009 - pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1261 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: chicago - target_capacity: 3200.0 - site_edge: metro2/pop2|metro3/pop2|inter_metro:2-3:2-2 - adjacency_id: inter_metro:2-3 - distance_km: 1261 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: *id012 - - source: - path: metro2/pop2 - match: *id010 - target: - path: metro4/pop2 - match: *id010 - pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1216 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: atlanta - target_capacity: 3200.0 - site_edge: metro2/pop2|metro4/pop2|inter_metro:2-4:2-2 - adjacency_id: inter_metro:2-4 - distance_km: 1216 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: *id013 - - source: - path: metro2/pop2 - match: *id011 - target: - path: metro5/pop2 - match: *id011 - pattern: one_to_one - link_params: - capacity: 800.0 - cost: 733 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: columbus - target_capacity: 12800.0 - site_edge: metro2/pop2|metro5/pop2|inter_metro:2-5:2-2 - adjacency_id: inter_metro:2-5 - distance_km: 733 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: *id014 - - source: - path: metro3/pop1 - match: &id015 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - target: - path: metro3/pop2 - match: *id015 - pattern: one_to_one - link_params: - capacity: 12800.0 - cost: 138 - attrs: - link_type: intra_metro - source_metro: chicago - target_metro: chicago - target_capacity: 12800.0 - site_edge: metro3/pop1|metro3/pop2|intra_metro:chicago:1-2 - adjacency_id: intra_metro:chicago - distance_km: 138 - hardware: - source: - component: 800G-ZR+ - count: 16.0 - target: - component: 800G-ZR+ - count: 16.0 - - source: - path: metro3/pop1 - match: &id016 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro5/pop1 - match: *id016 - pattern: one_to_one - link_params: - capacity: 200.0 - cost: 658 - attrs: - link_type: inter_metro_corridor - source_metro: chicago - target_metro: columbus - target_capacity: 3200.0 - site_edge: metro3/pop1|metro5/pop1|inter_metro:3-5:1-1 - adjacency_id: inter_metro:3-5 - distance_km: 658 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: &id018 - - corridor_risk_atlanta_chicago - - corridor_risk_chicago_columbus - - source: - path: metro3/pop1 - match: &id017 - 
conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro4/pop1 - match: *id017 - pattern: one_to_one - link_params: - capacity: 3200.0 - cost: 1302 - attrs: - link_type: inter_metro_corridor - source_metro: chicago - target_metro: atlanta - target_capacity: 3200.0 - site_edge: metro3/pop1|metro4/pop1|inter_metro:3-4:1-1 - adjacency_id: inter_metro:3-4 - distance_km: 1302 - hardware: - source: - component: 800G-ZR+ - count: 4.0 - target: - component: 800G-ZR+ - count: 4.0 - risk_groups: &id019 - - corridor_risk_atlanta_chicago - - corridor_risk_atlanta_columbus - - corridor_risk_chicago_columbus - - source: - path: metro3/pop2 - match: *id016 - target: - path: metro5/pop2 - match: *id016 - pattern: one_to_one - link_params: - capacity: 200.0 - cost: 658 - attrs: - link_type: inter_metro_corridor - source_metro: chicago - target_metro: columbus - target_capacity: 3200.0 - site_edge: metro3/pop2|metro5/pop2|inter_metro:3-5:2-2 - adjacency_id: inter_metro:3-5 - distance_km: 658 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: *id018 - - source: - path: metro3/pop2 - match: *id017 - target: - path: metro4/pop2 - match: *id017 - pattern: one_to_one - link_params: - capacity: 3200.0 - cost: 1302 - attrs: - link_type: inter_metro_corridor - source_metro: chicago - target_metro: atlanta - target_capacity: 3200.0 - site_edge: metro3/pop2|metro4/pop2|inter_metro:3-4:2-2 - adjacency_id: inter_metro:3-4 - distance_km: 1302 - hardware: - source: - component: 800G-ZR+ - count: 4.0 - target: - component: 800G-ZR+ - count: 4.0 - risk_groups: *id019 - - source: - path: metro4/pop1 - match: &id020 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - target: - path: metro4/pop2 - match: *id020 - pattern: one_to_one - link_params: - capacity: 12800.0 - cost: 142 - attrs: - link_type: intra_metro - source_metro: atlanta - target_metro: atlanta - target_capacity: 12800.0 - site_edge: metro4/pop1|metro4/pop2|intra_metro:atlanta:1-2 - adjacency_id: intra_metro:atlanta - distance_km: 142 - hardware: - source: - component: 800G-ZR+ - count: 16.0 - target: - component: 800G-ZR+ - count: 16.0 - - source: - path: metro4/pop1 - match: &id021 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro5/pop1 - match: *id021 - pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1028 - attrs: - link_type: inter_metro_corridor - source_metro: atlanta - target_metro: columbus - target_capacity: 3200.0 - site_edge: metro4/pop1|metro5/pop1|inter_metro:4-5:1-1 - adjacency_id: inter_metro:4-5 - distance_km: 1028 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: &id022 - - corridor_risk_atlanta_chicago - - corridor_risk_atlanta_columbus - - source: - path: metro4/pop2 - match: *id021 - target: - path: metro5/pop2 - match: *id021 - pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1028 - attrs: - link_type: inter_metro_corridor - source_metro: atlanta - target_metro: columbus - target_capacity: 3200.0 - site_edge: metro4/pop2|metro5/pop2|inter_metro:4-5:2-2 - adjacency_id: inter_metro:4-5 - distance_km: 1028 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 - risk_groups: *id022 - - source: - path: metro5/pop1 - match: 
&id023 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: leaf - target: - path: metro5/pop2 - match: *id023 - pattern: one_to_one - link_params: - capacity: 1600.0 - cost: 65 - attrs: - link_type: intra_metro - source_metro: columbus - target_metro: columbus - target_capacity: 25600.0 - site_edge: metro5/pop1|metro5/pop2|intra_metro:columbus:1-2 - adjacency_id: intra_metro:columbus - distance_km: 65 - hardware: - source: - component: 800G-ZR+ - count: 2.0 - target: - component: 800G-ZR+ - count: 2.0 - - source: - path: metro5/pop1 - match: &id024 - conditions: - - attr: role - operator: == - value: core - - attr: role - operator: == - value: dc - - attr: role - operator: == - value: leaf - logic: or - target: - path: metro5/dc1 - match: *id024 - pattern: one_to_one - link_params: - capacity: 2400.0 - cost: 1 - attrs: - link_type: dc_to_pop - source_metro: columbus - target_metro: columbus - target_capacity: 38400.0 - site_edge: metro5/pop1|metro5/dc1|dc_to_pop:columbus:1-1 - adjacency_id: dc_to_pop:columbus - distance_km: 1 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 - - source: - path: metro5/pop2 - match: *id024 - target: - path: metro5/dc1 - match: *id024 - pattern: one_to_one - link_params: - capacity: 2400.0 - cost: 65 - attrs: - link_type: dc_to_pop - source_metro: columbus - target_metro: columbus - target_capacity: 38400.0 - site_edge: metro5/pop2|metro5/dc1|dc_to_pop:columbus:1-2 - adjacency_id: dc_to_pop:columbus - distance_km: 65 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 + - source: + path: metro1/pop1 + match: &id001 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + target: + path: metro1/pop2 + match: *id001 + pattern: one_to_one + link_params: + capacity: 1400.0 + cost: 163 + attrs: + link_type: intra_metro + source_metro: new-york-jersey-city-newark + target_metro: new-york-jersey-city-newark + target_capacity: 22400.0 + site_edge: metro1/pop1|metro1/pop2|intra_metro:new-york-jersey-city-newark:1-2 + adjacency_id: intra_metro:new-york-jersey-city-newark + distance_km: 163 + hardware: + source: + component: 800G-ZR+ + count: 2.0 + target: + component: 800G-ZR+ + count: 2.0 + - source: + path: metro1/pop1 + match: &id002 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: dc + - attr: role + operator: == + value: leaf + logic: or + target: + path: metro1/dc1 + match: *id002 + pattern: one_to_one + link_params: + capacity: 2000.0 + cost: 1 + attrs: + link_type: dc_to_pop + source_metro: new-york-jersey-city-newark + target_metro: new-york-jersey-city-newark + target_capacity: 32000.0 + site_edge: metro1/pop1|metro1/dc1|dc_to_pop:new-york-jersey-city-newark:1-1 + adjacency_id: dc_to_pop:new-york-jersey-city-newark + distance_km: 1 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 + - source: + path: metro1/pop1 + match: &id003 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + logic: or + target: + path: metro2/pop1 + match: *id003 + pattern: one_to_one + link_params: + capacity: 800.0 + cost: 412 + attrs: + link_type: inter_metro_corridor + source_metro: new-york-jersey-city-newark + target_metro: washington-arlington + target_capacity: 12800.0 + site_edge: metro1/pop1|metro2/pop1|inter_metro:1-2:1-1 + adjacency_id: 
inter_metro:1-2 + distance_km: 412 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: &id005 + - corridor_risk_columbus_new-york-jersey-city-newark + - corridor_risk_new-york-jersey-city-newark_washington-arlington + - source: + path: metro1/pop1 + match: &id004 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + logic: or + target: + path: metro5/pop1 + match: *id004 + pattern: one_to_one + link_params: + capacity: 800.0 + cost: 977 + attrs: + link_type: inter_metro_corridor + source_metro: new-york-jersey-city-newark + target_metro: columbus + target_capacity: 12800.0 + site_edge: metro1/pop1|metro5/pop1|inter_metro:1-5:1-1 + adjacency_id: inter_metro:1-5 + distance_km: 977 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: &id006 + - corridor_risk_chicago_washington-arlington + - corridor_risk_columbus_new-york-jersey-city-newark + - corridor_risk_columbus_washington-arlington + - corridor_risk_new-york-jersey-city-newark_washington-arlington + - source: + path: metro1/pop2 + match: *id002 + target: + path: metro1/dc1 + match: *id002 + pattern: one_to_one + link_params: + capacity: 2000.0 + cost: 163 + attrs: + link_type: dc_to_pop + source_metro: new-york-jersey-city-newark + target_metro: new-york-jersey-city-newark + target_capacity: 32000.0 + site_edge: metro1/pop2|metro1/dc1|dc_to_pop:new-york-jersey-city-newark:1-2 + adjacency_id: dc_to_pop:new-york-jersey-city-newark + distance_km: 163 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 + # Inter-metro corridor connectivity (backbone links between metros) + - source: + path: metro1/pop2 + match: *id003 + target: + path: metro2/pop2 + match: *id003 + pattern: one_to_one + link_params: + capacity: 800.0 + cost: 412 + attrs: + link_type: inter_metro_corridor + source_metro: new-york-jersey-city-newark + target_metro: washington-arlington + target_capacity: 12800.0 + site_edge: metro1/pop2|metro2/pop2|inter_metro:1-2:2-2 + adjacency_id: inter_metro:1-2 + distance_km: 412 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: *id005 + - source: + path: metro1/pop2 + match: *id004 + target: + path: metro5/pop2 + match: *id004 + pattern: one_to_one + link_params: + capacity: 800.0 + cost: 977 + attrs: + link_type: inter_metro_corridor + source_metro: new-york-jersey-city-newark + target_metro: columbus + target_capacity: 12800.0 + site_edge: metro1/pop2|metro5/pop2|inter_metro:1-5:2-2 + adjacency_id: inter_metro:1-5 + distance_km: 977 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: *id006 + - source: + path: metro2/pop1 + match: &id007 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + target: + path: metro2/pop2 + match: *id007 + pattern: one_to_one + link_params: + capacity: 1600.0 + cost: 103 + attrs: + link_type: intra_metro + source_metro: washington-arlington + target_metro: washington-arlington + target_capacity: 25600.0 + site_edge: metro2/pop1|metro2/pop2|intra_metro:washington-arlington:1-2 + adjacency_id: intra_metro:washington-arlington + distance_km: 103 + hardware: + source: + component: 800G-ZR+ + count: 2.0 + target: + component: 800G-ZR+ + count: 2.0 + - source: + path: metro2/pop1 + match: &id008 + 
conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: dc + - attr: role + operator: == + value: leaf + logic: or + target: + path: metro2/dc1 + match: *id008 + pattern: one_to_one + link_params: + capacity: 2400.0 + cost: 1 + attrs: + link_type: dc_to_pop + source_metro: washington-arlington + target_metro: washington-arlington + target_capacity: 38400.0 + site_edge: metro2/pop1|metro2/dc1|dc_to_pop:washington-arlington:1-1 + adjacency_id: dc_to_pop:washington-arlington + distance_km: 1 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 + - source: + path: metro2/pop1 + match: &id009 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + logic: or + target: + path: metro3/pop1 + match: *id009 + pattern: one_to_one + link_params: + capacity: 200.0 + cost: 1261 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: chicago + target_capacity: 3200.0 + site_edge: metro2/pop1|metro3/pop1|inter_metro:2-3:1-1 + adjacency_id: inter_metro:2-3 + distance_km: 1261 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: &id012 + - corridor_risk_chicago_washington-arlington + - corridor_risk_columbus_new-york-jersey-city-newark + - corridor_risk_columbus_washington-arlington + - source: + path: metro2/pop1 + match: &id010 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + logic: or + target: + path: metro4/pop1 + match: *id010 + pattern: one_to_one + link_params: + capacity: 200.0 + cost: 1216 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: atlanta + target_capacity: 3200.0 + site_edge: metro2/pop1|metro4/pop1|inter_metro:2-4:1-1 + adjacency_id: inter_metro:2-4 + distance_km: 1216 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: &id013 + - corridor_risk_atlanta_washington-arlington + - source: + path: metro2/pop1 + match: &id011 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + logic: or + target: + path: metro5/pop1 + match: *id011 + pattern: one_to_one + link_params: + capacity: 800.0 + cost: 733 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: columbus + target_capacity: 12800.0 + site_edge: metro2/pop1|metro5/pop1|inter_metro:2-5:1-1 + adjacency_id: inter_metro:2-5 + distance_km: 733 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: &id014 + - corridor_risk_chicago_washington-arlington + - corridor_risk_columbus_new-york-jersey-city-newark + - corridor_risk_columbus_washington-arlington + - source: + path: metro2/pop2 + match: *id008 + target: + path: metro2/dc1 + match: *id008 + pattern: one_to_one + link_params: + capacity: 2400.0 + cost: 103 + attrs: + link_type: dc_to_pop + source_metro: washington-arlington + target_metro: washington-arlington + target_capacity: 38400.0 + site_edge: metro2/pop2|metro2/dc1|dc_to_pop:washington-arlington:1-2 + adjacency_id: dc_to_pop:washington-arlington + distance_km: 103 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 + - source: + path: metro2/pop2 + match: *id009 + target: + path: metro3/pop2 + match: *id009 + pattern: one_to_one + 
link_params: + capacity: 200.0 + cost: 1261 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: chicago + target_capacity: 3200.0 + site_edge: metro2/pop2|metro3/pop2|inter_metro:2-3:2-2 + adjacency_id: inter_metro:2-3 + distance_km: 1261 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: *id012 + - source: + path: metro2/pop2 + match: *id010 + target: + path: metro4/pop2 + match: *id010 + pattern: one_to_one + link_params: + capacity: 200.0 + cost: 1216 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: atlanta + target_capacity: 3200.0 + site_edge: metro2/pop2|metro4/pop2|inter_metro:2-4:2-2 + adjacency_id: inter_metro:2-4 + distance_km: 1216 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: *id013 + - source: + path: metro2/pop2 + match: *id011 + target: + path: metro5/pop2 + match: *id011 + pattern: one_to_one + link_params: + capacity: 800.0 + cost: 733 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: columbus + target_capacity: 12800.0 + site_edge: metro2/pop2|metro5/pop2|inter_metro:2-5:2-2 + adjacency_id: inter_metro:2-5 + distance_km: 733 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: *id014 + - source: + path: metro3/pop1 + match: &id015 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + target: + path: metro3/pop2 + match: *id015 + pattern: one_to_one + link_params: + capacity: 12800.0 + cost: 138 + attrs: + link_type: intra_metro + source_metro: chicago + target_metro: chicago + target_capacity: 12800.0 + site_edge: metro3/pop1|metro3/pop2|intra_metro:chicago:1-2 + adjacency_id: intra_metro:chicago + distance_km: 138 + hardware: + source: + component: 800G-ZR+ + count: 16.0 + target: + component: 800G-ZR+ + count: 16.0 + - source: + path: metro3/pop1 + match: &id016 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + logic: or + target: + path: metro5/pop1 + match: *id016 + pattern: one_to_one + link_params: + capacity: 200.0 + cost: 658 + attrs: + link_type: inter_metro_corridor + source_metro: chicago + target_metro: columbus + target_capacity: 3200.0 + site_edge: metro3/pop1|metro5/pop1|inter_metro:3-5:1-1 + adjacency_id: inter_metro:3-5 + distance_km: 658 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: &id018 + - corridor_risk_atlanta_chicago + - corridor_risk_chicago_columbus + - source: + path: metro3/pop1 + match: &id017 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + logic: or + target: + path: metro4/pop1 + match: *id017 + pattern: one_to_one + link_params: + capacity: 3200.0 + cost: 1302 + attrs: + link_type: inter_metro_corridor + source_metro: chicago + target_metro: atlanta + target_capacity: 3200.0 + site_edge: metro3/pop1|metro4/pop1|inter_metro:3-4:1-1 + adjacency_id: inter_metro:3-4 + distance_km: 1302 + hardware: + source: + component: 800G-ZR+ + count: 4.0 + target: + component: 800G-ZR+ + count: 4.0 + risk_groups: &id019 + - corridor_risk_atlanta_chicago + - corridor_risk_atlanta_columbus + - corridor_risk_chicago_columbus + - source: + path: metro3/pop2 + match: *id016 + target: + path: metro5/pop2 
+ match: *id016 + pattern: one_to_one + link_params: + capacity: 200.0 + cost: 658 + attrs: + link_type: inter_metro_corridor + source_metro: chicago + target_metro: columbus + target_capacity: 3200.0 + site_edge: metro3/pop2|metro5/pop2|inter_metro:3-5:2-2 + adjacency_id: inter_metro:3-5 + distance_km: 658 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: *id018 + - source: + path: metro3/pop2 + match: *id017 + target: + path: metro4/pop2 + match: *id017 + pattern: one_to_one + link_params: + capacity: 3200.0 + cost: 1302 + attrs: + link_type: inter_metro_corridor + source_metro: chicago + target_metro: atlanta + target_capacity: 3200.0 + site_edge: metro3/pop2|metro4/pop2|inter_metro:3-4:2-2 + adjacency_id: inter_metro:3-4 + distance_km: 1302 + hardware: + source: + component: 800G-ZR+ + count: 4.0 + target: + component: 800G-ZR+ + count: 4.0 + risk_groups: *id019 + - source: + path: metro4/pop1 + match: &id020 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + target: + path: metro4/pop2 + match: *id020 + pattern: one_to_one + link_params: + capacity: 12800.0 + cost: 142 + attrs: + link_type: intra_metro + source_metro: atlanta + target_metro: atlanta + target_capacity: 12800.0 + site_edge: metro4/pop1|metro4/pop2|intra_metro:atlanta:1-2 + adjacency_id: intra_metro:atlanta + distance_km: 142 + hardware: + source: + component: 800G-ZR+ + count: 16.0 + target: + component: 800G-ZR+ + count: 16.0 + - source: + path: metro4/pop1 + match: &id021 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + logic: or + target: + path: metro5/pop1 + match: *id021 + pattern: one_to_one + link_params: + capacity: 200.0 + cost: 1028 + attrs: + link_type: inter_metro_corridor + source_metro: atlanta + target_metro: columbus + target_capacity: 3200.0 + site_edge: metro4/pop1|metro5/pop1|inter_metro:4-5:1-1 + adjacency_id: inter_metro:4-5 + distance_km: 1028 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: &id022 + - corridor_risk_atlanta_chicago + - corridor_risk_atlanta_columbus + - source: + path: metro4/pop2 + match: *id021 + target: + path: metro5/pop2 + match: *id021 + pattern: one_to_one + link_params: + capacity: 200.0 + cost: 1028 + attrs: + link_type: inter_metro_corridor + source_metro: atlanta + target_metro: columbus + target_capacity: 3200.0 + site_edge: metro4/pop2|metro5/pop2|inter_metro:4-5:2-2 + adjacency_id: inter_metro:4-5 + distance_km: 1028 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 + risk_groups: *id022 + - source: + path: metro5/pop1 + match: &id023 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: leaf + target: + path: metro5/pop2 + match: *id023 + pattern: one_to_one + link_params: + capacity: 1600.0 + cost: 65 + attrs: + link_type: intra_metro + source_metro: columbus + target_metro: columbus + target_capacity: 25600.0 + site_edge: metro5/pop1|metro5/pop2|intra_metro:columbus:1-2 + adjacency_id: intra_metro:columbus + distance_km: 65 + hardware: + source: + component: 800G-ZR+ + count: 2.0 + target: + component: 800G-ZR+ + count: 2.0 + - source: + path: metro5/pop1 + match: &id024 + conditions: + - attr: role + operator: == + value: core + - attr: role + operator: == + value: dc + - attr: role + operator: == + value: leaf + logic: or + 
target: + path: metro5/dc1 + match: *id024 + pattern: one_to_one + link_params: + capacity: 2400.0 + cost: 1 + attrs: + link_type: dc_to_pop + source_metro: columbus + target_metro: columbus + target_capacity: 38400.0 + site_edge: metro5/pop1|metro5/dc1|dc_to_pop:columbus:1-1 + adjacency_id: dc_to_pop:columbus + distance_km: 1 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 + - source: + path: metro5/pop2 + match: *id024 + target: + path: metro5/dc1 + match: *id024 + pattern: one_to_one + link_params: + capacity: 2400.0 + cost: 65 + attrs: + link_type: dc_to_pop + source_metro: columbus + target_metro: columbus + target_capacity: 38400.0 + site_edge: metro5/pop2|metro5/dc1|dc_to_pop:columbus:1-2 + adjacency_id: dc_to_pop:columbus + distance_km: 65 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 risk_groups: -- name: corridor_risk_atlanta_chicago - attrs: - type: corridor_risk - distance_km: 1302 -- name: corridor_risk_atlanta_columbus - attrs: - type: corridor_risk - distance_km: 1028 -- name: corridor_risk_atlanta_washington-arlington - attrs: - type: corridor_risk - distance_km: 1216 -- name: corridor_risk_chicago_columbus - attrs: - type: corridor_risk - distance_km: 658 -- name: corridor_risk_chicago_washington-arlington - attrs: - type: corridor_risk - distance_km: 1261 -- name: corridor_risk_columbus_new-york-jersey-city-newark - attrs: - type: corridor_risk - distance_km: 977 -- name: corridor_risk_columbus_washington-arlington - attrs: - type: corridor_risk - distance_km: 733 -- name: corridor_risk_new-york-jersey-city-newark_washington-arlington - attrs: - type: corridor_risk - distance_km: 412 -failure_policy_set: - weighted_modes: + - name: corridor_risk_atlanta_chicago attrs: - description: 'Balanced MC: SRLG + DC->PoP + node(maint) + intra-site fabric' - modes: - - weight: 0.3 - rules: - - entity_scope: risk_group - rule_type: choice - count: 1 - weight_by: distance_km - - weight: 0.35 - rules: - - entity_scope: link - rule_type: choice - count: 3 - conditions: - - attr: link_type - operator: == - value: dc_to_pop - logic: and - weight_by: target_capacity - - weight: 0.25 - rules: - - entity_scope: node - rule_type: choice - count: 1 - conditions: - - attr: node_type - operator: '!=' - value: dc_region - logic: and - weight_by: attached_capacity_gbps - - weight: 0.1 - rules: - - entity_scope: link - rule_type: choice - count: 4 - conditions: - - attr: link_type - operator: == - value: leaf_spine - - attr: link_type - operator: == - value: intra_group - - attr: link_type - operator: == - value: inter_group - - attr: link_type - operator: == - value: internal_mesh - logic: or -traffic_matrix_set: - baseline_traffic_matrix: - - source_path: ^metro1/dc1/.* - sink_path: ^metro2/dc1/.* - mode: pairwise - priority: 0 - demand: 15000.0 + type: corridor_risk + distance_km: 1302 + - name: corridor_risk_atlanta_columbus attrs: - euclidean_km: 330 - flow_policy_config: TE_WCMP_UNLIM - - source_path: ^metro2/dc1/.* - sink_path: ^metro1/dc1/.* - mode: pairwise - priority: 0 - demand: 15000.0 + type: corridor_risk + distance_km: 1028 + - name: corridor_risk_atlanta_washington-arlington attrs: - euclidean_km: 330 - flow_policy_config: TE_WCMP_UNLIM - - source_path: ^metro1/dc1/.* - sink_path: ^metro5/dc1/.* - mode: pairwise - priority: 0 - demand: 15000.0 + type: corridor_risk + distance_km: 1216 + - name: corridor_risk_chicago_columbus attrs: - euclidean_km: 748 - flow_policy_config: 
TE_WCMP_UNLIM - - source_path: ^metro5/dc1/.* - sink_path: ^metro1/dc1/.* - mode: pairwise - priority: 0 - demand: 15000.0 + type: corridor_risk + distance_km: 658 + - name: corridor_risk_chicago_washington-arlington attrs: - euclidean_km: 748 - flow_policy_config: TE_WCMP_UNLIM - - source_path: ^metro2/dc1/.* - sink_path: ^metro5/dc1/.* - mode: pairwise - priority: 0 - demand: 15000.0 + type: corridor_risk + distance_km: 1261 + - name: corridor_risk_columbus_new-york-jersey-city-newark attrs: - euclidean_km: 508 - flow_policy_config: TE_WCMP_UNLIM - - source_path: ^metro5/dc1/.* - sink_path: ^metro2/dc1/.* - mode: pairwise - priority: 0 - demand: 15000.0 + type: corridor_risk + distance_km: 977 + - name: corridor_risk_columbus_washington-arlington attrs: - euclidean_km: 508 - flow_policy_config: TE_WCMP_UNLIM + type: corridor_risk + distance_km: 733 + - name: corridor_risk_new-york-jersey-city-newark_washington-arlington + attrs: + type: corridor_risk + distance_km: 412 +failure_policy_set: + weighted_modes: + attrs: + description: "Balanced MC: SRLG + DC->PoP + node(maint) + intra-site fabric" + modes: + - weight: 0.3 + rules: + - entity_scope: risk_group + rule_type: choice + count: 1 + weight_by: distance_km + - weight: 0.35 + rules: + - entity_scope: link + rule_type: choice + count: 3 + conditions: + - attr: link_type + operator: == + value: dc_to_pop + logic: and + weight_by: target_capacity + - weight: 0.25 + rules: + - entity_scope: node + rule_type: choice + count: 1 + conditions: + - attr: node_type + operator: "!=" + value: dc_region + logic: and + weight_by: attached_capacity_gbps + - weight: 0.1 + rules: + - entity_scope: link + rule_type: choice + count: 4 + conditions: + - attr: link_type + operator: == + value: leaf_spine + - attr: link_type + operator: == + value: intra_group + - attr: link_type + operator: == + value: inter_group + - attr: link_type + operator: == + value: internal_mesh + logic: or +traffic_matrix_set: + baseline_traffic_matrix: + - source: ^metro1/dc1/.* + sink: ^metro2/dc1/.* + mode: pairwise + priority: 0 + demand: 15000.0 + attrs: + euclidean_km: 330 + flow_policy_config: TE_WCMP_UNLIM + - source: ^metro2/dc1/.* + sink: ^metro1/dc1/.* + mode: pairwise + priority: 0 + demand: 15000.0 + attrs: + euclidean_km: 330 + flow_policy_config: TE_WCMP_UNLIM + - source: ^metro1/dc1/.* + sink: ^metro5/dc1/.* + mode: pairwise + priority: 0 + demand: 15000.0 + attrs: + euclidean_km: 748 + flow_policy_config: TE_WCMP_UNLIM + - source: ^metro5/dc1/.* + sink: ^metro1/dc1/.* + mode: pairwise + priority: 0 + demand: 15000.0 + attrs: + euclidean_km: 748 + flow_policy_config: TE_WCMP_UNLIM + - source: ^metro2/dc1/.* + sink: ^metro5/dc1/.* + mode: pairwise + priority: 0 + demand: 15000.0 + attrs: + euclidean_km: 508 + flow_policy_config: TE_WCMP_UNLIM + - source: ^metro5/dc1/.* + sink: ^metro2/dc1/.* + mode: pairwise + priority: 0 + demand: 15000.0 + attrs: + euclidean_km: 508 + flow_policy_config: TE_WCMP_UNLIM workflow: -- step_type: NetworkStats - name: network_statistics -- step_type: MaximumSupportedDemand - name: msd_baseline - matrix_name: baseline_traffic_matrix - acceptance_rule: hard - alpha_start: 1.0 - growth_factor: 2.0 - alpha_min: 0.001 - alpha_max: 1000000.0 - resolution: 0.05 - max_bracket_iters: 16 - max_bisect_iters: 32 - seeds_per_alpha: 1 - placement_rounds: 2 -- step_type: TrafficMatrixPlacement - name: tm_placement - seed: 42 - matrix_name: baseline_traffic_matrix - failure_policy: weighted_modes - iterations: 1000 - parallelism: 7 - 
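The re-indented `weighted_modes` policy in this hunk defines four failure modes with weights 0.3, 0.35, 0.25, and 0.1, one of which is drawn per Monte Carlo iteration. A sketch of that selection step, assuming standard weighted sampling; the mode labels are made up:

```python
import random

modes = [  # (label, weight) pairs mirroring the policy's four modes
    ("srlg", 0.30),
    ("dc_to_pop_links", 0.35),
    ("node_maintenance", 0.25),
    ("intra_site_fabric", 0.10),
]
rng = random.Random(42)  # seeded, in the spirit of the scenario's seed knobs
labels, weights = zip(*modes)
draws = rng.choices(labels, weights=weights, k=1000)
# Expect roughly 350 of 1000 iterations to land on the dc_to_pop mode.
print(draws.count("dc_to_pop_links"))
```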
placement_rounds: auto - baseline: true - store_failure_patterns: false - include_flow_details: true - include_used_edges: false - alpha_from_step: msd_baseline - alpha_from_field: data.alpha_star -- step_type: CostPower - name: cost_power - include_disabled: true - aggregation_level: 2 + - step_type: NetworkStats + name: network_statistics + - step_type: MaximumSupportedDemand + name: msd_baseline + matrix_name: baseline_traffic_matrix + acceptance_rule: hard + alpha_start: 1.0 + growth_factor: 2.0 + alpha_min: 0.001 + alpha_max: 1000000.0 + resolution: 0.05 + max_bracket_iters: 16 + max_bisect_iters: 32 + seeds_per_alpha: 1 + placement_rounds: 2 + - step_type: TrafficMatrixPlacement + name: tm_placement + seed: 42 + matrix_name: baseline_traffic_matrix + failure_policy: weighted_modes + iterations: 1000 + parallelism: 7 + placement_rounds: auto + baseline: true + store_failure_patterns: false + include_flow_details: true + include_used_edges: false + alpha_from_step: msd_baseline + alpha_from_field: data.alpha_star + - step_type: CostPower + name: cost_power + include_disabled: true + aggregation_level: 2 diff --git a/scenarios/nsfnet.yaml b/scenarios/nsfnet.yaml index 0b11361..4f6494b 100644 --- a/scenarios/nsfnet.yaml +++ b/scenarios/nsfnet.yaml @@ -191,8 +191,8 @@ workflow: # MaxFlow capacity matrix between all node pairs - step_type: MaxFlow name: node_to_node_capacity_matrix_1 - source_path: "^(.+)$" - sink_path: "^(.+)$" + source: "^(.+)$" + sink: "^(.+)$" mode: pairwise failure_policy: single_link_failure iterations: 1000 @@ -207,8 +207,8 @@ workflow: - step_type: MaxFlow name: node_to_node_capacity_matrix_2 - source_path: "^(.+)$" - sink_path: "^(.+)$" + source: "^(.+)$" + sink: "^(.+)$" mode: pairwise failure_policy: availability_1992 iterations: 1000 diff --git a/scenarios/square_mesh.yaml b/scenarios/square_mesh.yaml index f800c20..358ac19 100644 --- a/scenarios/square_mesh.yaml +++ b/scenarios/square_mesh.yaml @@ -55,8 +55,8 @@ failure_policy_set: traffic_matrix_set: baseline_traffic_matrix: - - source_path: "^N([1-4])$" - sink_path: "^N([1-4])$" + - source: "^N([1-4])$" + sink: "^N([1-4])$" demand: 12.0 mode: "pairwise" attrs: @@ -96,8 +96,8 @@ workflow: # 3) MaxFlow capacity matrix between all node pairs - step_type: MaxFlow name: node_to_node_capacity_matrix - source_path: "^(N[1-4])$" - sink_path: "^(N[1-4])$" + source: "^(N[1-4])$" + sink: "^(N[1-4])$" mode: pairwise failure_policy: single_link_failure iterations: 1000 diff --git a/tests/cli/test_cli.py b/tests/cli/test_cli.py index 8f82cd9..6c2a40d 100644 --- a/tests/cli/test_cli.py +++ b/tests/cli/test_cli.py @@ -319,8 +319,8 @@ def test_inspect_workflow_node_selection_preview_basic(tmp_path: Path) -> None: workflow: - step_type: MaxFlow name: cap - source_path: "^src" - sink_path: "^dst" + source: "^src" + sink: "^dst" """ ) @@ -329,7 +329,7 @@ def test_inspect_workflow_node_selection_preview_basic(tmp_path: Path) -> None: out = "\n".join(str(c.args[0]) for c in mprint.call_args_list) assert "Node selection preview:" in out - assert "source_path:" in out and "sink_path:" in out + assert "source:" in out and "sink:" in out assert "groups" in out and "nodes" in out @@ -344,8 +344,8 @@ def test_inspect_workflow_node_selection_detail_and_warning(tmp_path: Path) -> N workflow: - step_type: MaxFlow name: cap2 - source_path: "^none" - sink_path: "^none" + source: "^none" + sink: "^none" """ ) @@ -374,8 +374,8 @@ def test_inspect_capacity_vs_demand_summary_basic(tmp_path: Path) -> None: capacity: 100 traffic_matrix_set: 
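The workflow above chains steps: `MaximumSupportedDemand` publishes `data.alpha_star`, and `TrafficMatrixPlacement` consumes it via `alpha_from_step`/`alpha_from_field`, scaling every base demand by it (as `_build_scaled_demands` does earlier in this diff). A worked sketch with a made-up alpha:

```python
alpha_star = 0.5  # illustrative MSD result; the real value comes from the step
base = [
    {"source": "^metro1/dc1/.*", "sink": "^metro2/dc1/.*", "demand": 15000.0},
    {"source": "^metro2/dc1/.*", "sink": "^metro1/dc1/.*", "demand": 15000.0},
]
scaled = [{**d, "demand": d["demand"] * alpha_star} for d in base]
assert [d["demand"] for d in scaled] == [7500.0, 7500.0]
```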
default: - - source_path: "^A$" - sink_path: "^B$" + - source: "^A$" + sink: "^B$" demand: 50 workflow: - step_type: BuildGraph diff --git a/tests/dsl/test_examples.py b/tests/dsl/test_examples.py index 6988496..003e85e 100644 --- a/tests/dsl/test_examples.py +++ b/tests/dsl/test_examples.py @@ -272,8 +272,8 @@ def test_traffic_matrix_set_example(): traffic_matrix_set: default: - - source_path: "source.*" - sink_path: "sink.*" + - source: "source.*" + sink: "sink.*" demand: 100 mode: "combine" priority: 1 @@ -285,8 +285,8 @@ def test_traffic_matrix_set_example(): default_demands = scenario.traffic_matrix_set.get_default_matrix() assert len(default_demands) == 1 demand = default_demands[0] - assert demand.source_path == "source.*" - assert demand.sink_path == "sink.*" + assert demand.source == "source.*" + assert demand.sink == "sink.*" assert demand.demand == 100 assert demand.mode == "combine" @@ -437,7 +437,7 @@ def test_link_overrides_example(): def test_variable_expansion(): - """Test variable expansion in adjacency.""" + """Test variable expansion in adjacency using $var syntax.""" yaml_content = """ blueprints: test_expansion: @@ -452,7 +452,7 @@ def test_variable_expansion(): node_count: 2 name_template: "spine-{node_num}" adjacency: - - source: "plane{p}_rack" + - source: "plane${p}_rack" target: "spine" expand_vars: p: [1, 2] @@ -586,8 +586,8 @@ def test_zip_variable_mismatch_raises(): RackC: node_count: 1 adjacency: - - source: /Rack{rack_id} - target: /Rack{other_rack_id} + - source: /Rack${rack_id} + target: /Rack${other_rack_id} expand_vars: rack_id: [A, B] other_rack_id: [C, A, B] @@ -596,7 +596,7 @@ def test_zip_variable_mismatch_raises(): with pytest.raises(ValueError) as exc: Scenario.from_yaml(yaml_content) - assert "zip expansion requires all lists be the same length" in str(exc.value) + assert "zip expansion requires equal-length lists" in str(exc.value) def test_direct_link_unknown_node_raises(): @@ -615,17 +615,17 @@ def test_direct_link_unknown_node_raises(): assert "Link references unknown node(s)" in str(exc.value) -def test_attr_selector_inside_blueprint_paths(): - """Attribute directive paths inside blueprint adjacency should not be prefixed. +def test_group_by_selector_inside_blueprint(): + """Test group_by selector in blueprint adjacency. - When a blueprint adjacency uses a selector with path "attr:", the - attribute directive must be treated as global, not joined with the blueprint - instantiation path. This test ensures the expansion connects leaf->spine - using attribute-based selectors inside the blueprint. + When a blueprint adjacency uses a selector with group_by, nodes are + grouped by that attribute value regardless of path prefix. This test + ensures the expansion connects leaf->spine using attribute-based + selectors inside the blueprint. 
""" yaml_content = """ blueprints: - bp_attr: + bp_group: groups: leaf: node_count: 2 @@ -639,14 +639,14 @@ def test_attr_selector_inside_blueprint_paths(): role: "spine" adjacency: - source: - path: "attr:role" + group_by: "role" match: conditions: - attr: "role" operator: "==" value: "leaf" target: - path: "attr:role" + group_by: "role" match: conditions: - attr: "role" @@ -659,7 +659,7 @@ def test_attr_selector_inside_blueprint_paths(): network: groups: pod1: - use_blueprint: bp_attr + use_blueprint: bp_group """ scenario = Scenario.from_yaml(yaml_content) @@ -669,16 +669,15 @@ def test_attr_selector_inside_blueprint_paths(): assert len(scenario.network.links) == 2 -def test_attr_selector_with_expand_vars_inside_blueprint_paths(): - """Attribute directive with expand_vars inside blueprint adjacency should not be prefixed. +def test_group_by_with_variable_expansion(): + """Test group_by selector combined with variable expansion. - Use different attribute names for source and target to avoid cross-connections and - validate that at least some edges are created when using attr: paths generated via - expand_vars within a blueprint adjacency. + Validates that group_by selectors work correctly when the attribute + name is generated via variable expansion using $var syntax. """ yaml_content = """ blueprints: - bp_attr_vars: + bp_group_vars: groups: leaf: node_count: 2 @@ -691,11 +690,13 @@ def test_attr_selector_with_expand_vars_inside_blueprint_paths(): attrs: dst_role: "spine" adjacency: - - source: "attr:{src_key}" - target: "attr:{dst_key}" + - source: + group_by: "${src_attr}" + target: + group_by: "${dst_attr}" expand_vars: - src_key: ["src_role"] - dst_key: ["dst_role"] + src_attr: ["src_role"] + dst_attr: ["dst_role"] pattern: "mesh" link_params: capacity: 10 @@ -703,14 +704,14 @@ def test_attr_selector_with_expand_vars_inside_blueprint_paths(): network: groups: pod1: - use_blueprint: bp_attr_vars + use_blueprint: bp_group_vars """ scenario = Scenario.from_yaml(yaml_content) # Expect 3 nodes total (2 leaf, 1 spine) assert len(scenario.network.nodes) == 3 - # Without the fix, zero links would be created due to prefixed attr: paths. - assert len(scenario.network.links) > 0 + # Mesh between 2 leaf and 1 spine = 2 links + assert len(scenario.network.links) == 2 def test_invalid_nodes_type_raises(): diff --git a/tests/dsl/test_expansion.py b/tests/dsl/test_expansion.py new file mode 100644 index 0000000..3c0e7e8 --- /dev/null +++ b/tests/dsl/test_expansion.py @@ -0,0 +1,293 @@ +"""Comprehensive tests for the variable expansion system. 
+ +Tests for ngraph.dsl.expansion modules: +- ExpansionSpec: schema for expansion configuration +- expand_templates: variable substitution in templates +- substitute_vars: single template substitution +- expand_name_patterns: bracket expansion for names +""" + +import pytest + +from ngraph.dsl.expansion import ( + ExpansionSpec, + expand_name_patterns, + expand_risk_group_refs, + expand_templates, + substitute_vars, +) + +# ────────────────────────────────────────────────────────────────────────────── +# ExpansionSpec Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestExpansionSpec: + """Tests for ExpansionSpec dataclass.""" + + def test_default_values(self) -> None: + """Default ExpansionSpec has empty vars and cartesian mode.""" + spec = ExpansionSpec() + assert spec.expand_vars == {} + assert spec.expansion_mode == "cartesian" + + def test_is_empty(self) -> None: + """is_empty returns True for empty expand_vars.""" + assert ExpansionSpec().is_empty() is True + assert ExpansionSpec(expand_vars={"x": [1]}).is_empty() is False + + def test_custom_values(self) -> None: + """Custom values are preserved.""" + spec = ExpansionSpec(expand_vars={"dc": [1, 2]}, expansion_mode="zip") + assert spec.expand_vars == {"dc": [1, 2]} + assert spec.expansion_mode == "zip" + + +# ────────────────────────────────────────────────────────────────────────────── +# substitute_vars Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestSubstituteVars: + """Tests for substitute_vars function.""" + + def test_dollar_var_syntax(self) -> None: + """$var syntax is substituted.""" + result = substitute_vars("dc$num/leaf", {"num": 1}) + assert result == "dc1/leaf" + + def test_dollar_brace_syntax(self) -> None: + """${var} syntax is substituted.""" + result = substitute_vars("dc${num}/leaf", {"num": 1}) + assert result == "dc1/leaf" + + def test_multiple_vars(self) -> None: + """Multiple variables are substituted.""" + result = substitute_vars("dc${dc}_rack${rack}", {"dc": 1, "rack": 2}) + assert result == "dc1_rack2" + + def test_same_var_multiple_times(self) -> None: + """Same variable used multiple times.""" + result = substitute_vars("dc${dc}/pod${dc}", {"dc": 1}) + assert result == "dc1/pod1" + + def test_no_vars_passthrough(self) -> None: + """String without variables passes through unchanged.""" + result = substitute_vars("static_path", {}) + assert result == "static_path" + + def test_missing_var_raises(self) -> None: + """Missing variable raises KeyError.""" + with pytest.raises(KeyError, match="not found"): + substitute_vars("dc${missing}", {}) + + def test_regex_not_confused(self) -> None: + """Regex quantifiers {m,n} are not confused with variables.""" + result = substitute_vars("^node{1,3}$", {}) + assert result == "^node{1,3}$" + + def test_underscore_in_var_name(self) -> None: + """Variable names with underscores work.""" + result = substitute_vars("${my_var}", {"my_var": "value"}) + assert result == "value" + + +# ────────────────────────────────────────────────────────────────────────────── +# expand_templates Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestExpandTemplatesCartesian: + """Tests for expand_templates with cartesian mode.""" + + def test_single_var_expands(self) -> None: + """Single variable expands to multiple results.""" + spec = ExpansionSpec(expand_vars={"dc": [1, 2, 3]}) + results = list(expand_templates({"path": "dc${dc}"}, spec)) 
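+        # One dict is yielded per value of "dc"; order follows the input list.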
+ + assert len(results) == 3 + assert results[0] == {"path": "dc1"} + assert results[1] == {"path": "dc2"} + assert results[2] == {"path": "dc3"} + + def test_multiple_vars_cartesian(self) -> None: + """Multiple variables create cartesian product.""" + spec = ExpansionSpec(expand_vars={"dc": [1, 2], "rack": ["a", "b"]}) + results = list(expand_templates({"path": "dc${dc}_rack${rack}"}, spec)) + + assert len(results) == 4 # 2 * 2 + paths = [r["path"] for r in results] + assert "dc1_racka" in paths + assert "dc1_rackb" in paths + assert "dc2_racka" in paths + assert "dc2_rackb" in paths + + def test_multiple_templates(self) -> None: + """Multiple template fields are all expanded.""" + spec = ExpansionSpec(expand_vars={"dc": [1, 2]}) + results = list( + expand_templates({"source": "dc${dc}/leaf", "sink": "dc${dc}/spine"}, spec) + ) + + assert len(results) == 2 + assert results[0] == {"source": "dc1/leaf", "sink": "dc1/spine"} + assert results[1] == {"source": "dc2/leaf", "sink": "dc2/spine"} + + def test_empty_vars_yields_original(self) -> None: + """Empty expand_vars yields original template.""" + spec = ExpansionSpec() + results = list(expand_templates({"path": "static"}, spec)) + + assert len(results) == 1 + assert results[0] == {"path": "static"} + + +class TestExpandTemplatesZip: + """Tests for expand_templates with zip mode.""" + + def test_zip_pairs_by_index(self) -> None: + """Zip mode pairs variables by index.""" + spec = ExpansionSpec( + expand_vars={"src": ["a", "b"], "dst": ["x", "y"]}, expansion_mode="zip" + ) + results = list(expand_templates({"path": "${src}->${dst}"}, spec)) + + assert len(results) == 2 + assert results[0] == {"path": "a->x"} + assert results[1] == {"path": "b->y"} + + def test_zip_mismatched_lengths_raises(self) -> None: + """Zip mode with mismatched list lengths raises.""" + spec = ExpansionSpec( + expand_vars={"src": ["a", "b"], "dst": ["x", "y", "z"]}, + expansion_mode="zip", + ) + with pytest.raises(ValueError, match="equal-length"): + list(expand_templates({"path": "${src}->${dst}"}, spec)) + + +class TestExpandTemplatesLimits: + """Tests for expansion limits.""" + + def test_large_expansion_raises(self) -> None: + """Expansion exceeding limit raises.""" + # Create vars that would produce > 10,000 combinations + spec = ExpansionSpec( + expand_vars={ + "a": list(range(50)), + "b": list(range(50)), + "c": list(range(50)), + } + ) + with pytest.raises(ValueError, match="limit"): + list(expand_templates({"path": "${a}${b}${c}"}, spec)) + + +# ────────────────────────────────────────────────────────────────────────────── +# expand_name_patterns Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestExpandNamePatterns: + """Tests for bracket expansion in group names.""" + + def test_no_brackets_returns_unchanged(self) -> None: + """Name without brackets returns as single-item list.""" + assert expand_name_patterns("simple") == ["simple"] + + def test_single_range(self) -> None: + """Single range bracket expands.""" + assert expand_name_patterns("node[1-3]") == ["node1", "node2", "node3"] + + def test_single_list(self) -> None: + """Single list bracket expands.""" + assert expand_name_patterns("dc[a,b,c]") == ["dca", "dcb", "dcc"] + + def test_mixed_range_and_list(self) -> None: + """Mixed range and list in single bracket.""" + result = expand_name_patterns("node[1,3,5-7]") + assert result == ["node1", "node3", "node5", "node6", "node7"] + + def test_multiple_brackets_cartesian(self) -> None: + """Multiple 
brackets create cartesian product.""" + result = expand_name_patterns("dc[1-2]_rack[a,b]") + assert sorted(result) == sorted( + ["dc1_racka", "dc1_rackb", "dc2_racka", "dc2_rackb"] + ) + + def test_three_brackets(self) -> None: + """Three brackets create full cartesian product.""" + result = expand_name_patterns("a[1-2]b[3-4]c[5-6]") + assert len(result) == 8 # 2 * 2 * 2 + + def test_brackets_at_start(self) -> None: + """Brackets at start of name.""" + assert expand_name_patterns("[a,b]suffix") == ["asuffix", "bsuffix"] + + def test_brackets_at_end(self) -> None: + """Brackets at end of name.""" + assert expand_name_patterns("prefix[1-2]") == ["prefix1", "prefix2"] + + def test_adjacent_brackets(self) -> None: + """Adjacent brackets expand correctly.""" + result = expand_name_patterns("[a,b][1-2]") + assert sorted(result) == sorted(["a1", "a2", "b1", "b2"]) + + def test_single_value_range(self) -> None: + """Single value range [n-n] produces one result.""" + assert expand_name_patterns("node[5-5]") == ["node5"] + + def test_single_value_list(self) -> None: + """Single value list [x] produces one result.""" + assert expand_name_patterns("node[x]") == ["nodex"] + + +# ────────────────────────────────────────────────────────────────────────────── +# expand_risk_group_refs Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestExpandRiskGroupRefs: + """Tests for bracket expansion in risk group reference lists.""" + + def test_no_brackets_passthrough(self) -> None: + """List without brackets returns unchanged as set.""" + assert expand_risk_group_refs(["RG1"]) == {"RG1"} + + def test_multiple_literals(self) -> None: + """Multiple literal names return as set.""" + assert expand_risk_group_refs(["RG1", "RG2", "RG3"]) == {"RG1", "RG2", "RG3"} + + def test_single_pattern(self) -> None: + """Single pattern expands to multiple risk groups.""" + assert expand_risk_group_refs(["RG[1-3]"]) == {"RG1", "RG2", "RG3"} + + def test_multiple_patterns(self) -> None: + """Multiple patterns expand independently.""" + result = expand_risk_group_refs(["A[1-2]", "B[a,b]"]) + assert result == {"A1", "A2", "Ba", "Bb"} + + def test_mixed_literal_and_pattern(self) -> None: + """Mix of literal and pattern names.""" + result = expand_risk_group_refs(["Literal", "Pattern[1-2]"]) + assert result == {"Literal", "Pattern1", "Pattern2"} + + def test_empty_list(self) -> None: + """Empty list returns empty set.""" + assert expand_risk_group_refs([]) == set() + + def test_cartesian_in_single_entry(self) -> None: + """Multiple brackets in single entry create cartesian product.""" + result = expand_risk_group_refs(["DC[1-2]_Rack[a,b]"]) + assert result == {"DC1_Racka", "DC1_Rackb", "DC2_Racka", "DC2_Rackb"} + + def test_set_input(self) -> None: + """Accepts set as input (not just list).""" + result = expand_risk_group_refs({"RG[1-2]"}) + assert result == {"RG1", "RG2"} + + def test_duplicates_deduplicated(self) -> None: + """Duplicate results are deduplicated.""" + # Two patterns that produce overlapping results + result = expand_risk_group_refs(["RG[1-2]", "RG[2-3]"]) + assert result == {"RG1", "RG2", "RG3"} diff --git a/tests/dsl/test_risk_group_expansion.py b/tests/dsl/test_risk_group_expansion.py new file mode 100644 index 0000000..77c1b25 --- /dev/null +++ b/tests/dsl/test_risk_group_expansion.py @@ -0,0 +1,385 @@ +"""Integration tests for bracket expansion in risk groups. 
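+
+Pattern under test (sketch; mirrors the scenarios below)::
+
+    risk_groups:
+      - name: "RG[1-3]"   # defines RG1, RG2, RG3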
+ +Tests: +- Risk group definition expansion (top-level risk_groups with bracket patterns) +- Risk group membership expansion (risk_groups arrays on nodes/links/groups) +""" + +from ngraph.scenario import Scenario + + +class TestRiskGroupDefinitionExpansion: + """Tests for bracket expansion in risk group definitions.""" + + def test_simple_name_expansion(self) -> None: + """Single bracket pattern creates multiple risk groups.""" + yaml_content = """ +network: + nodes: + A: {} + +risk_groups: + - name: "RG[1-3]" +""" + scenario = Scenario.from_yaml(yaml_content) + rg_names = set(scenario.network.risk_groups.keys()) + assert rg_names == {"RG1", "RG2", "RG3"} + + def test_list_expansion(self) -> None: + """List bracket pattern creates multiple risk groups.""" + yaml_content = """ +network: + nodes: + A: {} + +risk_groups: + - name: "DC[a,b,c]_Power" +""" + scenario = Scenario.from_yaml(yaml_content) + rg_names = set(scenario.network.risk_groups.keys()) + assert rg_names == {"DCa_Power", "DCb_Power", "DCc_Power"} + + def test_cartesian_expansion(self) -> None: + """Multiple brackets create cartesian product of risk groups.""" + yaml_content = """ +network: + nodes: + A: {} + +risk_groups: + - name: "DC[1-2]_Rack[a,b]" +""" + scenario = Scenario.from_yaml(yaml_content) + rg_names = set(scenario.network.risk_groups.keys()) + assert rg_names == {"DC1_Racka", "DC1_Rackb", "DC2_Racka", "DC2_Rackb"} + + def test_attrs_preserved_on_expansion(self) -> None: + """Attributes are copied to all expanded risk groups.""" + yaml_content = """ +network: + nodes: + A: {} + +risk_groups: + - name: "Power[1-3]" + attrs: + type: power + criticality: high +""" + scenario = Scenario.from_yaml(yaml_content) + for i in range(1, 4): + rg = scenario.network.risk_groups[f"Power{i}"] + assert rg.attrs["type"] == "power" + assert rg.attrs["criticality"] == "high" + + def test_disabled_preserved_on_expansion(self) -> None: + """Disabled flag is copied to all expanded risk groups.""" + yaml_content = """ +network: + nodes: + A: {} + +risk_groups: + - name: "Maint[1-2]" + disabled: true +""" + scenario = Scenario.from_yaml(yaml_content) + assert scenario.network.risk_groups["Maint1"].disabled is True + assert scenario.network.risk_groups["Maint2"].disabled is True + + def test_children_expansion(self) -> None: + """Children names are also expanded.""" + yaml_content = """ +network: + nodes: + A: {} + +risk_groups: + - name: "DC1" + children: + - name: "Rack[1-3]" +""" + scenario = Scenario.from_yaml(yaml_content) + dc1 = scenario.network.risk_groups["DC1"] + child_names = {c.name for c in dc1.children} + assert child_names == {"Rack1", "Rack2", "Rack3"} + + def test_parent_and_children_both_expand(self) -> None: + """Both parent and children can have bracket patterns.""" + yaml_content = """ +network: + nodes: + A: {} + +risk_groups: + - name: "DC[1-2]" + children: + - name: "Rack[a,b]" +""" + scenario = Scenario.from_yaml(yaml_content) + # Should have DC1 and DC2 + assert "DC1" in scenario.network.risk_groups + assert "DC2" in scenario.network.risk_groups + # Each should have Racka and Rackb children + for dc_name in ["DC1", "DC2"]: + dc = scenario.network.risk_groups[dc_name] + child_names = {c.name for c in dc.children} + assert child_names == {"Racka", "Rackb"} + + def test_no_expansion_needed(self) -> None: + """Literal names work unchanged.""" + yaml_content = """ +network: + nodes: + A: {} + +risk_groups: + - name: "PowerSupply" + - name: "Cooling" +""" + scenario = Scenario.from_yaml(yaml_content) + rg_names = 
set(scenario.network.risk_groups.keys()) + assert rg_names == {"PowerSupply", "Cooling"} + + +class TestRiskGroupMembershipExpansion: + """Tests for bracket expansion in risk group membership arrays.""" + + def test_node_risk_groups_expansion(self) -> None: + """Node risk_groups array expands bracket patterns.""" + yaml_content = """ +network: + nodes: + ServerA: + risk_groups: ["RG[1-3]"] + +risk_groups: + - name: "RG[1-3]" +""" + scenario = Scenario.from_yaml(yaml_content) + node = scenario.network.nodes["ServerA"] + assert node.risk_groups == {"RG1", "RG2", "RG3"} + + def test_group_risk_groups_expansion(self) -> None: + """Group risk_groups array expands and inherits to nodes.""" + yaml_content = """ +network: + groups: + servers: + node_count: 2 + risk_groups: ["Power[1-2]"] + +risk_groups: + - name: "Power[1-2]" +""" + scenario = Scenario.from_yaml(yaml_content) + for node_name in ["servers/servers-1", "servers/servers-2"]: + node = scenario.network.nodes[node_name] + assert node.risk_groups == {"Power1", "Power2"} + + def test_adjacency_link_risk_groups_expansion(self) -> None: + """Adjacency link_params risk_groups expands.""" + yaml_content = """ +network: + groups: + leaf: + node_count: 2 + spine: + node_count: 2 + adjacency: + - source: /leaf + target: /spine + pattern: mesh + link_params: + risk_groups: ["Fiber[1-2]"] + +risk_groups: + - name: "Fiber[1-2]" +""" + scenario = Scenario.from_yaml(yaml_content) + for link in scenario.network.links.values(): + assert link.risk_groups == {"Fiber1", "Fiber2"} + + def test_direct_link_risk_groups_expansion(self) -> None: + """Direct link risk_groups expands.""" + yaml_content = """ +network: + nodes: + A: {} + B: {} + links: + - source: A + target: B + link_params: + risk_groups: ["Cable[a,b,c]"] + +risk_groups: + - name: "Cable[a,b,c]" +""" + scenario = Scenario.from_yaml(yaml_content) + # Get the link (there's only one) + link = next(iter(scenario.network.links.values())) + assert link.risk_groups == {"Cablea", "Cableb", "Cablec"} + + def test_node_override_risk_groups_expansion(self) -> None: + """Node override risk_groups expands.""" + yaml_content = """ +network: + groups: + servers: + node_count: 2 + node_overrides: + - path: servers + risk_groups: ["Zone[1-2]"] + +risk_groups: + - name: "Zone[1-2]" +""" + scenario = Scenario.from_yaml(yaml_content) + for node_name in ["servers/servers-1", "servers/servers-2"]: + node = scenario.network.nodes[node_name] + assert node.risk_groups == {"Zone1", "Zone2"} + + def test_link_override_risk_groups_expansion(self) -> None: + """Link override risk_groups expands.""" + yaml_content = """ +network: + groups: + leaf: + node_count: 2 + spine: + node_count: 1 + adjacency: + - source: leaf + target: spine + pattern: mesh + link_overrides: + - source: leaf + target: spine + link_params: + risk_groups: ["Path[1-3]"] + +risk_groups: + - name: "Path[1-3]" +""" + scenario = Scenario.from_yaml(yaml_content) + for link in scenario.network.links.values(): + assert link.risk_groups == {"Path1", "Path2", "Path3"} + + def test_mixed_literal_and_pattern(self) -> None: + """Mix of literal and pattern in risk_groups array.""" + yaml_content = """ +network: + nodes: + Server: + risk_groups: ["Static", "Dynamic[1-2]"] + +risk_groups: + - name: "Static" + - name: "Dynamic[1-2]" +""" + scenario = Scenario.from_yaml(yaml_content) + node = scenario.network.nodes["Server"] + assert node.risk_groups == {"Static", "Dynamic1", "Dynamic2"} + + def test_empty_risk_groups_array(self) -> None: + """Empty risk_groups array 
works correctly.""" + yaml_content = """ +network: + nodes: + Server: + risk_groups: [] +""" + scenario = Scenario.from_yaml(yaml_content) + node = scenario.network.nodes["Server"] + assert node.risk_groups == set() + + +class TestRiskGroupExpansionEdgeCases: + """Edge cases and error handling for risk group expansion.""" + + def test_overlapping_patterns_deduplicated(self) -> None: + """Overlapping patterns in membership array are deduplicated.""" + yaml_content = """ +network: + nodes: + Server: + risk_groups: ["RG[1-3]", "RG[2-4]"] + +risk_groups: + - name: "RG[1-4]" +""" + scenario = Scenario.from_yaml(yaml_content) + node = scenario.network.nodes["Server"] + # RG2 and RG3 appear in both patterns but should only be in set once + assert node.risk_groups == {"RG1", "RG2", "RG3", "RG4"} + + def test_inherited_plus_own_risk_groups(self) -> None: + """Parent and child risk groups combine correctly via blueprint.""" + yaml_content = """ +blueprints: + pod: + groups: + servers: + node_count: 2 + risk_groups: ["Child[a,b]"] + +network: + groups: + parent: + use_blueprint: pod + risk_groups: ["Parent[1-2]"] + +risk_groups: + - name: "Parent[1-2]" + - name: "Child[a,b]" +""" + scenario = Scenario.from_yaml(yaml_content) + # Nodes should have both parent and child risk groups + for node_name in ["parent/servers/servers-1", "parent/servers/servers-2"]: + node = scenario.network.nodes[node_name] + assert node.risk_groups == {"Parent1", "Parent2", "Childa", "Childb"} + + def test_blueprint_risk_groups_expansion(self) -> None: + """Risk groups in blueprint groups expand correctly.""" + yaml_content = """ +blueprints: + pod: + groups: + leaf: + node_count: 2 + risk_groups: ["Leaf[1-2]"] + +network: + groups: + pod1: + use_blueprint: pod + +risk_groups: + - name: "Leaf[1-2]" +""" + scenario = Scenario.from_yaml(yaml_content) + for node_name in ["pod1/leaf/leaf-1", "pod1/leaf/leaf-2"]: + node = scenario.network.nodes[node_name] + assert node.risk_groups == {"Leaf1", "Leaf2"} + + def test_definition_and_membership_consistency(self) -> None: + """Expanded definitions and memberships reference same groups.""" + yaml_content = """ +network: + groups: + servers: + node_count: 3 + risk_groups: ["Power[1-3]"] + +risk_groups: + - name: "Power[1-3]" + attrs: + type: power +""" + scenario = Scenario.from_yaml(yaml_content) + # All referenced risk groups should exist + node = scenario.network.nodes["servers/servers-1"] + for rg_name in node.risk_groups: + assert rg_name in scenario.network.risk_groups + assert scenario.network.risk_groups[rg_name].attrs["type"] == "power" diff --git a/tests/dsl/test_selectors.py b/tests/dsl/test_selectors.py new file mode 100644 index 0000000..bf106ba --- /dev/null +++ b/tests/dsl/test_selectors.py @@ -0,0 +1,583 @@ +"""Comprehensive tests for the unified selector system. 
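+
+Selection flow sketch (names as exercised by the tests below)::
+
+    sel = normalize_selector({"group_by": "role"}, "adjacency")
+    groups = select_nodes(network, sel, default_active_only=False)
+    # -> {"leaf": [...], "spine": [...]}, keyed by attribute value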
+ +Tests for ngraph.dsl.selectors modules: +- normalize_selector: parsing and normalization +- select_nodes: node selection with all stages +- conditions: all condition operators +""" + +import pytest + +from ngraph.dsl.selectors import ( + Condition, + MatchSpec, + NodeSelector, + evaluate_condition, + evaluate_conditions, + normalize_selector, + select_nodes, +) +from ngraph.model.network import Network, Node + +# ────────────────────────────────────────────────────────────────────────────── +# Fixtures +# ────────────────────────────────────────────────────────────────────────────── + + +@pytest.fixture +def simple_network() -> Network: + """A simple network with 4 nodes for basic testing.""" + network = Network() + for name in ["A", "B", "C", "D"]: + network.add_node(Node(name)) + return network + + +@pytest.fixture +def attributed_network() -> Network: + """Network with nodes having various attributes for selector testing.""" + network = Network() + + # Datacenter 1: 2 leafs, 1 spine + network.add_node(Node("dc1_leaf_1", attrs={"dc": "dc1", "role": "leaf", "tier": 1})) + network.add_node(Node("dc1_leaf_2", attrs={"dc": "dc1", "role": "leaf", "tier": 1})) + network.add_node( + Node("dc1_spine_1", attrs={"dc": "dc1", "role": "spine", "tier": 2}) + ) + + # Datacenter 2: 2 leafs, 1 spine (one disabled) + network.add_node(Node("dc2_leaf_1", attrs={"dc": "dc2", "role": "leaf", "tier": 1})) + network.add_node( + Node( + "dc2_leaf_2", attrs={"dc": "dc2", "role": "leaf", "tier": 1}, disabled=True + ) + ) + network.add_node( + Node("dc2_spine_1", attrs={"dc": "dc2", "role": "spine", "tier": 2}) + ) + + return network + + +# ────────────────────────────────────────────────────────────────────────────── +# NodeSelector Schema Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestNodeSelectorSchema: + """Tests for NodeSelector dataclass validation.""" + + def test_path_only_valid(self) -> None: + """NodeSelector with only path is valid.""" + sel = NodeSelector(path="^dc1/.*") + assert sel.path == "^dc1/.*" + assert sel.group_by is None + assert sel.match is None + + def test_group_by_only_valid(self) -> None: + """NodeSelector with only group_by is valid.""" + sel = NodeSelector(group_by="role") + assert sel.group_by == "role" + assert sel.path is None + + def test_match_only_valid(self) -> None: + """NodeSelector with only match is valid.""" + cond = Condition(attr="role", operator="==", value="leaf") + match = MatchSpec(conditions=[cond]) + sel = NodeSelector(match=match) + assert sel.match is not None + assert sel.path is None + assert sel.group_by is None + + def test_all_fields_valid(self) -> None: + """NodeSelector with all fields is valid.""" + cond = Condition(attr="role", operator="==", value="leaf") + match = MatchSpec(conditions=[cond]) + sel = NodeSelector(path="^dc1/.*", group_by="role", match=match) + assert sel.path == "^dc1/.*" + assert sel.group_by == "role" + assert sel.match is not None + + def test_no_fields_raises(self) -> None: + """NodeSelector with no fields raises ValueError.""" + with pytest.raises(ValueError, match="at least one of"): + NodeSelector() + + +# ────────────────────────────────────────────────────────────────────────────── +# normalize_selector Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestNormalizeSelector: + """Tests for normalize_selector function.""" + + def test_string_input_creates_path_selector(self) -> None: + """String input creates NodeSelector 
with path.""" + sel = normalize_selector("^dc1/.*", "demand") + assert sel.path == "^dc1/.*" + assert sel.group_by is None + assert sel.match is None + + def test_dict_with_path(self) -> None: + """Dict with path key creates selector.""" + sel = normalize_selector({"path": "^dc1/.*"}, "demand") + assert sel.path == "^dc1/.*" + + def test_dict_with_group_by(self) -> None: + """Dict with group_by key creates selector.""" + sel = normalize_selector({"group_by": "role"}, "demand") + assert sel.group_by == "role" + assert sel.path is None + + def test_dict_with_match(self) -> None: + """Dict with match key creates selector.""" + sel = normalize_selector( + { + "match": { + "conditions": [{"attr": "role", "operator": "==", "value": "leaf"}], + "logic": "and", + } + }, + "demand", + ) + assert sel.match is not None + assert len(sel.match.conditions) == 1 + assert sel.match.logic == "and" + + def test_dict_combined_fields(self) -> None: + """Dict with multiple fields creates combined selector.""" + sel = normalize_selector( + { + "path": "^dc1/.*", + "group_by": "role", + "match": { + "conditions": [{"attr": "tier", "operator": "==", "value": 1}] + }, + }, + "demand", + ) + assert sel.path == "^dc1/.*" + assert sel.group_by == "role" + assert sel.match is not None + + def test_existing_node_selector_with_active_only_set(self) -> None: + """NodeSelector with active_only already set is returned as-is.""" + original = NodeSelector(path="^A$", active_only=False) + result = normalize_selector(original, "demand") + assert result is original + assert result.active_only is False # Preserved, not overwritten + + def test_existing_node_selector_active_only_none_gets_default(self) -> None: + """NodeSelector with active_only=None gets context default (new object).""" + original = NodeSelector(path="^A$") # active_only defaults to None + assert original.active_only is None + + result = normalize_selector(original, "demand") + assert result is not original # New object created + assert result.active_only is True # demand context default + assert original.active_only is None # Original unchanged + + def test_empty_dict_raises(self) -> None: + """Empty dict raises ValueError.""" + with pytest.raises(ValueError, match="at least one of"): + normalize_selector({}, "demand") + + def test_invalid_type_raises(self) -> None: + """Invalid type raises ValueError.""" + with pytest.raises(ValueError, match="must be string or dict"): + normalize_selector(123, "demand") # type: ignore + + def test_unknown_context_raises(self) -> None: + """Unknown context raises ValueError.""" + with pytest.raises(ValueError, match="Unknown context"): + normalize_selector("^A$", "unknown_context") + + # Context-aware active_only defaults + def test_demand_context_active_only_true(self) -> None: + """Demand context defaults active_only to True.""" + sel = normalize_selector("^A$", "demand") + assert sel.active_only is True + + def test_workflow_context_active_only_true(self) -> None: + """Workflow context defaults active_only to True.""" + sel = normalize_selector("^A$", "workflow") + assert sel.active_only is True + + def test_adjacency_context_active_only_false(self) -> None: + """Adjacency context defaults active_only to False.""" + sel = normalize_selector("^A$", "adjacency") + assert sel.active_only is False + + def test_override_context_active_only_false(self) -> None: + """Override context defaults active_only to False.""" + sel = normalize_selector("^A$", "override") + assert sel.active_only is False + + def 
test_explicit_active_only_overrides_default(self) -> None: + """Explicit active_only in dict overrides context default.""" + sel = normalize_selector({"path": "^A$", "active_only": False}, "demand") + assert sel.active_only is False + + +# ────────────────────────────────────────────────────────────────────────────── +# select_nodes Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestSelectNodesByPath: + """Tests for path-based node selection.""" + + def test_simple_regex_match(self, simple_network: Network) -> None: + """Simple regex matches nodes.""" + sel = NodeSelector(path="^[AB]$") + groups = select_nodes(simple_network, sel, default_active_only=False) + + # Pattern without capture groups uses pattern as label + assert "^[AB]$" in groups + node_names = [n.name for n in groups["^[AB]$"]] + assert sorted(node_names) == ["A", "B"] + + def test_capture_groups_create_labels(self, attributed_network: Network) -> None: + """Capture groups in regex create group labels.""" + sel = NodeSelector(path="^(dc[12])_.*") + groups = select_nodes(attributed_network, sel, default_active_only=False) + + # Captured groups become labels + assert "dc1" in groups + assert "dc2" in groups + assert len(groups["dc1"]) == 3 + assert len(groups["dc2"]) == 3 + + def test_no_match_returns_empty(self, simple_network: Network) -> None: + """No matching nodes returns empty dict.""" + sel = NodeSelector(path="^nonexistent$") + groups = select_nodes(simple_network, sel, default_active_only=False) + assert groups == {} + + +class TestSelectNodesByMatch: + """Tests for match-based filtering.""" + + def test_match_filters_nodes(self, attributed_network: Network) -> None: + """Match conditions filter nodes.""" + sel = NodeSelector( + path=".*", + match=MatchSpec( + conditions=[Condition(attr="role", operator="==", value="leaf")] + ), + ) + groups = select_nodes(attributed_network, sel, default_active_only=False) + + # Only leaf nodes should match + all_nodes = [n for nodes in groups.values() for n in nodes] + assert len(all_nodes) == 4 # 2 dc1 leafs + 2 dc2 leafs + assert all(n.attrs["role"] == "leaf" for n in all_nodes) + + def test_match_with_and_logic(self, attributed_network: Network) -> None: + """Match with AND logic requires all conditions.""" + sel = NodeSelector( + path=".*", + match=MatchSpec( + conditions=[ + Condition(attr="role", operator="==", value="leaf"), + Condition(attr="dc", operator="==", value="dc1"), + ], + logic="and", + ), + ) + groups = select_nodes(attributed_network, sel, default_active_only=False) + + all_nodes = [n for nodes in groups.values() for n in nodes] + assert len(all_nodes) == 2 # Only dc1 leafs + assert all(n.attrs["dc"] == "dc1" for n in all_nodes) + + def test_match_with_or_logic(self, attributed_network: Network) -> None: + """Match with OR logic requires any condition.""" + sel = NodeSelector( + path=".*", + match=MatchSpec( + conditions=[ + Condition(attr="role", operator="==", value="leaf"), + Condition(attr="role", operator="==", value="spine"), + ], + logic="or", + ), + ) + groups = select_nodes(attributed_network, sel, default_active_only=False) + + all_nodes = [n for nodes in groups.values() for n in nodes] + assert len(all_nodes) == 6 # All nodes match leaf or spine + + +class TestSelectNodesActiveOnly: + """Tests for active_only filtering.""" + + def test_active_only_excludes_disabled(self, attributed_network: Network) -> None: + """active_only=True excludes disabled nodes.""" + sel = NodeSelector(path="^dc2_.*", 
active_only=True) + groups = select_nodes(attributed_network, sel, default_active_only=True) + + all_nodes = [n for nodes in groups.values() for n in nodes] + # dc2_leaf_2 is disabled, should be excluded + assert len(all_nodes) == 2 + assert all(not n.disabled for n in all_nodes) + + def test_active_only_false_includes_disabled( + self, attributed_network: Network + ) -> None: + """active_only=False includes disabled nodes.""" + sel = NodeSelector(path="^dc2_.*", active_only=False) + groups = select_nodes(attributed_network, sel, default_active_only=False) + + all_nodes = [n for nodes in groups.values() for n in nodes] + assert len(all_nodes) == 3 # Includes disabled dc2_leaf_2 + + def test_excluded_nodes_always_excluded(self, attributed_network: Network) -> None: + """excluded_nodes parameter always excludes specified nodes.""" + sel = NodeSelector(path="^dc1_.*") + groups = select_nodes( + attributed_network, + sel, + default_active_only=False, + excluded_nodes={"dc1_leaf_1"}, + ) + + all_nodes = [n for nodes in groups.values() for n in nodes] + node_names = [n.name for n in all_nodes] + assert "dc1_leaf_1" not in node_names + assert len(all_nodes) == 2 + + +class TestSelectNodesByGroupBy: + """Tests for group_by attribute grouping.""" + + def test_group_by_attribute(self, attributed_network: Network) -> None: + """group_by creates groups by attribute value.""" + sel = NodeSelector(path=".*", group_by="role") + groups = select_nodes(attributed_network, sel, default_active_only=False) + + assert "leaf" in groups + assert "spine" in groups + assert len(groups["leaf"]) == 4 + assert len(groups["spine"]) == 2 + + def test_group_by_overrides_capture_groups( + self, attributed_network: Network + ) -> None: + """group_by overrides regex capture group labels.""" + sel = NodeSelector(path="^(dc[12])_.*", group_by="role") + groups = select_nodes(attributed_network, sel, default_active_only=False) + + # group_by should override capture group labels + assert "dc1" not in groups + assert "dc2" not in groups + assert "leaf" in groups + assert "spine" in groups + + def test_group_by_missing_attribute_excludes_nodes( + self, attributed_network: Network + ) -> None: + """Nodes missing group_by attribute are excluded.""" + # Add a node without the grouping attribute + attributed_network.add_node(Node("orphan", attrs={"other": "value"})) + + sel = NodeSelector(path=".*", group_by="role") + groups = select_nodes(attributed_network, sel, default_active_only=False) + + all_nodes = [n for nodes in groups.values() for n in nodes] + node_names = [n.name for n in all_nodes] + assert "orphan" not in node_names + + +class TestSelectNodesMatchOnly: + """Tests for match-only selectors (no path specified).""" + + def test_match_only_selects_all_then_filters( + self, attributed_network: Network + ) -> None: + """Match-only selector starts with all nodes, then filters.""" + sel = NodeSelector( + match=MatchSpec(conditions=[Condition(attr="tier", operator="==", value=2)]) + ) + groups = select_nodes(attributed_network, sel, default_active_only=False) + + # Only spine nodes (tier=2) should match + all_nodes = [n for nodes in groups.values() for n in nodes] + assert len(all_nodes) == 2 + assert all(n.attrs["tier"] == 2 for n in all_nodes) + + +# ────────────────────────────────────────────────────────────────────────────── +# Condition Operators Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestConditionOperators: + """Tests for all supported condition operators.""" + + 
def test_equality_operator(self) -> None: + """Test == operator.""" + attrs = {"x": 5, "y": "abc"} + assert evaluate_condition(attrs, Condition("x", "==", 5)) is True + assert evaluate_condition(attrs, Condition("x", "==", 6)) is False + assert evaluate_condition(attrs, Condition("y", "==", "abc")) is True + + def test_inequality_operator(self) -> None: + """Test != operator.""" + attrs = {"x": 5} + assert evaluate_condition(attrs, Condition("x", "!=", 6)) is True + assert evaluate_condition(attrs, Condition("x", "!=", 5)) is False + + def test_less_than_operator(self) -> None: + """Test < operator.""" + attrs = {"x": 5} + assert evaluate_condition(attrs, Condition("x", "<", 10)) is True + assert evaluate_condition(attrs, Condition("x", "<", 5)) is False + assert evaluate_condition(attrs, Condition("x", "<", 3)) is False + + def test_less_than_or_equal_operator(self) -> None: + """Test <= operator.""" + attrs = {"x": 5} + assert evaluate_condition(attrs, Condition("x", "<=", 5)) is True + assert evaluate_condition(attrs, Condition("x", "<=", 10)) is True + assert evaluate_condition(attrs, Condition("x", "<=", 3)) is False + + def test_greater_than_operator(self) -> None: + """Test > operator.""" + attrs = {"x": 5} + assert evaluate_condition(attrs, Condition("x", ">", 3)) is True + assert evaluate_condition(attrs, Condition("x", ">", 5)) is False + assert evaluate_condition(attrs, Condition("x", ">", 10)) is False + + def test_greater_than_or_equal_operator(self) -> None: + """Test >= operator.""" + attrs = {"x": 5} + assert evaluate_condition(attrs, Condition("x", ">=", 5)) is True + assert evaluate_condition(attrs, Condition("x", ">=", 3)) is True + assert evaluate_condition(attrs, Condition("x", ">=", 10)) is False + + def test_contains_operator_string(self) -> None: + """Test contains operator with strings.""" + attrs = {"s": "hello world"} + assert evaluate_condition(attrs, Condition("s", "contains", "world")) is True + assert evaluate_condition(attrs, Condition("s", "contains", "xyz")) is False + + def test_contains_operator_list(self) -> None: + """Test contains operator with lists.""" + attrs = {"l": [1, 2, 3]} + assert evaluate_condition(attrs, Condition("l", "contains", 2)) is True + assert evaluate_condition(attrs, Condition("l", "contains", 5)) is False + + def test_not_contains_operator(self) -> None: + """Test not_contains operator.""" + attrs = {"s": "hello", "l": [1, 2]} + assert evaluate_condition(attrs, Condition("s", "not_contains", "xyz")) is True + assert evaluate_condition(attrs, Condition("s", "not_contains", "ell")) is False + assert evaluate_condition(attrs, Condition("l", "not_contains", 5)) is True + assert evaluate_condition(attrs, Condition("l", "not_contains", 1)) is False + + def test_in_operator(self) -> None: + """Test in operator (value in list).""" + attrs = {"x": "b"} + assert evaluate_condition(attrs, Condition("x", "in", ["a", "b", "c"])) is True + assert evaluate_condition(attrs, Condition("x", "in", ["x", "y"])) is False + + def test_not_in_operator(self) -> None: + """Test not_in operator.""" + attrs = {"x": "d"} + assert ( + evaluate_condition(attrs, Condition("x", "not_in", ["a", "b", "c"])) is True + ) + assert evaluate_condition(attrs, Condition("x", "not_in", ["d", "e"])) is False + + def test_any_value_operator(self) -> None: + """Test any_value operator (attribute exists and is not None).""" + attrs = {"x": 0, "y": None, "z": ""} + assert evaluate_condition(attrs, Condition("x", "any_value")) is True + assert evaluate_condition(attrs, 
Condition("y", "any_value")) is False + assert evaluate_condition(attrs, Condition("z", "any_value")) is True + assert evaluate_condition(attrs, Condition("missing", "any_value")) is False + + def test_no_value_operator(self) -> None: + """Test no_value operator (attribute missing or None).""" + attrs = {"x": 0, "y": None} + assert evaluate_condition(attrs, Condition("x", "no_value")) is False + assert evaluate_condition(attrs, Condition("y", "no_value")) is True + assert evaluate_condition(attrs, Condition("missing", "no_value")) is True + + def test_missing_attribute_returns_false(self) -> None: + """Missing attribute returns False for most operators.""" + attrs = {} + assert evaluate_condition(attrs, Condition("x", "==", 5)) is False + assert evaluate_condition(attrs, Condition("x", ">", 0)) is False + assert evaluate_condition(attrs, Condition("x", "contains", "a")) is False + + def test_none_value_returns_false(self) -> None: + """None value returns False for comparison operators.""" + attrs = {"x": None} + assert evaluate_condition(attrs, Condition("x", "==", None)) is False + assert evaluate_condition(attrs, Condition("x", ">", 0)) is False + + def test_invalid_operator_raises(self) -> None: + """Invalid operator raises ValueError.""" + # Provide an attribute that exists so we don't return early + with pytest.raises(ValueError, match="Unknown operator"): + evaluate_condition({"x": 5}, Condition("x", "invalid_op", 5)) # type: ignore + + def test_in_operator_requires_list(self) -> None: + """in operator requires list value.""" + with pytest.raises(ValueError, match="requires list"): + evaluate_condition({"x": "a"}, Condition("x", "in", "abc")) + + +class TestEvaluateConditions: + """Tests for evaluate_conditions with multiple conditions.""" + + def test_and_logic_all_true(self) -> None: + """AND logic returns True when all conditions pass.""" + attrs = {"x": 5, "y": "abc"} + conds = [ + Condition("x", ">", 3), + Condition("y", "==", "abc"), + ] + assert evaluate_conditions(attrs, conds, "and") is True + + def test_and_logic_one_false(self) -> None: + """AND logic returns False when any condition fails.""" + attrs = {"x": 5, "y": "abc"} + conds = [ + Condition("x", ">", 10), # False + Condition("y", "==", "abc"), # True + ] + assert evaluate_conditions(attrs, conds, "and") is False + + def test_or_logic_one_true(self) -> None: + """OR logic returns True when any condition passes.""" + attrs = {"x": 5} + conds = [ + Condition("x", ">", 10), # False + Condition("x", "<", 10), # True + ] + assert evaluate_conditions(attrs, conds, "or") is True + + def test_or_logic_all_false(self) -> None: + """OR logic returns False when all conditions fail.""" + attrs = {"x": 5} + conds = [ + Condition("x", ">", 10), + Condition("x", "<", 3), + ] + assert evaluate_conditions(attrs, conds, "or") is False + + def test_empty_conditions_returns_true(self) -> None: + """Empty conditions list returns True.""" + assert evaluate_conditions({}, [], "and") is True + assert evaluate_conditions({}, [], "or") is True + + def test_invalid_logic_raises(self) -> None: + """Invalid logic raises ValueError.""" + # Provide non-empty conditions so we don't return early + conds = [Condition("x", "==", 1)] + with pytest.raises(ValueError, match="Unsupported logic"): + evaluate_conditions({}, conds, "xor") diff --git a/tests/exec/analysis/test_functions.py b/tests/exec/analysis/test_functions.py index 2b3edb7..d384510 100644 --- a/tests/exec/analysis/test_functions.py +++ b/tests/exec/analysis/test_functions.py @@ -37,8 +37,8 
@@ def test_max_flow_analysis_basic(self, simple_network: Network) -> None: network=simple_network, excluded_nodes=set(), excluded_links=set(), - source_path="datacenter.*", - sink_path="edge.*", + source="datacenter.*", + sink="edge.*", mode="combine", ) @@ -58,8 +58,8 @@ def test_max_flow_analysis_with_summary(self, simple_network: Network) -> None: network=simple_network, excluded_nodes=set(), excluded_links=set(), - source_path="datacenter.*", - sink_path="edge.*", + source="datacenter.*", + sink="edge.*", include_flow_details=True, include_min_cut=True, ) @@ -84,8 +84,8 @@ def test_max_flow_analysis_with_optional_params( network=simple_network, excluded_nodes=set(), excluded_links=set(), - source_path="datacenter.*", - sink_path="edge.*", + source="datacenter.*", + sink="edge.*", mode="pairwise", shortest_path=True, flow_placement=FlowPlacement.EQUAL_BALANCED, @@ -108,8 +108,8 @@ def test_max_flow_analysis_empty_result(self, simple_network: Network) -> None: network=simple_network, excluded_nodes=set(), excluded_links=set(), - source_path="nonexistent.*", - sink_path="also_nonexistent.*", + source="nonexistent.*", + sink="also_nonexistent.*", ) @@ -137,8 +137,8 @@ def test_demand_placement_analysis_basic(self, diamond_network: Network) -> None # Use a smaller demand that should definitely fit demands_config = [ { - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 50.0, "mode": "pairwise", "priority": 0, @@ -177,8 +177,8 @@ def test_demand_placement_analysis_zero_total_demand( """Handles zero total demand without division by zero.""" demands_config = [ { - "source_path": "A", - "sink_path": "B", + "source": "A", + "sink": "B", "demand": 0.0, } ] @@ -222,8 +222,8 @@ def test_context_caching_pairwise_mode(self, diamond_network: Network) -> None: demands_config = [ { "id": "stable-pairwise-id", - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 50.0, "mode": "pairwise", }, @@ -251,8 +251,8 @@ def test_context_caching_combine_mode(self, diamond_network: Network) -> None: demands_config = [ { "id": "stable-combine-id", - "source_path": "[AB]", - "sink_path": "[CD]", + "source": "[AB]", + "sink": "[CD]", "demand": 50.0, "mode": "combine", }, @@ -282,8 +282,8 @@ def test_context_caching_combine_multiple_iterations( demands_config = [ { "id": "reusable-id", - "source_path": "[AB]", - "sink_path": "[CD]", + "source": "[AB]", + "sink": "[CD]", "demand": 50.0, "mode": "combine", }, @@ -309,8 +309,8 @@ def test_context_caching_without_id_raises(self, diamond_network: Network) -> No # Config without explicit ID - each reconstruction generates new ID demands_config = [ { - "source_path": "[AB]", - "sink_path": "[CD]", + "source": "[AB]", + "sink": "[CD]", "demand": 50.0, "mode": "combine", }, @@ -350,8 +350,8 @@ def test_sensitivity_analysis_basic(self, simple_network: Network) -> None: network=simple_network, excluded_nodes=set(), excluded_links=set(), - source_path="A", - sink_path="C", + source="A", + sink="C", mode="combine", ) @@ -380,6 +380,6 @@ def test_sensitivity_analysis_empty_result(self, simple_network: Network) -> Non network=simple_network, excluded_nodes=set(), excluded_links=set(), - source_path="nonexistent.*", - sink_path="also_nonexistent.*", + source="nonexistent.*", + sink="also_nonexistent.*", ) diff --git a/tests/exec/analysis/test_functions_details.py b/tests/exec/analysis/test_functions_details.py index 35d3036..68f3134 100644 --- a/tests/exec/analysis/test_functions_details.py +++ 
b/tests/exec/analysis/test_functions_details.py @@ -22,8 +22,8 @@ def test_demand_placement_analysis_includes_flow_details_costs_and_edges() -> No demands_config = [ { - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 150.0, # Exceeds single path capacity, will use both paths "mode": "pairwise", "priority": 0, diff --git a/tests/exec/analysis/test_spf_caching.py b/tests/exec/analysis/test_spf_caching.py index 4bc1da9..c6d356d 100644 --- a/tests/exec/analysis/test_spf_caching.py +++ b/tests/exec/analysis/test_spf_caching.py @@ -145,8 +145,8 @@ def test_single_demand_ecmp(self, diamond_network: Network) -> None: """Test that single demand with ECMP works correctly with caching.""" demands_config = [ { - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 50.0, "mode": "pairwise", "priority": 0, @@ -175,15 +175,15 @@ def test_multiple_demands_same_source_reuses_cache( # Multiple demands from S1 to different destinations demands_config = [ { - "source_path": "S1", - "sink_path": "D1", + "source": "S1", + "sink": "D1", "demand": 30.0, "mode": "pairwise", "priority": 0, }, { - "source_path": "S1", - "sink_path": "D2", + "source": "S1", + "sink": "D2", "demand": 30.0, "mode": "pairwise", "priority": 0, @@ -206,20 +206,20 @@ def test_demands_from_multiple_sources(self, multi_source_network: Network) -> N """Test that demands from multiple sources each get their own cache entry.""" demands_config = [ { - "source_path": "S1", - "sink_path": "D1", + "source": "S1", + "sink": "D1", "demand": 50.0, "mode": "pairwise", }, { - "source_path": "S2", - "sink_path": "D1", + "source": "S2", + "sink": "D1", "demand": 50.0, "mode": "pairwise", }, { - "source_path": "S3", - "sink_path": "D2", + "source": "S3", + "sink": "D2", "demand": 50.0, "mode": "pairwise", }, @@ -289,8 +289,8 @@ def _run_demand_placement_without_cache( traffic_demands = [] for config in demands_config: demand = TrafficDemand( - source_path=config["source_path"], - sink_path=config["sink_path"], + source=config["source"], + sink=config["sink"], demand=config["demand"], mode=config.get("mode", "pairwise"), flow_policy_config=config.get("flow_policy_config"), @@ -410,8 +410,8 @@ def test_equivalence_ecmp_single_demand(self, mesh_network: Network) -> None: """Test that ECMP placement is equivalent with and without caching.""" demands_config = [ { - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 80.0, "mode": "pairwise", }, @@ -446,10 +446,10 @@ def test_equivalence_ecmp_single_demand(self, mesh_network: Network) -> None: def test_equivalence_ecmp_multiple_demands(self, mesh_network: Network) -> None: """Test ECMP placement equivalence with multiple demands.""" demands_config = [ - {"source_path": "A", "sink_path": "B", "demand": 30.0, "mode": "pairwise"}, - {"source_path": "A", "sink_path": "D", "demand": 40.0, "mode": "pairwise"}, - {"source_path": "C", "sink_path": "B", "demand": 25.0, "mode": "pairwise"}, - {"source_path": "C", "sink_path": "D", "demand": 35.0, "mode": "pairwise"}, + {"source": "A", "sink": "B", "demand": 30.0, "mode": "pairwise"}, + {"source": "A", "sink": "D", "demand": 40.0, "mode": "pairwise"}, + {"source": "C", "sink": "B", "demand": 25.0, "mode": "pairwise"}, + {"source": "C", "sink": "D", "demand": 35.0, "mode": "pairwise"}, ] cached_result = demand_placement_analysis( @@ -484,7 +484,7 @@ def test_equivalence_ecmp_multiple_demands(self, mesh_network: Network) -> None: def test_equivalence_with_flow_details(self, 
mesh_network: Network) -> None: """Test equivalence when include_flow_details is True.""" demands_config = [ - {"source_path": "A", "sink_path": "D", "demand": 50.0, "mode": "pairwise"}, + {"source": "A", "sink": "D", "demand": 50.0, "mode": "pairwise"}, ] cached_result = demand_placement_analysis( @@ -516,7 +516,7 @@ def test_equivalence_with_flow_details(self, mesh_network: Network) -> None: def test_equivalence_with_used_edges(self, mesh_network: Network) -> None: """Test equivalence when include_used_edges is True.""" demands_config = [ - {"source_path": "A", "sink_path": "D", "demand": 50.0, "mode": "pairwise"}, + {"source": "A", "sink": "D", "demand": 50.0, "mode": "pairwise"}, ] cached_result = demand_placement_analysis( @@ -573,8 +573,8 @@ def test_te_wcmp_basic_placement(self, constrained_network: Network) -> None: """Test TE_WCMP_UNLIM basic placement without fallback.""" demands_config = [ { - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 40.0, "mode": "pairwise", "flow_policy_config": FlowPolicyPreset.TE_WCMP_UNLIM, @@ -598,8 +598,8 @@ def test_te_wcmp_fallback_on_saturation(self, constrained_network: Network) -> N """Test TE_WCMP_UNLIM fallback when primary path saturates.""" demands_config = [ { - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 80.0, # Exceeds primary path capacity "mode": "pairwise", "flow_policy_config": FlowPolicyPreset.TE_WCMP_UNLIM, @@ -632,15 +632,15 @@ def test_te_wcmp_multiple_demands_same_source( """Test TE_WCMP_UNLIM with multiple demands sharing source.""" demands_config = [ { - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 30.0, "mode": "pairwise", "flow_policy_config": FlowPolicyPreset.TE_WCMP_UNLIM, }, { - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 30.0, "mode": "pairwise", "priority": 1, # Different priority = different demand @@ -687,8 +687,8 @@ def test_unreachable_destination(self, disconnected_network: Network) -> None: """Test placement to unreachable destination returns zero.""" demands_config = [ { - "source_path": "A", - "sink_path": "D", # Unreachable from A + "source": "A", + "sink": "D", # Unreachable from A "demand": 50.0, "mode": "pairwise", }, @@ -715,8 +715,8 @@ def test_zero_demand(self) -> None: demands_config = [ { - "source_path": "A", - "sink_path": "B", + "source": "A", + "sink": "B", "demand": 0.0, "mode": "pairwise", }, @@ -743,8 +743,8 @@ def test_partial_placement_due_to_capacity(self) -> None: demands_config = [ { - "source_path": "A", - "sink_path": "B", + "source": "A", + "sink": "B", "demand": 50.0, "mode": "pairwise", }, @@ -772,8 +772,8 @@ def test_empty_cost_distribution_when_not_requested(self) -> None: demands_config = [ { - "source_path": "A", - "sink_path": "B", + "source": "A", + "sink": "B", "demand": 50.0, "mode": "pairwise", }, @@ -799,8 +799,8 @@ def test_empty_edges_when_not_requested(self) -> None: demands_config = [ { - "source_path": "A", - "sink_path": "B", + "source": "A", + "sink": "B", "demand": 50.0, "mode": "pairwise", }, @@ -838,8 +838,8 @@ def test_placement_with_excluded_link(self, triangle_network: Network) -> None: """Test that excluded links are respected in cached placement.""" demands_config = [ { - "source_path": "A", - "sink_path": "C", + "source": "A", + "sink": "C", "demand": 50.0, "mode": "pairwise", }, @@ -866,8 +866,8 @@ def test_placement_with_excluded_node(self, triangle_network: Network) -> None: """Test that excluded nodes are 
respected in cached placement.""" demands_config = [ { - "source_path": "A", - "sink_path": "C", + "source": "A", + "sink": "C", "demand": 50.0, "mode": "pairwise", }, @@ -918,8 +918,8 @@ def test_cost_distribution_single_tier(self, multi_tier_network: Network) -> Non """Test cost distribution when only one tier is used.""" demands_config = [ { - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 25.0, # Fits in tier 1 "mode": "pairwise", }, @@ -944,8 +944,8 @@ def test_cost_distribution_multiple_tiers_te_policy( """Test cost distribution with TE policy using multiple tiers.""" demands_config = [ { - "source_path": "A", - "sink_path": "D", + "source": "A", + "sink": "D", "demand": 50.0, # Exceeds tier 1 capacity "mode": "pairwise", "flow_policy_config": FlowPolicyPreset.TE_WCMP_UNLIM, diff --git a/tests/exec/demand/test_builder.py b/tests/exec/demand/test_builder.py index 099266a..e656483 100644 --- a/tests/exec/demand/test_builder.py +++ b/tests/exec/demand/test_builder.py @@ -14,8 +14,8 @@ def test_build_traffic_matrix_set_basic(): raw = { "tm1": [ { - "source_path": "A", - "sink_path": "B", + "source": "A", + "sink": "B", "demand": 100.0, } ] @@ -25,16 +25,16 @@ def test_build_traffic_matrix_set_basic(): assert "tm1" in tms.matrices demands = tms.get_matrix("tm1") assert len(demands) == 1 - assert demands[0].source_path == "A" - assert demands[0].sink_path == "B" + assert demands[0].source == "A" + assert demands[0].sink == "B" assert demands[0].demand == 100.0 def test_build_traffic_matrix_set_multiple_matrices(): """Test building multiple traffic matrices.""" raw = { - "tm1": [{"source_path": "A", "sink_path": "B", "demand": 100.0}], - "tm2": [{"source_path": "C", "sink_path": "D", "demand": 200.0}], + "tm1": [{"source": "A", "sink": "B", "demand": 100.0}], + "tm2": [{"source": "C", "sink": "D", "demand": 200.0}], } tms = build_traffic_matrix_set(raw) @@ -48,8 +48,8 @@ def test_build_traffic_matrix_set_multiple_demands(): """Test building traffic matrix with multiple demands.""" raw = { "tm1": [ - {"source_path": "A", "sink_path": "B", "demand": 100.0}, - {"source_path": "C", "sink_path": "D", "demand": 200.0}, + {"source": "A", "sink": "B", "demand": 100.0}, + {"source": "C", "sink": "D", "demand": 200.0}, ] } @@ -65,8 +65,8 @@ def test_build_traffic_matrix_set_with_flow_policy_enum(): raw = { "tm1": [ { - "source_path": "A", - "sink_path": "B", + "source": "A", + "sink": "B", "demand": 100.0, "flow_policy_config": FlowPolicyPreset.SHORTEST_PATHS_ECMP, } @@ -83,8 +83,8 @@ def test_build_traffic_matrix_set_with_flow_policy_string(): raw = { "tm1": [ { - "source_path": "A", - "sink_path": "B", + "source": "A", + "sink": "B", "demand": 100.0, "flow_policy_config": "SHORTEST_PATHS_ECMP", } @@ -101,8 +101,8 @@ def test_build_traffic_matrix_set_with_flow_policy_int(): raw = { "tm1": [ { - "source_path": "A", - "sink_path": "B", + "source": "A", + "sink": "B", "demand": 100.0, "flow_policy_config": 1, } diff --git a/tests/exec/demand/test_expand.py b/tests/exec/demand/test_expand.py index 5c24e8c..a14ef76 100644 --- a/tests/exec/demand/test_expand.py +++ b/tests/exec/demand/test_expand.py @@ -26,23 +26,23 @@ def test_explicit_id_preserved(self) -> None: """TrafficDemand with explicit ID preserves it.""" td = TrafficDemand( id="my-stable-id", - source_path="A", - sink_path="B", + source="A", + sink="B", demand=100.0, ) assert td.id == "my-stable-id" def test_auto_generated_id_when_none(self) -> None: """TrafficDemand without explicit ID auto-generates one.""" - 
td = TrafficDemand(source_path="A", sink_path="B", demand=100.0) + td = TrafficDemand(source="A", sink="B", demand=100.0) assert td.id is not None assert "|" in td.id # Format: source|sink|uuid def test_id_round_trip_through_dict(self) -> None: """TrafficDemand ID survives dict serialization round-trip.""" original = TrafficDemand( - source_path="A", - sink_path="B", + source="A", + sink="B", demand=100.0, mode="combine", priority=1, @@ -52,8 +52,8 @@ def test_id_round_trip_through_dict(self) -> None: # Serialize to dict (as done in workflow steps) config = { "id": original.id, - "source_path": original.source_path, - "sink_path": original.sink_path, + "source": original.source, + "sink": original.sink, "demand": original.demand, "mode": original.mode, "priority": original.priority, @@ -62,8 +62,8 @@ def test_id_round_trip_through_dict(self) -> None: # Reconstruct (as done in flow.py) reconstructed = TrafficDemand( id=config.get("id"), - source_path=config["source_path"], - sink_path=config["sink_path"], + source=config["source"], + sink=config["sink"], demand=config["demand"], mode=config.get("mode", "pairwise"), priority=config.get("priority", 0), @@ -74,19 +74,19 @@ def test_id_round_trip_through_dict(self) -> None: def test_id_mismatch_without_explicit_id(self) -> None: """Two TrafficDemands from same config get different IDs if id not passed.""" config = { - "source_path": "A", - "sink_path": "B", + "source": "A", + "sink": "B", "demand": 100.0, } td1 = TrafficDemand( - source_path=config["source_path"], - sink_path=config["sink_path"], + source=config["source"], + sink=config["sink"], demand=config["demand"], ) td2 = TrafficDemand( - source_path=config["source_path"], - sink_path=config["sink_path"], + source=config["source"], + sink=config["sink"], demand=config["demand"], ) @@ -99,9 +99,7 @@ class TestExpandDemandsPairwise: def test_pairwise_single_pair(self, simple_network: Network) -> None: """Pairwise mode with single source-sink creates one demand.""" - td = TrafficDemand( - source_path="A", sink_path="D", demand=100.0, mode="pairwise" - ) + td = TrafficDemand(source="A", sink="D", demand=100.0, mode="pairwise") expansion = expand_demands(simple_network, [td]) assert len(expansion.demands) == 1 @@ -115,8 +113,8 @@ def test_pairwise_single_pair(self, simple_network: Network) -> None: def test_pairwise_multiple_sources(self, simple_network: Network) -> None: """Pairwise mode with regex creates demand per (src, dst) pair.""" td = TrafficDemand( - source_path="[AB]", # A and B - sink_path="[CD]", # C and D + source="[AB]", # A and B + sink="[CD]", # C and D demand=100.0, mode="pairwise", ) @@ -133,8 +131,8 @@ def test_pairwise_multiple_sources(self, simple_network: Network) -> None: def test_pairwise_no_self_loops(self, simple_network: Network) -> None: """Pairwise mode excludes self-loops.""" td = TrafficDemand( - source_path="[AB]", - sink_path="[AB]", # Same as sources + source="[AB]", + sink="[AB]", # Same as sources demand=100.0, mode="pairwise", ) @@ -152,8 +150,8 @@ class TestExpandDemandsCombine: def test_combine_creates_pseudo_nodes(self, simple_network: Network) -> None: """Combine mode creates pseudo source and sink nodes.""" td = TrafficDemand( - source_path="[AB]", - sink_path="[CD]", + source="[AB]", + sink="[CD]", demand=100.0, mode="combine", ) @@ -174,8 +172,8 @@ def test_combine_pseudo_node_names_use_id(self, simple_network: Network) -> None """Combine mode pseudo node names include TrafficDemand.id.""" td = TrafficDemand( id="stable-id-123", - source_path="A", - 
sink_path="D", + source="A", + sink="D", demand=100.0, mode="combine", ) @@ -189,8 +187,8 @@ def test_combine_augmentations_structure(self, simple_network: Network) -> None: """Combine mode augmentations connect pseudo nodes to real nodes.""" td = TrafficDemand( id="test-id", - source_path="[AB]", - sink_path="[CD]", + source="[AB]", + sink="[CD]", demand=100.0, mode="combine", ) @@ -215,15 +213,15 @@ def test_same_id_produces_same_pseudo_nodes(self, simple_network: Network) -> No """Same TrafficDemand ID produces identical pseudo node names.""" td1 = TrafficDemand( id="shared-id", - source_path="A", - sink_path="D", + source="A", + sink="D", demand=100.0, mode="combine", ) td2 = TrafficDemand( id="shared-id", - source_path="A", - sink_path="D", + source="A", + sink="D", demand=200.0, # Different demand mode="combine", ) @@ -241,15 +239,15 @@ def test_different_ids_produce_different_pseudo_nodes( """Different TrafficDemand IDs produce different pseudo node names.""" td1 = TrafficDemand( id="id-alpha", - source_path="A", - sink_path="D", + source="A", + sink="D", demand=100.0, mode="combine", ) td2 = TrafficDemand( id="id-beta", - source_path="A", - sink_path="D", + source="A", + sink="D", demand=100.0, mode="combine", ) @@ -273,8 +271,8 @@ def test_empty_demands_raises(self, simple_network: Network) -> None: def test_no_matching_nodes_raises(self, simple_network: Network) -> None: """Demand with no matching nodes raises ValueError.""" td = TrafficDemand( - source_path="nonexistent", - sink_path="also_nonexistent", + source="nonexistent", + sink="also_nonexistent", demand=100.0, ) with pytest.raises(ValueError, match="No demands could be expanded"): @@ -283,14 +281,14 @@ def test_no_matching_nodes_raises(self, simple_network: Network) -> None: def test_multiple_demands_mixed_modes(self, simple_network: Network) -> None: """Multiple demands with different modes expand correctly.""" td_pairwise = TrafficDemand( - source_path="A", - sink_path="B", + source="A", + sink="B", demand=50.0, mode="pairwise", ) td_combine = TrafficDemand( - source_path="[CD]", - sink_path="[AB]", + source="[CD]", + sink="[AB]", demand=100.0, mode="combine", ) @@ -302,3 +300,251 @@ def test_multiple_demands_mixed_modes(self, simple_network: Network) -> None: # Only combine mode creates augmentations assert len(expansion.augmentations) == 4 # 2 sources + 2 sinks + + +class TestDictSelectors: + """Test dict-based selectors in demands (group_by, match).""" + + @pytest.fixture + def network_with_attrs(self) -> Network: + """Create a network with node attributes for selector testing.""" + network = Network() + # Two datacenters with leaf/spine roles + for dc in ["dc1", "dc2"]: + for role in ["leaf", "spine"]: + for i in [1, 2]: + name = f"{dc}_{role}_{i}" + network.add_node(Node(name, attrs={"dc": dc, "role": role})) + + # Connect within each DC: leaf -> spine + for dc in ["dc1", "dc2"]: + for i in [1, 2]: + for j in [1, 2]: + network.add_link( + Link(f"{dc}_leaf_{i}", f"{dc}_spine_{j}", capacity=100.0) + ) + # Connect spines between DCs + for i in [1, 2]: + network.add_link(Link(f"dc1_spine_{i}", f"dc2_spine_{i}", capacity=50.0)) + + return network + + def test_group_by_selector(self, network_with_attrs: Network) -> None: + """Dict selector with group_by groups nodes by attribute.""" + td = TrafficDemand( + source={"group_by": "dc"}, # Group by datacenter + sink={"group_by": "dc"}, + demand=100.0, + mode="pairwise", + ) + expansion = expand_demands(network_with_attrs, [td]) + + # With group_by=dc and pairwise mode, we 
get demands between the dc1 and dc2 groups. + # Pairwise expansion still operates on individual nodes rather than + # whole groups, so the exact pair count depends on the grouping + # semantics; here we only assert that demands exist and that the + # total volume is conserved. + assert len(expansion.demands) > 0 + # Volume is distributed across pairs + total_volume = sum(d.volume for d in expansion.demands) + assert total_volume == pytest.approx(100.0, rel=1e-6) + + def test_match_selector_filters_nodes(self, network_with_attrs: Network) -> None: + """Dict selector with match filters nodes by attribute conditions.""" + td = TrafficDemand( + source={ + "path": ".*", + "match": { + "conditions": [{"attr": "role", "operator": "==", "value": "leaf"}] + }, + }, + sink={ + "path": ".*", + "match": { + "conditions": [{"attr": "role", "operator": "==", "value": "spine"}] + }, + }, + demand=100.0, + mode="pairwise", + ) + expansion = expand_demands(network_with_attrs, [td]) + + # 4 leaf nodes -> 4 spine nodes = 16 pairs + assert len(expansion.demands) == 16 + + def test_combined_path_and_match(self, network_with_attrs: Network) -> None: + """Dict selector combining path regex and match conditions.""" + td = TrafficDemand( + source={ + "path": "^dc1_.*", # Only dc1 + "match": { + "conditions": [{"attr": "role", "operator": "==", "value": "leaf"}] + }, + }, + sink={ + "path": "^dc2_.*", # Only dc2 + "match": { + "conditions": [{"attr": "role", "operator": "==", "value": "spine"}] + }, + }, + demand=100.0, + mode="pairwise", + ) + expansion = expand_demands(network_with_attrs, [td]) + + # 2 dc1 leafs -> 2 dc2 spines = 4 pairs + assert len(expansion.demands) == 4 + + +class TestVariableExpansion: + """Test expand_vars in demands.""" + + @pytest.fixture + def multi_dc_network(self) -> Network: + """Create a network with multiple datacenters.""" + network = Network() + for dc in ["dc1", "dc2", "dc3"]: + for i in [1, 2]: + network.add_node(Node(f"{dc}_server_{i}")) + # Full mesh between datacenters + for src_dc in ["dc1", "dc2", "dc3"]: + for dst_dc in ["dc1", "dc2", "dc3"]: + if src_dc != dst_dc: + for i in [1, 2]: + for j in [1, 2]: + network.add_link( + Link( + f"{src_dc}_server_{i}", + f"{dst_dc}_server_{j}", + capacity=100.0, + ) + ) + return network + + def test_expand_vars_cartesian(self, multi_dc_network: Network) -> None: + """Variable expansion with cartesian mode creates all combinations.""" + td = TrafficDemand( + source="^${src_dc}_server_.*", + sink="^${dst_dc}_server_.*", + demand=100.0, + mode="combine", + expand_vars={ + "src_dc": ["dc1", "dc2"], + "dst_dc": ["dc2", "dc3"], + }, + expansion_mode="cartesian", + ) + expansion = expand_demands(multi_dc_network, [td]) + + # Cartesian: 2 src_dc x 2 dst_dc = 4 combinations: + # dc1->dc2, dc1->dc3, dc2->dc2, dc2->dc3. + # dc2->dc2 is kept: it is not a self-pair at the demand level. + assert len(expansion.demands) == 4 + + def test_expand_vars_zip(self, multi_dc_network: Network) -> None: + """Variable expansion with zip mode pairs variables by index.""" + td = TrafficDemand( + source="^${src_dc}_server_.*", + sink="^${dst_dc}_server_.*", + demand=100.0, + mode="combine", + expand_vars={ + "src_dc": ["dc1", "dc2"], + "dst_dc": ["dc2", "dc3"], + }, + expansion_mode="zip", + ) + expansion = expand_demands(multi_dc_network, [td]) + + # Zip: (dc1, dc2) and (dc2, dc3) = 2 combinations + assert len(expansion.demands) == 2 + + def test_expand_vars_with_dict_selector(self, multi_dc_network: Network) -> None: + """Variable expansion works with dict selectors.""" + # Add dc attribute to nodes + for
node in multi_dc_network.nodes.values(): + dc = node.name.split("_")[0] + node.attrs["dc"] = dc + + td = TrafficDemand( + source={"path": "^${dc}_server_.*"}, + sink={"path": "^${dc}_server_.*"}, + demand=100.0, + mode="pairwise", + expand_vars={"dc": ["dc1", "dc2"]}, + ) + expansion = expand_demands(multi_dc_network, [td]) + + # For each dc: 2 servers, 2 pairs (1->2 and 2->1) + # 2 dcs x 2 pairs = 4 total + assert len(expansion.demands) == 4 + + +class TestTrafficDemandFieldPreservation: + """Test that TrafficDemand fields are preserved in workflow contexts. + + Verifies that group_mode, expand_vars, and expansion_mode fields + are correctly preserved when TrafficDemand objects are copied/serialized. + """ + + def test_all_fields_preserved_in_dict_round_trip(self) -> None: + """All new fields survive dict serialization.""" + original = TrafficDemand( + id="test-id", + source="^dc1/.*", + sink="^dc2/.*", + demand=100.0, + mode="combine", + group_mode="per_group", + expand_vars={"dc": ["dc1", "dc2"]}, + expansion_mode="zip", + priority=5, + ) + + # Serialize as done in workflow steps + serialized = { + "id": original.id, + "source": original.source, + "sink": original.sink, + "demand": original.demand, + "mode": original.mode, + "group_mode": original.group_mode, + "expand_vars": original.expand_vars, + "expansion_mode": original.expansion_mode, + "priority": original.priority, + } + + # Reconstruct + reconstructed = TrafficDemand( + id=serialized.get("id") or "", + source=serialized["source"], + sink=serialized["sink"], + demand=float(serialized["demand"]), + mode=str(serialized.get("mode", "pairwise")), + group_mode=str(serialized.get("group_mode", "flatten")), + expand_vars=serialized.get("expand_vars") or {}, + expansion_mode=str(serialized.get("expansion_mode", "cartesian")), + priority=int(serialized.get("priority", 0)), + ) + + assert reconstructed.id == original.id + assert reconstructed.source == original.source + assert reconstructed.sink == original.sink + assert reconstructed.demand == original.demand + assert reconstructed.mode == original.mode + assert reconstructed.group_mode == original.group_mode + assert reconstructed.expand_vars == original.expand_vars + assert reconstructed.expansion_mode == original.expansion_mode + assert reconstructed.priority == original.priority + + def test_default_values_for_new_fields(self) -> None: + """New fields have sensible defaults when not specified.""" + td = TrafficDemand( + source="^A$", + sink="^B$", + demand=100.0, + ) + + assert td.group_mode == "flatten" + assert td.expand_vars == {} + assert td.expansion_mode == "cartesian" diff --git a/tests/exec/failure/test_manager.py b/tests/exec/failure/test_manager.py index b218e61..ea617e8 100644 --- a/tests/exec/failure/test_manager.py +++ b/tests/exec/failure/test_manager.py @@ -284,8 +284,8 @@ def test_run_max_flow_monte_carlo_delegates( } result = failure_manager.run_max_flow_monte_carlo( - source_path="datacenter.*", - sink_path="edge.*", + source="datacenter.*", + sink="edge.*", mode="combine", iterations=2, parallelism=1, @@ -323,8 +323,8 @@ def test_flow_placement_string_conversion_max_flow( mock_mc.return_value = {"results": [], "metadata": {}} failure_manager.run_max_flow_monte_carlo( - source_path="src.*", - sink_path="dst.*", + source="src.*", + sink="dst.*", flow_placement="EQUAL_BALANCED", iterations=1, ) @@ -338,8 +338,8 @@ def test_invalid_flow_placement_string_raises_error( """Test that invalid flow_placement string raises clear error.""" with pytest.raises(ValueError) as 
exc_info: failure_manager.run_max_flow_monte_carlo( - source_path="src.*", - sink_path="dst.*", + source="src.*", + sink="dst.*", flow_placement="INVALID_OPTION", iterations=1, ) @@ -358,8 +358,8 @@ def test_case_insensitive_flow_placement_conversion( mock_mc.return_value = {"results": [], "metadata": {}} failure_manager.run_max_flow_monte_carlo( - source_path="src.*", - sink_path="dst.*", + source="src.*", + sink="dst.*", flow_placement="proportional", # lowercase iterations=1, ) diff --git a/tests/exec/failure/test_manager_integration.py b/tests/exec/failure/test_manager_integration.py index a311cc6..c2009ba 100644 --- a/tests/exec/failure/test_manager_integration.py +++ b/tests/exec/failure/test_manager_integration.py @@ -119,8 +119,8 @@ def test_run_monte_carlo_analysis(self, simple_network, failure_policy_set): parallelism=1, # Serial execution for deterministic testing seed=42, # Pass analysis parameters directly as kwargs - source_path="A", - sink_path="C", + source="A", + sink="C", mode="combine", ) @@ -155,8 +155,8 @@ def test_analysis_with_parallel_execution(self, simple_network, failure_policy_s iterations=4, parallelism=2, # Multiple workers seed=42, - source_path="A", - sink_path="C", + source="A", + sink="C", mode="combine", ) @@ -173,8 +173,8 @@ def test_baseline_iteration_handling(self, simple_network, failure_policy_set): parallelism=1, baseline=True, # Include baseline seed=42, - source_path="A", - sink_path="C", + source="A", + sink="C", mode="combine", ) @@ -196,8 +196,8 @@ def test_failure_pattern_storage(self, simple_network, failure_policy_set): parallelism=1, store_failure_patterns=True, seed=42, - source_path="A", - sink_path="C", + source="A", + sink="C", mode="combine", ) @@ -253,8 +253,8 @@ def test_capacity_envelope_analysis_integration(self): iterations=10, parallelism=1, seed=123, - source_path="spine.*", - sink_path="leaf.*", + source="spine.*", + sink="leaf.*", mode="pairwise", ) diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 9dac528..d3139b9 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -244,7 +244,7 @@ def validate_traffic_demands(self, expected_count: int) -> None: assert actual_count == expected_count, ( f"Traffic demand count mismatch: expected {expected_count}, found {actual_count}. " - f"Demands: {[(d.source_path, d.sink_path, d.demand) for d in default_demands[:5]]}" + f"Demands: {[(d.source, d.sink, d.demand) for d in default_demands[:5]]}" f"{'...' 
if actual_count > 5 else ''}" ) @@ -628,7 +628,7 @@ def with_traffic_demand( self.data["traffic_matrix_set"][matrix_name] = [] self.data["traffic_matrix_set"][matrix_name].append( - {"source_path": source, "sink_path": sink, "demand": demand} + {"source": source, "sink": sink, "demand": demand} ) return self diff --git a/tests/integration/scenario_1.yaml b/tests/integration/scenario_1.yaml index d621058..75f6261 100644 --- a/tests/integration/scenario_1.yaml +++ b/tests/integration/scenario_1.yaml @@ -124,17 +124,17 @@ failure_policy_set: traffic_matrix_set: default: - - source_path: SEA - sink_path: JFK + - source: SEA + sink: JFK demand: 50 - - source_path: SFO - sink_path: DCA + - source: SFO + sink: DCA demand: 50 - - source_path: SEA - sink_path: DCA + - source: SEA + sink: DCA demand: 50 - - source_path: SFO - sink_path: JFK + - source: SFO + sink: JFK demand: 50 workflow: diff --git a/tests/integration/scenario_2.yaml b/tests/integration/scenario_2.yaml index 512ab6a..ac3b22b 100644 --- a/tests/integration/scenario_2.yaml +++ b/tests/integration/scenario_2.yaml @@ -4,10 +4,12 @@ seed: 2002 # Hierarchical DSL describing sub-topologies and multi-node expansions. # -# Paths and Scopes: -# - Within a blueprint, a leading '/' (e.g., '/leaf') means "blueprint local root". -# - In the main network definition, a leading '/' (e.g., '/SFO') means "global root". -# - Omitting '/' means a relative path within the current scope (blueprint or network). +# Path Conventions: +# - All paths are relative to the current scope (blueprint instantiation path or network root). +# - Leading '/' is stripped and has no functional effect - '/leaf' and 'leaf' are equivalent. +# - In blueprints, paths resolve relative to the instantiation path (e.g., '/leaf' in a +# blueprint instantiated as 'pod1' becomes 'pod1/leaf'). +# - At top-level network, parent path is empty so '/SFO' and 'SFO' both become 'SFO'. # # Adjacency Patterns: # - "mesh" cross-connects all nodes from source to all nodes from target, skipping self-loops. 
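The path convention described in the scenario header above reduces to a single normalization rule. A minimal sketch of that rule, assuming a hypothetical `resolve_path` helper (NetGraph's actual internals may differ):

```python
# Hypothetical helper illustrating the documented rule: a leading '/'
# is stripped, then the path is joined onto the current scope prefix.
def resolve_path(scope: str, path: str) -> str:
    relative = path.lstrip("/")  # '/leaf' and 'leaf' are equivalent
    return f"{scope}/{relative}" if scope else relative

# Inside a blueprint instantiated as 'pod1':
assert resolve_path("pod1", "/leaf") == "pod1/leaf"
assert resolve_path("pod1", "leaf") == "pod1/leaf"
# At the top-level network the parent path is empty:
assert resolve_path("", "/SFO") == "SFO"
```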
@@ -196,17 +198,17 @@ failure_policy_set: traffic_matrix_set: default: - - source_path: SEA - sink_path: JFK + - source: SEA + sink: JFK demand: 50 - - source_path: SFO - sink_path: DCA + - source: SFO + sink: DCA demand: 50 - - source_path: SEA - sink_path: DCA + - source: SEA + sink: DCA demand: 50 - - source_path: SFO - sink_path: JFK + - source: SFO + sink: JFK demand: 50 workflow: diff --git a/tests/integration/scenario_3.yaml b/tests/integration/scenario_3.yaml index 404240e..f3d2e0c 100644 --- a/tests/integration/scenario_3.yaml +++ b/tests/integration/scenario_3.yaml @@ -111,8 +111,8 @@ workflow: # Forward direction analysis - equivalent to capacity_probe - step_type: MaxFlow name: capacity_analysis_forward - source_path: my_clos1/b.*/t1 - sink_path: my_clos2/b.*/t1 + source: my_clos1/b.*/t1 + sink: my_clos2/b.*/t1 mode: combine shortest_path: true flow_placement: PROPORTIONAL @@ -123,8 +123,8 @@ workflow: # Reverse direction analysis - equivalent to capacity_probe with probe_reverse - step_type: MaxFlow name: capacity_analysis_reverse - source_path: my_clos2/b.*/t1 - sink_path: my_clos1/b.*/t1 + source: my_clos2/b.*/t1 + sink: my_clos1/b.*/t1 mode: combine shortest_path: true flow_placement: PROPORTIONAL @@ -135,8 +135,8 @@ workflow: # Forward direction with EQUAL_BALANCED - equivalent to capacity_probe2 - step_type: MaxFlow name: capacity_analysis_forward_balanced - source_path: my_clos1/b.*/t1 - sink_path: my_clos2/b.*/t1 + source: my_clos1/b.*/t1 + sink: my_clos2/b.*/t1 mode: combine shortest_path: true flow_placement: EQUAL_BALANCED @@ -147,8 +147,8 @@ workflow: # Reverse direction with EQUAL_BALANCED - equivalent to capacity_probe2 with probe_reverse - step_type: MaxFlow name: capacity_analysis_reverse_balanced - source_path: my_clos2/b.*/t1 - sink_path: my_clos1/b.*/t1 + source: my_clos2/b.*/t1 + sink: my_clos1/b.*/t1 mode: combine shortest_path: true flow_placement: EQUAL_BALANCED diff --git a/tests/integration/scenario_4.yaml b/tests/integration/scenario_4.yaml index 3f5815a..d2088ef 100644 --- a/tests/integration/scenario_4.yaml +++ b/tests/integration/scenario_4.yaml @@ -143,9 +143,9 @@ blueprints: role: "spine" risk_groups: ["Spine_Fabric_SRG"] adjacency: - # Variable expansion for leaf-spine connectivity - - source: "leaf-{leaf_id}" - target: "spine-{spine_id}" + # Variable expansion for leaf-spine connectivity using $var syntax + - source: "leaf-${leaf_id}" + target: "spine-${spine_id}" expand_vars: leaf_id: [1, 2] spine_id: [1, 2] @@ -180,11 +180,11 @@ network: datacenter: "dc1" risk_groups: ["DC1_NetworkUplink"] - # Top-level adjacency with variable expansion + # Top-level adjacency with variable expansion using $var syntax adjacency: # Connect racks to fabric using variable expansion - - source: "dc{dc}_pod{pod}_rack{rack}/tor" - target: "dc{dc}_fabric/leaf" + - source: "dc${dc}_pod${pod}_rack${rack}/tor" + target: "dc${dc}_fabric/leaf" expand_vars: dc: [1, 2] pod: ["a", "b"] @@ -260,16 +260,16 @@ network: traffic_matrix_set: default: # East-west traffic within DC - - source_path: "dc1_pod[ab]_rack.*/servers/.*" - sink_path: "dc1_pod[ab]_rack.*/servers/.*" + - source: "dc1_pod[ab]_rack.*/servers/.*" + sink: "dc1_pod[ab]_rack.*/servers/.*" demand: 5.0 # 5 Gb/s east-west traffic mode: "pairwise" attrs: traffic_type: "east_west" # North-south traffic to external - - source_path: "dc1_.*servers/.*" - sink_path: "dc2_.*servers/.*" + - source: "dc1_.*servers/.*" + sink: "dc2_.*servers/.*" demand: 10.0 # 10 Gb/s inter-DC traffic mode: "combine" attrs: @@ -277,8 +277,8 @@ 
traffic_matrix_set: # High-performance computing workload hpc_workload: - - source_path: "dc1_poda_rack1/servers/srv-[1-4]" - sink_path: "dc1_poda_rack1/servers/srv-[1-4]" + - source: "dc1_poda_rack1/servers/srv-[1-4]" + sink: "dc1_poda_rack1/servers/srv-[1-4]" demand: 20.0 # 20 Gb/s HPC collective communication mode: "pairwise" attrs: @@ -321,16 +321,12 @@ workflow: - step_type: BuildGraph name: build_graph - - - - # Capacity analysis with different traffic patterns # Forward intra-DC capacity analysis - step_type: MaxFlow name: intra_dc_capacity_forward - source_path: "dc1_pod[ab]_rack.*/servers/.*" - sink_path: "dc1_pod[ab]_rack.*/servers/.*" + source: "dc1_pod[ab]_rack.*/servers/.*" + sink: "dc1_pod[ab]_rack.*/servers/.*" mode: "combine" shortest_path: false flow_placement: "PROPORTIONAL" @@ -341,8 +337,8 @@ workflow: # Reverse intra-DC capacity analysis - step_type: MaxFlow name: intra_dc_capacity_reverse - source_path: "dc1_pod[ab]_rack.*/servers/.*" - sink_path: "dc1_pod[ab]_rack.*/servers/.*" + source: "dc1_pod[ab]_rack.*/servers/.*" + sink: "dc1_pod[ab]_rack.*/servers/.*" mode: "combine" shortest_path: false flow_placement: "PROPORTIONAL" @@ -353,8 +349,8 @@ workflow: # Forward inter-DC capacity analysis - step_type: MaxFlow name: inter_dc_capacity_forward - source_path: "dc1_.*servers/.*" - sink_path: "dc2_.*servers/.*" + source: "dc1_.*servers/.*" + sink: "dc2_.*servers/.*" mode: "combine" shortest_path: false flow_placement: "EQUAL_BALANCED" @@ -365,8 +361,8 @@ workflow: # Reverse inter-DC capacity analysis - step_type: MaxFlow name: inter_dc_capacity_reverse - source_path: "dc2_.*servers/.*" - sink_path: "dc1_.*servers/.*" + source: "dc2_.*servers/.*" + sink: "dc1_.*servers/.*" mode: "combine" shortest_path: false flow_placement: "EQUAL_BALANCED" @@ -377,8 +373,8 @@ workflow: # Failure analysis with different policies - step_type: MaxFlow name: rack_failure_analysis - source_path: "dc1_pod[ab]_rack.*/servers/.*" - sink_path: "dc1_pod[ab]_rack.*/servers/.*" + source: "dc1_pod[ab]_rack.*/servers/.*" + sink: "dc1_pod[ab]_rack.*/servers/.*" mode: "combine" failure_policy: "single_link_failure" iterations: 10 # 10 iterations for test efficiency @@ -388,8 +384,8 @@ workflow: - step_type: MaxFlow name: spine_failure_analysis - source_path: "dc1_.*servers/.*" - sink_path: "dc2_.*servers/.*" + source: "dc1_.*servers/.*" + sink: "dc2_.*servers/.*" mode: "combine" failure_policy: "single_node_failure" iterations: 20 # 20 iterations for test efficiency diff --git a/tests/integration/test_data_templates.py b/tests/integration/test_data_templates.py index 221c57f..9748668 100644 --- a/tests/integration/test_data_templates.py +++ b/tests/integration/test_data_templates.py @@ -366,8 +366,8 @@ def all_to_all_uniform( if source != sink: # Skip self-demands demands.append( { - "source_path": source, - "sink_path": sink, + "source": source, + "sink": sink, "demand": demand_value, } ) @@ -383,13 +383,13 @@ def star_traffic( # Traffic from leaves to center for leaf in leaf_nodes: demands.append( - {"source_path": leaf, "sink_path": center_node, "demand": demand_value} + {"source": leaf, "sink": center_node, "demand": demand_value} ) # Traffic from center to leaves for leaf in leaf_nodes: demands.append( - {"source_path": center_node, "sink_path": leaf, "demand": demand_value} + {"source": center_node, "sink": leaf, "demand": demand_value} ) return demands @@ -413,9 +413,7 @@ def random_demands( sink = random.choice([n for n in node_names if n != source]) demand_value = random.uniform(min_demand, 
max_demand) - demands.append( - {"source_path": source, "sink_path": sink, "demand": demand_value} - ) + demands.append({"source": source, "sink": sink, "demand": demand_value}) return demands @@ -434,8 +432,8 @@ def hotspot_traffic( for hotspot in hotspot_nodes: demands.append( { - "source_path": source, - "sink_path": hotspot, + "source": source, + "sink": hotspot, "demand": hotspot_demand, } ) @@ -446,8 +444,8 @@ def hotspot_traffic( if source != sink: demands.append( { - "source_path": source, - "sink_path": sink, + "source": source, + "sink": sink, "demand": normal_demand, } ) @@ -478,8 +476,8 @@ def capacity_analysis_workflow( { "step_type": "MaxFlow", "name": f"capacity_analysis_{i}", - "source_path": source_pattern, - "sink_path": sink_pattern, + "source": source_pattern, + "sink": sink_pattern, "mode": mode, "iterations": 1, "baseline": False, @@ -500,8 +498,8 @@ def failure_analysis_workflow( { "step_type": "MaxFlow", "name": "failure_analysis", - "source_path": source_pattern, - "sink_path": sink_pattern, + "source": source_pattern, + "sink": sink_pattern, "iterations": 100, "parallelism": 4, }, @@ -517,8 +515,8 @@ def comprehensive_analysis_workflow( { "step_type": "MaxFlow", "name": "capacity_analysis_combine", - "source_path": source_pattern, - "sink_path": sink_pattern, + "source": source_pattern, + "sink": sink_pattern, "mode": "combine", "iterations": 1, "baseline": True, @@ -526,8 +524,8 @@ def comprehensive_analysis_workflow( { "step_type": "MaxFlow", "name": "capacity_analysis_pairwise", - "source_path": source_pattern, - "sink_path": sink_pattern, + "source": source_pattern, + "sink": sink_pattern, "mode": "pairwise", "shortest_path": True, "iterations": 1, @@ -536,8 +534,8 @@ def comprehensive_analysis_workflow( { "step_type": "MaxFlow", "name": "envelope_analysis", - "source_path": source_pattern, - "sink_path": sink_pattern, + "source": source_pattern, + "sink": sink_pattern, "iterations": 50, }, ] @@ -620,8 +618,8 @@ def with_uniform_traffic( if source_pattern != sink_pattern: demands.append( { - "source_path": source_pattern, - "sink_path": sink_pattern, + "source": source_pattern, + "sink": sink_pattern, "demand": demand_value, } ) @@ -803,7 +801,7 @@ def missing_workflow_params_builder() -> ScenarioDataBuilder: { "step_type": "CapacityEnvelopeAnalysis", "name": "incomplete_analysis", - # Missing source_path and sink_path + # Missing source and sink } ] return builder diff --git a/tests/integration/test_scenario_1.py b/tests/integration/test_scenario_1.py index a55f023..2f678e7 100644 --- a/tests/integration/test_scenario_1.py +++ b/tests/integration/test_scenario_1.py @@ -138,8 +138,7 @@ def test_traffic_demands_configuration(self, helper): # Convert to a more testable format demands_dict = { - (demand.source_path, demand.sink_path): demand.demand - for demand in default_demands + (demand.source, demand.sink): demand.demand for demand in default_demands } expected_demands = { diff --git a/tests/integration/test_scenario_2.py b/tests/integration/test_scenario_2.py index d45f477..a04299c 100644 --- a/tests/integration/test_scenario_2.py +++ b/tests/integration/test_scenario_2.py @@ -205,8 +205,7 @@ def test_traffic_demands_configuration(self, helper): # Same traffic demands as scenario 1 default_demands = helper.scenario.traffic_matrix_set.get_default_matrix() demands_dict = { - (demand.source_path, demand.sink_path): demand.demand - for demand in default_demands + (demand.source, demand.sink): demand.demand for demand in default_demands } expected_demands = { 
diff --git a/tests/integration/test_template_examples.py b/tests/integration/test_template_examples.py index 6c4a757..fdc2ac4 100644 --- a/tests/integration/test_template_examples.py +++ b/tests/integration/test_template_examples.py @@ -345,10 +345,10 @@ def test_scenario_1_template_variant(self): # Add traffic demands matching scenario 1 demands = [ - {"source_path": "SEA", "sink_path": "JFK", "demand": 50}, - {"source_path": "SFO", "sink_path": "DCA", "demand": 50}, - {"source_path": "SEA", "sink_path": "DCA", "demand": 50}, - {"source_path": "SFO", "sink_path": "JFK", "demand": 50}, + {"source": "SEA", "sink": "JFK", "demand": 50}, + {"source": "SFO", "sink": "DCA", "demand": 50}, + {"source": "SEA", "sink": "DCA", "demand": 50}, + {"source": "SFO", "sink": "JFK", "demand": 50}, ] builder.builder.data["traffic_matrix_set"] = {"default": demands} @@ -499,10 +499,10 @@ def test_scenario_2_template_variant(self): # Add traffic and failure policy same as scenario 1 demands = [ - {"source_path": "SEA", "sink_path": "JFK", "demand": 50}, - {"source_path": "SFO", "sink_path": "DCA", "demand": 50}, - {"source_path": "SEA", "sink_path": "DCA", "demand": 50}, - {"source_path": "SFO", "sink_path": "JFK", "demand": 50}, + {"source": "SEA", "sink": "JFK", "demand": 50}, + {"source": "SFO", "sink": "DCA", "demand": 50}, + {"source": "SEA", "sink": "DCA", "demand": 50}, + {"source": "SFO", "sink": "JFK", "demand": 50}, ] builder.builder.data["traffic_matrix_set"] = {"default": demands} @@ -598,8 +598,8 @@ def test_scenario_3_template_variant(self): { "step_type": "MaxFlow", "name": "capacity_analysis", - "source_path": "my_clos1/b.*/t1", - "sink_path": "my_clos2/b.*/t1", + "source": "my_clos1/b.*/t1", + "sink": "my_clos2/b.*/t1", "mode": "combine", "shortest_path": True, "flow_placement": "PROPORTIONAL", @@ -610,8 +610,8 @@ def test_scenario_3_template_variant(self): { "step_type": "MaxFlow", "name": "capacity_analysis2", - "source_path": "my_clos1/b.*/t1", - "sink_path": "my_clos2/b.*/t1", + "source": "my_clos1/b.*/t1", + "sink": "my_clos2/b.*/t1", "mode": "combine", "shortest_path": True, "flow_placement": "EQUAL_BALANCED", diff --git a/tests/model/demand/test_spec.py b/tests/model/demand/test_spec.py index f50f019..fa221ed 100644 --- a/tests/model/demand/test_spec.py +++ b/tests/model/demand/test_spec.py @@ -4,7 +4,7 @@ def test_defaults_and_id_generation() -> None: """TrafficDemand sets sane defaults and generates a unique, structured id.""" - demand = TrafficDemand(source_path="Src", sink_path="Dst") + demand = TrafficDemand(source="Src", sink="Dst") # Defaults assert demand.priority == 0 @@ -20,7 +20,7 @@ def test_defaults_and_id_generation() -> None: assert len(parts) == 3 assert all(parts) - demand2 = TrafficDemand(source_path="Src", sink_path="Dst") + demand2 = TrafficDemand(source="Src", sink="Dst") assert demand2.id != demand.id @@ -28,8 +28,8 @@ def test_explicit_id_preserved() -> None: """TrafficDemand with explicit ID preserves it unchanged.""" demand = TrafficDemand( id="my-explicit-id", - source_path="Src", - sink_path="Dst", + source="Src", + sink="Dst", demand=100.0, ) assert demand.id == "my-explicit-id" @@ -37,14 +37,14 @@ def test_explicit_id_preserved() -> None: def test_explicit_id_round_trip() -> None: """TrafficDemand ID survives serialization to dict and reconstruction.""" - original = TrafficDemand(source_path="A", sink_path="B", demand=50.0) + original = TrafficDemand(source="A", sink="B", demand=50.0) original_id = original.id # Simulate serialization (as done in workflow 
steps) config = { "id": original.id, - "source_path": original.source_path, - "sink_path": original.sink_path, + "source": original.source, + "sink": original.sink, "demand": original.demand, "mode": original.mode, "priority": original.priority, @@ -53,8 +53,8 @@ def test_explicit_id_round_trip() -> None: # Simulate reconstruction (as done in flow.py) reconstructed = TrafficDemand( id=config.get("id"), - source_path=config["source_path"], - sink_path=config["sink_path"], + source=config["source"], + sink=config["sink"], demand=config["demand"], mode=config.get("mode", "pairwise"), priority=config.get("priority", 0), @@ -65,8 +65,8 @@ def test_explicit_id_round_trip() -> None: def test_attrs_isolation_between_instances() -> None: """Each instance gets its own attrs dict; mutating one does not affect others.""" - d1 = TrafficDemand(source_path="A", sink_path="B") - d2 = TrafficDemand(source_path="A", sink_path="B") + d1 = TrafficDemand(source="A", sink="B") + d2 = TrafficDemand(source="A", sink="B") d1.attrs["k"] = "v" assert d1.attrs == {"k": "v"} @@ -76,8 +76,8 @@ def test_attrs_isolation_between_instances() -> None: def test_custom_assignment_including_policy_config() -> None: """Custom field values are preserved, including mode and policy config.""" demand = TrafficDemand( - source_path="SourceNode", - sink_path="TargetNode", + source="SourceNode", + sink="TargetNode", priority=5, demand=42.5, demand_placed=10.0, @@ -86,8 +86,8 @@ def test_custom_assignment_including_policy_config() -> None: flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP, ) - assert demand.source_path == "SourceNode" - assert demand.sink_path == "TargetNode" + assert demand.source == "SourceNode" + assert demand.sink == "TargetNode" assert demand.priority == 5 assert demand.demand == 42.5 assert demand.demand_placed == 10.0 diff --git a/tests/model/failure/test_conditions_unit.py b/tests/model/failure/test_conditions_unit.py index 528cbec..714a033 100644 --- a/tests/model/failure/test_conditions_unit.py +++ b/tests/model/failure/test_conditions_unit.py @@ -2,11 +2,10 @@ import pytest -from ngraph.model.failure.conditions import ( - FailureCondition, - evaluate_condition, - evaluate_conditions, -) +from ngraph.dsl.selectors import Condition, evaluate_condition, evaluate_conditions + +# Use Condition directly (FailureCondition is just an alias) +FailureCondition = Condition class TestEvaluateCondition: @@ -36,12 +35,12 @@ def test_contains_and_not_contains(self) -> None: evaluate_condition(attrs, FailureCondition("s", "not_contains", "xyz")) is True ) - # None yields False for contains and True for not_contains + # None yields False for both contains and not_contains (can't evaluate on None) assert evaluate_condition(attrs, FailureCondition("n", "contains", 1)) is False assert ( - evaluate_condition(attrs, FailureCondition("n", "not_contains", 1)) is True + evaluate_condition(attrs, FailureCondition("n", "not_contains", 1)) is False ) - # Non-iterable must not raise + # Non-iterable: contains returns False, not_contains returns True assert evaluate_condition(attrs, FailureCondition("i", "contains", 1)) is False assert ( evaluate_condition(attrs, FailureCondition("i", "not_contains", 1)) is True @@ -50,7 +49,8 @@ def test_contains_and_not_contains(self) -> None: def test_any_value_and_no_value(self) -> None: attrs: dict[str, Any] = {"p": 0, "q": None} assert evaluate_condition(attrs, FailureCondition("p", "any_value")) is True - assert evaluate_condition(attrs, FailureCondition("q", "any_value")) is True + # 
any_value with None returns False (attr must have non-None value) + assert evaluate_condition(attrs, FailureCondition("q", "any_value")) is False assert ( evaluate_condition(attrs, FailureCondition("missing", "any_value")) is False ) @@ -61,8 +61,8 @@ def test_any_value_and_no_value(self) -> None: assert evaluate_condition(attrs, FailureCondition("p", "no_value")) is False def test_unsupported_operator_raises(self) -> None: - with pytest.raises(ValueError, match="Unsupported operator"): - evaluate_condition({}, FailureCondition("x", "bad")) + with pytest.raises(ValueError, match="Unknown operator"): + evaluate_condition({"x": 1}, FailureCondition("x", "bad")) class TestEvaluateConditions: @@ -77,5 +77,7 @@ def test_and_or_logic(self) -> None: assert evaluate_conditions(attrs, conds2, "or") is False def test_unsupported_logic(self) -> None: + # Need non-empty conditions to trigger logic check + conds = [FailureCondition("x", "==", 1)] with pytest.raises(ValueError, match="Unsupported logic"): - evaluate_conditions({}, [], "xor") + evaluate_conditions({}, conds, "xor") diff --git a/tests/model/test_flow.py b/tests/model/test_flow.py index 2b8b993..08f47b3 100644 --- a/tests/model/test_flow.py +++ b/tests/model/test_flow.py @@ -73,7 +73,7 @@ def test_max_flow_with_attribute_grouping_combine(self): net.add_link(Link("S2", "T1", capacity=3.0)) flow = analyze(net).max_flow( - "attr:src_group", "attr:dst_group", mode=Mode.COMBINE + {"group_by": "src_group"}, {"group_by": "dst_group"}, mode=Mode.COMBINE ) assert flow == {("src", "dst"): 8.0} @@ -88,9 +88,9 @@ def test_max_flow_with_mixed_attr_and_regex(self): net.add_link(Link("S1", "T1", capacity=2.0)) net.add_link(Link("S2", "T2", capacity=3.0)) - flow = analyze(net).max_flow("attr:role", r"^T\d$", mode=Mode.PAIRWISE) + flow = analyze(net).max_flow({"group_by": "role"}, r"^T\d$", mode=Mode.PAIRWISE) # Groups: sources -> {"edge": [S1, S2]}, sinks -> {"T1": [T1], "T2": [T2]} - # In pairwise mode with attr:role, we get (edge, T1), (edge, T2) + # In pairwise mode with group_by: role, we get (edge, T1), (edge, T2) # The sink pattern r"^T\d$" creates individual labels per node assert len(flow) >= 1 # Total flow for pairwise is computed per pair entries @@ -127,9 +127,9 @@ def test_max_flow_disabled_nodes_coverage(self): net.add_link(Link("A", "B", capacity=10)) net.add_link(Link("B", "C", capacity=10)) - # Source A is disabled, so no flow should be possible - flow = analyze(net).max_flow("^A$", "^C$", mode=Mode.COMBINE) - assert flow[("^A$", "^C$")] == 0.0 + # Source A is disabled, so no active source nodes match - should raise + with pytest.raises(ValueError, match="No source nodes found"): + analyze(net).max_flow("^A$", "^C$", mode=Mode.COMBINE) def test_max_flow_disabled_link_coverage(self): """Test max_flow with disabled links for coverage.""" diff --git a/tests/model/test_network_integration.py b/tests/model/test_network_integration.py index da41db4..209759c 100644 --- a/tests/model/test_network_integration.py +++ b/tests/model/test_network_integration.py @@ -45,10 +45,10 @@ def test_risk_group_with_flow_analysis(self): flow = analyze(net).max_flow("^A$", "^D$", mode=Mode.COMBINE) assert flow[("^A$", "^D$")] == 1.0 - # Flow should be 0 when critical nodes are disabled + # No flow possible when all nodes are disabled - raises error net.disable_risk_group("critical") - flow = analyze(net).max_flow("^A$", "^D$", mode=Mode.COMBINE) - assert flow[("^A$", "^D$")] == 0.0 + with pytest.raises(ValueError, match="No source nodes found"): + 
analyze(net).max_flow("^A$", "^D$", mode=Mode.COMBINE) # Flow should resume when risk group is re-enabled net.enable_risk_group("critical") diff --git a/tests/model/test_selection.py b/tests/model/test_selection.py index e4ad747..f4f6b6d 100644 --- a/tests/model/test_selection.py +++ b/tests/model/test_selection.py @@ -97,47 +97,9 @@ def test_select_node_groups_multiple_capture_groups(self, complex_network): # Should have groups for each combination found assert len(node_groups) >= 2 - def test_select_node_groups_by_attribute_basic(self): - """Group nodes by attribute using strict attr: directive.""" - net = Network() - a1 = Node("A1", attrs={"role": "compute"}) - a2 = Node("A2", attrs={"role": "compute"}) - b1 = Node("B1", attrs={"role": "network"}) - c1 = Node("C1") # missing attribute -> omitted - for n in [a1, a2, b1, c1]: - net.add_node(n) - - groups = net.select_node_groups_by_path("attr:role") - assert set(groups.keys()) == {"compute", "network"} - assert {n.name for n in groups["compute"]} == {"A1", "A2"} - assert {n.name for n in groups["network"]} == {"B1"} - - def test_select_node_groups_by_attribute_non_string_labels(self): - """Non-string attribute values should label groups via str().""" - net = Network() - net.add_node(Node("S1", attrs={"dc_site_id": 1})) - net.add_node(Node("S2", attrs={"dc_site_id": 1})) - net.add_node(Node("S3", attrs={"dc_site_id": 2})) - net.add_node(Node("S4")) # missing attribute - - groups = net.select_node_groups_by_path("attr:dc_site_id") - assert set(groups.keys()) == {"1", "2"} - assert {n.name for n in groups["1"]} == {"S1", "S2"} - assert {n.name for n in groups["2"]} == {"S3"} - - def test_select_node_groups_by_attribute_strict_detection(self): - """Only full match attr: triggers attribute grouping.""" - net = Network() - net.add_node(Node("node1", attrs={"role": "edge"})) - net.add_node(Node("node2", attrs={"role": "core"})) - - # Pattern that looks like it references attr but is a regex should not trigger attr mode - groups_regex = net.select_node_groups_by_path(r"^attr:.*") - assert groups_regex == {} # No node names start with 'attr:' - - # Missing attribute name -> no groups - missing = net.select_node_groups_by_path("attr:missing") - assert missing == {} + # Note: The legacy attr: syntax has been removed. + # For attribute-based grouping, use the unified selector system with + # {"group_by": "attr_name"} dict selectors via normalize_selector/select_nodes. 
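The note above replaces three removed tests; their semantics carry over to the dict-selector form. A minimal pure-Python sketch of what `{"group_by": "role"}` grouping does, per the removed assertions (nodes bucketed by attribute value, labels via `str()`, nodes missing the attribute omitted). This is an illustration of the behavior, not NetGraph's implementation:

```python
from collections import defaultdict

# Node attrs as in the removed test; C1 lacks the attribute.
nodes = {
    "A1": {"role": "compute"},
    "A2": {"role": "compute"},
    "B1": {"role": "network"},
    "C1": {},
}

groups: dict[str, list[str]] = defaultdict(list)
for name, attrs in nodes.items():
    if attrs.get("role") is not None:
        # Non-string attribute values would label groups via str()
        groups[str(attrs["role"])].append(name)

assert dict(groups) == {"compute": ["A1", "A2"], "network": ["B1"]}
```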
class TestLinkUtilities: diff --git a/tests/scenario/test_scenario.py b/tests/scenario/test_scenario.py index d69e995..e2b18cc 100644 --- a/tests/scenario/test_scenario.py +++ b/tests/scenario/test_scenario.py @@ -110,11 +110,11 @@ def valid_scenario_yaml() -> str: rule_type: "all" traffic_matrix_set: default: - - source_path: NodeA - sink_path: NodeB + - source: NodeA + sink: NodeB demand: 15 - - source_path: NodeA - sink_path: NodeC + - source: NodeA + sink: NodeC demand: 5 workflow: - step_type: DoSmth @@ -149,8 +149,8 @@ def missing_step_type_yaml() -> str: rules: [] traffic_matrix_set: default: - - source_path: NodeA - sink_path: NodeB + - source: NodeA + sink: NodeB demand: 10 workflow: - name: StepWithoutType @@ -181,8 +181,8 @@ def unrecognized_step_type_yaml() -> str: rules: [] traffic_matrix_set: default: - - source_path: NodeA - sink_path: NodeB + - source: NodeA + sink: NodeB demand: 10 workflow: - step_type: NonExistentStep @@ -311,11 +311,11 @@ def test_scenario_from_yaml_valid(valid_scenario_yaml: str) -> None: assert len(default_demands) == 2 d1 = default_demands[0] d2 = default_demands[1] - assert d1.source_path == "NodeA" - assert d1.sink_path == "NodeB" + assert d1.source == "NodeA" + assert d1.sink == "NodeB" assert d1.demand == 15 - assert d2.source_path == "NodeA" - assert d2.sink_path == "NodeC" + assert d2.source == "NodeA" + assert d2.sink == "NodeC" assert d2.demand == 5 # Check workflow diff --git a/tests/scenario/test_schema_validation.py b/tests/scenario/test_schema_validation.py index f4042d5..0fdb4c5 100644 --- a/tests/scenario/test_schema_validation.py +++ b/tests/scenario/test_schema_validation.py @@ -70,8 +70,8 @@ def test_schema_validates_simple_scenario(self, schema): name: build_graph - step_type: CapacityEnvelopeAnalysis name: capacity_test - source_path: "A" - sink_path: "C" + source: "A" + sink: "C" iterations: 1 baseline: false failure_policy: null @@ -375,16 +375,16 @@ def test_schema_validates_traffic_matrices(self, schema): traffic_scenario = """ traffic_matrix_set: default: - - source_path: "^spine.*" - sink_path: "^leaf.*" + - source: "^spine.*" + sink: "^leaf.*" demand: 1000.0 mode: "combine" priority: 1 attrs: traffic_type: "north_south" hpc_workload: - - source_path: "compute.*" - sink_path: "storage.*" + - source: "compute.*" + sink: "storage.*" demand: 5000.0 mode: "pairwise" flow_policy_config: @@ -404,8 +404,8 @@ def test_schema_validates_traffic_matrices(self, schema): name: build_graph - step_type: CapacityEnvelopeAnalysis name: capacity_test - source_path: "spine1" - sink_path: "leaf1" + source: "spine1" + sink: "leaf1" iterations: 1 baseline: false failure_policy: null diff --git a/tests/solver/test_maxflow_cache.py b/tests/solver/test_maxflow_cache.py index 7d01e78..3f1aa5b 100644 --- a/tests/solver/test_maxflow_cache.py +++ b/tests/solver/test_maxflow_cache.py @@ -281,10 +281,12 @@ class TestPairwiseMode: """Tests for AnalysisContext in pairwise mode with disabled topology.""" def test_disabled_node_pairwise_mode(self) -> None: - """Disabled node should be respected in pairwise mode.""" + """Disabled node should be excluded from pairwise mode results.""" net = Network() net.add_node(Node("S1")) - net.add_node(Node("S2", disabled=True)) # Disabled source + net.add_node( + Node("S2", disabled=True) + ) # Disabled source - excluded from selection net.add_node(Node("M")) net.add_node(Node("T1")) net.add_node(Node("T2")) @@ -297,13 +299,14 @@ def test_disabled_node_pairwise_mode(self) -> None: ctx = analyze(net, source=r"^(S\d)$", 
sink=r"^(T\d)$", mode=Mode.PAIRWISE) result = ctx.max_flow() - # S1 -> T1 and S1 -> T2 should have flow + # Only S1 is active, so only S1 -> T1 and S1 -> T2 pairs exist + assert len(result) == 2 assert result[("S1", "T1")] == 5.0 assert result[("S1", "T2")] == 5.0 - # S2 -> anything should be 0 (S2 is disabled) - assert result[("S2", "T1")] == 0.0 - assert result[("S2", "T2")] == 0.0 + # S2 pairs are not in result (S2 is disabled and excluded from selection) + assert ("S2", "T1") not in result + assert ("S2", "T2") not in result class TestContextReuse: diff --git a/tests/solver/test_paths.py b/tests/solver/test_paths.py index 0ab16a5..aeef604 100644 --- a/tests/solver/test_paths.py +++ b/tests/solver/test_paths.py @@ -90,10 +90,10 @@ def test_pairwise_mode(self) -> None: net = _multi_source_sink_network() results = analyze(net).shortest_path_cost( - "attr:group", "attr:group", mode=Mode.PAIRWISE + {"group_by": "group"}, {"group_by": "group"}, mode=Mode.PAIRWISE ) - # With attr:group, nodes are grouped by their group attribute value + # With group_by, nodes are grouped by their group attribute value # Sources: src (A, B), Sinks: dst (C, D) # The only non-self pair is src -> dst assert ("src", "dst") in results @@ -155,7 +155,7 @@ def test_combine_mode_paths(self) -> None: net = _group_network() results = analyze(net).shortest_paths( - "attr:group", "attr:group", mode=Mode.COMBINE + {"group_by": "group"}, {"group_by": "group"}, mode=Mode.COMBINE ) # Should have one result with combined labels @@ -171,7 +171,7 @@ def test_pairwise_mode_paths(self) -> None: net = _multi_source_sink_network() results = analyze(net).shortest_paths( - "attr:group", "attr:group", mode=Mode.PAIRWISE + {"group_by": "group"}, {"group_by": "group"}, mode=Mode.PAIRWISE ) # Should have a result for src->dst pair @@ -225,7 +225,7 @@ def test_combine_mode(self) -> None: net = _group_network() results = analyze(net).k_shortest_paths( - "attr:group", "attr:group", max_k=3, mode=Mode.COMBINE + {"group_by": "group"}, {"group_by": "group"}, max_k=3, mode=Mode.COMBINE ) # Should have one combined result @@ -257,6 +257,59 @@ def test_max_path_cost_factor(self) -> None: # Both paths (cost 2 and 3) should be included since 3 <= 3.0 assert len(paths) == 2 + +class TestDictSelectorsWithShortestPaths: + """Tests for dict-based selectors with shortest path methods. + + Verifies that shortest_path_cost, shortest_paths, and k_shortest_paths + correctly handle dict selectors (group_by, match) in both unbound and + bound context modes. 
+ """ + + def test_shortest_path_cost_with_dict_selector(self) -> None: + """shortest_path_cost works with dict selectors.""" + net = _multi_source_sink_network() + + results = analyze(net).shortest_path_cost( + {"group_by": "group"}, {"group_by": "group"}, mode=Mode.PAIRWISE + ) + + assert ("src", "dst") in results + assert pytest.approx(results[("src", "dst")], abs=1e-9) == 2.0 + + def test_shortest_paths_with_match_selector(self) -> None: + """shortest_paths works with match conditions in selectors.""" + net = _multi_source_sink_network() + + results = analyze(net).shortest_paths( + { + "path": ".*", + "match": { + "conditions": [{"attr": "group", "operator": "==", "value": "src"}] + }, + }, + { + "path": ".*", + "match": { + "conditions": [{"attr": "group", "operator": "==", "value": "dst"}] + }, + }, + mode=Mode.COMBINE, + ) + + # Should find paths from src group to dst group + assert len(results) == 1 + + def test_k_shortest_paths_with_dict_selector(self) -> None: + """k_shortest_paths works with dict selectors.""" + net = _group_network() + + results = analyze(net).k_shortest_paths( + {"group_by": "group"}, {"group_by": "group"}, max_k=3, mode=Mode.COMBINE + ) + + assert len(results) == 1 + def test_empty_result_when_no_path(self) -> None: """Test that no paths are returned when unreachable.""" net = Network() diff --git a/tests/utils/test_boolean_keys.py b/tests/utils/test_boolean_keys.py index 839d284..02cd62c 100644 --- a/tests/utils/test_boolean_keys.py +++ b/tests/utils/test_boolean_keys.py @@ -88,34 +88,34 @@ def test_yaml_boolean_keys_converted_to_strings(): traffic_matrix_set: # Regular string key peak: - - source_path: "^A$" - sink_path: "^B$" + - source: "^A$" + sink: "^B$" demand: 100 # YAML 1.1 boolean keys - these get parsed as Python booleans true: - - source_path: "^C$" - sink_path: "^D$" + - source: "^C$" + sink: "^D$" demand: 200 false: - - source_path: "^E$" - sink_path: "^F$" + - source: "^E$" + sink: "^F$" demand: 50 yes: - - source_path: "^G$" - sink_path: "^H$" + - source: "^G$" + sink: "^H$" demand: 25 no: - - source_path: "^I$" - sink_path: "^J$" + - source: "^I$" + sink: "^J$" demand: 75 on: - - source_path: "^K$" - sink_path: "^L$" + - source: "^K$" + sink: "^L$" demand: 150 off: - - source_path: "^M$" - sink_path: "^N$" + - source: "^M$" + sink: "^N$" demand: 125 """) @@ -146,16 +146,16 @@ def test_quoted_boolean_keys_remain_strings(): name: test traffic_matrix_set: "true": - - source_path: "^A$" - sink_path: "^B$" + - source: "^A$" + sink: "^B$" demand: 100 "false": - - source_path: "^C$" - sink_path: "^D$" + - source: "^C$" + sink: "^D$" demand: 200 "off": - - source_path: "^E$" - sink_path: "^F$" + - source: "^E$" + sink: "^F$" demand: 300 """) diff --git a/tests/workflow/test_capacity_envelope_analysis.py b/tests/workflow/test_capacity_envelope_analysis.py index 19cc638..eaf4f9f 100644 --- a/tests/workflow/test_capacity_envelope_analysis.py +++ b/tests/workflow/test_capacity_envelope_analysis.py @@ -62,10 +62,10 @@ class TestMaxFlowStep: def test_initialization_defaults(self): """Test MaxFlow initialization with defaults.""" - step = MaxFlow(source_path="^A", sink_path="^C") + step = MaxFlow(source="^A", sink="^C") - assert step.source_path == "^A" - assert step.sink_path == "^C" + assert step.source == "^A" + assert step.sink == "^C" assert step.mode == "combine" assert step.failure_policy is None assert step.iterations == 1 @@ -80,8 +80,8 @@ def test_initialization_defaults(self): def test_initialization_custom_values(self): """Test MaxFlow 
initialization with custom values.""" step = MaxFlow( - source_path="^src", - sink_path="^dst", + source="^src", + sink="^dst", mode="pairwise", failure_policy="test_policy", iterations=100, @@ -94,8 +94,8 @@ def test_initialization_custom_values(self): include_flow_details=True, ) - assert step.source_path == "^src" - assert step.sink_path == "^dst" + assert step.source == "^src" + assert step.sink == "^dst" assert step.mode == "pairwise" assert step.failure_policy == "test_policy" assert step.iterations == 100 @@ -110,21 +110,21 @@ def test_initialization_custom_values(self): def test_validation_errors(self): """Test parameter validation.""" with pytest.raises(ValueError, match="iterations must be >= 1"): - MaxFlow(source_path="^A", sink_path="^C", iterations=0) + MaxFlow(source="^A", sink="^C", iterations=0) with pytest.raises(ValueError, match="parallelism must be >= 1"): - MaxFlow(source_path="^A", sink_path="^C", parallelism=0) + MaxFlow(source="^A", sink="^C", parallelism=0) with pytest.raises(ValueError, match="mode must be 'combine' or 'pairwise'"): - MaxFlow(source_path="^A", sink_path="^C", mode="invalid") + MaxFlow(source="^A", sink="^C", mode="invalid") with pytest.raises(ValueError, match="baseline=True requires iterations >= 2"): - MaxFlow(source_path="^A", sink_path="^C", baseline=True, iterations=1) + MaxFlow(source="^A", sink="^C", baseline=True, iterations=1) def test_flow_placement_enum_usage(self): """Test that FlowPlacement enum is used correctly.""" step = MaxFlow( - source_path="^A", sink_path="^C", flow_placement=FlowPlacement.PROPORTIONAL + source="^A", sink="^C", flow_placement=FlowPlacement.PROPORTIONAL ) assert step.flow_placement == FlowPlacement.PROPORTIONAL @@ -170,8 +170,8 @@ def test_run_with_mock_failure_manager( # Create and run the step step = MaxFlow( - source_path="^A", - sink_path="^C", + source="^A", + sink="^C", failure_policy="test_policy", iterations=1, parallelism=1, @@ -188,8 +188,8 @@ def test_run_with_mock_failure_manager( # Verify convenience method was called with correct parameters _, kwargs = mock_failure_manager.run_max_flow_monte_carlo.call_args - assert kwargs["source_path"] == "^A" - assert kwargs["sink_path"] == "^C" + assert kwargs["source"] == "^A" + assert kwargs["sink"] == "^C" assert kwargs["mode"] == "combine" assert kwargs["iterations"] == 1 assert kwargs["parallelism"] == 1 @@ -247,8 +247,8 @@ def test_run_with_failure_patterns(self, mock_failure_manager_class, mock_scenar # Create and run the step with failure pattern storage step = MaxFlow( - source_path="^A", - sink_path="^C", + source="^A", + sink="^C", iterations=2, store_failure_patterns=True, parallelism=1, @@ -263,8 +263,8 @@ def test_run_with_failure_patterns(self, mock_failure_manager_class, mock_scenar def test_capacity_envelope_with_failures_mocked(self): """Test capacity envelope step with mocked FailureManager.""" step = MaxFlow( - source_path="^A", - sink_path="^C", + source="^A", + sink="^C", mode="combine", iterations=2, parallelism=1, @@ -356,8 +356,8 @@ def test_include_flow_summary_functionality( # Test with include_flow_details=True step = MaxFlow( - source_path="^A", - sink_path="^C", + source="^A", + sink="^C", iterations=1, include_flow_details=True, parallelism=1, diff --git a/tests/workflow/test_maximum_supported_demand.py b/tests/workflow/test_maximum_supported_demand.py index 86d35d3..3cace34 100644 --- a/tests/workflow/test_maximum_supported_demand.py +++ b/tests/workflow/test_maximum_supported_demand.py @@ -11,8 +11,8 @@ def 
_mock_scenario_with_matrix() -> MagicMock: mock_scenario = MagicMock() td = MagicMock() - td.source_path = "A" - td.sink_path = "B" + td.source = "A" + td.sink = "B" td.demand = 10.0 td.mode = "pairwise" td.priority = 0 @@ -60,7 +60,7 @@ def _eval(cache, alpha, seeds): ctx = exported["steps"]["msd_step"]["data"].get("context", {}) assert ctx.get("acceptance_rule") == "hard" base = exported["steps"]["msd_step"]["data"].get("base_demands", []) - assert base and base[0]["source_path"] == "A" + assert base and base[0]["source"] == "A" @patch.object(MaximumSupportedDemand, "_evaluate_alpha") @@ -135,8 +135,8 @@ def test_msd_end_to_end_single_link() -> None: demands_config = [ { "id": d.id, - "source_path": d.source_path, - "sink_path": d.sink_path, + "source": d.source, + "sink": d.sink, "demand": d.demand, "mode": d.mode, "priority": d.priority, @@ -162,8 +162,8 @@ def test_msd_end_to_end_single_link() -> None: demands_config_above = [ { "id": d.id, - "source_path": d.source_path, - "sink_path": d.sink_path, + "source": d.source, + "sink": d.sink, "demand": d.demand, "mode": d.mode, "priority": d.priority, diff --git a/tests/workflow/test_msd_perf_safety.py b/tests/workflow/test_msd_perf_safety.py index 92523c0..cdba8eb 100644 --- a/tests/workflow/test_msd_perf_safety.py +++ b/tests/workflow/test_msd_perf_safety.py @@ -27,7 +27,7 @@ def test_msd_reuse_tm_across_seeds_is_behaviorally_identical(monkeypatch): tmset = TrafficMatrixSet() tmset.add( "default", - [TrafficDemand(source_path="A", sink_path="C", demand=2.0, mode="pairwise")], + [TrafficDemand(source="A", sink="C", demand=2.0, mode="pairwise")], ) from ngraph.results.store import Results diff --git a/tests/workflow/test_tm_analysis_perf_safety.py b/tests/workflow/test_tm_analysis_perf_safety.py index 61dae72..8d84cbf 100644 --- a/tests/workflow/test_tm_analysis_perf_safety.py +++ b/tests/workflow/test_tm_analysis_perf_safety.py @@ -32,7 +32,7 @@ def test_tm_basic_behavior_unchanged(monkeypatch): tmset = TrafficMatrixSet() tmset.add( "default", - [TrafficDemand(source_path="A", sink_path="C", demand=2.0, mode="pairwise")], + [TrafficDemand(source="A", sink="C", demand=2.0, mode="pairwise")], ) class _ResultsStore: diff --git a/tests/workflow/test_traffic_matrix_placement.py b/tests/workflow/test_traffic_matrix_placement.py index a14d669..eae48bd 100644 --- a/tests/workflow/test_traffic_matrix_placement.py +++ b/tests/workflow/test_traffic_matrix_placement.py @@ -17,8 +17,8 @@ def test_traffic_matrix_placement_stores_core_outputs( # Prepare mock scenario with traffic matrix and results store mock_scenario = MagicMock() mock_td = MagicMock() - mock_td.source_path = "A" - mock_td.sink_path = "B" + mock_td.source = "A" + mock_td.sink = "B" mock_td.demand = 10.0 mock_td.mode = "pairwise" mock_td.priority = 0 @@ -95,8 +95,8 @@ def test_traffic_matrix_placement_flow_details_edges( # Prepare mock scenario with traffic matrix and results store mock_scenario = MagicMock() mock_td = MagicMock() - mock_td.source_path = "A" - mock_td.sink_path = "B" + mock_td.source = "A" + mock_td.sink = "B" mock_td.demand = 10.0 mock_td.mode = "pairwise" mock_td.priority = 0 @@ -187,8 +187,8 @@ def test_traffic_matrix_placement_alpha_scales_demands( # Prepare mock scenario with a single traffic demand mock_scenario = MagicMock() mock_td = MagicMock() - mock_td.source_path = "S" - mock_td.sink_path = "T" + mock_td.source = "S" + mock_td.sink = "T" mock_td.demand = 10.0 mock_td.mode = "pairwise" mock_td.priority = 0 @@ -229,8 +229,8 @@ def 
test_traffic_matrix_placement_alpha_scales_demands( _, kwargs = mock_failure_manager.run_demand_placement_monte_carlo.call_args dcfg = kwargs.get("demands_config") assert isinstance(dcfg, list) and len(dcfg) == 1 - assert dcfg[0]["source_path"] == "S" - assert dcfg[0]["sink_path"] == "T" + assert dcfg[0]["source"] == "S" + assert dcfg[0]["sink"] == "T" assert abs(float(dcfg[0]["demand"]) - 25.0) < 1e-12 @@ -240,8 +240,8 @@ def test_traffic_matrix_placement_metadata_includes_alpha( ) -> None: mock_scenario = MagicMock() mock_td = MagicMock() - mock_td.source_path = "A" - mock_td.sink_path = "B" + mock_td.source = "A" + mock_td.sink = "B" mock_td.demand = 1.0 mock_td.mode = "pairwise" mock_td.priority = 0 @@ -288,8 +288,8 @@ def test_traffic_matrix_placement_alpha_auto_uses_msd( # Scenario with one TD mock_scenario = MagicMock() td = MagicMock() - td.source_path = "S" - td.sink_path = "T" + td.source = "S" + td.sink = "T" td.demand = 4.0 td.mode = "pairwise" td.priority = 0 @@ -308,8 +308,8 @@ def test_traffic_matrix_placement_alpha_auto_uses_msd( "context": {"matrix_name": "default", "placement_rounds": "auto"}, "base_demands": [ { - "source_path": "S", - "sink_path": "T", + "source": "S", + "sink": "T", "demand": 4.0, "mode": "pairwise", "priority": 0, @@ -362,8 +362,8 @@ def test_traffic_matrix_placement_alpha_auto_missing_msd_raises( ) -> None: mock_scenario = MagicMock() td = MagicMock() - td.source_path = "S" - td.sink_path = "T" + td.source = "S" + td.sink = "T" td.demand = 4.0 td.mode = "pairwise" td.priority = 0
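Across these placement tests, the serialized demand config follows one contract: the renamed `source`/`sink` keys replace `source_path`/`sink_path` wherever a `TrafficDemand` crosses a dict boundary. A minimal sketch of that contract; the `demand_to_config` helper is illustrative only, while the tests exercise the real serialization inside the workflow steps:

```python
from typing import Any

def demand_to_config(td: Any) -> dict[str, Any]:
    """Illustrative serialization mirroring the dicts built in these tests."""
    return {
        "id": td.id,
        "source": td.source,  # was: td.source_path
        "sink": td.sink,      # was: td.sink_path
        "demand": td.demand,
        "mode": td.mode,
        "priority": td.priority,
    }
```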