From 999285c2c0fc201f0798eeda3a0123a5c7cfdd47 Mon Sep 17 00:00:00 2001 From: Andrey Golovanov Date: Mon, 3 Mar 2025 11:04:47 +0000 Subject: [PATCH 1/2] demand placement implemented --- ngraph/lib/demand.py | 99 +++--- ngraph/lib/flow.py | 8 +- ngraph/lib/flow_policy.py | 12 +- ngraph/traffic_demand.py | 49 ++- ngraph/traffic_manager.py | 492 ++++++++++++++++++++++++++++++ ngraph/workflow/capacity_probe.py | 6 +- notebooks/scenario.ipynb | 259 ++++++++++++++++ tests/lib/test_demand.py | 208 ++++++++++--- tests/scenarios/scenario_1.yaml | 16 +- tests/scenarios/scenario_2.yaml | 16 +- tests/test_readme_examples.py | 16 +- tests/test_scenario.py | 20 +- tests/test_traffic_demand.py | 26 +- tests/test_traffic_manager.py | 235 ++++++++++++++ 14 files changed, 1298 insertions(+), 164 deletions(-) create mode 100644 ngraph/traffic_manager.py create mode 100644 notebooks/scenario.ipynb create mode 100644 tests/test_traffic_manager.py diff --git a/ngraph/lib/demand.py b/ngraph/lib/demand.py index 353db4c..37d094c 100644 --- a/ngraph/lib/demand.py +++ b/ngraph/lib/demand.py @@ -1,95 +1,100 @@ from __future__ import annotations +from dataclasses import dataclass, field from typing import Optional, Tuple -from ngraph.lib.graph import NodeID, StrictMultiDiGraph from ngraph.lib.flow_policy import FlowPolicy +from ngraph.lib.graph import NodeID, StrictMultiDiGraph +@dataclass class Demand: """ - Represents a network demand between two nodes. - - A Demand can be realized through one or more flows. + Represents a network demand between two nodes. It is realized via one or more + flows through a single FlowPolicy. """ - def __init__( - self, - src_node: NodeID, - dst_node: NodeID, - volume: float, - demand_class: int = 0, - ) -> None: + src_node: NodeID + dst_node: NodeID + volume: float + demand_class: int = 0 + flow_policy: Optional[FlowPolicy] = None + placed_demand: float = field(default=0.0, init=False) + + def __lt__(self, other: Demand) -> bool: """ - Initializes a Demand instance. + Compare Demands by their demand_class (priority). A lower demand_class + indicates higher priority, so it should come first in sorting. Args: - src_node: The source node identifier. - dst_node: The destination node identifier. - volume: The total volume of the demand. - demand_class: An integer representing the demand's class or priority. - """ - self.src_node: NodeID = src_node - self.dst_node: NodeID = dst_node - self.volume: float = volume - self.demand_class: int = demand_class - self.placed_demand: float = 0.0 + other (Demand): Demand to compare against. - def __lt__(self, other: Demand) -> bool: - """Compares Demands based on their demand class.""" + Returns: + bool: True if self has higher priority (lower class value). + """ return self.demand_class < other.demand_class def __str__(self) -> str: - """Returns a string representation of the Demand.""" + """ + String representation showing src, dst, volume, priority, and placed_demand. + """ return ( f"Demand(src_node={self.src_node}, dst_node={self.dst_node}, " - f"volume={self.volume}, demand_class={self.demand_class}, placed_demand={self.placed_demand})" + f"volume={self.volume}, demand_class={self.demand_class}, " + f"placed_demand={self.placed_demand})" ) def place( self, flow_graph: StrictMultiDiGraph, - flow_policy: FlowPolicy, max_fraction: float = 1.0, max_placement: Optional[float] = None, ) -> Tuple[float, float]: """ - Places demand volume onto the network graph using the specified flow policy. 
- - The function computes the remaining volume to place, applies any maximum - placement or fraction constraints, and delegates the flow placement to the - provided flow policy. It then updates the placed demand. + Places demand volume onto the network via self.flow_policy. Args: - flow_graph: The network graph on which flows are placed. - flow_policy: The flow policy used to place the demand. - max_fraction: Maximum fraction of the total demand volume to place in this call. - max_placement: Optional absolute limit on the volume to place. + flow_graph (StrictMultiDiGraph): The graph to place flows onto. + max_fraction (float): The fraction of the remaining demand to place now. + max_placement (Optional[float]): An absolute upper bound on volume. Returns: - A tuple (placed, remaining) where 'placed' is the volume successfully placed, - and 'remaining' is the volume that could not be placed. + Tuple[float, float]: + placed_now: Volume placed in this call. + remaining: Volume that could not be placed in this call. + + Raises: + RuntimeError: If no FlowPolicy is set on this Demand. + ValueError: If max_fraction is outside [0, 1]. """ - to_place = self.volume - self.placed_demand + if self.flow_policy is None: + raise RuntimeError("No FlowPolicy set on this Demand.") + if not (0 <= max_fraction <= 1): + raise ValueError("max_fraction must be in the range [0, 1].") + + to_place = self.volume - self.placed_demand if max_placement is not None: to_place = min(to_place, max_placement) if max_fraction > 0: to_place = min(to_place, self.volume * max_fraction) else: - # When max_fraction is non-positive, place the entire volume only if infinite; - # otherwise, no placement is performed. - to_place = self.volume if self.volume == float("inf") else 0 + # If max_fraction <= 0, do not place any new volume (unless volume is infinite). + to_place = self.volume if self.volume == float("inf") else 0.0 - flow_policy.place_demand( + # Delegate flow placement + self.flow_policy.place_demand( flow_graph, self.src_node, self.dst_node, self.demand_class, to_place, ) - placed = flow_policy.placed_demand - self.placed_demand - self.placed_demand = flow_policy.placed_demand - remaining = to_place - placed - return placed, remaining + + # placed_now is the difference from the old placed_demand + placed_now = self.flow_policy.placed_demand - self.placed_demand + self.placed_demand = self.flow_policy.placed_demand + remaining = to_place - placed_now + + return placed_now, remaining diff --git a/ngraph/lib/flow.py b/ngraph/lib/flow.py index d22ce70..0543ed1 100644 --- a/ngraph/lib/flow.py +++ b/ngraph/lib/flow.py @@ -20,13 +20,13 @@ class FlowIndex(NamedTuple): src_node (NodeID): The source node of the flow. dst_node (NodeID): The destination node of the flow. flow_class (int): Integer representing the 'class' of this flow (e.g., traffic class). - flow_id (int): A unique integer ID for this flow. + flow_id (str): A unique ID for this flow. """ src_node: NodeID dst_node: NodeID flow_class: int - flow_id: int + flow_id: str class Flow: @@ -34,8 +34,8 @@ class Flow: Represents a fraction of demand routed along a given PathBundle. In traffic-engineering scenarios, a `Flow` object can model: - - An MPLS LSP/tunnel, - - IP forwarding behavior (with ECMP), + - MPLS LSPs/tunnels with explicit paths, + - IP forwarding behavior (with ECMP or UCMP), - Or anything that follows a specific set of paths. 
""" diff --git a/ngraph/lib/flow_policy.py b/ngraph/lib/flow_policy.py index d0469f6..21b0dec 100644 --- a/ngraph/lib/flow_policy.py +++ b/ngraph/lib/flow_policy.py @@ -426,11 +426,15 @@ def place_demand( raise RuntimeError("Infinite loop detected in place_demand.") # For EQUAL_BALANCED placement, rebalance flows to maintain equal volumes. - if self.flow_placement == FlowPlacement.EQUAL_BALANCED: - target_flow_volume = self.placed_demand / len(self.flows) + if ( + self.flow_placement == FlowPlacement.EQUAL_BALANCED + and len(self.flows) > 0 # must not rebalance if no flows + ): + target_flow_volume = self.placed_demand / float(len(self.flows)) + # If the flows are not already near balanced if any( - abs(target_flow_volume - flow.placed_flow) >= base.MIN_FLOW - for flow in self.flows.values() + abs(target_flow_volume - f.placed_flow) >= base.MIN_FLOW + for f in self.flows.values() ): total_placed_flow, excess_flow = self.rebalance_demand( flow_graph, src_node, dst_node, flow_class, target_flow_volume diff --git a/ngraph/traffic_demand.py b/ngraph/traffic_demand.py index dfb85b7..17e0c57 100644 --- a/ngraph/traffic_demand.py +++ b/ngraph/traffic_demand.py @@ -1,27 +1,58 @@ from __future__ import annotations + from dataclasses import dataclass, field from typing import Any, Dict +from ngraph.lib.flow_policy import FlowPolicyConfig +from ngraph.network import new_base64_uuid + @dataclass(slots=True) class TrafficDemand: """ Represents a single traffic demand in a network. + This class provides: + - Source and sink regex patterns to match sets of nodes in the network. + - A total demand volume and a priority (lower number = higher priority). + - A flow policy configuration to specify routing/placement logic (if + not supplied, defaults to SHORTEST_PATHS_ECMP). + - A 'mode' that determines how the demand expands into per-node-pair + demands. Supported modes include: + * "node_to_node": default behavior (each (src, dst) pair shares + the demand). + * "combine": combine all matched sources and all matched sinks, + then distribute the demand among the cross-product of nodes. + * "pairwise": for each (src_label, dst_label) pair, split up the + total demand so each label cross-product receives an equal fraction. + * "one_to_one": match src_labels[i] to dst_labels[i], then split + demand among node pairs in those matched labels. + Attributes: - source (str): The name of the source node. - target (str): The name of the target node. - priority (int): The priority of this traffic demand. Lower values indicate higher priority (default=0). + source_path (str): A regex pattern (string) for selecting source nodes. + sink_path (str): A regex pattern (string) for selecting sink nodes. + priority (int): A priority class for this demand (default=0). demand (float): The total demand volume (default=0.0). - demand_placed (float): The placed portion of the demand (default=0.0). - demand_unplaced (float): The unplaced portion of the demand (default=0.0). - attrs (dict[str, Any]): A dictionary for any additional attributes (default={}). + demand_placed (float): The portion of this demand that has been placed + so far (default=0.0). This is updated when flows are placed. + flow_policy_config (FlowPolicyConfig): The routing/placement policy. + mode (str): Expansion mode for generating sub-demands (defaults to "node_to_node"). + attrs (Dict[str, Any]): Additional arbitrary attributes. + id (str): Unique ID assigned at initialization. 
""" - source: str - target: str + source_path: str = "" + sink_path: str = "" priority: int = 0 demand: float = 0.0 demand_placed: float = 0.0 - demand_unplaced: float = 0.0 + flow_policy_config: FlowPolicyConfig = FlowPolicyConfig.SHORTEST_PATHS_ECMP + mode: str = "node_to_node" attrs: Dict[str, Any] = field(default_factory=dict) + id: str = field(init=False) + + def __post_init__(self) -> None: + """ + Generate a unique ID by combining source, sink, and a random Base64 UUID. + """ + self.id = f"{self.source_path}|{self.sink_path}|{new_base64_uuid()}" diff --git a/ngraph/traffic_manager.py b/ngraph/traffic_manager.py new file mode 100644 index 0000000..75f1290 --- /dev/null +++ b/ngraph/traffic_manager.py @@ -0,0 +1,492 @@ +from __future__ import annotations + +from collections import defaultdict +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Tuple + +from ngraph.lib.algorithms import base +from ngraph.lib.algorithms.flow_init import init_flow_graph +from ngraph.lib.demand import Demand +from ngraph.lib.flow_policy import FlowPolicy, FlowPolicyConfig, get_flow_policy +from ngraph.lib.graph import StrictMultiDiGraph +from ngraph.network import Network, Node +from ngraph.traffic_demand import TrafficDemand + + +@dataclass +class TrafficManager: + """ + Manages the expansion and placement of traffic demands on a Network. + + This class: + 1) Builds (or rebuilds) a StrictMultiDiGraph from the given Network. + 2) Expands each TrafficDemand into one or more Demand objects, according + to a configurable "mode" (e.g., combine, pairwise, node_to_node, + one_to_one). + 3) Each Demand is associated with a FlowPolicy, which handles how flows + are placed (split across paths, balancing, etc.). + 4) Provides methods to place all demands incrementally with optional + re-optimization, reset usage, and retrieve flow/usage summaries. + + The sum of volumes of the expanded Demands for a given TrafficDemand + matches that TrafficDemand's `demand` value. + + Attributes: + network (Network): The underlying network object. + traffic_demands (List[TrafficDemand]): The scenario-level demands. + default_flow_policy_config (FlowPolicyConfig): Default FlowPolicy if + a TrafficDemand does not specify one. + graph (StrictMultiDiGraph): Active graph built from the network. + demands (List[Demand]): The expanded demands from traffic_demands. + _td_to_demands (Dict[str, List[Demand]]): Internal mapping from TrafficDemand.id + to its expanded Demand objects. + """ + + network: Network + traffic_demands: List[TrafficDemand] = field(default_factory=list) + default_flow_policy_config: FlowPolicyConfig = FlowPolicyConfig.SHORTEST_PATHS_ECMP + + graph: Optional[StrictMultiDiGraph] = None + demands: List[Demand] = field(default_factory=list) + + _td_to_demands: Dict[str, List[Demand]] = field(default_factory=dict) + + def build_graph(self, add_reverse: bool = True) -> None: + """ + Builds or rebuilds the internal StrictMultiDiGraph from self.network. + + This also initializes flow-related edge attributes (like flow=0). + + Args: + add_reverse (bool): If True, for every link A->B, add a mirrored link + B->A with the same capacity/cost. Default True. + """ + self.graph = self.network.to_strict_multidigraph(add_reverse=add_reverse) + init_flow_graph(self.graph) # Initialize flow-related attributes + + def expand_demands(self) -> None: + """ + Converts each TrafficDemand into one or more Demand objects according + to the demand's 'mode'. 
The sum of volumes for all expanded Demands of + a TrafficDemand equals that TrafficDemand's `demand`. + + Supported modes: + - 'node_to_node' + - 'combine' + - 'pairwise' + - 'one_to_one' + + Each Demand is assigned a FlowPolicy (from the demand or default). + The expanded demands are stored in self.demands, sorted by ascending + priority (lower demand_class -> earlier). + + Also populates _td_to_demands[td.id] with the corresponding Demand list. + """ + self._td_to_demands.clear() + expanded: List[Demand] = [] + + for td in self.traffic_demands: + # Collect node groups for src and dst + src_groups = self.network.select_node_groups_by_path(td.source_path) + snk_groups = self.network.select_node_groups_by_path(td.sink_path) + + # If no node matches, store empty and skip + if not src_groups or not snk_groups: + self._td_to_demands[td.id] = [] + continue + + # Sort labels for deterministic expansion + src_labels = sorted(src_groups.keys()) + snk_labels = sorted(snk_groups.keys()) + mode = td.mode + + local_demands: List[Demand] = [] + if mode == "combine": + self._expand_combine(local_demands, td, src_groups, snk_groups) + elif mode == "pairwise": + self._expand_pairwise( + local_demands, + td, + src_labels, + snk_labels, + src_groups, + snk_groups, + ) + elif mode == "one_to_one": + self._expand_one_to_one( + local_demands, + td, + src_labels, + snk_labels, + src_groups, + snk_groups, + ) + else: + # Default to "node_to_node" + self._expand_node_to_node(local_demands, td, src_groups, snk_groups) + + expanded.extend(local_demands) + self._td_to_demands[td.id] = local_demands + + # Sort final demands by ascending priority + expanded.sort() + self.demands = expanded + + def place_all_demands( + self, + placement_rounds: int = 5, + reoptimize_after_each_round: bool = False, + ) -> float: + """ + Places all expanded demands in ascending priority order, using a + multi-round approach for demands of the same priority. + + Each priority class is processed with `placement_rounds` passes, distributing + demand incrementally. Optionally re-optimizes flows after each round. + + Finally, updates each TrafficDemand's `demand_placed` with the sum of + its expanded demands' placed volumes. + + Args: + placement_rounds (int): Number of incremental passes per priority. + reoptimize_after_each_round (bool): Whether to re-run an optimization + pass after each round of placement. + + Returns: + float: Total volume successfully placed across all demands. + + Raises: + RuntimeError: If the graph has not been built. + """ + if self.graph is None: + raise RuntimeError("Graph not built yet. 
Call build_graph() first.") + + # Group demands by priority + prio_map: Dict[int, List[Demand]] = defaultdict(list) + for d in self.demands: + prio_map[d.demand_class].append(d) + + total_placed = 0.0 + sorted_priorities = sorted(prio_map.keys()) + + for priority in sorted_priorities: + demands_in_prio = prio_map[priority] + + # Multi-round fractional placement + for round_idx in range(placement_rounds): + placement_this_round = 0.0 + + for demand in demands_in_prio: + leftover = demand.volume - demand.placed_demand + if leftover < base.MIN_FLOW: + # Already fully placed (or negligible leftover) + continue + + # Distribute in fractional increments + rounds_left = placement_rounds - round_idx + step_to_place = leftover / float(rounds_left) + + placed_now, _remain = demand.place( + flow_graph=self.graph, + max_placement=step_to_place, + ) + total_placed += placed_now + placement_this_round += placed_now + + # Re-optimize if requested + if reoptimize_after_each_round and placement_this_round > 0.0: + self._reoptimize_priority_demands(demands_in_prio) + + # No progress -> break + if placement_this_round < base.MIN_FLOW: + break + + # Update each TrafficDemand with the sum of its expanded demands + for td in self.traffic_demands: + demand_list = self._td_to_demands.get(td.id, []) + td.demand_placed = sum(d.placed_demand for d in demand_list) + + return total_placed + + def reset_all_flow_usages(self) -> None: + """ + Removes flow usage from the graph for each Demand's FlowPolicy, + resets placed_demand=0 for each Demand, and sets + TrafficDemand.demand_placed=0. + """ + if self.graph is None: + return + + # Clear usage from each Demand's FlowPolicy + for d in self.demands: + if d.flow_policy: + d.flow_policy.remove_demand(self.graph) + d.placed_demand = 0.0 + + # Reset top-level traffic demands + for td in self.traffic_demands: + td.demand_placed = 0.0 + + def get_flow_details(self) -> Dict[Tuple[int, int], Dict[str, object]]: + """ + Summarizes flows from each Demand's FlowPolicy. + + Returns: + Dict[Tuple[int, int], Dict[str, object]]: + Keyed by (demand_index, flow_index), with info on placed_flow, + src_node, dst_node, and the path edges. + """ + details: Dict[Tuple[int, int], Dict[str, object]] = {} + for i, d in enumerate(self.demands): + if not d.flow_policy: + continue + for f_idx, flow_obj in d.flow_policy.flows.items(): + details[(i, f_idx)] = { + "placed_flow": flow_obj.placed_flow, + "src_node": flow_obj.src_node, + "dst_node": flow_obj.dst_node, + "edges": list(flow_obj.path_bundle.edges), + } + return details + + def summarize_link_usage(self) -> Dict[str, float]: + """ + Returns flow usage per edge in the graph. + + Returns: + Dict[str, float]: edge_key -> used capacity (flow). + """ + usage: Dict[str, float] = {} + if self.graph is None: + return usage + + for edge_key, edge_tuple in self.graph.get_edges().items(): + attr_dict = edge_tuple[3] + usage[edge_key] = attr_dict.get("flow", 0.0) + return usage + + def _reoptimize_priority_demands(self, demands_in_prio: List[Demand]) -> None: + """ + Optionally re-run flow-policy optimization for each Demand in + the same priority class. + + Args: + demands_in_prio (List[Demand]): Demands of the same priority. 
+ """ + if self.graph is None: + return + + for d in demands_in_prio: + if not d.flow_policy: + continue + placed_volume = d.placed_demand + d.flow_policy.remove_demand(self.graph) + d.flow_policy.place_demand( + self.graph, + d.src_node, + d.dst_node, + d.demand_class, + placed_volume, + ) + d.placed_demand = d.flow_policy.placed_demand + + def _expand_node_to_node( + self, + expanded: List[Demand], + td: TrafficDemand, + src_groups: Dict[str, List[Node]], + snk_groups: Dict[str, List[Node]], + ) -> None: + """ + 'node_to_node' mode: Each matched (src_node, dst_node) pair + gets an equal fraction of td.demand (skips self-pairs). + """ + # Determine the flow policy configuration + fp_config = td.flow_policy_config or self.default_flow_policy_config + + src_nodes: List[Node] = [] + for group_nodes in src_groups.values(): + src_nodes.extend(group_nodes) + + dst_nodes: List[Node] = [] + for group_nodes in snk_groups.values(): + dst_nodes.extend(group_nodes) + + valid_pairs = [] + for s_node in src_nodes: + for t_node in dst_nodes: + if s_node.name != t_node.name: + valid_pairs.append((s_node, t_node)) + + if not valid_pairs: + return + + demand_per_pair = td.demand / float(len(valid_pairs)) + for s_node, t_node in valid_pairs: + flow_policy = get_flow_policy(fp_config) + expanded.append( + Demand( + src_node=s_node.name, + dst_node=t_node.name, + volume=demand_per_pair, + demand_class=td.priority, + flow_policy=flow_policy, + ) + ) + + def _expand_combine( + self, + expanded: List[Demand], + td: TrafficDemand, + src_groups: Dict[str, List[Node]], + snk_groups: Dict[str, List[Node]], + ) -> None: + """ + 'combine' mode: Combine all matched sources into one set, all sinks into another, + then distribute td.demand among all valid pairs. + """ + # Determine the flow policy configuration + fp_config = td.flow_policy_config or self.default_flow_policy_config + + combined_src_nodes: List[Node] = [] + combined_snk_nodes: List[Node] = [] + + for nodes in src_groups.values(): + combined_src_nodes.extend(nodes) + for nodes in snk_groups.values(): + combined_snk_nodes.extend(nodes) + + valid_pairs = [] + for s_node in combined_src_nodes: + for t_node in combined_snk_nodes: + if s_node.name != t_node.name: + valid_pairs.append((s_node, t_node)) + + if not valid_pairs: + return + + demand_per_pair = td.demand / float(len(valid_pairs)) + for s_node, t_node in valid_pairs: + flow_policy = get_flow_policy(fp_config) + expanded.append( + Demand( + src_node=s_node.name, + dst_node=t_node.name, + volume=demand_per_pair, + demand_class=td.priority, + flow_policy=flow_policy, + ) + ) + + def _expand_pairwise( + self, + expanded: List[Demand], + td: TrafficDemand, + src_labels: List[str], + snk_labels: List[str], + src_groups: Dict[str, List[Node]], + snk_groups: Dict[str, List[Node]], + ) -> None: + """ + 'pairwise' mode: For each (src_label, snk_label) pair, allocate a fraction + of td.demand, then split among valid node pairs (excluding self-pairs). 
+ """ + # Determine the flow policy configuration + fp_config = td.flow_policy_config or self.default_flow_policy_config + + label_pairs_count = len(src_labels) * len(snk_labels) + if label_pairs_count == 0: + return + + label_share = td.demand / float(label_pairs_count) + + for s_label in src_labels: + s_nodes = src_groups[s_label] + if not s_nodes: + continue + + for t_label in snk_labels: + t_nodes = snk_groups[t_label] + if not t_nodes: + continue + + valid_pairs = [] + for s_node in s_nodes: + for t_node in t_nodes: + if s_node.name != t_node.name: + valid_pairs.append((s_node, t_node)) + + if not valid_pairs: + continue + + demand_per_pair = label_share / float(len(valid_pairs)) + for s_node, t_node in valid_pairs: + flow_policy = get_flow_policy(fp_config) + expanded.append( + Demand( + src_node=s_node.name, + dst_node=t_node.name, + volume=demand_per_pair, + demand_class=td.priority, + flow_policy=flow_policy, + ) + ) + + def _expand_one_to_one( + self, + expanded: List[Demand], + td: TrafficDemand, + src_labels: List[str], + snk_labels: List[str], + src_groups: Dict[str, List[Node]], + snk_groups: Dict[str, List[Node]], + ) -> None: + """ + 'one_to_one' mode: Match src_labels[i] to snk_labels[i], splitting td.demand + evenly among label pairs, then distributing that share among valid node pairs. + + Raises: + ValueError: If the number of src_labels != number of snk_labels. + """ + # Determine the flow policy configuration + fp_config = td.flow_policy_config or self.default_flow_policy_config + if len(src_labels) != len(snk_labels): + raise ValueError( + "one_to_one mode requires equal counts of src and sink labels. " + f"Got {len(src_labels)} vs {len(snk_labels)}." + ) + + label_count = len(src_labels) + if label_count == 0: + return + + pair_share = td.demand / float(label_count) + + for i, s_label in enumerate(src_labels): + t_label = snk_labels[i] + s_nodes = src_groups[s_label] + t_nodes = snk_groups[t_label] + if not s_nodes or not t_nodes: + continue + + valid_pairs = [] + for s_node in s_nodes: + for t_node in t_nodes: + if s_node.name != t_node.name: + valid_pairs.append((s_node, t_node)) + + if not valid_pairs: + continue + + demand_per_pair = pair_share / float(len(valid_pairs)) + for s_node, t_node in valid_pairs: + flow_policy = get_flow_policy(fp_config) + expanded.append( + Demand( + src_node=s_node.name, + dst_node=t_node.name, + volume=demand_per_pair, + demand_class=td.priority, + flow_policy=flow_policy, + ) + ) diff --git a/ngraph/workflow/capacity_probe.py b/ngraph/workflow/capacity_probe.py index afa27d7..b83fa6d 100644 --- a/ngraph/workflow/capacity_probe.py +++ b/ngraph/workflow/capacity_probe.py @@ -1,7 +1,7 @@ from __future__ import annotations from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Dict, Tuple +from typing import TYPE_CHECKING, Dict, Tuple, Pattern from ngraph.workflow.base import WorkflowStep, register_workflow_step from ngraph.lib.algorithms.base import FlowPlacement @@ -26,8 +26,8 @@ class CapacityProbe(WorkflowStep): flow_placement (FlowPlacement): Handling strategy for parallel equal cost paths (default PROPORTIONAL). 
""" - source_path: str = "" - sink_path: str = "" + source_path: Pattern[str] = "" + sink_path: Pattern[str] = "" mode: str = "combine" probe_reverse: bool = False shortest_path: bool = False diff --git a/notebooks/scenario.ipynb b/notebooks/scenario.ipynb new file mode 100644 index 0000000..f78af78 --- /dev/null +++ b/notebooks/scenario.ipynb @@ -0,0 +1,259 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from ngraph.scenario import Scenario\n", + "from ngraph.traffic_demand import TrafficDemand\n", + "from ngraph.traffic_manager import TrafficManager\n", + "from ngraph.lib.flow_policy import FlowPolicyConfig" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "scenario_yaml = \"\"\"\n", + "blueprints:\n", + " brick_2tier:\n", + " groups:\n", + " t1:\n", + " node_count: 4\n", + " name_template: t1-{node_num}\n", + " t2:\n", + " node_count: 4\n", + " name_template: t2-{node_num}\n", + "\n", + " adjacency:\n", + " - source: /t1\n", + " target: /t2\n", + " pattern: mesh\n", + " link_params:\n", + " capacity: 2\n", + " cost: 1\n", + "\n", + " 3tier_clos:\n", + " groups:\n", + " b1:\n", + " use_blueprint: brick_2tier\n", + " b2:\n", + " use_blueprint: brick_2tier\n", + " spine:\n", + " node_count: 16\n", + " name_template: t3-{node_num}\n", + "\n", + " adjacency:\n", + " - source: b1/t2\n", + " target: spine\n", + " pattern: one_to_one\n", + " link_params:\n", + " capacity: 2\n", + " cost: 1\n", + " - source: b2/t2\n", + " target: spine\n", + " pattern: one_to_one\n", + " link_params:\n", + " capacity: 2\n", + " cost: 1\n", + "\n", + "network:\n", + " name: \"3tier_clos\"\n", + " version: 1.0\n", + "\n", + " groups:\n", + " my_clos1:\n", + " use_blueprint: 3tier_clos\n", + "\"\"\"\n", + "scenario = Scenario.from_yaml(scenario_yaml)\n", + "network = scenario.network" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{('b1', 'b1'): inf, ('b1', 'b2'): 32.0, ('b2', 'b1'): 32.0, ('b2', 'b2'): inf}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "network.max_flow(\n", + " source_path=r\".*(b[0-9]*)/t1\",\n", + " sink_path=r\".*(b[0-9]*)/t1\",\n", + " mode=\"pairwise\",\n", + " shortest_path=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "31.999999999999904" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "d = TrafficDemand(\n", + " source_path=r\".*(b[0-9]*)/t1\",\n", + " sink_path=r\".*(b[0-9]*)/t1\",\n", + " demand=32,\n", + " mode=\"combine\",\n", + ")\n", + "demands = [d]\n", + "tm = TrafficManager(\n", + " network=network,\n", + " traffic_demands=demands,\n", + " default_flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,\n", + ")\n", + "tm.build_graph()\n", + "tm.expand_demands()\n", + "tm.place_all_demands()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "TrafficDemand(source_path='.*(b[0-9]*)/t1', sink_path='.*(b[0-9]*)/t1', priority=0, demand=32, demand_placed=32.0, flow_policy_config=, mode='combine', attrs={}, id='.*(b[0-9]*)/t1|.*(b[0-9]*)/t1|_WaSeCXEShS7JxwxzJLm3g')" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ 
+ "d" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + 
" Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, 
placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", + " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714)]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tm.demands" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ngraph-venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.1" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tests/lib/test_demand.py b/tests/lib/test_demand.py index af48a96..c286f21 100644 --- a/tests/lib/test_demand.py +++ b/tests/lib/test_demand.py @@ -7,7 +7,6 @@ from .algorithms.sample_graphs import line1, square1, square2, triangle1, graph3 -# Helper to create a FlowPolicy for 
testing given a config or explicit parameters. def create_flow_policy( *, path_alg: PathAlg, @@ -17,6 +16,7 @@ def create_flow_policy( max_flow_count: int = None, max_path_cost_factor: float = None ) -> FlowPolicy: + """Helper to create a FlowPolicy for testing.""" return FlowPolicy( path_alg=path_alg, flow_placement=flow_placement, @@ -29,23 +29,38 @@ def create_flow_policy( class TestDemand: def test_demand_initialization(self) -> None: - """Test that a Demand object initializes correctly.""" + """ + Test that a Demand object initializes correctly. + """ d = Demand("A", "C", float("inf")) assert d.src_node == "A" assert d.dst_node == "C" assert d.volume == float("inf") - # Default demand_class is 0 assert d.demand_class == 0 + assert d.flow_policy is None + assert d.placed_demand == 0.0 + + def test_demand_no_flow_policy_raises_error(self, line1) -> None: + """ + Test that placing a Demand without a flow policy raises RuntimeError. + """ + r = init_flow_graph(line1) + d = Demand("A", "C", 10) + with pytest.raises(RuntimeError, match="No FlowPolicy set on this Demand."): + d.place(r) def test_demand_comparison(self) -> None: - """Test that Demand instances are compared based on their demand class.""" + """ + Test that Demand instances are compared based on their demand class. + """ d_high = Demand("A", "C", float("inf"), demand_class=99) d_low = Demand("A", "C", float("inf"), demand_class=0) assert d_high > d_low def test_demand_place_basic(self, line1) -> None: - """Test placing a demand using a basic flow policy and check edge values.""" - # Initialize flow graph from fixture 'line1' + """ + Test placing a demand using a basic flow policy and check edge values. + """ r = init_flow_graph(line1) flow_policy = create_flow_policy( path_alg=PathAlg.SPF, @@ -53,8 +68,8 @@ def test_demand_place_basic(self, line1) -> None: edge_select=EdgeSelect.ALL_ANY_COST_WITH_CAP_REMAINING, multipath=True, ) - d = Demand("A", "C", float("inf"), demand_class=99) - placed_demand, remaining_demand = d.place(r, flow_policy) + d = Demand("A", "C", float("inf"), demand_class=99, flow_policy=flow_policy) + placed_demand, remaining_demand = d.place(r) # Check placed/remaining values assert placed_demand == 5 @@ -135,7 +150,9 @@ def test_demand_place_basic(self, line1) -> None: assert r.get_edges() == expected_edges def test_demand_place_with_square1(self, square1) -> None: - """Test demand placement on 'square1' graph with min cost flow policy.""" + """ + Test demand placement on 'square1' graph with min cost flow policy. + """ r = init_flow_graph(square1) flow_policy = create_flow_policy( path_alg=PathAlg.SPF, @@ -144,13 +161,15 @@ def test_demand_place_with_square1(self, square1) -> None: multipath=True, max_path_cost_factor=1, ) - d = Demand("A", "C", float("inf"), demand_class=99) - placed_demand, remaining_demand = d.place(r, flow_policy) + d = Demand("A", "C", float("inf"), demand_class=99, flow_policy=flow_policy) + placed_demand, remaining_demand = d.place(r) assert placed_demand == 1 assert remaining_demand == float("inf") def test_demand_place_with_square1_anycost(self, square1) -> None: - """Test demand placement on 'square1' graph using any-cost flow policy.""" + """ + Test demand placement on 'square1' graph using any-cost flow policy. 
+ """ r = init_flow_graph(square1) flow_policy = create_flow_policy( path_alg=PathAlg.SPF, @@ -158,13 +177,15 @@ def test_demand_place_with_square1_anycost(self, square1) -> None: edge_select=EdgeSelect.ALL_ANY_COST_WITH_CAP_REMAINING, multipath=True, ) - d = Demand("A", "C", float("inf"), demand_class=99) - placed_demand, remaining_demand = d.place(r, flow_policy) + d = Demand("A", "C", float("inf"), demand_class=99, flow_policy=flow_policy) + placed_demand, remaining_demand = d.place(r) assert placed_demand == 3 assert remaining_demand == float("inf") def test_demand_place_with_square2_equal_balanced(self, square2) -> None: - """Test demand placement on 'square2' graph with equal-balanced flow placement.""" + """ + Test demand placement on 'square2' graph with equal-balanced flow placement. + """ r = init_flow_graph(square2) flow_policy = create_flow_policy( path_alg=PathAlg.SPF, @@ -173,28 +194,63 @@ def test_demand_place_with_square2_equal_balanced(self, square2) -> None: multipath=True, max_flow_count=1, ) - d = Demand("A", "C", float("inf"), demand_class=99) - placed_demand, remaining_demand = d.place(r, flow_policy) + d = Demand("A", "C", float("inf"), demand_class=99, flow_policy=flow_policy) + placed_demand, remaining_demand = d.place(r) assert placed_demand == 2 assert remaining_demand == float("inf") def test_multiple_demands_on_triangle(self, triangle1) -> None: - """Test multiple demands placement on a triangle graph.""" + """ + Test multiple demands placement on a triangle graph. + """ r = init_flow_graph(triangle1) - # Create a list of six demands with same volume and demand class. demands = [ - Demand("A", "B", 10, demand_class=42), - Demand("B", "A", 10, demand_class=42), - Demand("B", "C", 10, demand_class=42), - Demand("C", "B", 10, demand_class=42), - Demand("A", "C", 10, demand_class=42), - Demand("C", "A", 10, demand_class=42), + Demand( + "A", + "B", + 10, + demand_class=42, + flow_policy=get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM), + ), + Demand( + "B", + "A", + 10, + demand_class=42, + flow_policy=get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM), + ), + Demand( + "B", + "C", + 10, + demand_class=42, + flow_policy=get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM), + ), + Demand( + "C", + "B", + 10, + demand_class=42, + flow_policy=get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM), + ), + Demand( + "A", + "C", + 10, + demand_class=42, + flow_policy=get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM), + ), + Demand( + "C", + "A", + 10, + demand_class=42, + flow_policy=get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM), + ), ] for demand in demands: - flow_policy = get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM) - demand.place(r, flow_policy) + demand.place(r) - # Expected consolidated edges from the triangle graph. expected_edges = { 0: ( "A", @@ -307,12 +363,13 @@ def test_multiple_demands_on_triangle(self, triangle1) -> None: } assert r.get_edges() == expected_edges - # Verify each demand has been fully placed (placed_demand == demand volume). for demand in demands: assert demand.placed_demand == 10 def test_demand_place_partial_with_fraction(self, square2) -> None: - """Test placing a demand in partial fractions on 'square2' graph.""" + """ + Test placing a demand in partial fractions on 'square2' graph. 
+ """ r = init_flow_graph(square2) flow_policy = create_flow_policy( path_alg=PathAlg.SPF, @@ -321,49 +378,102 @@ def test_demand_place_partial_with_fraction(self, square2) -> None: multipath=False, max_flow_count=2, ) - d = Demand("A", "C", 3, demand_class=99) + d = Demand("A", "C", 3, demand_class=99, flow_policy=flow_policy) + # First placement: only half of the remaining demand should be placed. - placed_demand, remaining_demand = d.place(r, flow_policy, max_fraction=0.5) + placed_demand, remaining_demand = d.place(r, max_fraction=0.5) assert placed_demand == 1.5 assert remaining_demand == 0 # Second placement: only 0.5 should be placed, leaving 1 unit unplaced. - placed_demand, remaining_demand = d.place(r, flow_policy, max_fraction=0.5) + placed_demand, remaining_demand = d.place(r, max_fraction=0.5) assert placed_demand == 0.5 assert remaining_demand == 1 + def test_demand_place_max_placement(self, line1) -> None: + """ + Test placing a demand with a max_placement limit. + """ + r = init_flow_graph(line1) + flow_policy = create_flow_policy( + path_alg=PathAlg.SPF, + flow_placement=FlowPlacement.EQUAL_BALANCED, + edge_select=EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING, + multipath=True, + max_flow_count=16, + ) + d = Demand("A", "C", 10, demand_class=99, flow_policy=flow_policy) + placed, remaining = d.place(r, max_placement=4) + # Should place only up to 4 in this call + assert placed == 4 + assert remaining == 0 + # Place again with unlimited in second call + placed, remaining = d.place(r) + # Should have placed what it could from the remaining + assert placed == 1 + assert remaining == 5 + assert d.placed_demand == 5 + def test_demand_place_te_ucmp_unlim(self, square2) -> None: - """Test demand placement using TE_UCMP_UNLIM flow policy on 'square2'.""" + """ + Test demand placement using TE_UCMP_UNLIM flow policy on 'square2'. + """ r = init_flow_graph(square2) - d = Demand("A", "C", 3, demand_class=99) - flow_policy = get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM) - placed_demand, remaining_demand = d.place(r, flow_policy) + d = Demand( + "A", + "C", + 3, + demand_class=99, + flow_policy=get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM), + ) + placed_demand, remaining_demand = d.place(r) assert placed_demand == 3 assert remaining_demand == 0 def test_demand_place_shortest_paths_ecmp(self, square2) -> None: - """Test demand placement using SHORTEST_PATHS_ECMP flow policy on 'square2'.""" + """ + Test demand placement using SHORTEST_PATHS_ECMP flow policy on 'square2'. + """ r = init_flow_graph(square2) - d = Demand("A", "C", 3, demand_class=99) - flow_policy = get_flow_policy(FlowPolicyConfig.SHORTEST_PATHS_ECMP) - placed_demand, remaining_demand = d.place(r, flow_policy) + d = Demand( + "A", + "C", + 3, + demand_class=99, + flow_policy=get_flow_policy(FlowPolicyConfig.SHORTEST_PATHS_ECMP), + ) + placed_demand, remaining_demand = d.place(r) assert placed_demand == 2 assert remaining_demand == 1 def test_demand_place_graph3_sp_ecmp(self, graph3) -> None: - """Test demand placement on 'graph3' using SHORTEST_PATHS_ECMP.""" + """ + Test demand placement on 'graph3' using SHORTEST_PATHS_ECMP. 
+ """ r = init_flow_graph(graph3) - d = Demand("A", "D", float("inf"), demand_class=99) - flow_policy = get_flow_policy(FlowPolicyConfig.SHORTEST_PATHS_ECMP) - placed_demand, remaining_demand = d.place(r, flow_policy) + d = Demand( + "A", + "D", + float("inf"), + demand_class=99, + flow_policy=get_flow_policy(FlowPolicyConfig.SHORTEST_PATHS_ECMP), + ) + placed_demand, remaining_demand = d.place(r) assert placed_demand == 2.5 assert remaining_demand == float("inf") def test_demand_place_graph3_te_ucmp(self, graph3) -> None: - """Test demand placement on 'graph3' using TE_UCMP_UNLIM.""" + """ + Test demand placement on 'graph3' using TE_UCMP_UNLIM. + """ r = init_flow_graph(graph3) - d = Demand("A", "D", float("inf"), demand_class=99) - flow_policy = get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM) - placed_demand, remaining_demand = d.place(r, flow_policy) + d = Demand( + "A", + "D", + float("inf"), + demand_class=99, + flow_policy=get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM), + ) + placed_demand, remaining_demand = d.place(r) assert placed_demand == 6 assert remaining_demand == float("inf") diff --git a/tests/scenarios/scenario_1.yaml b/tests/scenarios/scenario_1.yaml index 5abe225..2b0229e 100644 --- a/tests/scenarios/scenario_1.yaml +++ b/tests/scenarios/scenario_1.yaml @@ -114,17 +114,17 @@ failure_policy: count: 1 traffic_demands: - - source: SEA - target: JFK + - source_path: SEA + sink_path: JFK demand: 50 - - source: SFO - target: DCA + - source_path: SFO + sink_path: DCA demand: 50 - - source: SEA - target: DCA + - source_path: SEA + sink_path: DCA demand: 50 - - source: SFO - target: JFK + - source_path: SFO + sink_path: JFK demand: 50 workflow: diff --git a/tests/scenarios/scenario_2.yaml b/tests/scenarios/scenario_2.yaml index 9aad30c..ddaf9e5 100644 --- a/tests/scenarios/scenario_2.yaml +++ b/tests/scenarios/scenario_2.yaml @@ -186,17 +186,17 @@ failure_policy: count: 1 traffic_demands: - - source: SEA - target: JFK + - source_path: SEA + sink_path: JFK demand: 50 - - source: SFO - target: DCA + - source_path: SFO + sink_path: DCA demand: 50 - - source: SEA - target: DCA + - source_path: SEA + sink_path: DCA demand: 50 - - source: SFO - target: JFK + - source_path: SFO + sink_path: JFK demand: 50 workflow: diff --git a/tests/test_readme_examples.py b/tests/test_readme_examples.py index 50a2600..22588c9 100644 --- a/tests/test_readme_examples.py +++ b/tests/test_readme_examples.py @@ -93,19 +93,21 @@ def test_traffic_engineering_simulation(): # Initialize flow-related structures (e.g., to track placed flows in the graph). flow_graph = init_flow_graph(g) - # Demand from A→C (volume 20). - demand_ac = Demand("A", "C", 20) + # Create flow policies for each demand. flow_policy_ac = get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM) - demand_ac.place(flow_graph, flow_policy_ac) + flow_policy_ca = get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM) + + # Demand from A→C (volume 20). + demand_ac = Demand("A", "C", 20, flow_policy=flow_policy_ac) + demand_ac.place(flow_graph) assert demand_ac.placed_demand == 20, ( f"Demand from {demand_ac.src_node} to {demand_ac.dst_node} " f"expected to be fully placed." ) - # Demand from C→A (volume 20), using a separate FlowPolicy instance. - demand_ca = Demand("C", "A", 20) - flow_policy_ca = get_flow_policy(FlowPolicyConfig.TE_UCMP_UNLIM) - demand_ca.place(flow_graph, flow_policy_ca) + # Demand from C→A (volume 20). 
+ demand_ca = Demand("C", "A", 20, flow_policy=flow_policy_ca) + demand_ca.place(flow_graph) assert demand_ca.placed_demand == 20, ( f"Demand from {demand_ca.src_node} to {demand_ca.dst_node} " f"expected to be fully placed." diff --git a/tests/test_scenario.py b/tests/test_scenario.py index 3f3f238..95200d0 100644 --- a/tests/test_scenario.py +++ b/tests/test_scenario.py @@ -102,11 +102,11 @@ def valid_scenario_yaml() -> str: logic: "and" rule_type: "all" traffic_demands: - - source: NodeA - target: NodeB + - source_path: NodeA + sink_path: NodeB demand: 15 - - source: NodeA - target: NodeC + - source_path: NodeA + sink_path: NodeC demand: 5 workflow: - step_type: DoSmth @@ -137,8 +137,8 @@ def missing_step_type_yaml() -> str: failure_policy: rules: [] traffic_demands: - - source: NodeA - target: NodeB + - source_path: NodeA + sink_path: NodeB demand: 10 workflow: - name: StepWithoutType @@ -165,8 +165,8 @@ def unrecognized_step_type_yaml() -> str: failure_policy: rules: [] traffic_demands: - - source: NodeA - target: NodeB + - source_path: NodeA + sink_path: NodeB demand: 10 workflow: - step_type: NonExistentStep @@ -276,7 +276,7 @@ def test_scenario_from_yaml_valid(valid_scenario_yaml: str) -> None: ( d for d in scenario.traffic_demands - if d.source == "NodeA" and d.target == "NodeB" + if d.source_path == "NodeA" and d.sink_path == "NodeB" ), None, ) @@ -284,7 +284,7 @@ def test_scenario_from_yaml_valid(valid_scenario_yaml: str) -> None: ( d for d in scenario.traffic_demands - if d.source == "NodeA" and d.target == "NodeC" + if d.source_path == "NodeA" and d.sink_path == "NodeC" ), None, ) diff --git a/tests/test_traffic_demand.py b/tests/test_traffic_demand.py index c83fb4e..b278e79 100644 --- a/tests/test_traffic_demand.py +++ b/tests/test_traffic_demand.py @@ -6,13 +6,12 @@ def test_traffic_demand_defaults(): """ Test creation of TrafficDemand with default values. """ - demand = TrafficDemand(source="NodeA", target="NodeB") - assert demand.source == "NodeA" - assert demand.target == "NodeB" + demand = TrafficDemand(source_path="NodeA", sink_path="NodeB") + assert demand.source_path == "NodeA" + assert demand.sink_path == "NodeB" assert demand.priority == 0 assert demand.demand == 0.0 assert demand.demand_placed == 0.0 - assert demand.demand_unplaced == 0.0 assert demand.attrs == {} @@ -21,20 +20,18 @@ def test_traffic_demand_custom_values(): Test creation of TrafficDemand with custom values. """ demand = TrafficDemand( - source="SourceNode", - target="TargetNode", + source_path="SourceNode", + sink_path="TargetNode", priority=5, demand=42.5, demand_placed=10.0, - demand_unplaced=32.5, attrs={"description": "test"}, ) - assert demand.source == "SourceNode" - assert demand.target == "TargetNode" + assert demand.source_path == "SourceNode" + assert demand.sink_path == "TargetNode" assert demand.priority == 5 assert demand.demand == 42.5 assert demand.demand_placed == 10.0 - assert demand.demand_unplaced == 32.5 assert demand.attrs == {"description": "test"} @@ -42,7 +39,7 @@ def test_traffic_demand_attrs_modification(): """ Test that the attrs dictionary can be modified after instantiation. """ - demand = TrafficDemand(source="NodeA", target="NodeB") + demand = TrafficDemand(source_path="NodeA", sink_path="NodeB") demand.attrs["key"] = "value" assert demand.attrs == {"key": "value"} @@ -51,12 +48,11 @@ def test_traffic_demand_partial_kwargs(): """ Test initialization with only a subset of fields, ensuring defaults work. 
""" - demand = TrafficDemand(source="NodeA", target="NodeC", demand=15.0) - assert demand.source == "NodeA" - assert demand.target == "NodeC" + demand = TrafficDemand(source_path="NodeA", sink_path="NodeC", demand=15.0) + assert demand.source_path == "NodeA" + assert demand.sink_path == "NodeC" assert demand.demand == 15.0 # Check the defaults assert demand.priority == 0 assert demand.demand_placed == 0.0 - assert demand.demand_unplaced == 0.0 assert demand.attrs == {} diff --git a/tests/test_traffic_manager.py b/tests/test_traffic_manager.py new file mode 100644 index 0000000..ef3c5e1 --- /dev/null +++ b/tests/test_traffic_manager.py @@ -0,0 +1,235 @@ +import pytest +from ngraph.network import Network, Node, Link +from ngraph.traffic_demand import TrafficDemand +from ngraph.lib.flow_policy import FlowPolicyConfig +from ngraph.lib.graph import StrictMultiDiGraph +from ngraph.lib.algorithms.base import MIN_FLOW + +from ngraph.traffic_manager import TrafficManager + + +@pytest.fixture +def small_network() -> Network: + """ + Build a small test network with 3 nodes and 2 directed links: + A -> B, B -> C. + Link capacities are large enough so we can place all demands easily. + """ + net = Network() + + # Add nodes + net.add_node(Node(name="A")) + net.add_node(Node(name="B")) + net.add_node(Node(name="C")) + + # Add links + link_ab = Link(source="A", target="B", capacity=100.0, cost=1.0) + link_bc = Link(source="B", target="C", capacity=100.0, cost=1.0) + net.add_link(link_ab) + net.add_link(link_bc) + + return net + + +@pytest.fixture +def small_network_with_loop() -> Network: + """ + Builds a small network with a loop: A -> B, B -> C, C -> A. + This can help test re-optimization more interestingly. + """ + net = Network() + + # Add nodes + net.add_node(Node(name="A")) + net.add_node(Node(name="B")) + net.add_node(Node(name="C")) + + # Add links forming a loop + net.add_link(Link(source="A", target="B", capacity=10.0, cost=1.0)) + net.add_link(Link(source="B", target="C", capacity=10.0, cost=1.0)) + net.add_link(Link(source="C", target="A", capacity=10.0, cost=1.0)) + + return net + + +def test_build_graph_not_built_error(small_network): + """ + Verify that calling place_all_demands before build_graph + raises a RuntimeError. + """ + tm = TrafficManager(network=small_network, traffic_demands=[]) + # no build_graph call here + with pytest.raises(RuntimeError): + tm.place_all_demands() + + +def test_basic_build_and_expand(small_network): + """ + Test the ability to build the graph and expand demands. + """ + demands = [ + TrafficDemand(source_path="A", sink_path="B", demand=10.0), + TrafficDemand(source_path="A", sink_path="C", demand=20.0), + ] + tm = TrafficManager( + network=small_network, + traffic_demands=demands, + default_flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP, + ) + + tm.build_graph() + assert isinstance(tm.graph, StrictMultiDiGraph), "Graph should be built" + assert len(tm.graph.get_nodes()) == 3, "Should have 3 nodes in graph" + assert len(tm.graph.get_edges()) == 4, "Should have 4 edges in graph" + + tm.expand_demands() + assert len(tm.demands) == 2, "Expected 2 expanded demands" + + +def test_place_all_demands_simple(small_network): + """ + Place demands on a simple A->B->C network. + We expect all to be placed because capacity = 100 is large. 
+ """ + demands = [ + TrafficDemand(source_path="A", sink_path="C", demand=50.0), + TrafficDemand(source_path="B", sink_path="C", demand=20.0), + ] + tm = TrafficManager(network=small_network, traffic_demands=demands) + + tm.build_graph() + tm.expand_demands() + + total_placed = tm.place_all_demands() + assert total_placed == 70.0, "All traffic should be placed without issues" + + # Check final placed_demand on each Demand + for d in tm.demands: + assert ( + abs(d.placed_demand - d.volume) < MIN_FLOW + ), "Each demand should be fully placed" + + # Summarize link usage + usage = tm.summarize_link_usage() + # For A->B->C route, we expect 50 flow to pass A->B, and 50 + 20 = 70 on B->C + # However, the B->C link capacity is 100, so it can carry 70 total + ab_key = None + bc_key = None + for k, (src, dst, _, _) in tm.graph.get_edges().items(): + if src == "A" and dst == "B": + ab_key = k + elif src == "B" and dst == "C": + bc_key = k + + # usage[...] is how much capacity is used, i.e. used_capacity + assert abs(usage[ab_key] - 50.0) < MIN_FLOW, "A->B should carry 50" + assert abs(usage[bc_key] - 70.0) < MIN_FLOW, "B->C should carry 70" + + +def test_priority_fairness(small_network): + """ + Test that multiple demands with different priorities + are handled in ascending priority order (lowest numeric = highest priority). + For demonstration, we set small link capacities that will cause partial placement. + """ + # Reduce link capacity to 30 to test partial usage + small_network.links[next(iter(small_network.links))].capacity = 30.0 # A->B + small_network.links[list(small_network.links.keys())[1]].capacity = 30.0 # B->C + + # High priority demand: A->C with volume=40 + # Low priority demand: B->C with volume=40 + # Expect: The higher priority (A->C) saturates B->C first. + # Then the lower priority (B->C) might get leftover capacity (if any). + demands = [ + TrafficDemand( + source_path="A", sink_path="C", demand=40.0, priority=0 + ), # higher priority + TrafficDemand( + source_path="B", sink_path="C", demand=40.0, priority=1 + ), # lower priority + ] + tm = TrafficManager(network=small_network, traffic_demands=demands) + + tm.build_graph() + tm.expand_demands() + total_placed = tm.place_all_demands(placement_rounds=1) # single pass for clarity + + # The link B->C capacity is 30, so the first (priority=0) can fully use it + # or saturate it. Actually we have A->B->C route for the first demand, so + # the capacity from A->B->C is 30 end-to-end. + # The second demand (B->C direct) sees the same link capacity but it's + # already used up by the higher priority. So it gets 0. + assert total_placed == 30.0, "Expected only 30 placed in total" + + # Check each demand's placed + high_prio_placed = tm.demands[0].placed_demand + low_prio_placed = tm.demands[1].placed_demand + assert high_prio_placed == 30.0, "High priority demand should saturate capacity" + assert low_prio_placed == 0.0, "Low priority got no leftover capacity" + + +def test_reset_flow_usages(small_network): + """ + Test that reset_all_flow_usages zeroes out placed demand. 
+ """ + demands = [TrafficDemand(source_path="A", sink_path="C", demand=10.0)] + tm = TrafficManager(network=small_network, traffic_demands=demands) + tm.build_graph() + tm.expand_demands() + placed_before = tm.place_all_demands() + assert placed_before == 10.0 + + # Now reset all flows + tm.reset_all_flow_usages() + for d in tm.demands: + assert d.placed_demand == 0.0, "Demand placed_demand should be reset to 0" + usage = tm.summarize_link_usage() + for k in usage: + assert usage[k] == 0.0, "Link usage should be reset to 0" + + +def test_reoptimize_flows(small_network_with_loop): + """ + Test that re-optimization logic is triggered in place_all_demands + when reoptimize_after_each_round=True. + We'll set the capacity on one link to be quite low so the flow might + switch to a loop path under re-optimization, if feasible. + """ + # Example: capacity A->B=10, B->C=1, C->A=10 + # Demand from A->C is 5, so if direct path A->B->C is tried first, + # it sees only capacity=1 for B->C. Then re-optimization might try A->B->C->A->B->C + # (though that is cyclical and might or might not help, depending on your path alg). + # This test just ensures we call the reopt method, not necessarily that it + # finds a truly cyclical route. Implementation depends on path selection logic. + # We'll do a small check that the reopt code doesn't crash and usage is consistent. + demands = [TrafficDemand(source_path="A", sink_path="C", demand=5.0)] + tm = TrafficManager( + network=small_network_with_loop, + traffic_demands=demands, + default_flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP, + ) + tm.build_graph() + tm.expand_demands() + + # place with reoptimize + total_placed = tm.place_all_demands( + placement_rounds=2, + reoptimize_after_each_round=True, + ) + # We do not strictly assert a certain path is used, + # only that a nonzero amount is placed (some path is feasible). + assert total_placed > 0.0, "Should place some flow even if B->C is small" + + # Summarize flows + flow_details = tm.get_flow_details() + # We only had 1 demand => index=0 + # We should have at least 1 flow (or more if it tries multiple splits) + assert len(flow_details) >= 1 + # No crash means re-optimization was invoked + + # The final usage on B->C might be at most 1.0 if it uses direct path, + # or it might use partial flows if there's a different path approach. + # We'll just assert we placed something, and capacity usage isn't insane. 
+ usage = tm.summarize_link_usage() + for k in usage: + assert usage[k] <= 10.0, "No link usage should exceed capacity" From 29f0cd3f7adad0a4ff98b3b129a676fe4b0f221f Mon Sep 17 00:00:00 2001 From: Andrey Golovanov Date: Tue, 4 Mar 2025 23:53:00 +0000 Subject: [PATCH 2/2] demand placement finished --- ngraph/lib/flow_policy.py | 99 ++++--- ngraph/traffic_demand.py | 36 +-- ngraph/traffic_manager.py | 537 ++++++++++++++++------------------ notebooks/scenario.ipynb | 413 +++++++++++++++++++++----- tests/test_traffic_manager.py | 308 +++++++++++++++---- 5 files changed, 905 insertions(+), 488 deletions(-) diff --git a/ngraph/lib/flow_policy.py b/ngraph/lib/flow_policy.py index 21b0dec..d4dd341 100644 --- a/ngraph/lib/flow_policy.py +++ b/ngraph/lib/flow_policy.py @@ -1,5 +1,6 @@ from __future__ import annotations +import copy from collections import deque from enum import IntEnum from typing import Any, Callable, Dict, List, Optional, Set, Tuple @@ -12,7 +13,9 @@ class FlowPolicyConfig(IntEnum): - """Enumerates supported flow policy configurations.""" + """ + Enumerates supported flow policy configurations. + """ SHORTEST_PATHS_ECMP = 1 SHORTEST_PATHS_UCMP = 2 @@ -25,9 +28,9 @@ class FlowPolicy: """ Manages the placement and management of flows (demands) on a network graph. - A FlowPolicy converts a demand into one or more Flow objects subject to capacity - constraints and user-specified configurations such as path selection algorithms - and flow placement methods. + A FlowPolicy converts a demand into one or more Flow objects subject to + capacity constraints and user-specified configurations such as path + selection algorithms and flow placement methods. """ def __init__( @@ -61,15 +64,16 @@ def __init__( min_flow_count: Minimum number of flows to create for a demand. max_flow_count: Maximum number of flows allowable for a demand. max_path_cost: Absolute cost limit for allowable paths. - max_path_cost_factor: Relative cost factor limit (multiplied by the best path cost). + max_path_cost_factor: Relative cost factor limit (multiplying the best path cost). static_paths: Predefined paths to force flows onto, if provided. edge_select_func: Custom function for edge selection, if needed. edge_select_value: Additional parameter for certain edge selection strategies. - reoptimize_flows_on_each_placement: If True, re-run path optimization on every placement. + reoptimize_flows_on_each_placement: If True, re-run path optimization after every placement. Raises: - ValueError: If static_paths length does not match max_flow_count, or if - EQUAL_BALANCED placement is used without a specified max_flow_count. + ValueError: If static_paths length does not match max_flow_count, + or if EQUAL_BALANCED placement is used without a + specified max_flow_count. """ self.path_alg: base.PathAlg = path_alg self.flow_placement: FlowPlacement = flow_placement @@ -89,7 +93,7 @@ def __init__( # Dictionary to track all flows by their FlowIndex. self.flows: Dict[Tuple, Flow] = {} - # Track the best path cost found to enforce maximum cost constraints. + # Track the best path cost found to enforce maximum path cost constraints. self.best_path_cost: Optional[base.Cost] = None # Internal flow ID counter. @@ -108,14 +112,27 @@ def __init__( ): raise ValueError("max_flow_count must be set for EQUAL_BALANCED placement.") + def deep_copy(self) -> FlowPolicy: + """ + Creates and returns a deep copy of this FlowPolicy, including all flows. + + Returns: + A new FlowPolicy object that is a deep copy of the current instance. 
+ """ + return copy.deepcopy(self) + @property def flow_count(self) -> int: - """Returns the number of flows currently tracked by the policy.""" + """ + Returns the number of flows currently tracked by the policy. + """ return len(self.flows) @property def placed_demand(self) -> float: - """Returns the sum of all placed flow volumes across flows.""" + """ + Returns the sum of all placed flow volumes across flows. + """ return sum(flow.placed_flow for flow in self.flows.values()) def _get_next_flow_id(self) -> int: @@ -160,7 +177,8 @@ def _get_path_bundle( excluded_nodes: Optional[Set[NodeID]] = None, ) -> Optional[PathBundle]: """ - Finds a path or set of paths from src_node to dst_node, optionally excluding certain edges or nodes. + Finds a path or set of paths from src_node to dst_node, optionally excluding + certain edges or nodes. Args: flow_graph: The network graph. @@ -171,7 +189,8 @@ def _get_path_bundle( excluded_nodes: Set of nodes to exclude. Returns: - A valid PathBundle if one is found and it satisfies cost constraints; otherwise, None. + A valid PathBundle if one is found and it satisfies cost constraints; + otherwise, None. Raises: ValueError: If the selected path algorithm is not supported. @@ -200,7 +219,8 @@ def _get_path_bundle( if dst_node in pred: dst_cost = cost[dst_node] - if self.best_path_cost is None: + # Update best_path_cost if we found a cheaper path. + if self.best_path_cost is None or dst_cost < self.best_path_cost: self.best_path_cost = dst_cost # Enforce maximum path cost constraints, if specified. @@ -337,8 +357,8 @@ def _reoptimize_flow( The updated Flow if re-optimization is successful; otherwise, None. """ flow = self.flows[flow_index] - flow_volume = flow.placed_flow - new_min_volume = flow_volume + headroom + current_flow_volume = flow.placed_flow + new_min_volume = current_flow_volume + headroom flow.remove_flow(flow_graph) path_bundle = self._get_path_bundle( @@ -349,15 +369,16 @@ def _reoptimize_flow( flow.excluded_edges, flow.excluded_nodes, ) - # If no suitable alternative path is found, revert to the original path. + # If no suitable alternative path is found or the new path is the same set of edges, + # revert to the original path. if not path_bundle or path_bundle.edges == flow.path_bundle.edges: - flow.place_flow(flow_graph, flow_volume, self.flow_placement) + flow.place_flow(flow_graph, current_flow_volume, self.flow_placement) return None new_flow = Flow( path_bundle, flow_index, flow.excluded_edges, flow.excluded_nodes ) - new_flow.place_flow(flow_graph, flow_volume, self.flow_placement) + new_flow.place_flow(flow_graph, current_flow_volume, self.flow_placement) self.flows[flow_index] = new_flow return new_flow @@ -372,8 +393,8 @@ def place_demand( min_flow: Optional[float] = None, ) -> Tuple[float, float]: """ - Places the given demand volume on the network graph by splitting or creating flows as needed. - Optionally re-optimizes flows based on the policy configuration. + Places the given demand volume on the network graph by splitting or creating + flows as needed. Optionally re-optimizes flows based on the policy configuration. Args: flow_graph: The network graph. @@ -385,8 +406,11 @@ def place_demand( min_flow: Minimum flow threshold for path selection. Returns: - A tuple (placed_flow, remaining_volume) where placed_flow is the total volume - successfully placed and remaining_volume is any unplaced volume. 
+ A tuple (placed_flow, remaining_volume) where placed_flow is the total + volume successfully placed and remaining_volume is any unplaced volume. + + Raises: + RuntimeError: If an infinite loop is detected (safety net). """ if not self.flows: self._create_flows(flow_graph, src_node, dst_node, flow_class, min_flow) @@ -395,9 +419,8 @@ def place_demand( target_flow_volume = target_flow_volume or volume total_placed_flow = 0.0 - c = 0 + iteration_count = 0 - # Safety check to prevent infinite loops. while volume >= base.MIN_FLOW and flow_queue: flow = flow_queue.popleft() placed_flow, _ = flow.place_flow( @@ -409,7 +432,8 @@ def place_demand( # If the flow can accept more volume, attempt to create or re-optimize. if ( target_flow_volume - flow.placed_flow >= base.MIN_FLOW - ) and not self.static_paths: + and not self.static_paths + ): if not self.max_flow_count or len(self.flows) < self.max_flow_count: new_flow = self._create_flow( flow_graph, src_node, dst_node, flow_class @@ -421,17 +445,14 @@ def place_demand( if new_flow: flow_queue.append(new_flow) - c += 1 - if c > 10000: + iteration_count += 1 + if iteration_count > 10000: raise RuntimeError("Infinite loop detected in place_demand.") # For EQUAL_BALANCED placement, rebalance flows to maintain equal volumes. - if ( - self.flow_placement == FlowPlacement.EQUAL_BALANCED - and len(self.flows) > 0 # must not rebalance if no flows - ): + if self.flow_placement == FlowPlacement.EQUAL_BALANCED and len(self.flows) > 0: target_flow_volume = self.placed_demand / float(len(self.flows)) - # If the flows are not already near balanced + # If flows are not already near balanced, rebalance them. if any( abs(target_flow_volume - f.placed_flow) >= base.MIN_FLOW for f in self.flows.values() @@ -458,7 +479,8 @@ def rebalance_demand( ) -> Tuple[float, float]: """ Rebalances the demand across existing flows so that their volumes are closer - to the target_flow_volume. This is achieved by removing all flows and re-placing the demand. + to the target_flow_volume. This is achieved by removing all flows from + the network graph and re-placing them. Args: flow_graph: The network graph. @@ -468,7 +490,7 @@ def rebalance_demand( target_flow_volume: The desired volume per flow. Returns: - A tuple (placed_flow, remaining_volume) similar to place_demand. + A tuple (placed_flow, remaining_volume) similar to place_demand(). """ volume = self.placed_demand self.remove_demand(flow_graph) @@ -479,7 +501,7 @@ def rebalance_demand( def remove_demand(self, flow_graph: StrictMultiDiGraph) -> None: """ Removes all flows from the network graph without clearing internal state. - This enables subsequent re-optimization of flows. + This allows subsequent re-optimization. Args: flow_graph: The network graph. @@ -508,7 +530,8 @@ def get_flow_policy(flow_policy_config: FlowPolicyConfig) -> FlowPolicy: flow_placement=FlowPlacement.EQUAL_BALANCED, edge_select=base.EdgeSelect.ALL_MIN_COST, multipath=True, - max_flow_count=1, # Single flow following shortest paths. + max_flow_count=1, # Single flow from the perspective of the flow object, + # but multipath can create parallel SPF paths. ) elif flow_policy_config == FlowPolicyConfig.SHORTEST_PATHS_UCMP: # Hop-by-hop with proportional flow placement (e.g., per-hop UCMP). @@ -517,7 +540,7 @@ def get_flow_policy(flow_policy_config: FlowPolicyConfig) -> FlowPolicy: flow_placement=FlowPlacement.PROPORTIONAL, edge_select=base.EdgeSelect.ALL_MIN_COST, multipath=True, - max_flow_count=1, # Single flow following shortest paths. 
+            max_flow_count=1,
         )
     elif flow_policy_config == FlowPolicyConfig.TE_UCMP_UNLIM:
         # "Ideal" TE with multiple MPLS LSPs and UCMP flow placement.
diff --git a/ngraph/traffic_demand.py b/ngraph/traffic_demand.py
index 17e0c57..03c9476 100644
--- a/ngraph/traffic_demand.py
+++ b/ngraph/traffic_demand.py
@@ -1,9 +1,7 @@
-from __future__ import annotations
-
 from dataclasses import dataclass, field
-from typing import Any, Dict
+from typing import Any, Dict, Optional
 
-from ngraph.lib.flow_policy import FlowPolicyConfig
+from ngraph.lib.flow_policy import FlowPolicyConfig, FlowPolicy
 from ngraph.network import new_base64_uuid
 
 
@@ -12,31 +10,16 @@ class TrafficDemand:
     """
     Represents a single traffic demand in a network.
 
-    This class provides:
-      - Source and sink regex patterns to match sets of nodes in the network.
-      - A total demand volume and a priority (lower number = higher priority).
-      - A flow policy configuration to specify routing/placement logic (if
-        not supplied, defaults to SHORTEST_PATHS_ECMP).
-      - A 'mode' that determines how the demand expands into per-node-pair
-        demands. Supported modes include:
-          * "node_to_node": default behavior (each (src, dst) pair shares
-            the demand).
-          * "combine": combine all matched sources and all matched sinks,
-            then distribute the demand among the cross-product of nodes.
-          * "pairwise": for each (src_label, dst_label) pair, split up the
-            total demand so each label cross-product receives an equal fraction.
-          * "one_to_one": match src_labels[i] to dst_labels[i], then split
-            demand among node pairs in those matched labels.
-
     Attributes:
         source_path (str): A regex pattern (string) for selecting source nodes.
         sink_path (str): A regex pattern (string) for selecting sink nodes.
        priority (int): A priority class for this demand (default=0).
        demand (float): The total demand volume (default=0.0).
-        demand_placed (float): The portion of this demand that has been placed
-            so far (default=0.0). This is updated when flows are placed.
-        flow_policy_config (FlowPolicyConfig): The routing/placement policy.
-        mode (str): Expansion mode for generating sub-demands (defaults to "node_to_node").
+        demand_placed (float): The portion of this demand that has been placed so far.
+        flow_policy_config (Optional[FlowPolicyConfig]): The routing/placement policy config.
+        flow_policy (Optional[FlowPolicy]): A fully constructed FlowPolicy instance.
+            If provided, it overrides flow_policy_config.
+        mode (str): Expansion mode for generating sub-demands.
         attrs (Dict[str, Any]): Additional arbitrary attributes.
         id (str): Unique ID assigned at initialization.
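+
+    Example (illustrative; the regex patterns below are placeholders):
+        TrafficDemand(
+            source_path=r"dc1/.*",
+            sink_path=r"dc2/.*",
+            demand=100.0,
+            mode="full_mesh",
+            flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
+        )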
""" @@ -46,8 +29,9 @@ class TrafficDemand: priority: int = 0 demand: float = 0.0 demand_placed: float = 0.0 - flow_policy_config: FlowPolicyConfig = FlowPolicyConfig.SHORTEST_PATHS_ECMP - mode: str = "node_to_node" + flow_policy_config: Optional[FlowPolicyConfig] = None + flow_policy: Optional[FlowPolicy] = None + mode: str = "combine" attrs: Dict[str, Any] = field(default_factory=dict) id: str = field(init=False) diff --git a/ngraph/traffic_manager.py b/ngraph/traffic_manager.py index 75f1290..3a7f405 100644 --- a/ngraph/traffic_manager.py +++ b/ngraph/traffic_manager.py @@ -1,13 +1,12 @@ -from __future__ import annotations - from collections import defaultdict from dataclasses import dataclass, field -from typing import Dict, List, Optional, Tuple +import statistics +from typing import Dict, List, Optional, Tuple, Union from ngraph.lib.algorithms import base from ngraph.lib.algorithms.flow_init import init_flow_graph from ngraph.lib.demand import Demand -from ngraph.lib.flow_policy import FlowPolicy, FlowPolicyConfig, get_flow_policy +from ngraph.lib.flow_policy import FlowPolicyConfig, FlowPolicy, get_flow_policy from ngraph.lib.graph import StrictMultiDiGraph from ngraph.network import Network, Node from ngraph.traffic_demand import TrafficDemand @@ -19,17 +18,30 @@ class TrafficManager: Manages the expansion and placement of traffic demands on a Network. This class: + 1) Builds (or rebuilds) a StrictMultiDiGraph from the given Network. - 2) Expands each TrafficDemand into one or more Demand objects, according - to a configurable "mode" (e.g., combine, pairwise, node_to_node, - one_to_one). + 2) Expands each TrafficDemand into one or more Demand objects based + on a configurable 'mode' (e.g., 'combine' or 'full_mesh'). 3) Each Demand is associated with a FlowPolicy, which handles how flows are placed (split across paths, balancing, etc.). 4) Provides methods to place all demands incrementally with optional re-optimization, reset usage, and retrieve flow/usage summaries. - The sum of volumes of the expanded Demands for a given TrafficDemand - matches that TrafficDemand's `demand` value. + In particular: + - 'combine' mode: + * Combine all matched sources into a single pseudo-source node, and all + matched sinks into a single pseudo-sink node (named using the traffic + demand's `source_path` and `sink_path`). A single Demand is created + from the pseudo-source to the pseudo-sink, with the full volume. + + - 'full_mesh' mode: + * All matched sources form one group, all matched sinks form another group. + A separate Demand is created for each (src_node, dst_node) pair, + skipping self-pairs. The total volume is split evenly across the pairs. + + The sum of volumes of all expanded Demands for a given TrafficDemand matches + that TrafficDemand's `demand` value (unless no valid node pairs exist, in which + case no demands are created). Attributes: network (Network): The underlying network object. @@ -37,9 +49,9 @@ class TrafficManager: default_flow_policy_config (FlowPolicyConfig): Default FlowPolicy if a TrafficDemand does not specify one. graph (StrictMultiDiGraph): Active graph built from the network. - demands (List[Demand]): The expanded demands from traffic_demands. - _td_to_demands (Dict[str, List[Demand]]): Internal mapping from TrafficDemand.id - to its expanded Demand objects. + demands (List[Demand]): All expanded demands from traffic_demands. + _td_to_demands (Dict[str, List[Demand]]): Internal mapping from + TrafficDemand.id to its expanded Demand objects. 
""" network: Network @@ -48,182 +60,160 @@ class TrafficManager: graph: Optional[StrictMultiDiGraph] = None demands: List[Demand] = field(default_factory=list) - _td_to_demands: Dict[str, List[Demand]] = field(default_factory=dict) def build_graph(self, add_reverse: bool = True) -> None: """ Builds or rebuilds the internal StrictMultiDiGraph from self.network. - This also initializes flow-related edge attributes (like flow=0). + This also initializes flow-related edge attributes (e.g., flow=0). Args: - add_reverse (bool): If True, for every link A->B, add a mirrored link - B->A with the same capacity/cost. Default True. + add_reverse (bool): If True, for every link A->B, add a mirrored + link B->A with the same capacity/cost. """ self.graph = self.network.to_strict_multidigraph(add_reverse=add_reverse) init_flow_graph(self.graph) # Initialize flow-related attributes def expand_demands(self) -> None: """ - Converts each TrafficDemand into one or more Demand objects according - to the demand's 'mode'. The sum of volumes for all expanded Demands of - a TrafficDemand equals that TrafficDemand's `demand`. - - Supported modes: - - 'node_to_node' - - 'combine' - - 'pairwise' - - 'one_to_one' + Converts each TrafficDemand in self.traffic_demands into one or more + Demand objects based on the demand's 'mode'. - Each Demand is assigned a FlowPolicy (from the demand or default). The expanded demands are stored in self.demands, sorted by ascending - priority (lower demand_class -> earlier). + demand_class (priority). Also populates _td_to_demands[td.id] for each + TrafficDemand. - Also populates _td_to_demands[td.id] with the corresponding Demand list. + Raises: + ValueError: If an unknown mode is encountered. """ self._td_to_demands.clear() expanded: List[Demand] = [] for td in self.traffic_demands: - # Collect node groups for src and dst + # Gather node groups for source and sink src_groups = self.network.select_node_groups_by_path(td.source_path) snk_groups = self.network.select_node_groups_by_path(td.sink_path) - # If no node matches, store empty and skip if not src_groups or not snk_groups: + # No matching nodes; skip self._td_to_demands[td.id] = [] continue - # Sort labels for deterministic expansion - src_labels = sorted(src_groups.keys()) - snk_labels = sorted(snk_groups.keys()) - mode = td.mode - - local_demands: List[Demand] = [] - if mode == "combine": - self._expand_combine(local_demands, td, src_groups, snk_groups) - elif mode == "pairwise": - self._expand_pairwise( - local_demands, - td, - src_labels, - snk_labels, - src_groups, - snk_groups, - ) - elif mode == "one_to_one": - self._expand_one_to_one( - local_demands, - td, - src_labels, - snk_labels, - src_groups, - snk_groups, - ) + # Expand demands according to the specified mode + if td.mode == "combine": + demands_of_td: List[Demand] = [] + self._expand_combine(demands_of_td, td, src_groups, snk_groups) + expanded.extend(demands_of_td) + self._td_to_demands[td.id] = demands_of_td + elif td.mode == "full_mesh": + demands_of_td: List[Demand] = [] + self._expand_full_mesh(demands_of_td, td, src_groups, snk_groups) + expanded.extend(demands_of_td) + self._td_to_demands[td.id] = demands_of_td else: - # Default to "node_to_node" - self._expand_node_to_node(local_demands, td, src_groups, snk_groups) + raise ValueError(f"Unknown mode: {td.mode}") - expanded.extend(local_demands) - self._td_to_demands[td.id] = local_demands - - # Sort final demands by ascending priority - expanded.sort() + # Sort final demands by ascending demand_class 
(i.e., priority) + expanded.sort(key=lambda d: d.demand_class) self.demands = expanded def place_all_demands( self, - placement_rounds: int = 5, + placement_rounds: Union[int, str] = "auto", reoptimize_after_each_round: bool = False, ) -> float: """ - Places all expanded demands in ascending priority order, using a - multi-round approach for demands of the same priority. - - Each priority class is processed with `placement_rounds` passes, distributing - demand incrementally. Optionally re-optimizes flows after each round. - - Finally, updates each TrafficDemand's `demand_placed` with the sum of - its expanded demands' placed volumes. + Places all expanded demands in ascending priority order using multiple + incremental rounds per priority. + + In each priority class: + - We determine the number of rounds (user-supplied or estimated). + - We iterate placement_rounds times. + - In each round, we allocate (leftover / rounds_left) for each demand + and attempt to place that volume in the flow graph. + - If no progress was made during a round, we stop early. + - If reoptimize_after_each_round is True, we remove and re-place + each demand's flow after the round to better share capacity. Args: - placement_rounds (int): Number of incremental passes per priority. - reoptimize_after_each_round (bool): Whether to re-run an optimization - pass after each round of placement. + placement_rounds (Union[int, str]): Number of incremental passes per + priority class. If "auto", a heuristic is used to choose a reasonable + number based on total demand and total capacity. + reoptimize_after_each_round (bool): Whether to remove and re-place + all demands in the same priority after each round for better + capacity sharing. Returns: float: Total volume successfully placed across all demands. Raises: - RuntimeError: If the graph has not been built. + RuntimeError: If the graph has not been built yet. """ if self.graph is None: raise RuntimeError("Graph not built yet. 
Call build_graph() first.") - # Group demands by priority + if isinstance(placement_rounds, str) and placement_rounds.lower() == "auto": + placement_rounds = self._estimate_rounds() + + # Group demands by priority class prio_map: Dict[int, List[Demand]] = defaultdict(list) - for d in self.demands: - prio_map[d.demand_class].append(d) + for dmd in self.demands: + prio_map[dmd.demand_class].append(dmd) total_placed = 0.0 sorted_priorities = sorted(prio_map.keys()) - for priority in sorted_priorities: - demands_in_prio = prio_map[priority] + for priority_class in sorted_priorities: + demands_in_class = prio_map[priority_class] - # Multi-round fractional placement for round_idx in range(placement_rounds): - placement_this_round = 0.0 + placed_in_this_round = 0.0 + rounds_left = placement_rounds - round_idx - for demand in demands_in_prio: + for demand in demands_in_class: leftover = demand.volume - demand.placed_demand if leftover < base.MIN_FLOW: - # Already fully placed (or negligible leftover) continue - # Distribute in fractional increments - rounds_left = placement_rounds - round_idx step_to_place = leftover / float(rounds_left) - placed_now, _remain = demand.place( flow_graph=self.graph, max_placement=step_to_place, ) total_placed += placed_now - placement_this_round += placed_now + placed_in_this_round += placed_now - # Re-optimize if requested - if reoptimize_after_each_round and placement_this_round > 0.0: - self._reoptimize_priority_demands(demands_in_prio) + # Optionally reoptimize flows in this class + if reoptimize_after_each_round and placed_in_this_round > 0.0: + self._reoptimize_priority_demands(demands_in_class) - # No progress -> break - if placement_this_round < base.MIN_FLOW: + # If no progress was made, no need to continue extra rounds + if placed_in_this_round < base.MIN_FLOW: break - # Update each TrafficDemand with the sum of its expanded demands + # Update each TrafficDemand's placed volume for td in self.traffic_demands: - demand_list = self._td_to_demands.get(td.id, []) - td.demand_placed = sum(d.placed_demand for d in demand_list) + dlist = self._td_to_demands.get(td.id, []) + td.demand_placed = sum(d.placed_demand for d in dlist) return total_placed def reset_all_flow_usages(self) -> None: """ - Removes flow usage from the graph for each Demand's FlowPolicy, - resets placed_demand=0 for each Demand, and sets - TrafficDemand.demand_placed=0. + Removes flow usage from the graph for each Demand's FlowPolicy + and resets placed_demand to 0 for all demands. + + Also sets TrafficDemand.demand_placed to 0 for each top-level demand. """ if self.graph is None: return - # Clear usage from each Demand's FlowPolicy - for d in self.demands: - if d.flow_policy: - d.flow_policy.remove_demand(self.graph) - d.placed_demand = 0.0 + for dmd in self.demands: + if dmd.flow_policy: + dmd.flow_policy.remove_demand(self.graph) + dmd.placed_demand = 0.0 - # Reset top-level traffic demands for td in self.traffic_demands: td.demand_placed = 0.0 @@ -233,14 +223,20 @@ def get_flow_details(self) -> Dict[Tuple[int, int], Dict[str, object]]: Returns: Dict[Tuple[int, int], Dict[str, object]]: - Keyed by (demand_index, flow_index), with info on placed_flow, - src_node, dst_node, and the path edges. + A dictionary keyed by (demand_index, flow_index). 
Each value + includes: + { + "placed_flow": , + "src_node": , + "dst_node": , + "edges": + } """ details: Dict[Tuple[int, int], Dict[str, object]] = {} - for i, d in enumerate(self.demands): - if not d.flow_policy: + for i, dmd in enumerate(self.demands): + if not dmd.flow_policy: continue - for f_idx, flow_obj in d.flow_policy.flows.items(): + for f_idx, flow_obj in dmd.flow_policy.flows.items(): details[(i, f_idx)] = { "placed_flow": flow_obj.placed_flow, "src_node": flow_obj.src_node, @@ -251,10 +247,10 @@ def get_flow_details(self) -> Dict[Tuple[int, int], Dict[str, object]]: def summarize_link_usage(self) -> Dict[str, float]: """ - Returns flow usage per edge in the graph. + Returns the total flow usage per edge in the graph. Returns: - Dict[str, float]: edge_key -> used capacity (flow). + Dict[str, float]: A mapping from edge_key -> current flow on that edge. """ usage: Dict[str, float] = {} if self.graph is None: @@ -263,34 +259,37 @@ def summarize_link_usage(self) -> Dict[str, float]: for edge_key, edge_tuple in self.graph.get_edges().items(): attr_dict = edge_tuple[3] usage[edge_key] = attr_dict.get("flow", 0.0) + return usage def _reoptimize_priority_demands(self, demands_in_prio: List[Demand]) -> None: """ - Optionally re-run flow-policy optimization for each Demand in - the same priority class. + Re-run flow-policy placement for each Demand in the same priority class. + + Removing and re-placing each flow allows the flow policy to adjust if + capacity constraints have changed due to other demands. Args: - demands_in_prio (List[Demand]): Demands of the same priority. + demands_in_prio (List[Demand]): All demands of the same priority class. """ if self.graph is None: return - for d in demands_in_prio: - if not d.flow_policy: + for dmd in demands_in_prio: + if not dmd.flow_policy: continue - placed_volume = d.placed_demand - d.flow_policy.remove_demand(self.graph) - d.flow_policy.place_demand( + placed_volume = dmd.placed_demand + dmd.flow_policy.remove_demand(self.graph) + dmd.flow_policy.place_demand( self.graph, - d.src_node, - d.dst_node, - d.demand_class, + dmd.src_node, + dmd.dst_node, + dmd.demand_class, placed_volume, ) - d.placed_demand = d.flow_policy.placed_demand + dmd.placed_demand = dmd.flow_policy.placed_demand - def _expand_node_to_node( + def _expand_combine( self, expanded: List[Demand], td: TrafficDemand, @@ -298,43 +297,76 @@ def _expand_node_to_node( snk_groups: Dict[str, List[Node]], ) -> None: """ - 'node_to_node' mode: Each matched (src_node, dst_node) pair - gets an equal fraction of td.demand (skips self-pairs). + 'combine' mode expansion. + + Attaches a single pseudo-source and a single pseudo-sink node for the + matched source and sink nodes, similar to the approach in network.py. + A single Demand is created with the total volume from the pseudo-source + to the pseudo-sink. Infinite-capacity edges are added from the pseudo-source + to each real source node, and from each real sink node to the pseudo-sink. + + Args: + expanded (List[Demand]): Accumulates newly created Demand objects. + td (TrafficDemand): The original TrafficDemand (total volume, etc.). + src_groups (Dict[str, List[Node]]): Matched source nodes by label. + snk_groups (Dict[str, List[Node]]): Matched sink nodes by label. 
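+
+        Example (illustrative): a TrafficDemand with id "abc123" yields pseudo
+        nodes "combine_src::abc123" and "combine_snk::abc123", joined to the
+        real sources/sinks by infinite-capacity, zero-cost edges, with one
+        Demand of the full td.demand volume between them.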
""" - # Determine the flow policy configuration - fp_config = td.flow_policy_config or self.default_flow_policy_config + # Flatten the source and sink node lists + src_nodes = [ + node for group_nodes in src_groups.values() for node in group_nodes + ] + dst_nodes = [ + node for group_nodes in snk_groups.values() for node in group_nodes + ] + + if not src_nodes or not dst_nodes or self.graph is None: + # If no valid nodes or no graph, skip + return - src_nodes: List[Node] = [] - for group_nodes in src_groups.values(): - src_nodes.extend(group_nodes) + # Create pseudo-source / pseudo-sink names + pseudo_source_name = f"combine_src::{td.id}" + pseudo_sink_name = f"combine_snk::{td.id}" - dst_nodes: List[Node] = [] - for group_nodes in snk_groups.values(): - dst_nodes.extend(group_nodes) + # Add pseudo nodes to the graph (no-op if they already exist) + self.graph.add_node(pseudo_source_name) + self.graph.add_node(pseudo_sink_name) - valid_pairs = [] + # Link pseudo-source to real sources, and real sinks to pseudo-sink for s_node in src_nodes: - for t_node in dst_nodes: - if s_node.name != t_node.name: - valid_pairs.append((s_node, t_node)) + self.graph.add_edge( + pseudo_source_name, + s_node.name, + capacity=float("inf"), + cost=0, + ) + for t_node in dst_nodes: + self.graph.add_edge( + t_node.name, + pseudo_sink_name, + capacity=float("inf"), + cost=0, + ) - if not valid_pairs: - return + init_flow_graph(self.graph) # Re-initialize flow-related attributes - demand_per_pair = td.demand / float(len(valid_pairs)) - for s_node, t_node in valid_pairs: + # Create a single Demand with the full volume + if td.flow_policy: + flow_policy = td.flow_policy.deep_copy() + else: + fp_config = td.flow_policy_config or self.default_flow_policy_config flow_policy = get_flow_policy(fp_config) - expanded.append( - Demand( - src_node=s_node.name, - dst_node=t_node.name, - volume=demand_per_pair, - demand_class=td.priority, - flow_policy=flow_policy, - ) + + expanded.append( + Demand( + src_node=pseudo_source_name, + dst_node=pseudo_sink_name, + volume=td.demand, + demand_class=td.priority, + flow_policy=flow_policy, ) + ) - def _expand_combine( + def _expand_full_mesh( self, expanded: List[Demand], td: TrafficDemand, @@ -342,32 +374,48 @@ def _expand_combine( snk_groups: Dict[str, List[Node]], ) -> None: """ - 'combine' mode: Combine all matched sources into one set, all sinks into another, - then distribute td.demand among all valid pairs. - """ - # Determine the flow policy configuration - fp_config = td.flow_policy_config or self.default_flow_policy_config - - combined_src_nodes: List[Node] = [] - combined_snk_nodes: List[Node] = [] - - for nodes in src_groups.values(): - combined_src_nodes.extend(nodes) - for nodes in snk_groups.values(): - combined_snk_nodes.extend(nodes) + 'full_mesh' mode expansion. - valid_pairs = [] - for s_node in combined_src_nodes: - for t_node in combined_snk_nodes: - if s_node.name != t_node.name: - valid_pairs.append((s_node, t_node)) + Combines all matched source nodes into one group and all matched sink + nodes into another group. Creates a Demand for each (src_node, dst_node) + pair (skipping self pairs), splitting td.demand evenly among them. - if not valid_pairs: + Args: + expanded (List[Demand]): Accumulates newly created Demand objects. + td (TrafficDemand): The original TrafficDemand (total volume, etc.). + src_groups (Dict[str, List[Node]]): Matched source nodes by label. + snk_groups (Dict[str, List[Node]]): Matched sink nodes by label. 
+ """ + # Flatten the source and sink node lists + src_nodes = [ + node for group_nodes in src_groups.values() for node in group_nodes + ] + dst_nodes = [ + node for group_nodes in snk_groups.values() for node in group_nodes + ] + + # Generate all valid (src, dst) pairs + valid_pairs = [ + (s_node, t_node) + for s_node in src_nodes + for t_node in dst_nodes + if s_node.name != t_node.name + ] + pair_count = len(valid_pairs) + if pair_count == 0: return - demand_per_pair = td.demand / float(len(valid_pairs)) + demand_per_pair = td.demand / float(pair_count) + for s_node, t_node in valid_pairs: - flow_policy = get_flow_policy(fp_config) + if td.flow_policy: + # Already a FlowPolicy instance, so deep copy it + flow_policy = td.flow_policy.deep_copy() + else: + # Build from enum-based factory + fp_config = td.flow_policy_config or self.default_flow_policy_config + flow_policy = get_flow_policy(fp_config) + expanded.append( Demand( src_node=s_node.name, @@ -378,115 +426,38 @@ def _expand_combine( ) ) - def _expand_pairwise( - self, - expanded: List[Demand], - td: TrafficDemand, - src_labels: List[str], - snk_labels: List[str], - src_groups: Dict[str, List[Node]], - snk_groups: Dict[str, List[Node]], - ) -> None: - """ - 'pairwise' mode: For each (src_label, snk_label) pair, allocate a fraction - of td.demand, then split among valid node pairs (excluding self-pairs). - """ - # Determine the flow policy configuration - fp_config = td.flow_policy_config or self.default_flow_policy_config - - label_pairs_count = len(src_labels) * len(snk_labels) - if label_pairs_count == 0: - return - - label_share = td.demand / float(label_pairs_count) - - for s_label in src_labels: - s_nodes = src_groups[s_label] - if not s_nodes: - continue - - for t_label in snk_labels: - t_nodes = snk_groups[t_label] - if not t_nodes: - continue - - valid_pairs = [] - for s_node in s_nodes: - for t_node in t_nodes: - if s_node.name != t_node.name: - valid_pairs.append((s_node, t_node)) - - if not valid_pairs: - continue - - demand_per_pair = label_share / float(len(valid_pairs)) - for s_node, t_node in valid_pairs: - flow_policy = get_flow_policy(fp_config) - expanded.append( - Demand( - src_node=s_node.name, - dst_node=t_node.name, - volume=demand_per_pair, - demand_class=td.priority, - flow_policy=flow_policy, - ) - ) - - def _expand_one_to_one( - self, - expanded: List[Demand], - td: TrafficDemand, - src_labels: List[str], - snk_labels: List[str], - src_groups: Dict[str, List[Node]], - snk_groups: Dict[str, List[Node]], - ) -> None: + def _estimate_rounds(self) -> int: """ - 'one_to_one' mode: Match src_labels[i] to snk_labels[i], splitting td.demand - evenly among label pairs, then distributing that share among valid node pairs. + Estimates a suitable number of placement rounds by comparing + the median demand volume and the median edge capacity. Returns + a default of 5 rounds if there is insufficient data for a + meaningful calculation. - Raises: - ValueError: If the number of src_labels != number of snk_labels. + Returns: + int: Estimated number of rounds to use for traffic placement. """ - # Determine the flow policy configuration - fp_config = td.flow_policy_config or self.default_flow_policy_config - if len(src_labels) != len(snk_labels): - raise ValueError( - "one_to_one mode requires equal counts of src and sink labels. " - f"Got {len(src_labels)} vs {len(snk_labels)}." 
- ) - - label_count = len(src_labels) - if label_count == 0: - return - - pair_share = td.demand / float(label_count) - - for i, s_label in enumerate(src_labels): - t_label = snk_labels[i] - s_nodes = src_groups[s_label] - t_nodes = snk_groups[t_label] - if not s_nodes or not t_nodes: - continue - - valid_pairs = [] - for s_node in s_nodes: - for t_node in t_nodes: - if s_node.name != t_node.name: - valid_pairs.append((s_node, t_node)) - - if not valid_pairs: - continue - - demand_per_pair = pair_share / float(len(valid_pairs)) - for s_node, t_node in valid_pairs: - flow_policy = get_flow_policy(fp_config) - expanded.append( - Demand( - src_node=s_node.name, - dst_node=t_node.name, - volume=demand_per_pair, - demand_class=td.priority, - flow_policy=flow_policy, - ) - ) + if not self.demands: + return 5 + + demand_volumes = [demand.volume for demand in self.demands if demand.volume > 0] + if not demand_volumes: + return 5 + + median_demand = statistics.median(demand_volumes) + + if not self.graph: + return 5 + + edges = self.graph.get_edges().values() + capacities = [ + edge_data[3].get("capacity", 0) + for edge_data in edges + if edge_data[3].get("capacity", 0) > 0 + ] + if not capacities: + return 5 + + median_capacity = statistics.median(capacities) + ratio = median_demand / median_capacity + guessed_rounds = int(5 + 5 * ratio) + return max(5, min(guessed_rounds, 100)) diff --git a/notebooks/scenario.ipynb b/notebooks/scenario.ipynb index f78af78..00fbce2 100644 --- a/notebooks/scenario.ipynb +++ b/notebooks/scenario.ipynb @@ -9,7 +9,8 @@ "from ngraph.scenario import Scenario\n", "from ngraph.traffic_demand import TrafficDemand\n", "from ngraph.traffic_manager import TrafficManager\n", - "from ngraph.lib.flow_policy import FlowPolicyConfig" + "from ngraph.lib.flow_policy import FlowPolicyConfig, FlowPolicy, FlowPlacement\n", + "from ngraph.lib.algorithms.base import PathAlg, EdgeSelect" ] }, { @@ -23,10 +24,10 @@ " brick_2tier:\n", " groups:\n", " t1:\n", - " node_count: 4\n", + " node_count: 8\n", " name_template: t1-{node_num}\n", " t2:\n", - " node_count: 4\n", + " node_count: 8\n", " name_template: t2-{node_num}\n", "\n", " adjacency:\n", @@ -44,7 +45,7 @@ " b2:\n", " use_blueprint: brick_2tier\n", " spine:\n", - " node_count: 16\n", + " node_count: 64\n", " name_template: t3-{node_num}\n", "\n", " adjacency:\n", @@ -62,12 +63,23 @@ " cost: 1\n", "\n", "network:\n", - " name: \"3tier_clos\"\n", + " name: \"3tier_clos_network\"\n", " version: 1.0\n", "\n", " groups:\n", " my_clos1:\n", " use_blueprint: 3tier_clos\n", + "\n", + " my_clos2:\n", + " use_blueprint: 3tier_clos\n", + "\n", + " adjacency:\n", + " - source: my_clos1/spine\n", + " target: my_clos2/spine\n", + " pattern: one_to_one\n", + " link_params:\n", + " capacity: 2\n", + " cost: 1\n", "\"\"\"\n", "scenario = Scenario.from_yaml(scenario_yaml)\n", "network = scenario.network" @@ -81,7 +93,7 @@ { "data": { "text/plain": [ - "{('b1', 'b1'): inf, ('b1', 'b2'): 32.0, ('b2', 'b1'): 32.0, ('b2', 'b2'): inf}" + "{('b1|b2', 'b1|b2'): 128.0}" ] }, "execution_count": 3, @@ -91,22 +103,22 @@ ], "source": [ "network.max_flow(\n", - " source_path=r\".*(b[0-9]*)/t1\",\n", - " sink_path=r\".*(b[0-9]*)/t1\",\n", - " mode=\"pairwise\",\n", + " source_path=r\"my_clos1.*(b[0-9]*)/t1\",\n", + " sink_path=r\"my_clos2.*(b[0-9]*)/t1\",\n", + " mode=\"combine\",\n", " shortest_path=True,\n", ")" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - 
"31.999999999999904" + "0.0" ] }, "execution_count": 4, @@ -116,20 +128,20 @@ ], "source": [ "d = TrafficDemand(\n", - " source_path=r\".*(b[0-9]*)/t1\",\n", - " sink_path=r\".*(b[0-9]*)/t1\",\n", - " demand=32,\n", - " mode=\"combine\",\n", + " source_path=r\"my_clos1.*(b[0-9]*)/t1\",\n", + " sink_path=r\"my_clos2.*(b[0-9])/t1\",\n", + " demand=10,\n", + " mode=\"full_mesh\",\n", + " flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,\n", ")\n", "demands = [d]\n", "tm = TrafficManager(\n", " network=network,\n", " traffic_demands=demands,\n", - " default_flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,\n", ")\n", "tm.build_graph()\n", "tm.expand_demands()\n", - "tm.place_all_demands()" + "tm.place_all_demands(placement_rounds=50)" ] }, { @@ -140,7 +152,7 @@ { "data": { "text/plain": [ - "TrafficDemand(source_path='.*(b[0-9]*)/t1', sink_path='.*(b[0-9]*)/t1', priority=0, demand=32, demand_placed=32.0, flow_policy_config=, mode='combine', attrs={}, id='.*(b[0-9]*)/t1|.*(b[0-9]*)/t1|_WaSeCXEShS7JxwxzJLm3g')" + "TrafficDemand(source_path='my_clos1.*(b[0-9]*)/t1', sink_path='my_clos2.*(b[0-9])/t1', priority=0, demand=10, demand_placed=0.0, flow_policy_config=, flow_policy=None, mode='full_mesh', attrs={}, id='my_clos1.*(b[0-9]*)/t1|my_clos2.*(b[0-9])/t1|S6weVhAgQMCerTqMbTnMVw')" ] }, "execution_count": 5, @@ -154,71 +166,271 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " 
Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-3', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b1/t1/t1-4', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, 
placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-1', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-2', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b2/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-3', dst_node='my_clos1/b2/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b1/t1/t1-1', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b1/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b1/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b1/t1/t1-4', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b2/t1/t1-1', 
volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b2/t1/t1-2', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714),\n", - " Demand(src_node='my_clos1/b2/t1/t1-4', dst_node='my_clos1/b2/t1/t1-3', volume=0.5714285714285714, demand_class=0, flow_policy=, placed_demand=0.5714285714285714)]" + "[Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-2', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-3', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-4', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-5', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-6', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-7', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-8', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b2/t1/t1-1', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b2/t1/t1-2', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b2/t1/t1-3', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b2/t1/t1-4', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b2/t1/t1-5', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b2/t1/t1-6', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b2/t1/t1-7', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b2/t1/t1-8', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos2/b1/t1/t1-1', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos2/b1/t1/t1-2', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos2/b1/t1/t1-3', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos2/b1/t1/t1-4', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos2/b1/t1/t1-5', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " 
Demand(src_node='my_clos1/b1/t1/t1-2', dst_node='my_clos2/b1/t1/t1-6', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " ... (remaining Demand entries for sources my_clos1/b1/t1/t1-2 through my_clos1/b2/t1/t1-7, each with volume=0.0390625, demand_class=0, placed_demand=0.0) ...\n", + " 
Demand(src_node='my_clos1/b2/t1/t1-7', dst_node='my_clos2/b2/t1/t1-6', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-7', dst_node='my_clos2/b2/t1/t1-7', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-7', dst_node='my_clos2/b2/t1/t1-8', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b1/t1/t1-1', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b1/t1/t1-2', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b1/t1/t1-3', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b1/t1/t1-4', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b1/t1/t1-5', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b1/t1/t1-6', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b1/t1/t1-7', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b1/t1/t1-8', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b2/t1/t1-1', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b2/t1/t1-2', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b2/t1/t1-3', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b2/t1/t1-4', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b2/t1/t1-5', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b2/t1/t1-6', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b2/t1/t1-7', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0),\n", + " Demand(src_node='my_clos1/b2/t1/t1-8', dst_node='my_clos2/b2/t1/t1-8', volume=0.0390625, demand_class=0, flow_policy=, placed_demand=0.0)]" ] }, - "execution_count": 6, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -227,6 +439,55 @@ "tm.demands" ] }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'path_alg': ,\n", + " 'flow_placement': ,\n", + " 'edge_select': ,\n", + " 'multipath': False,\n", + " 'min_flow_count': 16,\n", + " 'max_flow_count': 16,\n", + " 'max_path_cost': None,\n", + " 'max_path_cost_factor': None,\n", + " 'static_paths': None,\n", + " 'edge_select_func': None,\n", + " 'edge_select_value': None,\n", + " 'reoptimize_flows_on_each_placement': True,\n", + " 'flows': {FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=0): ,\n", + " 
FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=1): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=2): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=3): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=4): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=5): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=6): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=7): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=8): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=9): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=10): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=11): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=12): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=13): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=14): ,\n", + " FlowIndex(src_node='my_clos1/b1/t1/t1-1', dst_node='my_clos2/b1/t1/t1-1', flow_class=0, flow_id=15): },\n", + " 'best_path_cost': 500.0,\n", + " '_next_flow_id': 16}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "vars(dmd.flow_policy)" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/tests/test_traffic_manager.py b/tests/test_traffic_manager.py index ef3c5e1..d742087 100644 --- a/tests/test_traffic_manager.py +++ b/tests/test_traffic_manager.py @@ -1,9 +1,11 @@ import pytest + from ngraph.network import Network, Node, Link from ngraph.traffic_demand import TrafficDemand from ngraph.lib.flow_policy import FlowPolicyConfig from ngraph.lib.graph import StrictMultiDiGraph from ngraph.lib.algorithms.base import MIN_FLOW +from ngraph.lib.demand import Demand from ngraph.traffic_manager import TrafficManager @@ -35,7 +37,7 @@ def small_network() -> Network: def small_network_with_loop() -> Network: """ Builds a small network with a loop: A -> B, B -> C, C -> A. - This can help test re-optimization more interestingly. + This can help test re-optimization logic in place_all_demands. """ net = Network() @@ -58,14 +60,15 @@ def test_build_graph_not_built_error(small_network): raises a RuntimeError. """ tm = TrafficManager(network=small_network, traffic_demands=[]) - # no build_graph call here + # No build_graph call here, so we expect an error with pytest.raises(RuntimeError): tm.place_all_demands() def test_basic_build_and_expand(small_network): """ - Test the ability to build the graph and expand demands. + Test the ability to build the graph and expand demands using the default mode. + By default, we assume it's "combine" if not specified otherwise in TrafficDemand. 
""" demands = [ TrafficDemand(source_path="A", sink_path="B", demand=10.0), @@ -79,17 +82,19 @@ def test_basic_build_and_expand(small_network): tm.build_graph() assert isinstance(tm.graph, StrictMultiDiGraph), "Graph should be built" - assert len(tm.graph.get_nodes()) == 3, "Should have 3 nodes in graph" - assert len(tm.graph.get_edges()) == 4, "Should have 4 edges in graph" + assert len(tm.graph.get_nodes()) == 3, "Should have the original 3 nodes" + # 2 directed links => with add_reverse=True => we expect 4 edges total + assert len(tm.graph.get_edges()) == 4, "Should have 4 edges in the graph" tm.expand_demands() + # Each TrafficDemand uses default "combine" => 1 Demand each assert len(tm.demands) == 2, "Expected 2 expanded demands" def test_place_all_demands_simple(small_network): """ Place demands on a simple A->B->C network. - We expect all to be placed because capacity = 100 is large. + We expect all to be placed because capacity = 100 is large enough. """ demands = [ TrafficDemand(source_path="A", sink_path="C", demand=50.0), @@ -107,12 +112,11 @@ def test_place_all_demands_simple(small_network): for d in tm.demands: assert ( abs(d.placed_demand - d.volume) < MIN_FLOW - ), "Each demand should be fully placed" + ), "Demand should be fully placed" # Summarize link usage usage = tm.summarize_link_usage() - # For A->B->C route, we expect 50 flow to pass A->B, and 50 + 20 = 70 on B->C - # However, the B->C link capacity is 100, so it can carry 70 total + # We expect 50 flow on A->B, then 70 total on B->C ab_key = None bc_key = None for k, (src, dst, _, _) in tm.graph.get_edges().items(): @@ -121,7 +125,6 @@ def test_place_all_demands_simple(small_network): elif src == "B" and dst == "C": bc_key = k - # usage[...] is how much capacity is used, i.e. used_capacity assert abs(usage[ab_key] - 50.0) < MIN_FLOW, "A->B should carry 50" assert abs(usage[bc_key] - 70.0) < MIN_FLOW, "B->C should carry 70" @@ -129,48 +132,36 @@ def test_place_all_demands_simple(small_network): def test_priority_fairness(small_network): """ Test that multiple demands with different priorities - are handled in ascending priority order (lowest numeric = highest priority). - For demonstration, we set small link capacities that will cause partial placement. + are handled in ascending priority order (priority=0 means highest). + This test uses smaller link capacities to force partial placement. """ - # Reduce link capacity to 30 to test partial usage - small_network.links[next(iter(small_network.links))].capacity = 30.0 # A->B - small_network.links[list(small_network.links.keys())[1]].capacity = 30.0 # B->C + # Adjust capacities to 30 + link_ids = list(small_network.links.keys()) + small_network.links[link_ids[0]].capacity = 30.0 # A->B + small_network.links[link_ids[1]].capacity = 30.0 # B->C - # High priority demand: A->C with volume=40 - # Low priority demand: B->C with volume=40 - # Expect: The higher priority (A->C) saturates B->C first. - # Then the lower priority (B->C) might get leftover capacity (if any). 
+ # Higher priority (0) vs lower priority (1) demands = [ - TrafficDemand( - source_path="A", sink_path="C", demand=40.0, priority=0 - ), # higher priority - TrafficDemand( - source_path="B", sink_path="C", demand=40.0, priority=1 - ), # lower priority + TrafficDemand(source_path="A", sink_path="C", demand=40.0, priority=0), + TrafficDemand(source_path="B", sink_path="C", demand=40.0, priority=1), ] tm = TrafficManager(network=small_network, traffic_demands=demands) - tm.build_graph() tm.expand_demands() - total_placed = tm.place_all_demands(placement_rounds=1) # single pass for clarity - # The link B->C capacity is 30, so the first (priority=0) can fully use it - # or saturate it. Actually we have A->B->C route for the first demand, so - # the capacity from A->B->C is 30 end-to-end. - # The second demand (B->C direct) sees the same link capacity but it's - # already used up by the higher priority. So it gets 0. + total_placed = tm.place_all_demands(placement_rounds=1) assert total_placed == 30.0, "Expected only 30 placed in total" - # Check each demand's placed high_prio_placed = tm.demands[0].placed_demand low_prio_placed = tm.demands[1].placed_demand - assert high_prio_placed == 30.0, "High priority demand should saturate capacity" - assert low_prio_placed == 0.0, "Low priority got no leftover capacity" + assert high_prio_placed == 30.0, "High priority saturates capacity" + assert low_prio_placed == 0.0, "No capacity left for lower priority" def test_reset_flow_usages(small_network): """ - Test that reset_all_flow_usages zeroes out placed demand. + Test that reset_all_flow_usages() zeroes out placed flow usage on edges + and sets all demands' placed_demand to 0. """ demands = [TrafficDemand(source_path="A", sink_path="C", demand=10.0)] tm = TrafficManager(network=small_network, traffic_demands=demands) @@ -179,29 +170,21 @@ def test_reset_flow_usages(small_network): placed_before = tm.place_all_demands() assert placed_before == 10.0 - # Now reset all flows + # Now reset tm.reset_all_flow_usages() for d in tm.demands: - assert d.placed_demand == 0.0, "Demand placed_demand should be reset to 0" + assert d.placed_demand == 0.0, "Demand placed_demand should be reset" usage = tm.summarize_link_usage() - for k in usage: - assert usage[k] == 0.0, "Link usage should be reset to 0" + for flow_val in usage.values(): + assert flow_val == 0.0, "All link usage should be reset to 0" def test_reoptimize_flows(small_network_with_loop): """ Test that re-optimization logic is triggered in place_all_demands - when reoptimize_after_each_round=True. - We'll set the capacity on one link to be quite low so the flow might - switch to a loop path under re-optimization, if feasible. - """ - # Example: capacity A->B=10, B->C=1, C->A=10 - # Demand from A->C is 5, so if direct path A->B->C is tried first, - # it sees only capacity=1 for B->C. Then re-optimization might try A->B->C->A->B->C - # (though that is cyclical and might or might not help, depending on your path alg). - # This test just ensures we call the reopt method, not necessarily that it - # finds a truly cyclical route. Implementation depends on path selection logic. - # We'll do a small check that the reopt code doesn't crash and usage is consistent. + when reoptimize_after_each_round=True. This forces flows to be + removed and re-placed each round. 
+ """ demands = [TrafficDemand(source_path="A", sink_path="C", demand=5.0)] tm = TrafficManager( network=small_network_with_loop, @@ -211,25 +194,220 @@ def test_reoptimize_flows(small_network_with_loop): tm.build_graph() tm.expand_demands() - # place with reoptimize + # Place with reoptimize total_placed = tm.place_all_demands( - placement_rounds=2, - reoptimize_after_each_round=True, + placement_rounds=2, reoptimize_after_each_round=True ) - # We do not strictly assert a certain path is used, - # only that a nonzero amount is placed (some path is feasible). - assert total_placed > 0.0, "Should place some flow even if B->C is small" + assert total_placed > 0.0, "We should place some flow" # Summarize flows flow_details = tm.get_flow_details() # We only had 1 demand => index=0 - # We should have at least 1 flow (or more if it tries multiple splits) - assert len(flow_details) >= 1 - # No crash means re-optimization was invoked + assert len(flow_details) >= 1, "Expect at least one flow object" - # The final usage on B->C might be at most 1.0 if it uses direct path, - # or it might use partial flows if there's a different path approach. - # We'll just assert we placed something, and capacity usage isn't insane. + # Ensure no link usage exceeds capacity usage = tm.summarize_link_usage() - for k in usage: - assert usage[k] <= 10.0, "No link usage should exceed capacity" + for val in usage.values(): + assert val <= 10.0, "No link usage should exceed capacity of 10.0" + + +def test_unknown_mode_raises_value_error(small_network): + """ + Ensure that an invalid mode raises a ValueError during expand_demands. + """ + demands = [ + TrafficDemand(source_path="A", sink_path="B", demand=10.0, mode="invalid_mode") + ] + tm = TrafficManager(network=small_network, traffic_demands=demands) + tm.build_graph() + with pytest.raises(ValueError, match="Unknown mode: invalid_mode"): + tm.expand_demands() + + +def test_place_all_demands_auto_rounds(small_network): + """ + Test the 'auto' logic for placement rounds. Even though the network has + high capacity, we verify it doesn't crash and places demands correctly. + """ + demands = [TrafficDemand(source_path="A", sink_path="C", demand=25.0)] + tm = TrafficManager(network=small_network, traffic_demands=demands) + tm.build_graph() + tm.expand_demands() + + total_placed = tm.place_all_demands(placement_rounds="auto") + assert total_placed == 25.0, "Should place all traffic under auto rounds" + for d in tm.demands: + assert ( + abs(d.placed_demand - d.volume) < MIN_FLOW + ), "Demand should be fully placed" + + +def test_combine_mode_multi_source_sink(): + """ + Test 'combine' mode with multiple source/sink matches to ensure a single + pseudo-source and pseudo-sink are created, and that infinite-capacity edges + are added properly. 
+ """ + net = Network() + net.add_node(Node(name="S1")) + net.add_node(Node(name="S2")) + net.add_node(Node(name="T1")) + net.add_node(Node(name="T2")) + + # Just one link to confirm it's recognized, capacity is large + net.add_link(Link(source="S1", target="T1", capacity=1000, cost=1.0)) + + # Suppose the 'source_path' matches both S1 and S2, and 'sink_path' matches T1 and T2 + demands = [ + TrafficDemand(source_path="S", sink_path="T", demand=100.0, mode="combine") + ] + tm = TrafficManager(network=net, traffic_demands=demands) + tm.build_graph() + tm.expand_demands() + + assert len(tm.demands) == 1, "Only one Demand in combine mode" + d = tm.demands[0] + assert d.src_node.startswith("combine_src::"), "Pseudo-source name mismatch" + assert d.dst_node.startswith("combine_snk::"), "Pseudo-sink name mismatch" + # Check that the graph has the pseudo-nodes + pseudo_src_exists = f"combine_src::{demands[0].id}" in tm.graph.get_nodes() + pseudo_snk_exists = f"combine_snk::{demands[0].id}" in tm.graph.get_nodes() + assert pseudo_src_exists, "Pseudo-source node should exist in the graph" + assert pseudo_snk_exists, "Pseudo-sink node should exist in the graph" + + # There should be edges from the pseudo-source to S1, S2, and from T1, T2 to the pseudo-sink + edges_out_of_pseudo_src = [ + (src, dst) + for _, (src, dst, _, data) in tm.graph.get_edges().items() + if src == d.src_node + ] + assert ( + len(edges_out_of_pseudo_src) == 2 + ), "2 edges from pseudo-source to real sources" + + edges_into_pseudo_snk = [ + (src, dst) + for _, (src, dst, _, data) in tm.graph.get_edges().items() + if dst == d.dst_node + ] + assert len(edges_into_pseudo_snk) == 2, "2 edges from real sinks to pseudo-sink" + + +def test_full_mesh_mode_multi_source_sink(): + """ + Test 'full_mesh' mode with multiple sources and sinks. Each (src, dst) pair + should get its own Demand, skipping any self-pairs. The total volume is split + evenly among pairs. + """ + net = Network() + net.add_node(Node(name="S1")) + net.add_node(Node(name="S2")) + net.add_node(Node(name="T1")) + net.add_node(Node(name="T2")) + + # For clarity, do not add links here. We just want to confirm expansions. + demands = [ + TrafficDemand(source_path="S", sink_path="T", demand=80.0, mode="full_mesh") + ] + tm = TrafficManager(network=net, traffic_demands=demands) + tm.build_graph() + tm.expand_demands() + + # We expect pairs: (S1->T1), (S1->T2), (S2->T1), (S2->T2), so 4 demands + # Each gets 80/4 = 20 volume + assert len(tm.demands) == 4, "4 demands in full mesh" + for d in tm.demands: + assert abs(d.volume - 20.0) < MIN_FLOW, "Each demand should have 20 volume" + + +def test_combine_mode_no_nodes(): + """ + Test that if the source or sink match returns no valid nodes, no Demand is created. + """ + net = Network() + net.add_node(Node(name="X")) # does not match "A" or "B" + + demands = [ + TrafficDemand(source_path="A", sink_path="B", demand=10.0, mode="combine"), + ] + tm = TrafficManager(network=net, traffic_demands=demands) + tm.build_graph() + tm.expand_demands() + assert len(tm.demands) == 0, "No demands created if source/sink matching fails" + + +def test_full_mesh_mode_no_nodes(): + """ + Test that in full_mesh mode, if source or sink match returns no valid nodes, + no Demand is created. 
+ """ + net = Network() + net.add_node(Node(name="X")) # does not match "A" or "B" + + demands = [ + TrafficDemand(source_path="A", sink_path="B", demand=10.0, mode="full_mesh"), + ] + tm = TrafficManager(network=net, traffic_demands=demands) + tm.build_graph() + tm.expand_demands() + assert len(tm.demands) == 0, "No demands created if source/sink matching fails" + + +def test_full_mesh_mode_self_pairs(): + """ + Test that in full_mesh mode, demands skip self-pairs (i.e., src==dst). + We'll create a scenario where source and sink might match the same node. + """ + net = Network() + net.add_node(Node(name="N1")) + net.add_node(Node(name="N2")) + + demands = [ + # source_path="N", sink_path="N" => matches N1, N2 for both source and sink + TrafficDemand(source_path="N", sink_path="N", demand=20.0, mode="full_mesh"), + ] + tm = TrafficManager(network=net, traffic_demands=demands) + tm.build_graph() + tm.expand_demands() + + # Pairs would be (N1->N1), (N1->N2), (N2->N1), (N2->N2). + # Self pairs (N1->N1) and (N2->N2) are skipped => 2 valid pairs + # So we expect 2 demands, each with 10.0 + assert len(tm.demands) == 2, "Only N1->N2 and N2->N1 should be created" + for d in tm.demands: + assert ( + abs(d.volume - 10.0) < MIN_FLOW + ), "Volume should be evenly split among 2 pairs" + + +def test_estimate_rounds_no_demands(small_network): + """ + Test that _estimate_rounds returns a default (5) if no demands exist. + """ + tm = TrafficManager(network=small_network, traffic_demands=[]) + tm.build_graph() + # place_all_demands calls _estimate_rounds if placement_rounds="auto" + # With no demands, we expect no error, just zero placed and default rounds chosen. + total_placed = tm.place_all_demands(placement_rounds="auto") + assert total_placed == 0.0, "No demands => no placement" + + +def test_estimate_rounds_no_capacities(): + """ + Test that _estimate_rounds returns a default (5) if no edges have capacity. + """ + net = Network() + net.add_node(Node(name="A")) + net.add_node(Node(name="B")) + # Link with capacity=0 + net.add_link(Link(source="A", target="B", capacity=0.0, cost=1.0)) + + demands = [TrafficDemand(source_path="A", sink_path="B", demand=50.0)] + tm = TrafficManager(network=net, traffic_demands=demands) + tm.build_graph() + tm.expand_demands() + + # We expect auto => fallback to default rounds => partial or no placement + total_placed = tm.place_all_demands(placement_rounds="auto") + # The link has 0 capacity, so no actual flow can be placed. + assert total_placed == 0.0, "No capacity => no flow placed"