diff --git a/ngraph/blueprints.py b/ngraph/blueprints.py index c9d7774..6386528 100644 --- a/ngraph/blueprints.py +++ b/ngraph/blueprints.py @@ -11,8 +11,7 @@ @dataclass class Blueprint: - """ - Represents a reusable blueprint for hierarchical sub-topologies. + """Represents a reusable blueprint for hierarchical sub-topologies. A blueprint may contain multiple groups of nodes (each can have a node_count and a name_template), plus adjacency rules describing how those groups connect. @@ -35,8 +34,7 @@ class Blueprint: @dataclass class DSLExpansionContext: - """ - Carries the blueprint definitions and the final Network instance + """Carries the blueprint definitions and the final Network instance to be populated during DSL expansion. Attributes: @@ -49,8 +47,7 @@ class DSLExpansionContext: def expand_network_dsl(data: Dict[str, Any]) -> Network: - """ - Expands a combined blueprint + network DSL into a complete Network object. + """Expands a combined blueprint + network DSL into a complete Network object. Overall flow: 1) Parse "blueprints" into Blueprint objects. @@ -164,8 +161,7 @@ def _expand_group( group_def: Dict[str, Any], inherited_risk_groups: Set[str] | None = None, ) -> None: - """ - Expands a single group definition into either: + """Expands a single group definition into either: - Another blueprint's subgroups, or - A direct node group (with node_count, etc.), - Possibly replicating itself if group_name has bracket expansions. @@ -319,8 +315,7 @@ def _expand_blueprint_adjacency( adj_def: Dict[str, Any], parent_path: str, ) -> None: - """ - Expands adjacency definitions from within a blueprint, using parent_path + """Expands adjacency definitions from within a blueprint, using parent_path as the local root. This also handles optional expand_vars for repeated adjacency. Recognized adjacency keys: @@ -352,8 +347,7 @@ def _expand_blueprint_adjacency( def _expand_adjacency(ctx: DSLExpansionContext, adj_def: Dict[str, Any]) -> None: - """ - Expands a top-level adjacency definition from 'network.adjacency'. If 'expand_vars' + """Expands a top-level adjacency definition from 'network.adjacency'. If 'expand_vars' is provided, we expand the source/target as templates repeatedly. Recognized adjacency keys: @@ -388,8 +382,7 @@ def _expand_adjacency(ctx: DSLExpansionContext, adj_def: Dict[str, Any]) -> None def _expand_adjacency_with_variables( ctx: DSLExpansionContext, adj_def: Dict[str, Any], parent_path: str ) -> None: - """ - Handles adjacency expansions when 'expand_vars' is provided. + """Handles adjacency expansions when 'expand_vars' is provided. We substitute variables into the 'source' and 'target' templates to produce multiple adjacency expansions. Then each expansion is passed to _expand_adjacency_pattern. @@ -451,8 +444,7 @@ def _expand_adjacency_pattern( link_params: Dict[str, Any], link_count: int = 1, ) -> None: - """ - Generates Link objects for the chosen adjacency pattern among matched nodes. + """Generates Link objects for the chosen adjacency pattern among matched nodes. Supported Patterns: * "mesh": Connect every source node to every target node @@ -531,8 +523,7 @@ def _create_link( link_params: Dict[str, Any], link_count: int = 1, ) -> None: - """ - Creates and adds one or more Links to the network, applying capacity, cost, + """Creates and adds one or more Links to the network, applying capacity, cost, disabled, risk_groups, and attrs from link_params if present. 
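For reference, a minimal sketch of the DSL that expand_network_dsl consumes. The "blueprints"/"network"/"groups"/"adjacency" keys and the group fields (node_count, name_template) follow the docstrings in this file; the "use_blueprint" key and the "{node_num}" template variable are assumptions not confirmed by these hunks.

    from ngraph.blueprints import expand_network_dsl

    data = {
        "blueprints": {
            "clos_pod": {
                "groups": {
                    "leaf": {"node_count": 2, "name_template": "leaf-{node_num}"},
                    "spine": {"node_count": 2, "name_template": "spine-{node_num}"},
                },
                "adjacency": [
                    {"source": "/leaf", "target": "/spine", "pattern": "mesh",
                     "link_params": {"capacity": 100.0, "cost": 1}},
                ],
            }
        },
        "network": {
            # Bracket expansion replicates the group: pod1, pod2.
            "groups": {"pod[1-2]": {"use_blueprint": "clos_pod"}},  # key name assumed
        },
    }

    net = expand_network_dsl(data)
    print(len(net.nodes), len(net.links))  # 8 nodes, 8 links (4-link mesh per pod)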
Args: @@ -566,8 +557,7 @@ def _create_link( def _process_direct_nodes(net: Network, network_data: Dict[str, Any]) -> None: - """ - Processes direct node definitions (network_data["nodes"]) and adds them to the network + """Processes direct node definitions (network_data["nodes"]) and adds them to the network if they do not already exist. If the node name already exists, we do nothing. Allowed top-level keys for each node: {"disabled", "attrs", "risk_groups"}. @@ -609,8 +599,7 @@ def _process_direct_nodes(net: Network, network_data: Dict[str, Any]) -> None: def _process_direct_links(net: Network, network_data: Dict[str, Any]) -> None: - """ - Processes direct link definitions (network_data["links"]) and adds them to the network. + """Processes direct link definitions (network_data["links"]) and adds them to the network. Each link dict must contain {"source", "target"} plus optionally {"link_params", "link_count"}. No other top-level keys allowed. @@ -653,8 +642,7 @@ def _process_direct_links(net: Network, network_data: Dict[str, Any]) -> None: def _process_link_overrides(net: Network, network_data: Dict[str, Any]) -> None: - """ - Processes the 'link_overrides' section of the network DSL, updating + """Processes the 'link_overrides' section of the network DSL, updating existing links with new parameters. Overrides are applied in order if multiple items match the same link. @@ -691,8 +679,7 @@ def _process_link_overrides(net: Network, network_data: Dict[str, Any]) -> None: def _process_node_overrides(net: Network, network_data: Dict[str, Any]) -> None: - """ - Processes the 'node_overrides' section of the network DSL, updating + """Processes the 'node_overrides' section of the network DSL, updating existing nodes with new attributes in bulk. Overrides are applied in order if multiple items match the same node. @@ -740,8 +727,7 @@ def _update_links( link_params: Dict[str, Any], any_direction: bool = True, ) -> None: - """ - Updates all Link objects between nodes matching 'source' and 'target' paths + """Updates all Link objects between nodes matching 'source' and 'target' paths with new parameters (capacity, cost, disabled, risk_groups, attrs). If any_direction=True, both (source->target) and (target->source) links @@ -802,8 +788,7 @@ def _update_nodes( disabled_val: Any = None, risk_groups_val: Any = None, ) -> None: - """ - Updates attributes on all nodes matching a given path pattern. + """Updates attributes on all nodes matching a given path pattern. - If 'disabled_val' is not None, sets node.disabled to that boolean value. - If 'risk_groups_val' is not None, *replaces* the node's risk_groups with that new set. @@ -833,8 +818,7 @@ def _update_nodes( def _apply_parameters( subgroup_name: str, subgroup_def: Dict[str, Any], params_overrides: Dict[str, Any] ) -> Dict[str, Any]: - """ - Applies user-provided parameter overrides to a blueprint subgroup. + """Applies user-provided parameter overrides to a blueprint subgroup. Example: If 'spine.node_count' = 6 is in params_overrides, @@ -864,8 +848,7 @@ def _apply_parameters( def _apply_nested_path( node_def: Dict[str, Any], path_parts: List[str], value: Any ) -> None: - """ - Recursively applies a path like ["attrs", "role"] to set node_def["attrs"]["role"] = value. + """Recursively applies a path like ["attrs", "role"] to set node_def["attrs"]["role"] = value. Creates intermediate dicts as needed. 
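The override sections these helpers process can be sketched as follows, continuing the data dict from the earlier sketch. The "source"/"target"/"link_params" keys are confirmed above; the node-override "path" key is an assumption:

    data["network"]["link_overrides"] = [
        # Applied in order; matches both directions by default (any_direction=True).
        {"source": "pod1/leaf.*", "target": "pod1/spine.*",
         "link_params": {"capacity": 400.0}},
    ]
    data["network"]["node_overrides"] = [
        {"path": "pod2/.*", "attrs": {"role": "edge"}},  # "path" key assumed
    ]
    net = expand_network_dsl(data)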
Args: @@ -888,8 +871,7 @@ def _apply_nested_path( def _expand_name_patterns(name: str) -> List[str]: - """ - Parses and expands bracketed expressions in a group name. For example: + """Parses and expands bracketed expressions in a group name. For example: "fa[1-3]" -> ["fa1", "fa2", "fa3"] "dc[1,3,5-6]" -> ["dc1", "dc3", "dc5", "dc6"] @@ -930,8 +912,7 @@ def _expand_name_patterns(name: str) -> List[str]: def _parse_range_expr(expr: str) -> List[str]: - """ - Parses a bracket expression that might have commas, single values, and dash ranges. + """Parses a bracket expression that might have commas, single values, and dash ranges. For example: "1-3,5,7-9" -> ["1", "2", "3", "5", "7", "8", "9"]. Args: @@ -955,8 +936,7 @@ def _parse_range_expr(expr: str) -> List[str]: def _join_paths(parent_path: str, rel_path: str) -> str: - """ - Joins two path segments according to NetGraph's DSL conventions: + """Joins two path segments according to NetGraph's DSL conventions: - If rel_path starts with '/', we strip the leading slash and treat it as appended to parent_path if parent_path is not empty. @@ -983,8 +963,7 @@ def _join_paths(parent_path: str, rel_path: str) -> str: def _check_no_extra_keys( data_dict: Dict[str, Any], allowed: set[str], context: str ) -> None: - """ - Checks that data_dict only has keys in 'allowed'. Raises ValueError if not. + """Checks that data_dict only has keys in 'allowed'. Raises ValueError if not. Args: data_dict (Dict[str, Any]): The dict to check. @@ -1000,8 +979,7 @@ def _check_no_extra_keys( def _check_adjacency_keys(adj_def: Dict[str, Any], context: str) -> None: - """ - Ensures adjacency definitions only contain recognized keys. + """Ensures adjacency definitions only contain recognized keys. Recognized adjacency keys are: {"source", "target", "pattern", "link_count", "link_params", @@ -1025,9 +1003,8 @@ def _check_adjacency_keys(adj_def: Dict[str, Any], context: str) -> None: def _check_link_params(link_params: Dict[str, Any], context: str) -> None: - """ - Checks that link_params only has recognized keys: - {"capacity", "cost", "disabled", "risk_groups", "attrs"}. + """Checks that link_params only has recognized keys: + {"capacity", "cost", "disabled", "risk_groups", "attrs"}. """ recognized = {"capacity", "cost", "disabled", "risk_groups", "attrs"} extra = set(link_params.keys()) - recognized diff --git a/ngraph/cli.py b/ngraph/cli.py index 932fe44..aa45865 100644 --- a/ngraph/cli.py +++ b/ngraph/cli.py @@ -10,6 +10,7 @@ def _run_scenario(path: Path, output: Optional[Path]) -> None: """Run a scenario file and store results as JSON.""" + yaml_text = path.read_text() scenario = Scenario.from_yaml(yaml_text) scenario.run() @@ -23,7 +24,12 @@ def _run_scenario(path: Path, output: Optional[Path]) -> None: def main(argv: Optional[List[str]] = None) -> None: - """Entry point for the ``ngraph`` command.""" + """Entry point for the ``ngraph`` command. + + Args: + argv: Optional list of command-line arguments. If ``None``, ``sys.argv`` + is used. + """ parser = argparse.ArgumentParser(prog="ngraph") subparsers = parser.add_subparsers(dest="command", required=True) diff --git a/ngraph/components.py b/ngraph/components.py index df2d07b..a60ff76 100644 --- a/ngraph/components.py +++ b/ngraph/components.py @@ -9,8 +9,7 @@ @dataclass class Component: - """ - A generic component that can represent chassis, line cards, optics, etc. + """A generic component that can represent chassis, line cards, optics, etc. 
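The bracket-expansion helpers are private, but their contracts are spelled out above; a quick illustration of the documented behavior:

    from ngraph.blueprints import _expand_name_patterns, _parse_range_expr

    assert _expand_name_patterns("fa[1-3]") == ["fa1", "fa2", "fa3"]
    assert _expand_name_patterns("dc[1,3,5-6]") == ["dc1", "dc3", "dc5", "dc6"]
    assert _parse_range_expr("1-3,5,7-9") == ["1", "2", "3", "5", "7", "8", "9"]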
Components can have nested children, each with their own cost, power, etc. Attributes: @@ -44,8 +43,7 @@ class Component: children: Dict[str, Component] = field(default_factory=dict) def total_cost(self) -> float: - """ - Computes the total (recursive) cost of this component, including children, + """Computes the total (recursive) cost of this component, including children, multiplied by this component's count. Returns: @@ -57,8 +55,7 @@ def total_cost(self) -> float: return single_instance_cost * self.count def total_power(self) -> float: - """ - Computes the total *typical* (recursive) power usage of this component, + """Computes the total *typical* (recursive) power usage of this component, including children, multiplied by this component's count. Returns: @@ -70,8 +67,7 @@ def total_power(self) -> float: return single_instance_power * self.count def total_power_max(self) -> float: - """ - Computes the total *peak* (recursive) power usage of this component, + """Computes the total *peak* (recursive) power usage of this component, including children, multiplied by this component's count. Returns: @@ -83,8 +79,7 @@ def total_power_max(self) -> float: return single_instance_power_max * self.count def total_capacity(self) -> float: - """ - Computes the total (recursive) capacity of this component, + """Computes the total (recursive) capacity of this component, including children, multiplied by this component's count. Returns: @@ -96,8 +91,7 @@ def total_capacity(self) -> float: return single_instance_capacity * self.count def as_dict(self, include_children: bool = True) -> Dict[str, Any]: - """ - Returns a dictionary containing all properties of this component. + """Returns a dictionary containing all properties of this component. Args: include_children (bool): If True, recursively includes children. @@ -127,8 +121,7 @@ def as_dict(self, include_children: bool = True) -> Dict[str, Any]: @dataclass class ComponentsLibrary: - """ - Holds a collection of named Components. Each entry is a top-level "template" + """Holds a collection of named Components. Each entry is a top-level "template" that can be referenced for cost/power/capacity lookups, possibly with nested children. Example (YAML-like): @@ -155,8 +148,7 @@ class ComponentsLibrary: components: Dict[str, Component] = field(default_factory=dict) def get(self, name: str) -> Optional[Component]: - """ - Retrieves a Component by its name from the library. + """Retrieves a Component by its name from the library. Args: name (str): Name of the component. @@ -169,8 +161,7 @@ def get(self, name: str) -> Optional[Component]: def merge( self, other: ComponentsLibrary, override: bool = True ) -> ComponentsLibrary: - """ - Merges another ComponentsLibrary into this one. By default (override=True), + """Merges another ComponentsLibrary into this one. By default (override=True), duplicate components in `other` overwrite those in the current library. Args: @@ -186,8 +177,7 @@ def merge( return self def clone(self) -> ComponentsLibrary: - """ - Creates a deep copy of this ComponentsLibrary. + """Creates a deep copy of this ComponentsLibrary. Returns: ComponentsLibrary: A new, cloned library instance. @@ -196,8 +186,7 @@ def clone(self) -> ComponentsLibrary: @classmethod def from_dict(cls, data: Dict[str, Any]) -> ComponentsLibrary: - """ - Constructs a ComponentsLibrary from a dictionary of raw component definitions. + """Constructs a ComponentsLibrary from a dictionary of raw component definitions. Args: data (Dict[str, Any]): Raw component definitions. 
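A hedged sketch of library construction and the recursive cost roll-up. The "components" top-level key and the cost/count fields are confirmed above; the "children" key name in YAML is inferred from Component.children and is an assumption:

    from ngraph.components import ComponentsLibrary

    lib = ComponentsLibrary.from_yaml(
        """
        components:
          chassis:
            cost: 1000
            children:          # key name assumed from Component.children
              line_card:
                cost: 250
                count: 4
        """
    )
    chassis = lib.get("chassis")
    # Each level multiplies by its count: 1000 + 4 * 250 = 2000 (count defaults to 1).
    print(chassis.total_cost())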
@@ -212,8 +201,7 @@ def from_dict(cls, data: Dict[str, Any]) -> ComponentsLibrary: @classmethod def _build_component(cls, name: str, definition_data: Dict[str, Any]) -> Component: - """ - Recursively constructs a single Component from a dictionary definition. + """Recursively constructs a single Component from a dictionary definition. Args: name (str): Name of the component. @@ -269,8 +257,7 @@ def _build_component(cls, name: str, definition_data: Dict[str, Any]) -> Compone @classmethod def from_yaml(cls, yaml_str: str) -> ComponentsLibrary: - """ - Constructs a ComponentsLibrary from a YAML string. If the YAML contains + """Constructs a ComponentsLibrary from a YAML string. If the YAML contains a top-level 'components' key, that key is used; otherwise the entire top-level is treated as component definitions. diff --git a/ngraph/explorer.py b/ngraph/explorer.py index 554714f..ffa891d 100644 --- a/ngraph/explorer.py +++ b/ngraph/explorer.py @@ -13,8 +13,7 @@ @dataclass class ExternalLinkBreakdown: - """ - Holds stats for external links to a particular other subtree. + """Holds stats for external links to a particular other subtree. Attributes: link_count (int): Number of links to that other subtree. @@ -27,8 +26,7 @@ class ExternalLinkBreakdown: @dataclass class TreeStats: - """ - Aggregated statistics for a single tree node (subtree). + """Aggregated statistics for a single tree node (subtree). Attributes: node_count (int): Total number of nodes in this subtree. @@ -59,8 +57,7 @@ class TreeStats: @dataclass(eq=False) class TreeNode: - """ - Represents a node in the hierarchical tree. + """Represents a node in the hierarchical tree. Attributes: name (str): Name/label of this node. @@ -92,24 +89,19 @@ def __hash__(self) -> int: return id(self) def add_child(self, child_name: str) -> TreeNode: - """ - Ensure a child node named 'child_name' exists and return it. - """ + """Ensure a child node named 'child_name' exists and return it.""" if child_name not in self.children: child_node = TreeNode(name=child_name, parent=self) self.children[child_name] = child_node return self.children[child_name] def is_leaf(self) -> bool: - """ - Return True if this node has no children. - """ + """Return True if this node has no children.""" return len(self.children) == 0 class NetworkExplorer: - """ - Provides hierarchical exploration of a Network, computing statistics in two modes: + """Provides hierarchical exploration of a Network, computing statistics in two modes: 'all' (ignores disabled) and 'active' (only enabled). """ @@ -136,9 +128,7 @@ def explore_network( network: Network, components_library: Optional[ComponentsLibrary] = None, ) -> NetworkExplorer: - """ - Build a NetworkExplorer, constructing a tree plus 'all' and 'active' stats. - """ + """Build a NetworkExplorer, constructing a tree plus 'all' and 'active' stats.""" instance = cls(network, components_library) # 1) Build hierarchy @@ -160,8 +150,7 @@ def explore_network( return instance def _build_hierarchy_tree(self) -> TreeNode: - """ - Build a multi-level tree by splitting node names on '/'. + """Build a multi-level tree by splitting node names on '/'. Example: "dc1/plane1/ssw/ssw-1" => root/dc1/plane1/ssw/ssw-1 """ root = TreeNode(name="root") @@ -174,9 +163,7 @@ def _build_hierarchy_tree(self) -> TreeNode: return root def _compute_subtree_sets_all(self, node: TreeNode) -> Set[str]: - """ - Recursively collect all node names (regardless of disabled) into subtree_nodes. 
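Usage is a two-step affair, mirroring the docstrings above (a sketch; print_tree accepts more options than shown here):

    from ngraph.network import Network, Node
    from ngraph.explorer import NetworkExplorer

    net = Network()
    for name in ("dc1/plane1/ssw/ssw-1", "dc1/plane1/ssw/ssw-2"):
        net.add_node(Node(name=name))

    explorer = NetworkExplorer.explore_network(net)  # components_library is optional
    explorer.print_tree(detailed=False)              # root subtree; disabled included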
- """ + """Recursively collect all node names (regardless of disabled) into subtree_nodes.""" collected = set() for child in node.children.values(): collected |= self._compute_subtree_sets_all(child) @@ -186,8 +173,7 @@ def _compute_subtree_sets_all(self, node: TreeNode) -> Set[str]: return collected def _compute_subtree_sets_active(self, node: TreeNode) -> Set[str]: - """ - Recursively collect enabled node names into active_subtree_nodes. + """Recursively collect enabled node names into active_subtree_nodes. A node is considered enabled if nd.attrs.get("disabled") is not truthy. """ collected = set() @@ -200,8 +186,7 @@ def _compute_subtree_sets_active(self, node: TreeNode) -> Set[str]: return collected def _build_node_map(self, node: TreeNode) -> None: - """ - Assign each node's name to the *deepest* TreeNode that actually holds it. + """Assign each node's name to the *deepest* TreeNode that actually holds it. We do a parent-first approach so children override if needed. """ # Map the raw_nodes at this level @@ -213,18 +198,14 @@ def _build_node_map(self, node: TreeNode) -> None: self._build_node_map(child) def _build_path_map(self, node: TreeNode) -> None: - """ - Build a path->TreeNode map for easy lookups. Skips "root" in path strings. - """ + """Build a path->TreeNode map for easy lookups. Skips "root" in path strings.""" path_str = self._compute_full_path(node) self._path_map[path_str] = node for child in node.children.values(): self._build_path_map(child) def _compute_full_path(self, node: TreeNode) -> str: - """ - Return a '/'-joined path, omitting "root". - """ + """Return a '/'-joined path, omitting "root".""" parts = [] current = node while current and current.name != "root": @@ -233,9 +214,7 @@ def _compute_full_path(self, node: TreeNode) -> str: return "/".join(reversed(parts)) def _get_ancestors(self, node: TreeNode) -> Set[TreeNode]: - """ - Return a cached set of this node's ancestors (including itself). - """ + """Return a cached set of this node's ancestors (including itself).""" if node in self._ancestors_cache: return self._ancestors_cache[node] @@ -248,10 +227,9 @@ def _get_ancestors(self, node: TreeNode) -> Set[TreeNode]: return ancestors def _compute_statistics(self) -> None: - """ - Populates two stats sets for each TreeNode: - - node.stats (all, ignoring disabled) - - node.active_stats (only enabled nodes/links) + """Populates two stats sets for each TreeNode: + - node.stats (all, ignoring disabled) + - node.active_stats (only enabled nodes/links) """ # First, zero them out @@ -390,8 +368,7 @@ def print_tree( detailed: bool = False, include_disabled: bool = True, ) -> None: - """ - Print the hierarchy from 'node' down (default: root). + """Print the hierarchy from 'node' down (default: root). Args: node (TreeNode): subtree to print, or root if None @@ -469,9 +446,7 @@ def print_tree( ) def _roll_up_if_leaf(self, path: str) -> str: - """ - If 'path' is a leaf node's path, climb up until a non-leaf or root is found. 
- """ + """If 'path' is a leaf node's path, climb up until a non-leaf or root is found.""" node = self._path_map.get(path) if not node: return path diff --git a/ngraph/failure_manager.py b/ngraph/failure_manager.py index be6810f..8678862 100644 --- a/ngraph/failure_manager.py +++ b/ngraph/failure_manager.py @@ -14,8 +14,7 @@ class FailureManager: - """ - Applies FailurePolicy to a Network, runs traffic placement, and (optionally) + """Applies FailurePolicy to a Network, runs traffic placement, and (optionally) repeats multiple times for Monte Carlo experiments. Attributes: @@ -32,8 +31,7 @@ def __init__( failure_policy: Optional[FailurePolicy] = None, default_flow_policy_config: Optional[FlowPolicyConfig] = None, ) -> None: - """ - Initialize a FailureManager. + """Initialize a FailureManager. Args: network: The Network to be modified by failures. @@ -47,8 +45,7 @@ def __init__( self.default_flow_policy_config = default_flow_policy_config def apply_failures(self) -> None: - """ - Apply the current failure_policy to self.network (in-place). + """Apply the current failure_policy to self.network (in-place). If failure_policy is None, this method does nothing. """ @@ -69,8 +66,7 @@ def apply_failures(self) -> None: self.network.disable_link(f_id) def run_single_failure_scenario(self) -> List[TrafficResult]: - """ - Applies failures to the network, places the demands, and returns per-demand results. + """Applies failures to the network, places the demands, and returns per-demand results. Returns: List[TrafficResult]: A list of traffic result objects under the applied failures. @@ -100,8 +96,7 @@ def run_monte_carlo_failures( iterations: int, parallelism: int = 1, ) -> Dict[str, Any]: - """ - Repeatedly applies (randomized) failures to the network and accumulates + """Repeatedly applies (randomized) failures to the network and accumulates per-run traffic data. Returns both overall volume statistics and a breakdown of results for each (src, dst, priority). diff --git a/ngraph/failure_policy.py b/ngraph/failure_policy.py index edcb800..3a8d3ed 100644 --- a/ngraph/failure_policy.py +++ b/ngraph/failure_policy.py @@ -8,8 +8,7 @@ @dataclass class FailureCondition: - """ - A single condition for matching an entity's attribute with an operator and value. + """A single condition for matching an entity's attribute with an operator and value. Example usage (YAML): conditions: @@ -38,8 +37,7 @@ class FailureCondition: @dataclass class FailureRule: - """ - Defines how to match and then select entities for failure. + """Defines how to match and then select entities for failure. Attributes: entity_scope (EntityScope): @@ -79,8 +77,7 @@ def __post_init__(self) -> None: @dataclass class FailurePolicy: - """ - A container for multiple FailureRules plus optional metadata in `attrs`. + """A container for multiple FailureRules plus optional metadata in `attrs`. The main entry point is `apply_failures`, which: 1) For each rule, gather the relevant entities (node, link, or risk_group). @@ -128,8 +125,7 @@ def apply_failures( network_links: Dict[str, Any], network_risk_groups: Dict[str, Any] | None = None, ) -> List[str]: - """ - Identify which entities fail given the defined rules, then optionally + """Identify which entities fail given the defined rules, then optionally expand by shared-risk groups or nested risk groups. 
Args: @@ -195,8 +191,7 @@ def _match_scope( network_links: Dict[str, Any], network_risk_groups: Dict[str, Any], ) -> Set[str]: - """ - Get the set of IDs matched by the given rule, either from cache + """Get the set of IDs matched by the given rule, either from cache or by performing a fresh match over the relevant entity type. """ if self.use_cache and rule_idx in self._match_cache: @@ -222,8 +217,7 @@ def _match_entities( conditions: List[FailureCondition], logic: str, ) -> Set[str]: - """ - Return all entity IDs that match the given conditions based on 'and'/'or'/'any' logic. + """Return all entity IDs that match the given conditions based on 'and'/'or'/'any' logic. entity_map is either nodes, links, or risk_groups: {entity_id -> {top_level_attr: value, ...}} @@ -253,8 +247,7 @@ def _evaluate_conditions( conditions: List[FailureCondition], logic: str, ) -> bool: - """ - Evaluate multiple conditions on a single entity. All or any condition(s) + """Evaluate multiple conditions on a single entity. All or any condition(s) must pass, depending on 'logic'. """ if logic == "and": @@ -266,9 +259,7 @@ def _evaluate_conditions( @staticmethod def _select_entities(entity_ids: Set[str], rule: FailureRule) -> Set[str]: - """ - From the matched IDs, pick which entities fail under the given rule_type. - """ + """From the matched IDs, pick which entities fail under the given rule_type.""" if not entity_ids: return set() @@ -290,8 +281,7 @@ def _expand_shared_risk_groups( network_nodes: Dict[str, Any], network_links: Dict[str, Any], ) -> None: - """ - Expand failures among any node/link that shares a risk group + """Expand failures among any node/link that shares a risk group with a failed entity. BFS until no new failures. """ # We'll handle node + link expansions only. (Risk group expansions are separate.) @@ -344,8 +334,7 @@ def _expand_failed_risk_group_children( failed_rgs: Set[str], all_risk_groups: Dict[str, Any], ) -> None: - """ - If we fail a risk_group, also fail its descendants recursively. + """If we fail a risk_group, also fail its descendants recursively. We assume each entry in all_risk_groups is something like: rg_name -> RiskGroup object or { 'name': .., 'children': [...] } @@ -378,8 +367,7 @@ def _expand_failed_risk_group_children( def _evaluate_condition(entity_attrs: Dict[str, Any], cond: FailureCondition) -> bool: - """ - Evaluate a single FailureCondition against entity attributes. + """Evaluate a single FailureCondition against entity attributes. Operators supported: ==, !=, <, <=, >, >= diff --git a/ngraph/lib/algorithms/base.py b/ngraph/lib/algorithms/base.py index 893dc88..7ac963f 100644 --- a/ngraph/lib/algorithms/base.py +++ b/ngraph/lib/algorithms/base.py @@ -27,17 +27,14 @@ class PathAlg(IntEnum): - """ - Types of path finding algorithms - """ + """Types of path finding algorithms""" SPF = 1 KSP_YENS = 2 class EdgeSelect(IntEnum): - """ - Edge selection criteria determining which edges are considered + """Edge selection criteria determining which edges are considered for path-finding between a node and its neighbor(s). 
""" diff --git a/ngraph/lib/algorithms/calc_capacity.py b/ngraph/lib/algorithms/calc_capacity.py index 34b2fee..1ba7074 100644 --- a/ngraph/lib/algorithms/calc_capacity.py +++ b/ngraph/lib/algorithms/calc_capacity.py @@ -20,8 +20,7 @@ def _init_graph_data( Dict[NodeID, Dict[NodeID, float]], Dict[NodeID, Dict[NodeID, float]], ]: - """ - Build the necessary data structures for the flow algorithm (in reversed orientation): + """Build the necessary data structures for the flow algorithm (in reversed orientation): - ``succ``: Reversed adjacency mapping. For each forward edge u->v in ``pred``, store v->u in ``succ`` along with the tuple of edge IDs. @@ -150,8 +149,7 @@ def _set_levels_bfs( levels: Dict[NodeID, int], residual_cap: Dict[NodeID, Dict[NodeID, float]], ) -> None: - """ - Perform a BFS on the reversed residual graph to assign levels for Dinic's algorithm. + """Perform a BFS on the reversed residual graph to assign levels for Dinic's algorithm. An edge is considered if its residual capacity is at least MIN_CAP. Args: @@ -183,8 +181,7 @@ def _push_flow_dfs( flow_dict: Dict[NodeID, Dict[NodeID, float]], levels: Dict[NodeID, int], ) -> float: - """ - Recursively push flow from `current` to `sink` in the reversed residual graph using DFS. + """Recursively push flow from `current` to `sink` in the reversed residual graph using DFS. Only paths that follow the level structure (levels[nxt] == levels[current] + 1) are considered. Args: @@ -243,8 +240,7 @@ def _equal_balance_bfs( succ: Dict[NodeID, Dict[NodeID, Tuple[EdgeID, ...]]], flow_dict: Dict[NodeID, Dict[NodeID, float]], ) -> None: - """ - Perform a BFS-like pass to distribute a nominal flow of 1.0 from `src_node` + """Perform a BFS-like pass to distribute a nominal flow of 1.0 from `src_node` over the reversed adjacency (succ), splitting flow equally among all outgoing parallel edges from each node. This does not verify capacities. It merely assigns relative (fractional) flow amounts, which are later scaled so that @@ -300,8 +296,7 @@ def calc_graph_capacity( capacity_attr: str = "capacity", flow_attr: str = "flow", ) -> Tuple[float, Dict[NodeID, Dict[NodeID, float]]]: - """ - Calculate the maximum feasible flow from src_node to dst_node (forward sense) + """Calculate the maximum feasible flow from src_node to dst_node (forward sense) using either the PROPORTIONAL or EQUAL_BALANCED approach. In PROPORTIONAL mode (similar to Dinic in reversed orientation): diff --git a/ngraph/lib/algorithms/edge_select.py b/ngraph/lib/algorithms/edge_select.py index ed48e13..19d4345 100644 --- a/ngraph/lib/algorithms/edge_select.py +++ b/ngraph/lib/algorithms/edge_select.py @@ -37,8 +37,7 @@ def edge_select_fabric( ], Tuple[Cost, List[EdgeID]], ]: - """ - Creates a function that selects edges between two nodes according + """Creates a function that selects edges between two nodes according to a given EdgeSelect strategy (or a user-defined function). Args: @@ -68,8 +67,7 @@ def get_all_min_cost_edges( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> Tuple[Cost, List[EdgeID]]: - """ - Return all edges whose cost is the minimum among available edges. + """Return all edges whose cost is the minimum among available edges. If the destination node is excluded, returns (inf, []). 
""" if excluded_nodes and dst_node in excluded_nodes: @@ -99,8 +97,7 @@ def get_single_min_cost_edge( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> Tuple[Cost, List[EdgeID]]: - """ - Return exactly one edge: the single lowest-cost edge. + """Return exactly one edge: the single lowest-cost edge. If the destination node is excluded, returns (inf, []). """ if excluded_nodes and dst_node in excluded_nodes: @@ -128,8 +125,7 @@ def get_all_edges_with_cap_remaining( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> Tuple[Cost, List[EdgeID]]: - """ - Return all edges that have remaining capacity >= min_cap, + """Return all edges that have remaining capacity >= min_cap, ignoring cost differences (though return the minimal cost found). """ if excluded_nodes and dst_node in excluded_nodes: @@ -163,8 +159,7 @@ def get_all_min_cost_edges_with_cap_remaining( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> Tuple[Cost, List[EdgeID]]: - """ - Return all edges that have remaining capacity >= min_cap + """Return all edges that have remaining capacity >= min_cap among those with the minimum cost. """ if excluded_nodes and dst_node in excluded_nodes: @@ -200,8 +195,7 @@ def get_single_min_cost_edge_with_cap_remaining( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> Tuple[Cost, List[EdgeID]]: - """ - Return exactly one edge with the minimal cost among those + """Return exactly one edge with the minimal cost among those that have remaining capacity >= min_cap. """ if excluded_nodes and dst_node in excluded_nodes: @@ -235,8 +229,7 @@ def get_single_min_cost_edge_with_cap_remaining_load_factored( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> Tuple[Cost, List[EdgeID]]: - """ - Return exactly one edge, factoring both cost and load level + """Return exactly one edge, factoring both cost and load level into a combined cost: cost_factor = (cost * 100) + round((flow/capacity)*10). Only edges with remaining capacity >= min_cap are considered. """ diff --git a/ngraph/lib/algorithms/flow_init.py b/ngraph/lib/algorithms/flow_init.py index 0c84db1..5064f17 100644 --- a/ngraph/lib/algorithms/flow_init.py +++ b/ngraph/lib/algorithms/flow_init.py @@ -9,8 +9,7 @@ def init_flow_graph( flows_attr: str = "flows", reset_flow_graph: bool = True, ) -> StrictMultiDiGraph: - """ - Ensure that every node and edge in the provided `flow_graph` has + """Ensure that every node and edge in the provided `flow_graph` has flow-related attributes. Specifically, for each node and edge: - The attribute named `flow_attr` (default: "flow") is set to 0. diff --git a/ngraph/lib/algorithms/max_flow.py b/ngraph/lib/algorithms/max_flow.py index 0662a60..d0ac6f9 100644 --- a/ngraph/lib/algorithms/max_flow.py +++ b/ngraph/lib/algorithms/max_flow.py @@ -17,8 +17,7 @@ def calc_max_flow( flows_attr: str = "flows", copy_graph: bool = True, ) -> float: - """ - Compute the maximum flow between two nodes in a directed multi-graph, + """Compute the maximum flow between two nodes in a directed multi-graph, using an iterative shortest-path augmentation approach. 
By default, this function: diff --git a/ngraph/lib/algorithms/path_utils.py b/ngraph/lib/algorithms/path_utils.py index 8a9c325..92031a0 100644 --- a/ngraph/lib/algorithms/path_utils.py +++ b/ngraph/lib/algorithms/path_utils.py @@ -13,8 +13,7 @@ def resolve_to_paths( pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]], split_parallel_edges: bool = False, ) -> Iterator[PathTuple]: - """ - Enumerate all source->destination paths from a predecessor map. + """Enumerate all source->destination paths from a predecessor map. Args: src_node: Source node ID. diff --git a/ngraph/lib/algorithms/place_flow.py b/ngraph/lib/algorithms/place_flow.py index a985d0b..6cc49f7 100644 --- a/ngraph/lib/algorithms/place_flow.py +++ b/ngraph/lib/algorithms/place_flow.py @@ -10,8 +10,7 @@ @dataclass class FlowPlacementMeta: - """ - Metadata capturing how flow was placed on the graph. + """Metadata capturing how flow was placed on the graph. Attributes: placed_flow: The amount of flow actually placed. diff --git a/ngraph/lib/algorithms/spf.py b/ngraph/lib/algorithms/spf.py index aa7500f..03af61a 100644 --- a/ngraph/lib/algorithms/spf.py +++ b/ngraph/lib/algorithms/spf.py @@ -29,8 +29,7 @@ def _spf_fast_all_min_cost_dijkstra( src_node: NodeID, multipath: bool, ) -> Tuple[Dict[NodeID, Cost], Dict[NodeID, Dict[NodeID, List[EdgeID]]]]: - """ - Specialized Dijkstra's SPF for: + """Specialized Dijkstra's SPF for: - EdgeSelect.ALL_MIN_COST - No excluded edges/nodes. @@ -96,8 +95,7 @@ def _spf_fast_all_min_cost_with_cap_remaining_dijkstra( src_node: NodeID, multipath: bool, ) -> Tuple[Dict[NodeID, Cost], Dict[NodeID, Dict[NodeID, List[EdgeID]]]]: - """ - Specialized Dijkstra's SPF for: + """Specialized Dijkstra's SPF for: - EdgeSelect.ALL_MIN_COST_WITH_CAP_REMAINING - No excluded edges/nodes @@ -177,8 +175,7 @@ def spf( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> Tuple[Dict[NodeID, Cost], Dict[NodeID, Dict[NodeID, List[EdgeID]]]]: - """ - Compute shortest paths (cost-based) from a source node using a Dijkstra-like method. + """Compute shortest paths (cost-based) from a source node using a Dijkstra-like method. By default, uses EdgeSelect.ALL_MIN_COST. If multipath=True, multiple equal-cost paths to the same node will be recorded in the predecessor structure. If no @@ -294,8 +291,7 @@ def ksp( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> Iterator[Tuple[Dict[NodeID, Cost], Dict[NodeID, Dict[NodeID, List[EdgeID]]]]]: - """ - Generator of up to k shortest paths from src_node to dst_node using a Yen-like algorithm. + """Generator of up to k shortest paths from src_node to dst_node using a Yen-like algorithm. The initial SPF (shortest path) is computed; subsequent paths are found by systematically excluding edges/nodes used by previously generated paths. Each iteration yields a diff --git a/ngraph/lib/demand.py b/ngraph/lib/demand.py index a2b06da..444b8e4 100644 --- a/ngraph/lib/demand.py +++ b/ngraph/lib/demand.py @@ -11,8 +11,7 @@ @dataclass class Demand: - """ - Represents a network demand between two nodes. It is realized via one or more + """Represents a network demand between two nodes. It is realized via one or more flows through a single FlowPolicy. """ @@ -34,8 +33,7 @@ def _round_float(value: float) -> float: return value def __lt__(self, other: Demand) -> bool: - """ - Compare Demands by their demand_class (priority). A lower demand_class + """Compare Demands by their demand_class (priority). 
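Putting spf and calc_max_flow together on a small graph (a sketch; the positional graph/source/sink arguments are assumed, since only the trailing keyword defaults appear in these hunks):

    from ngraph.lib.graph import StrictMultiDiGraph
    from ngraph.lib.algorithms.spf import spf
    from ngraph.lib.algorithms.max_flow import calc_max_flow

    g = StrictMultiDiGraph()
    for n in ("A", "B", "C"):
        g.add_node(n)
    g.add_edge("A", "B", cost=1, capacity=10.0)
    g.add_edge("B", "C", cost=1, capacity=5.0)
    g.add_edge("A", "C", cost=3, capacity=2.0)

    costs, pred = spf(g, "A")          # Dijkstra-like; pred records min-cost edges
    print(costs["C"])                  # 2, via A->B->C
    print(calc_max_flow(g, "A", "C"))  # 7.0 = 5.0 through B + 2.0 direct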
A lower demand_class indicates higher priority, so it should come first in sorting. Args: @@ -47,9 +45,7 @@ def __lt__(self, other: Demand) -> bool: return self.demand_class < other.demand_class def __str__(self) -> str: - """ - String representation showing src, dst, volume, priority, and placed_demand. - """ + """String representation showing src, dst, volume, priority, and placed_demand.""" return ( f"Demand(src_node={self.src_node}, dst_node={self.dst_node}, " f"volume={self.volume}, demand_class={self.demand_class}, " @@ -62,8 +58,7 @@ def place( max_fraction: float = 1.0, max_placement: Optional[float] = None, ) -> Tuple[float, float]: - """ - Places demand volume onto the network via self.flow_policy. + """Places demand volume onto the network via self.flow_policy. Args: flow_graph (StrictMultiDiGraph): The graph to place flows onto. diff --git a/ngraph/lib/flow.py b/ngraph/lib/flow.py index 0279e1e..9068370 100644 --- a/ngraph/lib/flow.py +++ b/ngraph/lib/flow.py @@ -13,8 +13,7 @@ class FlowIndex(NamedTuple): - """ - Describes a unique identifier for a Flow in the network. + """Describes a unique identifier for a Flow in the network. Attributes: src_node (NodeID): The source node of the flow. @@ -31,8 +30,7 @@ class FlowIndex(NamedTuple): class Flow: - """ - Represents a fraction of demand routed along a given PathBundle. + """Represents a fraction of demand routed along a given PathBundle. In traffic-engineering scenarios, a `Flow` object can model: - MPLS LSPs/tunnels with explicit paths, @@ -47,8 +45,7 @@ def __init__( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> None: - """ - Initialize a Flow object. + """Initialize a Flow object. Args: path_bundle (PathBundle): The set of paths this flow uses. @@ -69,8 +66,7 @@ def __init__( self.placed_flow: float = 0.0 def __str__(self) -> str: - """ - Returns a string representation of the Flow. + """Returns a string representation of the Flow. Returns: str: String representation including flow index and placed flow amount. @@ -83,8 +79,7 @@ def place_flow( to_place: float, flow_placement: FlowPlacement, ) -> Tuple[float, float]: - """ - Attempt to place (or update) this flow on the given `flow_graph`. + """Attempt to place (or update) this flow on the given `flow_graph`. Args: flow_graph (StrictMultiDiGraph): The network graph tracking capacities and usage. @@ -116,8 +111,7 @@ def place_flow( return placed_flow, to_place def remove_flow(self, flow_graph: StrictMultiDiGraph) -> None: - """ - Remove this flow's contribution from the provided `flow_graph`. + """Remove this flow's contribution from the provided `flow_graph`. Args: flow_graph (StrictMultiDiGraph): The network graph from which to remove this flow's usage. diff --git a/ngraph/lib/flow_policy.py b/ngraph/lib/flow_policy.py index b2a6dcd..e3aa60c 100644 --- a/ngraph/lib/flow_policy.py +++ b/ngraph/lib/flow_policy.py @@ -13,9 +13,7 @@ class FlowPolicyConfig(IntEnum): - """ - Enumerates supported flow policy configurations. - """ + """Enumerates supported flow policy configurations.""" SHORTEST_PATHS_ECMP = 1 SHORTEST_PATHS_UCMP = 2 @@ -25,8 +23,7 @@ class FlowPolicyConfig(IntEnum): class FlowPolicy: - """ - Manages the placement and management of flows (demands) on a network graph. + """Manages the placement and management of flows (demands) on a network graph. 
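A demand is realized by pairing it with a FlowPolicy (a sketch; the flow_policy field name on Demand is an assumption, and get_flow_policy is the factory defined later in this file):

    from ngraph.lib.graph import StrictMultiDiGraph
    from ngraph.lib.demand import Demand
    from ngraph.lib.flow_policy import FlowPolicyConfig, get_flow_policy
    from ngraph.lib.algorithms.flow_init import init_flow_graph

    g = StrictMultiDiGraph()
    for n in ("A", "B", "C"):
        g.add_node(n)
    g.add_edge("A", "B", cost=1, capacity=10.0)
    g.add_edge("B", "C", cost=1, capacity=10.0)
    flow_graph = init_flow_graph(g)  # seeds "flow"=0 and "flows"={} everywhere

    policy = get_flow_policy(FlowPolicyConfig.SHORTEST_PATHS_ECMP)
    demand = Demand("A", "C", volume=6.0, demand_class=0, flow_policy=policy)
    placed, leftover = demand.place(flow_graph)  # Tuple[float, float] per signature
    print(placed, demand.placed_demand)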
A FlowPolicy converts a demand into one or more Flow objects subject to capacity constraints and user-specified configurations such as path @@ -60,8 +57,7 @@ def __init__( edge_select_value: Optional[Any] = None, reoptimize_flows_on_each_placement: bool = False, ) -> None: - """ - Initializes a FlowPolicy instance. + """Initializes a FlowPolicy instance. Args: path_alg: The path algorithm to use (e.g., SPF). @@ -120,8 +116,7 @@ def __init__( raise ValueError("max_flow_count must be set for EQUAL_BALANCED placement.") def deep_copy(self) -> FlowPolicy: - """ - Creates and returns a deep copy of this FlowPolicy, including all flows. + """Creates and returns a deep copy of this FlowPolicy, including all flows. Returns: A new FlowPolicy object that is a deep copy of the current instance. @@ -130,21 +125,16 @@ def deep_copy(self) -> FlowPolicy: @property def flow_count(self) -> int: - """ - Returns the number of flows currently tracked by the policy. - """ + """Returns the number of flows currently tracked by the policy.""" return len(self.flows) @property def placed_demand(self) -> float: - """ - Returns the sum of all placed flow volumes across flows. - """ + """Returns the sum of all placed flow volumes across flows.""" return sum(flow.placed_flow for flow in self.flows.values()) def _get_next_flow_id(self) -> int: - """ - Retrieves and increments the internal flow ID counter. + """Retrieves and increments the internal flow ID counter. Returns: The next available integer flow ID. @@ -160,8 +150,7 @@ def _build_flow_index( flow_class: Hashable, flow_id: int, ) -> FlowIndex: - """ - Constructs a FlowIndex tuple used as a dictionary key to track flows. + """Constructs a FlowIndex tuple used as a dictionary key to track flows. Args: src_node: The source node identifier. @@ -183,8 +172,7 @@ def _get_path_bundle( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> Optional[PathBundle]: - """ - Finds a path or set of paths from src_node to dst_node, optionally excluding + """Finds a path or set of paths from src_node to dst_node, optionally excluding certain edges or nodes. Args: @@ -254,8 +242,7 @@ def _create_flow( excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> Optional[Flow]: - """ - Creates a new Flow and registers it within the policy. + """Creates a new Flow and registers it within the policy. Args: flow_graph: The network graph. @@ -291,8 +278,7 @@ def _create_flows( flow_class: Hashable, min_flow: Optional[float] = None, ) -> None: - """ - Creates the initial set of flows for a new demand. + """Creates the initial set of flows for a new demand. If static paths are defined, they are used directly; otherwise, flows are created via path-finding. @@ -332,8 +318,7 @@ def _create_flows( def _delete_flow( self, flow_graph: StrictMultiDiGraph, flow_index: FlowIndex ) -> None: - """ - Deletes a flow from the policy and removes it from the network graph. + """Deletes a flow from the policy and removes it from the network graph. Args: flow_graph: The network graph. @@ -351,8 +336,7 @@ def _reoptimize_flow( flow_index: FlowIndex, headroom: float = 0.0, ) -> Optional[Flow]: - """ - Re-optimizes an existing flow by finding a new path that can accommodate + """Re-optimizes an existing flow by finding a new path that can accommodate additional volume headroom. If no better path is found, the original path is restored. 
Args: @@ -399,8 +383,7 @@ def place_demand( target_flow_volume: Optional[float] = None, min_flow: Optional[float] = None, ) -> Tuple[float, float]: - """ - Places the given demand volume on the network graph by splitting or creating + """Places the given demand volume on the network graph by splitting or creating flows as needed. Optionally re-optimizes flows based on the policy configuration. Args: @@ -484,8 +467,7 @@ def rebalance_demand( flow_class: Hashable, target_flow_volume: float, ) -> Tuple[float, float]: - """ - Rebalances the demand across existing flows so that their volumes are closer + """Rebalances the demand across existing flows so that their volumes are closer to the target_flow_volume. This is achieved by removing all flows from the network graph and re-placing them. @@ -506,8 +488,7 @@ def rebalance_demand( ) def remove_demand(self, flow_graph: StrictMultiDiGraph) -> None: - """ - Removes all flows from the network graph without clearing internal state. + """Removes all flows from the network graph without clearing internal state. This allows subsequent re-optimization. Args: @@ -518,8 +499,7 @@ def remove_demand(self, flow_graph: StrictMultiDiGraph) -> None: def get_flow_policy(flow_policy_config: FlowPolicyConfig) -> FlowPolicy: - """ - Factory method to create and return a FlowPolicy instance based on the provided configuration. + """Factory method to create and return a FlowPolicy instance based on the provided configuration. Args: flow_policy_config: A FlowPolicyConfig enum value specifying the desired policy. diff --git a/ngraph/lib/graph.py b/ngraph/lib/graph.py index 68fcb94..651ee6b 100644 --- a/ngraph/lib/graph.py +++ b/ngraph/lib/graph.py @@ -9,8 +9,7 @@ def new_base64_uuid() -> str: - """ - Generate a Base64-encoded UUID without padding. + """Generate a Base64-encoded UUID without padding. This function produces a 22-character, URL-safe, Base64-encoded UUID. @@ -29,8 +28,7 @@ def new_base64_uuid() -> str: class StrictMultiDiGraph(nx.MultiDiGraph): - """ - A custom multi-directed graph with strict rules and unique edge IDs. + """A custom multi-directed graph with strict rules and unique edge IDs. This class enforces: - No automatic creation of missing nodes when adding an edge. @@ -47,8 +45,7 @@ class StrictMultiDiGraph(nx.MultiDiGraph): """ def __init__(self, *args, **kwargs) -> None: - """ - Initialize a StrictMultiDiGraph. + """Initialize a StrictMultiDiGraph. Args: *args: Positional arguments forwarded to the MultiDiGraph constructor. @@ -62,8 +59,7 @@ def __init__(self, *args, **kwargs) -> None: self._edges: Dict[EdgeID, EdgeTuple] = {} def copy(self, as_view: bool = False, pickle: bool = True) -> StrictMultiDiGraph: - """ - Create a copy of this graph. + """Create a copy of this graph. By default, uses pickle-based deep copying. If pickle=False, this method calls the parent class's copy, which supports views. @@ -85,8 +81,7 @@ def copy(self, as_view: bool = False, pickle: bool = True) -> StrictMultiDiGraph # Node management # def add_node(self, node_for_adding: NodeID, **attr: Any) -> None: - """ - Add a single node, disallowing duplicates. + """Add a single node, disallowing duplicates. Args: node_for_adding (NodeID): The node to add. @@ -100,8 +95,7 @@ def add_node(self, node_for_adding: NodeID, **attr: Any) -> None: super().add_node(node_for_adding, **attr) def remove_node(self, n: NodeID) -> None: - """ - Remove a single node and all incident edges. + """Remove a single node and all incident edges. Args: n (NodeID): The node to remove. 
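The strict behaviors read well in a short demo (the exact exception types are assumed; the docstrings only say the operations are disallowed):

    from ngraph.lib.graph import StrictMultiDiGraph

    g = StrictMultiDiGraph()
    g.add_node("u")
    g.add_node("v")
    eid = g.add_edge("u", "v", capacity=10.0)  # key auto-generated (Base64 UUID)
    print(g.has_edge_by_id(eid), g.edges_between("u", "v"))

    try:
        g.add_node("u")              # duplicate node -> error
    except ValueError as exc:        # exception type assumed
        print(exc)

    try:
        g.add_edge("u", "missing")   # nodes are never auto-created
    except ValueError as exc:
        print(exc)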
@@ -130,8 +124,7 @@ def add_edge( key: Optional[EdgeID] = None, **attr: Any, ) -> EdgeID: - """ - Add a directed edge from u_for_edge to v_for_edge. + """Add a directed edge from u_for_edge to v_for_edge. If no key is provided, a unique Base64-UUID is generated. This method does not create nodes automatically; both u_for_edge and v_for_edge @@ -178,8 +171,7 @@ def remove_edge( v: NodeID, key: Optional[EdgeID] = None, ) -> None: - """ - Remove an edge (or edges) between nodes u and v. + """Remove an edge (or edges) between nodes u and v. If key is provided, remove only that edge. Otherwise, remove all edges from u to v. @@ -219,8 +211,7 @@ def remove_edge( self.remove_edge_by_id(e_id) def remove_edge_by_id(self, key: EdgeID) -> None: - """ - Remove a directed edge by its unique key. + """Remove a directed edge by its unique key. Args: key (EdgeID): The key identifying the edge to remove. @@ -237,8 +228,7 @@ def remove_edge_by_id(self, key: EdgeID) -> None: # Convenience methods # def get_nodes(self) -> Dict[NodeID, AttrDict]: - """ - Retrieve all nodes and their attributes as a dictionary. + """Retrieve all nodes and their attributes as a dictionary. Returns: Dict[NodeID, AttrDict]: A mapping of node ID to its attributes. @@ -246,8 +236,7 @@ def get_nodes(self) -> Dict[NodeID, AttrDict]: return dict(self.nodes(data=True)) def get_edges(self) -> Dict[EdgeID, EdgeTuple]: - """ - Retrieve a dictionary of all edges by their keys. + """Retrieve a dictionary of all edges by their keys. Returns: Dict[EdgeID, EdgeTuple]: A mapping of edge key to a tuple @@ -256,8 +245,7 @@ def get_edges(self) -> Dict[EdgeID, EdgeTuple]: return self._edges def get_edge_attr(self, key: EdgeID) -> AttrDict: - """ - Retrieve the attribute dictionary of a specific edge. + """Retrieve the attribute dictionary of a specific edge. Args: key (EdgeID): The unique edge key. @@ -273,8 +261,7 @@ def get_edge_attr(self, key: EdgeID) -> AttrDict: return self._edges[key][3] def has_edge_by_id(self, key: EdgeID) -> bool: - """ - Check whether an edge with the given key exists. + """Check whether an edge with the given key exists. Args: key (EdgeID): The unique edge key to check. @@ -285,8 +272,7 @@ def has_edge_by_id(self, key: EdgeID) -> bool: return key in self._edges def edges_between(self, u: NodeID, v: NodeID) -> List[EdgeID]: - """ - List all edge keys from node u to node v. + """List all edge keys from node u to node v. Args: u (NodeID): The source node. @@ -300,8 +286,7 @@ def edges_between(self, u: NodeID, v: NodeID) -> List[EdgeID]: return list(self.succ[u][v].keys()) def update_edge_attr(self, key: EdgeID, **attr: Any) -> None: - """ - Update attributes on an existing edge by key. + """Update attributes on an existing edge by key. Args: key (EdgeID): The unique edge key to update. diff --git a/ngraph/lib/io.py b/ngraph/lib/io.py index 382d543..33e4e40 100644 --- a/ngraph/lib/io.py +++ b/ngraph/lib/io.py @@ -6,8 +6,7 @@ def graph_to_node_link(graph: StrictMultiDiGraph) -> Dict[str, Any]: - """ - Converts a StrictMultiDiGraph into a node-link dict representation. + """Converts a StrictMultiDiGraph into a node-link dict representation. This representation is suitable for JSON serialization (e.g., for D3.js or Nx formats). @@ -58,8 +57,7 @@ def graph_to_node_link(graph: StrictMultiDiGraph) -> Dict[str, Any]: def node_link_to_graph(data: Dict[str, Any]) -> StrictMultiDiGraph: - """ - Reconstructs a StrictMultiDiGraph from its node-link dict representation. 
+ """Reconstructs a StrictMultiDiGraph from its node-link dict representation. Expected input format: { @@ -116,8 +114,7 @@ def edgelist_to_graph( target: str = "dst", key: str = "key", ) -> StrictMultiDiGraph: - """ - Builds or updates a StrictMultiDiGraph from an edge list. + """Builds or updates a StrictMultiDiGraph from an edge list. Each line in the input is split by the specified separator into tokens. These tokens are mapped to column names provided in `columns`. The tokens corresponding to `source` @@ -177,8 +174,7 @@ def graph_to_edgelist( target_col: str = "dst", key_col: str = "key", ) -> List[str]: - """ - Converts a StrictMultiDiGraph into an edge-list text representation. + """Converts a StrictMultiDiGraph into an edge-list text representation. Each line in the output represents one edge with tokens joined by the given separator. By default, the output columns are: diff --git a/ngraph/lib/path.py b/ngraph/lib/path.py index bb5bcb5..64fefea 100644 --- a/ngraph/lib/path.py +++ b/ngraph/lib/path.py @@ -10,8 +10,7 @@ @dataclass class Path: - """ - Represents a single path in the network. + """Represents a single path in the network. Attributes: path_tuple (PathTuple): @@ -36,16 +35,14 @@ class Path: ) def __post_init__(self) -> None: - """ - Populate `edges`, `nodes`, and `edge_tuples` from `path_tuple`.""" + """Populate `edges`, `nodes`, and `edge_tuples` from `path_tuple`.""" for node, parallel_edges in self.path_tuple: self.nodes.add(node) self.edges.update(parallel_edges) self.edge_tuples.add(parallel_edges) def __getitem__(self, idx: int) -> Tuple[NodeID, Tuple[EdgeID, ...]]: - """ - Return the (node, parallel_edges) tuple at the specified index. + """Return the (node, parallel_edges) tuple at the specified index. Args: idx: The index of the desired path element. @@ -56,8 +53,7 @@ def __getitem__(self, idx: int) -> Tuple[NodeID, Tuple[EdgeID, ...]]: return self.path_tuple[idx] def __iter__(self) -> Iterator[Tuple[NodeID, Tuple[EdgeID, ...]]]: - """ - Iterate over each (node, parallel_edges) element in the path. + """Iterate over each (node, parallel_edges) element in the path. Yields: Each element from `path_tuple` in order. @@ -65,8 +61,7 @@ def __iter__(self) -> Iterator[Tuple[NodeID, Tuple[EdgeID, ...]]]: return iter(self.path_tuple) def __len__(self) -> int: - """ - Return the number of elements in the path. + """Return the number of elements in the path. Returns: The length of `path_tuple`. @@ -75,19 +70,16 @@ def __len__(self) -> int: @property def src_node(self) -> NodeID: - """ - Return the first node in the path (the source node).""" + """Return the first node in the path (the source node).""" return self.path_tuple[0][0] @property def dst_node(self) -> NodeID: - """ - Return the last node in the path (the destination node).""" + """Return the last node in the path (the destination node).""" return self.path_tuple[-1][0] def __lt__(self, other: Any) -> bool: - """ - Compare two paths based on their cost. + """Compare two paths based on their cost. Args: other: Another Path instance. @@ -101,8 +93,7 @@ def __lt__(self, other: Any) -> bool: return self.cost < other.cost def __eq__(self, other: Any) -> bool: - """ - Check equality by comparing path structure and cost. + """Check equality by comparing path structure and cost. Args: other: Another Path instance. 
@@ -116,8 +107,7 @@ def __eq__(self, other: Any) -> bool: return (self.path_tuple == other.path_tuple) and (self.cost == other.cost) def __hash__(self) -> int: - """ - Compute a hash based on the (path_tuple, cost) tuple. + """Compute a hash based on the (path_tuple, cost) tuple. Returns: The hash value of this Path. @@ -125,8 +115,7 @@ def __hash__(self) -> int: return hash((self.path_tuple, self.cost)) def __repr__(self) -> str: - """ - Return a string representation of the path including its tuple and cost. + """Return a string representation of the path including its tuple and cost. Returns: A debug-friendly string representation. @@ -135,8 +124,7 @@ def __repr__(self) -> str: @cached_property def edges_seq(self) -> Tuple[Tuple[EdgeID, ...], ...]: - """ - Return a tuple containing the sequence of parallel-edge tuples for each path element except the last. + """Return a tuple containing the sequence of parallel-edge tuples for each path element except the last. Returns: A tuple of parallel-edge tuples; returns an empty tuple if the path has 1 or fewer elements. @@ -147,8 +135,7 @@ def edges_seq(self) -> Tuple[Tuple[EdgeID, ...], ...]: @cached_property def nodes_seq(self) -> Tuple[NodeID, ...]: - """ - Return a tuple of node IDs in order along the path. + """Return a tuple of node IDs in order along the path. Returns: A tuple containing the ordered sequence of nodes from source to destination. @@ -161,8 +148,7 @@ def get_sub_path( graph: StrictMultiDiGraph, cost_attr: str = "cost", ) -> Path: - """ - Create a sub-path ending at the specified destination node, recalculating the cost. + """Create a sub-path ending at the specified destination node, recalculating the cost. The sub-path is formed by truncating the original path at the first occurrence of `dst_node` and ensuring that the final element has an empty tuple of edges. diff --git a/ngraph/lib/path_bundle.py b/ngraph/lib/path_bundle.py index 3032ef8..55b1a5f 100644 --- a/ngraph/lib/path_bundle.py +++ b/ngraph/lib/path_bundle.py @@ -11,8 +11,7 @@ class PathBundle: - """ - A collection of equal-cost paths between two nodes. + """A collection of equal-cost paths between two nodes. This class encapsulates one or more parallel paths (all of the same cost) between `src_node` and `dst_node`. The predecessor map `pred` associates @@ -33,8 +32,7 @@ def __init__( pred: Dict[NodeID, Dict[NodeID, List[EdgeID]]], cost: Cost, ) -> None: - """ - Initialize the PathBundle. + """Initialize the PathBundle. Args: src_node: The source node for all paths in this bundle. @@ -106,8 +104,7 @@ def __repr__(self) -> str: return f"PathBundle({self.src_node}, {self.dst_node}, {self.pred}, {self.cost})" def add(self, other: PathBundle) -> PathBundle: - """ - Concatenate this bundle with another bundle (end-to-start). + """Concatenate this bundle with another bundle (end-to-start). This effectively merges the predecessor maps and combines costs. @@ -150,8 +147,7 @@ def from_path( cost_attr: str = "cost", capacity_attr: str = "capacity", ) -> PathBundle: - """ - Construct a PathBundle from a single `Path` object. + """Construct a PathBundle from a single `Path` object. Args: path: A `Path` object which contains node-edge tuples, plus a `cost`. @@ -218,8 +214,7 @@ def from_path( return cls(src_node, dst_node, pred_map, path.cost) def resolve_to_paths(self, split_parallel_edges: bool = False) -> Iterator[Path]: - """ - Generate all concrete `Path` objects contained in this PathBundle. + """Generate all concrete `Path` objects contained in this PathBundle. 
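A PathBundle can be built straight from an SPF-style predecessor map and expanded back into concrete Path objects (a sketch):

    from ngraph.lib.graph import StrictMultiDiGraph
    from ngraph.lib.path_bundle import PathBundle

    g = StrictMultiDiGraph()
    for n in ("A", "B", "C"):
        g.add_node(n)
    e1 = g.add_edge("A", "B", cost=1)
    e2 = g.add_edge("B", "C", cost=1)

    # pred maps node -> {predecessor: [edge_ids]}, the same shape spf() returns.
    pred = {"A": {}, "B": {"A": [e1]}, "C": {"B": [e2]}}
    bundle = PathBundle("A", "C", pred, cost=2)
    for path in bundle.resolve_to_paths():
        print(path.nodes_seq, path.cost)  # ('A', 'B', 'C') 2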
Args: split_parallel_edges: If False, any parallel edges are grouped together @@ -238,8 +233,7 @@ def resolve_to_paths(self, split_parallel_edges: bool = False) -> Iterator[Path] yield Path(path_tuple, self.cost) def contains(self, other: PathBundle) -> bool: - """ - Check if this bundle's edge set contains all edges of `other`. + """Check if this bundle's edge set contains all edges of `other`. Args: other: Another PathBundle. @@ -250,8 +244,7 @@ def contains(self, other: PathBundle) -> bool: return self.edges.issuperset(other.edges) def is_subset_of(self, other: PathBundle) -> bool: - """ - Check if this bundle's edge set is contained in `other`'s edge set. + """Check if this bundle's edge set is contained in `other`'s edge set. Args: other: Another PathBundle. @@ -262,8 +255,7 @@ def is_subset_of(self, other: PathBundle) -> bool: return self.edges.issubset(other.edges) def is_disjoint_from(self, other: PathBundle) -> bool: - """ - Check if this bundle shares no edges with `other`. + """Check if this bundle shares no edges with `other`. Args: other: Another PathBundle. @@ -279,8 +271,7 @@ def get_sub_path_bundle( graph: StrictMultiDiGraph, cost_attr: str = "cost", ) -> PathBundle: - """ - Create a sub-bundle ending at `new_dst_node` (which must appear in this bundle). + """Create a sub-bundle ending at `new_dst_node` (which must appear in this bundle). This method performs a reverse traversal (BFS) from `new_dst_node` up to `self.src_node`, collecting edges and recalculating the cost along the way diff --git a/ngraph/lib/util.py b/ngraph/lib/util.py index a6ed848..4cae38b 100644 --- a/ngraph/lib/util.py +++ b/ngraph/lib/util.py @@ -12,8 +12,7 @@ def to_digraph( ] = None, revertible: bool = True, ) -> nx.DiGraph: - """ - Convert a StrictMultiDiGraph to a NetworkX DiGraph. + """Convert a StrictMultiDiGraph to a NetworkX DiGraph. This function consolidates multi-edges between nodes into a single edge. Optionally, a custom edge function can be provided to compute edge attributes. @@ -52,8 +51,7 @@ def to_digraph( def from_digraph(nx_graph: nx.DiGraph) -> StrictMultiDiGraph: - """ - Convert a revertible NetworkX DiGraph to a StrictMultiDiGraph. + """Convert a revertible NetworkX DiGraph to a StrictMultiDiGraph. This function reconstructs the original StrictMultiDiGraph by restoring multi-edge information from the '_uv_edges' attribute of each edge. @@ -83,8 +81,7 @@ def to_graph( ] = None, revertible: bool = True, ) -> nx.Graph: - """ - Convert a StrictMultiDiGraph to a NetworkX Graph. + """Convert a StrictMultiDiGraph to a NetworkX Graph. This function works similarly to `to_digraph` but returns an undirected graph. @@ -118,8 +115,7 @@ def to_graph( def from_graph(nx_graph: nx.Graph) -> StrictMultiDiGraph: - """ - Convert a revertible NetworkX Graph to a StrictMultiDiGraph. + """Convert a revertible NetworkX Graph to a StrictMultiDiGraph. Restores the original multi-edge structure from the '_uv_edges' attribute stored in each consolidated edge. diff --git a/ngraph/network.py b/ngraph/network.py index 93b7ff3..868cb60 100644 --- a/ngraph/network.py +++ b/ngraph/network.py @@ -12,8 +12,7 @@ def new_base64_uuid() -> str: - """ - Generate a Base64-encoded, URL-safe UUID (22 characters, no padding). + """Generate a Base64-encoded, URL-safe UUID (22 characters, no padding). Returns: str: A 22-character Base64 URL-safe string with trailing '=' removed. @@ -23,8 +22,7 @@ def new_base64_uuid() -> str: @dataclass class Node: - """ - Represents a node in the network. 
+ """Represents a node in the network. Each node is uniquely identified by its name, which is used as the key in the Network's node dictionary. @@ -44,8 +42,7 @@ class Node: @dataclass class Link: - """ - Represents a directed link between two nodes in the network. + """Represents a directed link between two nodes in the network. Attributes: source (str): Name of the source node. @@ -68,16 +65,13 @@ class Link: id: str = field(init=False) def __post_init__(self) -> None: - """ - Generate the link's unique ID upon initialization. - """ + """Generate the link's unique ID upon initialization.""" self.id = f"{self.source}|{self.target}|{new_base64_uuid()}" @dataclass class RiskGroup: - """ - Represents a shared-risk or failure domain, which may have nested children. + """Represents a shared-risk or failure domain, which may have nested children. Attributes: name (str): Unique name of this risk group. @@ -94,8 +88,7 @@ class RiskGroup: @dataclass class Network: - """ - A container for network nodes and links. + """A container for network nodes and links. Attributes: nodes (Dict[str, Node]): Mapping from node name -> Node object. @@ -110,8 +103,7 @@ class Network: attrs: Dict[str, Any] = field(default_factory=dict) def add_node(self, node: Node) -> None: - """ - Add a node to the network (keyed by node.name). + """Add a node to the network (keyed by node.name). Args: node (Node): Node to add. @@ -124,8 +116,7 @@ def add_node(self, node: Node) -> None: self.nodes[node.name] = node def add_link(self, link: Link) -> None: - """ - Add a link to the network (keyed by the link's auto-generated ID). + """Add a link to the network (keyed by the link's auto-generated ID). Args: link (Link): Link to add. @@ -141,8 +132,7 @@ def add_link(self, link: Link) -> None: self.links[link.id] = link def to_strict_multidigraph(self, add_reverse: bool = True) -> StrictMultiDiGraph: - """ - Create a StrictMultiDiGraph representation of this Network. + """Create a StrictMultiDiGraph representation of this Network. Skips disabled nodes/links. Optionally adds reverse edges. @@ -192,8 +182,7 @@ def to_strict_multidigraph(self, add_reverse: bool = True) -> StrictMultiDiGraph return graph def select_node_groups_by_path(self, path: str) -> Dict[str, List[Node]]: - """ - Select and group nodes whose names match a given regular expression. + """Select and group nodes whose names match a given regular expression. Uses re.match(), so the pattern is anchored at the start of the node name. If the pattern includes capturing groups, the group label is formed by @@ -229,8 +218,7 @@ def max_flow( shortest_path: bool = False, flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL, ) -> Dict[Tuple[str, str], float]: - """ - Compute maximum flow between groups of source nodes and sink nodes. + """Compute maximum flow between groups of source nodes and sink nodes. Returns a dictionary of flow values keyed by (source_label, sink_label). @@ -301,8 +289,7 @@ def _compute_flow_single_group( shortest_path: bool, flow_placement: Optional[FlowPlacement], ) -> float: - """ - Attach a pseudo-source and pseudo-sink to the provided node lists, + """Attach a pseudo-source and pseudo-sink to the provided node lists, then run calc_max_flow. Returns the resulting flow from all sources to all sinks as a single float. @@ -349,8 +336,7 @@ def _compute_flow_single_group( ) def disable_node(self, node_name: str) -> None: - """ - Mark a node as disabled. + """Mark a node as disabled. Args: node_name (str): Name of the node to disable. 
@@ -363,8 +349,7 @@ def disable_node(self, node_name: str) -> None: self.nodes[node_name].disabled = True def enable_node(self, node_name: str) -> None: - """ - Mark a node as enabled. + """Mark a node as enabled. Args: node_name (str): Name of the node to enable. @@ -377,8 +362,7 @@ def enable_node(self, node_name: str) -> None: self.nodes[node_name].disabled = False def disable_link(self, link_id: str) -> None: - """ - Mark a link as disabled. + """Mark a link as disabled. Args: link_id (str): ID of the link to disable. @@ -391,8 +375,7 @@ def disable_link(self, link_id: str) -> None: self.links[link_id].disabled = True def enable_link(self, link_id: str) -> None: - """ - Mark a link as enabled. + """Mark a link as enabled. Args: link_id (str): ID of the link to enable. @@ -405,26 +388,21 @@ def enable_link(self, link_id: str) -> None: self.links[link_id].disabled = False def enable_all(self) -> None: - """ - Mark all nodes and links as enabled. - """ + """Mark all nodes and links as enabled.""" for node in self.nodes.values(): node.disabled = False for link in self.links.values(): link.disabled = False def disable_all(self) -> None: - """ - Mark all nodes and links as disabled. - """ + """Mark all nodes and links as disabled.""" for node in self.nodes.values(): node.disabled = True for link in self.links.values(): link.disabled = True def get_links_between(self, source: str, target: str) -> List[str]: - """ - Retrieve all link IDs that connect the specified source node + """Retrieve all link IDs that connect the specified source node to the target node. Args: @@ -446,8 +424,7 @@ def find_links( target_regex: Optional[str] = None, any_direction: bool = False, ) -> List[Link]: - """ - Search for links using optional regex patterns for source or target node names. + """Search for links using optional regex patterns for source or target node names. Args: source_regex (Optional[str]): Regex to match link.source. If None, matches all sources. @@ -481,8 +458,7 @@ def find_links( return results def disable_risk_group(self, name: str, recursive: bool = True) -> None: - """ - Disable all nodes/links that have 'name' in their risk_groups. + """Disable all nodes/links that have 'name' in their risk_groups. If recursive=True, also disable items belonging to child risk groups. Args: @@ -511,8 +487,7 @@ def disable_risk_group(self, name: str, recursive: bool = True) -> None: self.disable_link(link_id) def enable_risk_group(self, name: str, recursive: bool = True) -> None: - """ - Enable all nodes/links that have 'name' in their risk_groups. + """Enable all nodes/links that have 'name' in their risk_groups. If recursive=True, also enable items belonging to child risk groups. Note: diff --git a/ngraph/results.py b/ngraph/results.py index b2a737d..4290d68 100644 --- a/ngraph/results.py +++ b/ngraph/results.py @@ -4,8 +4,7 @@ @dataclass class Results: - """ - A container for storing arbitrary key-value data that arises during workflow steps. + """A container for storing arbitrary key-value data that arises during workflow steps. The data is organized by step name, then by key. Example usage: @@ -19,8 +18,7 @@ class Results: _store: Dict[str, Dict[str, Any]] = field(default_factory=dict) def put(self, step_name: str, key: str, value: Any) -> None: - """ - Store a value under (step_name, key). + """Store a value under (step_name, key). If the step_name sub-dict does not exist, it is created. 
Args: @@ -33,8 +31,7 @@ def put(self, step_name: str, key: str, value: Any) -> None: self._store[step_name][key] = value def get(self, step_name: str, key: str, default: Any = None) -> Any: - """ - Retrieve the value from (step_name, key). If the key is missing, return `default`. + """Retrieve the value from (step_name, key). If the key is missing, return `default`. Args: step_name (str): The workflow step name. @@ -47,8 +44,7 @@ def get(self, step_name: str, key: str, default: Any = None) -> Any: return self._store.get(step_name, {}).get(key, default) def get_all(self, key: str) -> Dict[str, Any]: - """ - Retrieve a dictionary of {step_name: value} for all step_names that contain the specified key. + """Retrieve a dictionary of {step_name: value} for all step_names that contain the specified key. Args: key (str): The key to look up in each step. @@ -63,8 +59,7 @@ def get_all(self, key: str) -> Dict[str, Any]: return result def to_dict(self) -> Dict[str, Dict[str, Any]]: - """ - Return a dictionary representation of all stored results. + """Return a dictionary representation of all stored results. Returns: Dict[str, Dict[str, Any]]: Dictionary representation of all stored results. diff --git a/ngraph/scenario.py b/ngraph/scenario.py index 62dd230..9fbf60f 100644 --- a/ngraph/scenario.py +++ b/ngraph/scenario.py @@ -20,8 +20,7 @@ @dataclass class Scenario: - """ - Represents a complete scenario for building and executing network workflows. + """Represents a complete scenario for building and executing network workflows. This scenario includes: - A network (nodes/links), constructed via blueprint expansion. @@ -46,8 +45,7 @@ class Scenario: components_library: ComponentsLibrary = field(default_factory=ComponentsLibrary) def run(self) -> None: - """ - Executes the scenario's workflow steps in order. + """Executes the scenario's workflow steps in order. Each step may modify scenario data or store outputs in scenario.results. @@ -61,8 +59,7 @@ def from_yaml( yaml_str: str, default_components: Optional[ComponentsLibrary] = None, ) -> Scenario: - """ - Constructs a Scenario from a YAML string, optionally merging + """Constructs a Scenario from a YAML string, optionally merging with a default ComponentsLibrary if provided. Top-level YAML keys can include: @@ -164,8 +161,7 @@ def from_yaml( @staticmethod def _build_risk_groups(rg_data: List[Dict[str, Any]]) -> List[RiskGroup]: - """ - Recursively builds a list of RiskGroup objects from YAML data. + """Recursively builds a list of RiskGroup objects from YAML data. Each entry may have keys: "name", "children", "disabled", and "attrs" (dict). @@ -195,8 +191,7 @@ def build_one(d: Dict[str, Any]) -> RiskGroup: @staticmethod def _build_failure_policy(fp_data: Dict[str, Any]) -> FailurePolicy: - """ - Constructs a FailurePolicy from data that may specify multiple rules plus + """Constructs a FailurePolicy from data that may specify multiple rules plus optional top-level fields like fail_shared_risk_groups, fail_risk_group_children, use_cache, and attrs. @@ -275,8 +270,7 @@ def _build_failure_policy(fp_data: Dict[str, Any]) -> FailurePolicy: def _build_workflow_steps( workflow_data: List[Dict[str, Any]], ) -> List[WorkflowStep]: - """ - Converts workflow step dictionaries into WorkflowStep objects. + """Converts workflow step dictionaries into WorkflowStep objects. Each step dict must have a "step_type" referencing a registered workflow step in WORKFLOW_STEP_REGISTRY. 
All other keys in the dict are passed diff --git a/ngraph/traffic_demand.py b/ngraph/traffic_demand.py index 75238ae..1e71d8d 100644 --- a/ngraph/traffic_demand.py +++ b/ngraph/traffic_demand.py @@ -7,8 +7,7 @@ @dataclass class TrafficDemand: - """ - Represents a single traffic demand in a network. + """Represents a single traffic demand in a network. Attributes: source_path (str): A regex pattern (string) for selecting source nodes. @@ -36,7 +35,5 @@ class TrafficDemand: id: str = field(init=False) def __post_init__(self) -> None: - """ - Generate a unique ID by combining source, sink, and a random Base64 UUID. - """ + """Generate a unique ID by combining source, sink, and a random Base64 UUID.""" self.id = f"{self.source_path}|{self.sink_path}|{new_base64_uuid()}" diff --git a/ngraph/traffic_manager.py b/ngraph/traffic_manager.py index 4ca89c6..6d4c3de 100644 --- a/ngraph/traffic_manager.py +++ b/ngraph/traffic_manager.py @@ -13,8 +13,7 @@ class TrafficResult(NamedTuple): - """ - A container for traffic demand result data. + """A container for traffic demand result data. Attributes: priority (int): Demand priority class (lower=more critical). @@ -35,8 +34,7 @@ class TrafficResult(NamedTuple): @dataclass class TrafficManager: - """ - Manages the expansion and placement of traffic demands on a Network. + """Manages the expansion and placement of traffic demands on a Network. This class: @@ -84,8 +82,7 @@ class TrafficManager: _td_to_demands: Dict[str, List[Demand]] = field(default_factory=dict) def build_graph(self, add_reverse: bool = True) -> None: - """ - Builds or rebuilds the internal StrictMultiDiGraph from self.network. + """Builds or rebuilds the internal StrictMultiDiGraph from self.network. This also initializes flow-related edge attributes (e.g., flow=0). @@ -97,8 +94,7 @@ def build_graph(self, add_reverse: bool = True) -> None: init_flow_graph(self.graph) # Initialize flow-related attributes def expand_demands(self) -> None: - """ - Converts each TrafficDemand in self.traffic_demands into one or more + """Converts each TrafficDemand in self.traffic_demands into one or more Demand objects based on the demand's 'mode'. The expanded demands are stored in self.demands, sorted by ascending @@ -144,8 +140,7 @@ def place_all_demands( placement_rounds: Union[int, str] = "auto", reoptimize_after_each_round: bool = False, ) -> float: - """ - Places all expanded demands in ascending priority order using multiple + """Places all expanded demands in ascending priority order using multiple incremental rounds per priority. In each priority class: @@ -228,8 +223,7 @@ def place_all_demands( return total_placed def reset_all_flow_usages(self) -> None: - """ - Removes flow usage from the graph for each Demand's FlowPolicy + """Removes flow usage from the graph for each Demand's FlowPolicy and resets placed_demand to 0 for all demands. Also sets TrafficDemand.demand_placed to 0 for each top-level demand. @@ -246,8 +240,7 @@ def reset_all_flow_usages(self) -> None: td.demand_placed = 0.0 def get_flow_details(self) -> Dict[Tuple[int, int], Dict[str, object]]: - """ - Summarizes flows from each Demand's FlowPolicy. + """Summarizes flows from each Demand's FlowPolicy. Returns: Dict[Tuple[int, int], Dict[str, object]]: @@ -274,8 +267,7 @@ def get_flow_details(self) -> Dict[Tuple[int, int], Dict[str, object]]: return details def summarize_link_usage(self) -> Dict[str, float]: - """ - Returns the total flow usage per edge in the graph. + """Returns the total flow usage per edge in the graph. 
Returns: Dict[str, float]: A mapping from edge_key -> current flow on that edge. @@ -291,8 +283,7 @@ def summarize_link_usage(self) -> Dict[str, float]: return usage def get_traffic_results(self, detailed: bool = False) -> List[TrafficResult]: - """ - Returns traffic demand summaries. + """Returns traffic demand summaries. If detailed=False, each top-level TrafficDemand is returned as a single entry. If detailed=True, each expanded Demand is returned separately. @@ -346,8 +337,7 @@ def get_traffic_results(self, detailed: bool = False) -> List[TrafficResult]: return results def _reoptimize_priority_demands(self, demands_in_prio: List[Demand]) -> None: - """ - Re-run flow-policy placement for each Demand in the same priority class. + """Re-run flow-policy placement for each Demand in the same priority class. Removing and re-placing each flow allows the flow policy to adjust if capacity constraints have changed due to other demands. @@ -379,8 +369,7 @@ def _expand_combine( src_groups: Dict[str, List[Node]], snk_groups: Dict[str, List[Node]], ) -> None: - """ - 'combine' mode expansion. + """'combine' mode expansion. Attaches a single pseudo-source and a single pseudo-sink node for the matched source and sink nodes, similar to the approach in network.py. @@ -456,8 +445,7 @@ def _expand_full_mesh( src_groups: Dict[str, List[Node]], snk_groups: Dict[str, List[Node]], ) -> None: - """ - 'full_mesh' mode expansion. + """'full_mesh' mode expansion. Combines all matched source nodes into one group and all matched sink nodes into another group. Creates a Demand for each (src_node, dst_node) @@ -510,8 +498,7 @@ def _expand_full_mesh( ) def _estimate_rounds(self) -> int: - """ - Estimates a suitable number of placement rounds by comparing + """Estimates a suitable number of placement rounds by comparing the median demand volume and the median edge capacity. Returns a default of 5 rounds if there is insufficient data for a meaningful calculation. diff --git a/ngraph/transform/base.py b/ngraph/transform/base.py index f07a459..149cefe 100644 --- a/ngraph/transform/base.py +++ b/ngraph/transform/base.py @@ -10,8 +10,7 @@ def register_transform(name: str) -> Any: - """ - Class decorator that registers a concrete :class:`NetworkTransform` and + """Class decorator that registers a concrete :class:`NetworkTransform` and auto-wraps it as a :class:`WorkflowStep`. The same *name* is used for both the transform factory and the workflow @@ -43,8 +42,7 @@ def run(self, scenario: Scenario) -> None: # noqa: D401 class NetworkTransform(abc.ABC): - """ - Stateless mutator applied to a :class:`ngraph.scenario.Scenario`. + """Stateless mutator applied to a :class:`ngraph.scenario.Scenario`. Subclasses must override :meth:`apply`. """ @@ -58,8 +56,7 @@ def apply(self, scenario: Scenario) -> None: @classmethod def create(cls, step_type: str, **kwargs: Any) -> Self: - """ - Instantiate a registered transform by *step_type*. + """Instantiate a registered transform by *step_type*. Args: step_type: Name given in :func:`register_transform`. diff --git a/ngraph/transform/distribute_external.py b/ngraph/transform/distribute_external.py index 1b18d6c..1ecca7e 100644 --- a/ngraph/transform/distribute_external.py +++ b/ngraph/transform/distribute_external.py @@ -1,20 +1,3 @@ -""" -Distribute external (remote) nodes across stripes of attachment nodes. - -The transform is generic: - -* ``attachment_path`` - regex that selects any enabled nodes to serve as - attachment points. 
-* ``remote_locations`` - short names; each is mapped deterministically to - a stripe of attachments. -* ``stripe_width`` - number of attachment nodes per stripe. -* ``capacity`` / ``cost`` - link attributes for created edges. - -Idempotent: re-running the transform will not duplicate nodes or links. -""" - -from __future__ import annotations - from dataclasses import dataclass from typing import List, Sequence @@ -38,8 +21,7 @@ def select(self, index: int, stripes: List[List[Node]]) -> List[Node]: @register_transform("DistributeExternalConnectivity") class DistributeExternalConnectivity(NetworkTransform): - """ - Attach (or create) remote nodes and link them to attachment stripes. + """Attach (or create) remote nodes and link them to attachment stripes. Args: remote_locations: Iterable of node names, e.g. ``["den", "sea"]``. diff --git a/ngraph/transform/enable_nodes.py b/ngraph/transform/enable_nodes.py index 7f91ccd..3af5942 100644 --- a/ngraph/transform/enable_nodes.py +++ b/ngraph/transform/enable_nodes.py @@ -9,8 +9,7 @@ @register_transform("EnableNodes") class EnableNodesTransform(NetworkTransform): - """ - Enable *count* disabled nodes that match *path*. + """Enable *count* disabled nodes that match *path*. Ordering is configurable; default is lexical by node name. """ diff --git a/ngraph/workflow/base.py b/ngraph/workflow/base.py index 8239e7a..ff24568 100644 --- a/ngraph/workflow/base.py +++ b/ngraph/workflow/base.py @@ -12,9 +12,7 @@ def register_workflow_step(step_type: str): - """ - A decorator that registers a WorkflowStep subclass under `step_type`. - """ + """A decorator that registers a WorkflowStep subclass under `step_type`.""" def decorator(cls: Type["WorkflowStep"]): WORKFLOW_STEP_REGISTRY[step_type] = cls @@ -25,15 +23,11 @@ def decorator(cls: Type["WorkflowStep"]): @dataclass class WorkflowStep(ABC): - """ - Base class for all workflow steps. - """ + """Base class for all workflow steps.""" name: str = "" @abstractmethod def run(self, scenario: Scenario) -> None: - """ - Execute the workflow step logic. - """ + """Execute the workflow step logic.""" pass diff --git a/ngraph/workflow/build_graph.py b/ngraph/workflow/build_graph.py index 38123fb..28e8157 100644 --- a/ngraph/workflow/build_graph.py +++ b/ngraph/workflow/build_graph.py @@ -12,9 +12,7 @@ @register_workflow_step("BuildGraph") @dataclass class BuildGraph(WorkflowStep): - """ - A workflow step that builds a StrictMultiDiGraph from scenario.network. - """ + """A workflow step that builds a StrictMultiDiGraph from scenario.network.""" def run(self, scenario: Scenario) -> None: graph = scenario.network.to_strict_multidigraph(add_reverse=True) diff --git a/ngraph/workflow/capacity_probe.py b/ngraph/workflow/capacity_probe.py index 0cdd617..4f511d5 100644 --- a/ngraph/workflow/capacity_probe.py +++ b/ngraph/workflow/capacity_probe.py @@ -13,8 +13,7 @@ @register_workflow_step("CapacityProbe") @dataclass class CapacityProbe(WorkflowStep): - """ - A workflow step that probes capacity (max flow) between selected groups of nodes. + """A workflow step that probes capacity (max flow) between selected groups of nodes. Attributes: source_path (str): A regex pattern to select source node groups. @@ -46,8 +45,7 @@ def __post_init__(self): ) from None def run(self, scenario: Scenario) -> None: - """ - Executes the capacity probe by computing max flow between node groups + """Executes the capacity probe by computing max flow between node groups matched by source_path and sink_path. Results are stored in scenario.results. 
Depending on 'mode', the returned flow is either a single combined dict entry @@ -89,8 +87,7 @@ def _store_flow_dict( scenario: Scenario, flow_dict: Dict[Tuple[str, str], float], ) -> None: - """ - Stores the flow dictionary in the scenario's results container, labeling + """Stores the flow dictionary in the scenario's results container, labeling each entry consistently. For each (src_label, snk_label) in the flow_dict, we store: "max_flow:[src_label -> snk_label]".
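A sketch of how the "max_flow:[src_label -> snk_label]" labeling described above pairs with the Results API from ngraph/results.py; the step name and flow value are illustrative only, not taken from the implementation.

from ngraph.results import Results

results = Results()
# _store_flow_dict writes one entry per (src_label, snk_label) pair under
# the key "max_flow:[src_label -> snk_label]"; mimic one such entry by hand.
results.put("CapacityProbe", "max_flow:[A -> B]", 10.0)

assert results.get("CapacityProbe", "max_flow:[A -> B]") == 10.0
# get_all collects the same key across steps into {step_name: value}.
assert results.get_all("max_flow:[A -> B]") == {"CapacityProbe": 10.0}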