diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index ae27921..6b289a5 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -9,12 +9,12 @@ jobs: steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 - with: {python-version: '3.12'} + with: {python-version: '3.11'} - name: Install dependencies - run: pip install "mkdocs-material>=9,<10" pdoc - - name: Install package in development mode - run: pip install -e . + run: | + python -m pip install --upgrade pip + python -m pip install '.[dev]' - name: Generate auto-generated API documentation - run: python scripts/generate_api_docs.py + run: python dev/generate_api_docs.py - name: Deploy documentation run: mkdocs gh-deploy --force --clean diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 0000000..cb97eb4 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,51 @@ +name: Publish to PyPI + +on: + release: + types: [published] + workflow_dispatch: + inputs: + test_pypi: + description: 'Publish to Test PyPI instead of PyPI' + required: false + default: false + type: boolean + +jobs: + publish: + runs-on: ubuntu-latest + permissions: + contents: read + id-token: write # Required for trusted publishing + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install build dependencies + run: | + python -m pip install --upgrade pip + python -m pip install build twine + + - name: Build package + run: python -m build + + - name: Check package + run: python -m twine check dist/* + + - name: Publish to Test PyPI + if: ${{ github.event.inputs.test_pypi == 'true' }} + uses: pypa/gh-action-pypi-publish@release/v1 + with: + repository-url: https://test.pypi.org/legacy/ + print-hash: true + + - name: Publish to PyPI + if: ${{ github.event_name == 'release' && github.event.action == 'published' }} + uses: pypa/gh-action-pypi-publish@release/v1 + with: + print-hash: true diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml index 9757886..d69ddf1 100644 --- a/.github/workflows/python-test.yml +++ b/.github/workflows/python-test.yml @@ -1,25 +1,32 @@ -# Run unit tests and check test coverage -name: Python-test +# Run tests, linting, and type checking +name: CI on: [push, pull_request] jobs: - build: + test: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.13"] + python-version: ["3.11", "3.12", "3.13"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip python -m pip install '.[dev]' - - name: Test with pytest and check test coverage + - name: Lint with Ruff run: | - pytest \ No newline at end of file + ruff check . + ruff format --check . 
+ - name: Type check with Pyright + run: | + pyright + - name: Test with pytest and check coverage + run: | + pytest diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..c662ccc --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,24 @@ +repos: + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.11.13 + hooks: + - id: ruff + args: [--fix] + - id: ruff-format + + - repo: https://github.com/RobertCraigie/pyright-python + rev: v1.1.401 + hooks: + - id: pyright + args: [--project, pyproject.toml] + additional_dependencies: ['geopy', 'networkx', 'pyyaml', 'numpy', 'pandas', 'matplotlib', 'seaborn'] + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-toml + - id: check-merge-conflict + - id: check-added-large-files diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..dd6a397 --- /dev/null +++ b/Makefile @@ -0,0 +1,177 @@ +# NetGraph Development Makefile +# This Makefile provides convenient shortcuts for common development tasks + +.PHONY: help setup install dev-install check test clean docs build check-dist publish-test publish docker-build docker-run + +# Default target - show help +.DEFAULT_GOAL := help + +help: + @echo "๐Ÿ”ง NetGraph Development Commands" + @echo "" + @echo "Setup & Installation:" + @echo " make setup - Full development environment setup (install + hooks)" + @echo " make install - Install package in development mode (no dev deps)" + @echo " make dev-install - Install package with all dev dependencies" + @echo "" + @echo "Code Quality & Testing:" + @echo " make check - Run all pre-commit checks and tests" + @echo " make lint - Run only linting (ruff + pyright)" + @echo " make format - Auto-format code with ruff" + @echo " make test - Run tests with coverage" + @echo " make test-quick - Run tests without coverage" + @echo "" + @echo "Documentation:" + @echo " make docs - Generate API documentation" + @echo " make docs-test - Test API documentation generation" + @echo " make docs-serve - Serve documentation locally" + @echo "" + @echo "Build & Package:" + @echo " make build - Build distribution packages" + @echo " make clean - Clean build artifacts and cache files" + @echo "" + @echo "Publishing:" + @echo " make check-dist - Check distribution packages with twine" + @echo " make publish-test - Publish to Test PyPI" + @echo " make publish - Publish to PyPI" + @echo "" + @echo "Docker (if available):" + @echo " make docker-build - Build Docker image" + @echo " make docker-run - Run Docker container with Jupyter" + @echo "" + @echo "Utilities:" + @echo " make info - Show project information" + +# Setup and Installation +setup: + @echo "๐Ÿš€ Setting up development environment..." + @bash dev/setup-dev.sh + +install: + @echo "๐Ÿ“ฆ Installing package in development mode (no dev dependencies)..." + pip install -e . + +dev-install: + @echo "๐Ÿ“ฆ Installing package with dev dependencies..." + pip install -e '.[dev]' + +# Code Quality and Testing +check: + @echo "๐Ÿ” Running complete code quality checks and tests..." + @bash dev/run-checks.sh + +lint: + @echo "๐Ÿงน Running linting checks..." + @pre-commit run ruff --all-files + @pre-commit run pyright --all-files + +format: + @echo "โœจ Auto-formatting code..." + @pre-commit run ruff-format --all-files + +test: + @echo "๐Ÿงช Running tests with coverage..." + @pytest + +test-quick: + @echo "โšก Running tests without coverage..." 
+ @pytest --no-cov + +# Documentation +docs: + @echo "๐Ÿ“š Generating API documentation..." + @echo "โ„น๏ธ This regenerates docs/reference/api-full.md from source code" + @python dev/generate_api_docs.py --write-file + +docs-test: + @echo "๐Ÿงช Testing API documentation generation..." + @python dev/test_doc_generation.py + +docs-serve: + @echo "๐ŸŒ Serving documentation locally..." + @if command -v mkdocs >/dev/null 2>&1; then \ + mkdocs serve; \ + else \ + echo "โŒ mkdocs not installed. Install dev dependencies with: make dev-install"; \ + exit 1; \ + fi + +# Build and Package +build: + @echo "๐Ÿ—๏ธ Building distribution packages..." + @if python -c "import build" >/dev/null 2>&1; then \ + python -m build; \ + else \ + echo "โŒ build module not installed. Install dev dependencies with: make dev-install"; \ + exit 1; \ + fi + +clean: + @echo "๐Ÿงน Cleaning build artifacts and cache files..." + @rm -rf build/ + @rm -rf dist/ + @rm -rf *.egg-info/ + @find . -type f -name "*.pyc" -delete + @find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true + @find . -type f -name "*.pyo" -delete + @find . -type f -name "*~" -delete + @find . -type f -name "*.orig" -delete + @echo "โœ… Cleanup complete!" + +# Docker commands (optional) +docker-build: + @echo "๐Ÿณ Building Docker image..." + @if [ -f "Dockerfile" ]; then \ + bash run.sh build; \ + else \ + echo "โŒ Dockerfile not found"; \ + exit 1; \ + fi + +docker-run: + @echo "๐Ÿณ Running Docker container with Jupyter..." + @if [ -f "run.sh" ]; then \ + bash run.sh run; \ + else \ + echo "โŒ run.sh not found"; \ + exit 1; \ + fi + +# Publishing +check-dist: + @echo "๐Ÿ” Checking distribution packages..." + @if python -c "import twine" >/dev/null 2>&1; then \ + python -m twine check dist/*; \ + else \ + echo "โŒ twine not installed. Install dev dependencies with: make dev-install"; \ + exit 1; \ + fi + +publish-test: + @echo "๐Ÿ“ฆ Publishing to Test PyPI..." + @if python -c "import twine" >/dev/null 2>&1; then \ + python -m twine upload --repository testpypi dist/*; \ + else \ + echo "โŒ twine not installed. Install dev dependencies with: make dev-install"; \ + exit 1; \ + fi + +publish: + @echo "๐Ÿš€ Publishing to PyPI..." + @if python -c "import twine" >/dev/null 2>&1; then \ + python -m twine upload dist/*; \ + else \ + echo "โŒ twine not installed. 
Install dev dependencies with: make dev-install"; \ + exit 1; \ + fi + +# Project Information +info: + @echo "๐Ÿ“‹ NetGraph Project Information" + @echo "================================" + @echo "Python version: $$(python --version)" + @echo "Package version: $$(python -c 'import ngraph; print(ngraph.__version__)' 2>/dev/null || echo 'Not installed')" + @echo "Virtual environment: $$(echo $$VIRTUAL_ENV | sed 's|.*/||' || echo 'None active')" + @echo "Pre-commit installed: $$(pre-commit --version 2>/dev/null || echo 'Not installed')" + @echo "Git status:" + @git status --porcelain | head -5 || echo "Not a git repository" diff --git a/README.md b/README.md index 9687ff5..ee74497 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ NetGraph is a scenario-based network modeling and analysis framework written in - **Scenario-Based Modeling** [DONE]: Define complete network scenarios in YAML with topology, failures, traffic, and workflow - **Hierarchical Blueprints** [DONE]: Reusable network templates with nested structures and parameterization - **Demand Placement** [DONE]: Place traffic demands on the network with various flow placement strategies (e.g., shortest path only, ECMP/UCMP, etc.) -- **Capacity Calculation** [DONE]: Calculate capacity with different flow placement strategies +- **Capacity Calculation** [DONE]: Calculate capacity with different flow placement strategies - **Failure Simulation** [DONE]: Model component and risk groups failures for availability analysis - **Network Analysis** [IN PROGRESS]: Analyze capacity, failure tolerance, and efficiency diff --git a/dev/dev.md b/dev/dev.md new file mode 100644 index 0000000..99dece7 --- /dev/null +++ b/dev/dev.md @@ -0,0 +1,66 @@ +# NetGraph Development Guide + +## Essential Commands + +```bash +make setup # Complete dev environment setup +make check # Run all quality checks + tests +make test # Run tests with coverage +make docs # Generate API documentation +make docs-serve # Serve docs locally +``` + +**For all available commands**: `make help` + +## Documentation + +### Generating API Documentation + +The API documentation is auto-generated from source code docstrings: + +```bash +# Generate API documentation +make docs +# or +python dev/generate_api_docs.py +``` + +**Important**: API documentation is **not** regenerated during pytest runs to avoid constant file changes. The doc generation test is skipped by default. 
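The gate is just an environment variable. As a rough sketch of how such a pytest skip can be wired (hypothetical; the actual check in `tests/test_api_docs.py` may differ):

```python
# Illustrative only: skip unless GENERATE_DOCS=true is set in the environment.
import os

import pytest


@pytest.mark.skipif(
    os.environ.get("GENERATE_DOCS") != "true",
    reason="doc generation runs only when GENERATE_DOCS=true",
)
def test_api_doc_generation_output():
    ...  # regenerate api-full.md and verify the output
```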
To test doc generation: + +```bash +GENERATE_DOCS=true pytest tests/test_api_docs.py::test_api_doc_generation_output +``` + +### Documentation Types + +- `docs/reference/api.md` - Curated, example-driven API guide (manually maintained) +- `docs/reference/api-full.md` - Complete auto-generated reference (regenerated via `make docs`) +- `docs/reference/cli.md` - Command-line interface documentation +- `docs/reference/dsl.md` - YAML DSL syntax reference + +## Publishing + +**Manual**: `make clean && make build && make publish-test && make publish` + +**Automated**: Create GitHub release โ†’ auto-publishes to PyPI + +**Version**: Update `version = "x.y.z"` in `pyproject.toml` before publishing + +## Key Development Files + +``` +pyproject.toml # Package config, dependencies, tool settings +Makefile # Development commands +.pre-commit-config.yaml # Code quality hooks +dev/setup-dev.sh # Development environment setup script +dev/run-checks.sh # Manual code quality checks +``` + +## Git Workflows + +``` +.github/workflows/ +โ”œโ”€โ”€ python-test.yml # CI: tests, linting, type checking +โ”œโ”€โ”€ docs.yml # Auto-deploy documentation +โ””โ”€โ”€ publish.yml # Auto-publish to PyPI on releases +``` diff --git a/scripts/generate_api_docs.py b/dev/generate_api_docs.py similarity index 79% rename from scripts/generate_api_docs.py rename to dev/generate_api_docs.py index 1954f27..8a4ab37 100755 --- a/scripts/generate_api_docs.py +++ b/dev/generate_api_docs.py @@ -2,15 +2,19 @@ """ Generate API documentation for NetGraph This script should be run from the project root directory. + +By default, outputs documentation to stdout. +Use --write-file to write to docs/reference/api-full.md instead. """ -import inspect +import argparse +import dataclasses import importlib -import sys +import inspect import os -import dataclasses -from pathlib import Path +import sys from datetime import datetime +from pathlib import Path # Add the current directory to Python path for development installs if os.path.exists("ngraph"): @@ -61,7 +65,7 @@ def get_class_info(cls): try: # Try to call the factory to get a representative value default_val = field.default_factory() - except: + except Exception: default_val = f"{field.default_factory.__name__}()" else: default_val = None @@ -145,8 +149,16 @@ def document_module(module_name): return doc -def generate_api_documentation(): - """Generate the complete API documentation.""" +def generate_api_documentation(output_to_file=False): + """Generate the complete API documentation. + + Args: + output_to_file (bool): If True, write to docs/reference/api-full.md. + If False, return the documentation string. + + Returns: + str: The generated documentation (when output_to_file=False) + """ # Modules to document (in order) modules = [ @@ -177,7 +189,7 @@ def generate_api_documentation(): timestamp = datetime.now().strftime("%B %d, %Y at %H:%M UTC") header = f"""# NetGraph API Reference (Auto-Generated) -This is the complete auto-generated API documentation for NetGraph. +This is the complete auto-generated API documentation for NetGraph. For a curated, example-driven API guide, see **[api.md](api.md)**. 
> **๐Ÿ“‹ Documentation Types:** @@ -230,19 +242,45 @@ def generate_api_documentation(): doc += footer - # Ensure output directory exists - output_path = Path("docs/reference/api-full.md") - output_path.parent.mkdir(parents=True, exist_ok=True) + if output_to_file: + # Ensure output directory exists + output_path = Path("docs/reference/api-full.md") + output_path.parent.mkdir(parents=True, exist_ok=True) - # Write to file - with open(output_path, "w", encoding="utf-8") as f: - f.write(doc) + # Write to file + with open(output_path, "w", encoding="utf-8") as f: + f.write(doc) - print(f"โœ… API documentation generated successfully!") - print(f"๐Ÿ“„ Written to: {output_path}") - print(f"๐Ÿ“Š Size: {len(doc):,} characters") - print(f"๐Ÿ“š Modules documented: {len(modules)}") + print("โœ… API documentation generated successfully!") + print(f"๐Ÿ“„ Written to: {output_path}") + print(f"๐Ÿ“Š Size: {len(doc):,} characters") + print(f"๐Ÿ“š Modules documented: {len(modules)}") + else: + # Return the documentation string + return doc if __name__ == "__main__": - generate_api_documentation() + parser = argparse.ArgumentParser( + description="Generate API documentation for NetGraph", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python generate_api_docs.py # Output to stdout + python generate_api_docs.py --write-file # Write to docs/reference/api-full.md + """, + ) + parser.add_argument( + "--write-file", + action="store_true", + help="Write documentation to docs/reference/api-full.md instead of stdout", + ) + + args = parser.parse_args() + + if args.write_file: + generate_api_documentation(output_to_file=True) + else: + # Output to stdout + doc = generate_api_documentation(output_to_file=False) + print(doc) diff --git a/dev/run-checks.sh b/dev/run-checks.sh new file mode 100755 index 0000000..9ad285b --- /dev/null +++ b/dev/run-checks.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# Run all code quality checks and tests +# This script runs the complete validation suite: pre-commit hooks + tests + +set -e # Exit on any error + +echo "๐Ÿ” Running complete code quality checks and tests..." +echo "" + +# Check if pre-commit is installed +if ! command -v pre-commit &> /dev/null; then + echo "โŒ pre-commit is not installed. Please run 'pip install pre-commit' first." + exit 1 +fi + +# Check if pytest is installed +if ! command -v pytest &> /dev/null; then + echo "โŒ pytest is not installed. Please run 'pip install -e .[dev]' first." + exit 1 +fi + +# Check if pre-commit hooks are installed +if [ ! -f .git/hooks/pre-commit ]; then + echo "โš ๏ธ Pre-commit hooks not installed. Installing now..." + pre-commit install + echo "" +fi + +# Run pre-commit checks +echo "๐Ÿƒ Running pre-commit on all files..." +pre-commit run --all-files + +if [ $? -ne 0 ]; then + echo "" + echo "โŒ Pre-commit checks failed. Please fix the issues above before running tests." + echo "๐Ÿ’ก Tip: Most formatting issues can be auto-fixed by running the checks again." + exit 1 +fi + +echo "" +echo "โœ… Pre-commit checks passed!" +echo "" + +# Run tests with coverage +echo "๐Ÿงช Running tests with coverage..." +pytest + +if [ $? -eq 0 ]; then + echo "" + echo "๐ŸŽ‰ All checks and tests passed! Your code is ready for commit." +else + echo "" + echo "โŒ Some tests failed. Please fix the issues above and try again." 
+ exit 1 +fi diff --git a/dev/setup-dev.sh b/dev/setup-dev.sh new file mode 100755 index 0000000..6d90e49 --- /dev/null +++ b/dev/setup-dev.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Setup script for developers + +echo "๐Ÿ”ง Setting up development environment..." + +# Install the package with dev dependencies +echo "๐Ÿ“ฆ Installing package with dev dependencies..." +pip install -e '.[dev]' + +# Install pre-commit hooks +echo "๐Ÿช Installing pre-commit hooks..." +pre-commit install + +# Run pre-commit on all files to ensure everything is set up correctly +echo "โœ… Running pre-commit checks..." +pre-commit run --all-files + +echo "๐ŸŽ‰ Development environment setup complete!" +echo "" +echo "๐Ÿš€ You're ready to contribute! Pre-commit hooks will now run automatically on each commit." +echo "๐Ÿ’ก To manually run all checks: pre-commit run --all-files" diff --git a/docs/examples/basic.md b/docs/examples/basic.md index 61e62ff..7a5ea61 100644 --- a/docs/examples/basic.md +++ b/docs/examples/basic.md @@ -1,6 +1,6 @@ # Basic Example -In this toy example, we'll create a simple graph with parallel edges and alternative paths, then run max flow analysis with different flow placement policies. +In this toy example, we'll create a simple graph with parallel edges and alternative paths, then run max flow analysis with different flow placement policies. ### Creating a Simple Network @@ -27,7 +27,7 @@ from ngraph.lib.algorithms.base import FlowPlacement scenario_yaml = """ network: name: "fundamentals_example" - + # Create individual nodes nodes: A: {} @@ -48,8 +48,8 @@ network: link_params: capacity: 2 cost: 1 - - # Parallel edges between Bโ†’C + + # Parallel edges between Bโ†’C - source: B target: C link_params: @@ -60,7 +60,7 @@ network: link_params: capacity: 2 cost: 1 - + # Alternative path Aโ†’Dโ†’C - source: A target: D @@ -102,9 +102,9 @@ print(f"Flow on shortest paths: {max_flow_shortest}") # 3. Equal-balanced flow placement on shortest paths max_flow_shortest_balanced = network.max_flow( - source_path="A", - sink_path="C", - shortest_path=True, + source_path="A", + sink_path="C", + shortest_path=True, flow_placement=FlowPlacement.EQUAL_BALANCED ) print(f"Equal-balanced flow: {max_flow_shortest_balanced}") diff --git a/docs/examples/clos-fabric.md b/docs/examples/clos-fabric.md index 5e4330d..cdd0964 100644 --- a/docs/examples/clos-fabric.md +++ b/docs/examples/clos-fabric.md @@ -7,7 +7,7 @@ This example demonstrates how to model and analyze a 3-tier Clos network fabric We'll create two separate 3-tier Clos networks and analyze the maximum flow capacity between them. 
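The analysis itself follows the same pattern as the basic example: build a `Scenario` from YAML, take the expanded `Network`, and query it. A condensed sketch, assuming the scenario YAML developed below:

```python
from ngraph.lib.algorithms.base import FlowPlacement
from ngraph.scenario import Scenario

scenario = Scenario.from_yaml(scenario_yaml)  # YAML as defined in this example
network = scenario.network

# Max flow between the t1 tiers of the two fabrics, ECMP-style placement
max_flow = network.max_flow(
    source_path=r"my_clos1.*(b[0-9]*)/t1",
    sink_path=r"my_clos2.*(b[0-9]*)/t1",
    mode="combine",
    flow_placement=FlowPlacement.EQUAL_BALANCED,
)
```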
This scenario showcases: - Hierarchical blueprint composition -- Complex adjacency patterns +- Complex adjacency patterns - Flow analysis with different placement policies - ECMP vs UCMP traffic distribution @@ -102,7 +102,7 @@ print(f"Maximum flow with ECMP: {max_flow_ecmp}") The result `{('b1|b2', 'b1|b2'): 256.0}` means: - **Source**: All t1 nodes in both b1 and b2 segments of my_clos1 -- **Sink**: All t1 nodes in both b1 and b2 segments of my_clos2 +- **Sink**: All t1 nodes in both b1 and b2 segments of my_clos2 - **Capacity**: Maximum flow of 256.0 units ## Traffic Engineering Comparison @@ -115,7 +115,7 @@ The result `{('b1|b2', 'b1|b2'): 256.0}` means: # ECMP: Equal distribution across all paths max_flow_ecmp = network.max_flow( source_path=r"my_clos1.*(b[0-9]*)/t1", - sink_path=r"my_clos2.*(b[0-9]*)/t1", + sink_path=r"my_clos2.*(b[0-9]*)/t1", mode="combine", flow_placement=FlowPlacement.EQUAL_BALANCED ) @@ -124,7 +124,7 @@ max_flow_ecmp = network.max_flow( max_flow_ucmp = network.max_flow( source_path=r"my_clos1.*(b[0-9]*)/t1", sink_path=r"my_clos2.*(b[0-9]*)/t1", - mode="combine", + mode="combine", flow_placement=FlowPlacement.PROPORTIONAL ) diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md index 96cc8f7..46b3216 100644 --- a/docs/getting-started/installation.md +++ b/docs/getting-started/installation.md @@ -78,7 +78,7 @@ To exit the JupyterLab server, press `Ctrl+C` in the terminal where the server i node_count: 4 name_template: "leaf-{node_num}" spine: - node_count: 2 + node_count: 2 name_template: "spine-{node_num}" adjacency: - source: /leaf diff --git a/docs/getting-started/tutorial.md b/docs/getting-started/tutorial.md index 052fe65..c66c424 100644 --- a/docs/getting-started/tutorial.md +++ b/docs/getting-started/tutorial.md @@ -17,7 +17,7 @@ network: node_count: 4 name_template: "leaf-{node_num}" spine: - node_count: 2 + node_count: 2 name_template: "spine-{node_num}" adjacency: - source: /leaf @@ -149,21 +149,21 @@ from ngraph.lib.flow_policy import FlowPlacement # Calculate MaxFlow from pod1 servers to pod2 servers max_flow = network.max_flow( source_path="pod1/servers", - sink_path="pod2/servers", + sink_path="pod2/servers", ) print(f"Maximum flow pod1โ†’pod2: {max_flow}") # Calculate MaxFlow from pod1 leaf to pod2 leaf max_flow_leaf = network.max_flow( source_path="pod1/leaf", - sink_path="pod2/leaf", + sink_path="pod2/leaf", ) print(f"Maximum flow pod1โ†’pod2 leaf: {max_flow_leaf}") # Calculate MaxFlow from pod1 spine to pod2 spine max_flow_spine = network.max_flow( source_path="pod1/spine", - sink_path="pod2/spine", + sink_path="pod2/spine", ) print(f"Maximum flow pod1โ†’pod2 spine: {max_flow_spine}") ``` diff --git a/docs/reference/api-full.md b/docs/reference/api-full.md index 3db1345..0bf2baf 100644 --- a/docs/reference/api-full.md +++ b/docs/reference/api-full.md @@ -1,6 +1,6 @@ # NetGraph API Reference (Auto-Generated) -This is the complete auto-generated API documentation for NetGraph. +This is the complete auto-generated API documentation for NetGraph. For a curated, example-driven API guide, see **[api.md](api.md)**. > **๐Ÿ“‹ Documentation Types:** @@ -10,7 +10,7 @@ For a curated, example-driven API guide, see **[api.md](api.md)**. 
> - **[CLI Reference](cli.md)** - Command-line interface > - **[DSL Reference](dsl.md)** - YAML syntax guide -**Generated from source code on:** June 05, 2025 at 03:11 UTC +**Generated from source code on:** June 08, 2025 at 01:15 UTC --- @@ -439,7 +439,7 @@ Attributes: priority (int): A priority class for this demand (default=0). demand (float): The total demand volume (default=0.0). demand_placed (float): The portion of this demand that has been placed so far. - flow_policy_config ((Optional[FlowPolicyConfig]): The routing/placement policy config. + flow_policy_config (Optional[FlowPolicyConfig]): The routing/placement policy config. flow_policy (Optional[FlowPolicy]): A fully constructed FlowPolicy instance. If provided, it overrides flow_policy_config. mode (str): Expansion mode for generating sub-demands. @@ -532,7 +532,7 @@ Attributes: **Methods:** -- `apply_failures(self, network_nodes: 'Dict[str, Any]', network_links: 'Dict[str, Any]', network_risk_groups: 'Dict[str, Any]' = None) -> 'List[str]'` +- `apply_failures(self, network_nodes: 'Dict[str, Any]', network_links: 'Dict[str, Any]', network_risk_groups: 'Dict[str, Any] | None' = None) -> 'List[str]'` - Identify which entities fail given the defined rules, then optionally ### FailureRule @@ -729,7 +729,7 @@ Inherits from: - Add a directed edge from u_for_edge to v_for_edge. - `add_edges_from(self, ebunch_to_add, **attr)` - Add all the edges in ebunch_to_add. -- `add_node(self, n: 'NodeID', **attr: 'Any') -> 'None'` +- `add_node(self, node_for_adding: 'NodeID', **attr: 'Any') -> 'None'` - Add a single node, disallowing duplicates. - `add_nodes_from(self, nodes_for_adding, **attr)` - Add multiple nodes. @@ -773,8 +773,8 @@ Inherits from: - Returns an iterator over nodes contained in nbunch that are - `neighbors(self, n)` - Returns an iterator over successor nodes of n. -- `new_edge_key(src_node: 'NodeID', dst_node: 'NodeID') -> 'EdgeID'` - - Generate a unique edge key. +- `new_edge_key(self, u, v)` + - Returns an unused key for edges between nodes `u` and `v`. - `number_of_edges(self, u=None, v=None)` - Returns the number of edges between two nodes. 
- `number_of_nodes(self)` @@ -889,7 +889,7 @@ Returns: ## ngraph.lib.algorithms.spf -### ksp(graph: ngraph.lib.graph.StrictMultiDiGraph, src_node: Hashable, dst_node: Hashable, edge_select: ngraph.lib.algorithms.base.EdgeSelect = , edge_select_func: Optional[Callable[[ngraph.lib.graph.StrictMultiDiGraph, Hashable, Hashable, Dict[Hashable, Dict[str, Any]], Set[Hashable], Set[Hashable]], Tuple[Union[int, float], List[Hashable]]]] = None, max_k: Optional[int] = None, max_path_cost: Union[int, float, NoneType] = inf, max_path_cost_factor: Optional[float] = None, multipath: bool = True, excluded_edges: Optional[Set[Hashable]] = None, excluded_nodes: Optional[Set[Hashable]] = None) -> Iterator[Tuple[Dict[Hashable, Union[int, float]], Dict[Hashable, Dict[Hashable, List[Hashable]]]]] +### ksp(graph: ngraph.lib.graph.StrictMultiDiGraph, src_node: Hashable, dst_node: Hashable, edge_select: ngraph.lib.algorithms.base.EdgeSelect = , edge_select_func: Optional[Callable[[ngraph.lib.graph.StrictMultiDiGraph, Hashable, Hashable, Dict[Hashable, Dict[str, Any]], Set[Hashable], Set[Hashable]], Tuple[Union[int, float], List[Hashable]]]] = None, max_k: Optional[int] = None, max_path_cost: Union[int, float] = inf, max_path_cost_factor: Optional[float] = None, multipath: bool = True, excluded_edges: Optional[Set[Hashable]] = None, excluded_nodes: Optional[Set[Hashable]] = None) -> Iterator[Tuple[Dict[Hashable, Union[int, float]], Dict[Hashable, Dict[Hashable, List[Hashable]]]]] Generator of up to k shortest paths from src_node to dst_node using a Yen-like algorithm. @@ -1087,8 +1087,8 @@ Attributes: **Attributes:** - `name` (str) -- `source_path` (Pattern[str]) -- `sink_path` (Pattern[str]) +- `source_path` (str) +- `sink_path` (str) - `mode` (str) = combine - `probe_reverse` (bool) = False - `shortest_path` (bool) = False diff --git a/docs/reference/api.md b/docs/reference/api.md index 385937a..2bb6f9e 100644 --- a/docs/reference/api.md +++ b/docs/reference/api.md @@ -19,7 +19,7 @@ from ngraph.scenario import Scenario # Create from YAML scenario = Scenario.from_yaml(yaml_content) -# Create programmatically +# Create programmatically scenario = Scenario() scenario.network = Network() scenario.run() @@ -118,7 +118,7 @@ from ngraph.traffic_demand import TrafficDemand demand = TrafficDemand( name="web_traffic", source_path="web_servers", - sink_path="databases", + sink_path="databases", volume=1000, mode="full_mesh" ) @@ -207,7 +207,7 @@ restored_graph = from_digraph(nx_digraph) restored_graph = from_graph(nx_graph) ``` -### Graph Algorithms +### Graph Algorithms Low-level graph analysis functions. 
```python diff --git a/docs/reference/dsl.md b/docs/reference/dsl.md index 30553db..2c43c21 100644 --- a/docs/reference/dsl.md +++ b/docs/reference/dsl.md @@ -118,7 +118,7 @@ network: The bracket expansion syntax supports: - **Numeric ranges**: `[1-4]` expands to `1`, `2`, `3`, `4` -- **Character ranges**: `[a-c]` expands to `a`, `b`, `c` +- **Character ranges**: `[a-c]` expands to `a`, `b`, `c` - **Explicit lists**: `[red,blue,green]` expands to `red`, `blue`, `green` - **Multiple expansions**: `dc[1-2]/rack[a-b]` creates `dc1/racka`, `dc1/rackb`, `dc2/racka`, `dc2/rackb` @@ -133,11 +133,11 @@ adjacency: target: "spine{s}" expand_vars: p: [1, 2] - r: ["a", "b"] + r: ["a", "b"] s: [1, 2, 3] expansion_mode: "cartesian" # Creates all combinations pattern: "mesh" - + # Zip expansion (pairs elements by index) - source: "server{idx}" target: "switch{idx}" @@ -164,7 +164,7 @@ You can override specific attributes of nodes and links after they are created b ```yaml network: # ... groups and adjacency definitions ... - + node_overrides: - path: "^my_clos1/spine/switch-(1|3|5)$" # Specific spine switches disabled: true @@ -266,7 +266,7 @@ blueprints: idx: [1, 2, 3, 4, 5, 6, 7, 8] expansion_mode: "zip" # Pairs source[i] with target[i] pattern: "mesh" - link_params: + link_params: capacity: 3200 ``` @@ -320,7 +320,7 @@ components: capacity: 12800.0 # 32x400G ports ports: 32 count: 4 - + Optic400GLR4: component_type: "optic" description: "400G LR4 pluggable optic" @@ -585,7 +585,7 @@ workflow: - step_type: EnableNodes path: "^my_clos2/leaf/switch-\\d+$" # All leaf switches count: 4 - + - step_type: CapacityProbe source_path: "^(dc\\d+)/client" # Capturing group creates per-DC groups sink_path: "^(dc\\d+)/server" @@ -595,7 +595,7 @@ workflow: ### Best Practices 1. **Use anchors for precision**: Always use `$` at the end if you want exact matches -2. **Escape special characters in YAML**: +2. **Escape special characters in YAML**: - For digit patterns: Use `\\d+` instead of `\d+` in quoted YAML strings - For simple wildcards: `.*/spine/.*` works directly in YAML - In Python code: Use raw strings `r"pattern"` or double escaping `"\\d+"` @@ -607,15 +607,15 @@ workflow: 1. **Missing end anchors**: `switch-1` matches `switch-10`, `switch-11`, etc. - Fix: Use `switch-1$` for exact match - -2. **YAML escaping inconsistencies**: + +2. **YAML escaping inconsistencies**: - Simple patterns like `.*` work directly: `path: .*/spine/.*` - Complex patterns need escaping: `path: "spine-\\d+$"` - Python code always needs proper escaping: `"(SEA/leaf\\d)"` - + 3. **Greedy matching**: `.*` can match more than intended - Fix: Use specific patterns like `[^/]+` to match within path segments - + 4. **Empty groups**: Patterns that don't match any nodes create empty results - Fix: Test patterns against your actual node names @@ -630,7 +630,7 @@ adjacency: - source: .*/spine/.* # Matches any spine nodes target: .*/spine/.* -# Complex patterns - use quotes and double backslashes +# Complex patterns - use quotes and double backslashes node_overrides: - path: "spine-\\d+$" # Matches spine-1, spine-2, etc. attrs: diff --git a/ngraph/__init__.py b/ngraph/__init__.py index 6cf9a8a..427d228 100644 --- a/ngraph/__init__.py +++ b/ngraph/__init__.py @@ -1,5 +1,5 @@ from __future__ import annotations -from . import cli, transform +from . 
import cli, transform __all__ = ["cli", "transform"] diff --git a/ngraph/blueprints.py b/ngraph/blueprints.py index 7f6e828..c9d7774 100644 --- a/ngraph/blueprints.py +++ b/ngraph/blueprints.py @@ -9,7 +9,7 @@ from ngraph.network import Link, Network, Node -@dataclass(slots=True) +@dataclass class Blueprint: """ Represents a reusable blueprint for hierarchical sub-topologies. @@ -33,7 +33,7 @@ class Blueprint: adjacency: List[Dict[str, Any]] -@dataclass(slots=True) +@dataclass class DSLExpansionContext: """ Carries the blueprint definitions and the final Network instance @@ -162,7 +162,7 @@ def _expand_group( parent_path: str, group_name: str, group_def: Dict[str, Any], - inherited_risk_groups: Set[str] = frozenset(), + inherited_risk_groups: Set[str] | None = None, ) -> None: """ Expands a single group definition into either: @@ -191,6 +191,8 @@ def _expand_group( group_def (Dict[str, Any]): The group definition (node_count, name_template, etc.). inherited_risk_groups (Set[str]): Risk groups inherited from a higher-level group. """ + if inherited_risk_groups is None: + inherited_risk_groups = set() expanded_names = _expand_name_patterns(group_name) # If bracket expansions exist, replicate for each expansion if len(expanded_names) > 1 or expanded_names[0] != group_name: @@ -416,7 +418,7 @@ def _expand_adjacency_with_variables( ) for combo_tuple in zip_longest(*lists_of_values, fillvalue=None): - combo_dict = dict(zip(var_names, combo_tuple)) + combo_dict = dict(zip(var_names, combo_tuple, strict=False)) expanded_src = _join_paths( parent_path, source_template.format(**combo_dict) ) @@ -429,7 +431,7 @@ def _expand_adjacency_with_variables( else: # "cartesian" default for combo_tuple in product(*lists_of_values): - combo_dict = dict(zip(var_names, combo_tuple)) + combo_dict = dict(zip(var_names, combo_tuple, strict=False)) expanded_src = _join_paths( parent_path, source_template.format(**combo_dict) ) diff --git a/ngraph/components.py b/ngraph/components.py index b1bac5d..df2d07b 100644 --- a/ngraph/components.py +++ b/ngraph/components.py @@ -1,12 +1,13 @@ from __future__ import annotations -import yaml from copy import deepcopy from dataclasses import dataclass, field -from typing import Dict, Any, Optional +from typing import Any, Dict, Optional + +import yaml -@dataclass(slots=True) +@dataclass class Component: """ A generic component that can represent chassis, line cards, optics, etc. @@ -124,7 +125,7 @@ def as_dict(self, include_children: bool = True) -> Dict[str, Any]: return data -@dataclass(slots=True) +@dataclass class ComponentsLibrary: """ Holds a collection of named Components. 
Each entry is a top-level "template" @@ -207,7 +208,7 @@ def from_dict(cls, data: Dict[str, Any]) -> ComponentsLibrary: components_map: Dict[str, Component] = {} for comp_name, comp_def in data.items(): components_map[comp_name] = cls._build_component(comp_name, comp_def) - return cls(components=components_map) + return ComponentsLibrary(components=components_map) @classmethod def _build_component(cls, name: str, definition_data: Dict[str, Any]) -> Component: diff --git a/ngraph/explorer.py b/ngraph/explorer.py index 38140f6..554714f 100644 --- a/ngraph/explorer.py +++ b/ngraph/explorer.py @@ -2,10 +2,10 @@ import logging from dataclasses import dataclass, field -from typing import Dict, List, Set, Optional +from typing import Dict, List, Optional, Set -from ngraph.network import Network, Node, Link from ngraph.components import ComponentsLibrary +from ngraph.network import Network, Node logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) diff --git a/ngraph/failure_manager.py b/ngraph/failure_manager.py index 860e560..be6810f 100644 --- a/ngraph/failure_manager.py +++ b/ngraph/failure_manager.py @@ -4,12 +4,13 @@ import statistics from collections import defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed -from typing import List, Dict, Optional, Tuple, Any +from typing import Any, Dict, List, Optional, Tuple +from ngraph.failure_policy import FailurePolicy +from ngraph.lib.flow_policy import FlowPolicyConfig from ngraph.network import Network from ngraph.traffic_demand import TrafficDemand from ngraph.traffic_manager import TrafficManager, TrafficResult -from ngraph.failure_policy import FailurePolicy class FailureManager: @@ -29,7 +30,7 @@ def __init__( network: Network, traffic_demands: List[TrafficDemand], failure_policy: Optional[FailurePolicy] = None, - default_flow_policy_config=None, + default_flow_policy_config: Optional[FlowPolicyConfig] = None, ) -> None: """ Initialize a FailureManager. 
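For orientation, a rough usage sketch of `FailureManager` as it looks after this change; the empty network and demand list are placeholders (a real run builds them from a `Scenario`):

```python
from ngraph.failure_manager import FailureManager
from ngraph.lib.flow_policy import FlowPolicyConfig
from ngraph.network import Network

mgr = FailureManager(
    network=Network(),    # placeholder; normally scenario.network
    traffic_demands=[],   # placeholder; normally a List[TrafficDemand]
    failure_policy=None,  # optional FailurePolicy
    default_flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP,
)
results = mgr.run_single_failure_scenario()  # -> List[TrafficResult]
```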
@@ -56,7 +57,7 @@ def apply_failures(self) -> None: # Collect node/links as dicts {id: attrs}, matching FailurePolicy expectations node_map = {n_name: n.attrs for n_name, n in self.network.nodes.items()} - link_map = {l_id: l.attrs for l_id, l in self.network.links.items()} + link_map = {link_id: link.attrs for link_id, link in self.network.links.items()} failed_ids = self.failure_policy.apply_failures(node_map, link_map) @@ -84,7 +85,8 @@ def run_single_failure_scenario(self) -> List[TrafficResult]: tmgr = TrafficManager( network=self.network, traffic_demands=copy.deepcopy(self.traffic_demands), - default_flow_policy_config=self.default_flow_policy_config, + default_flow_policy_config=self.default_flow_policy_config + or FlowPolicyConfig.SHORTEST_PATHS_ECMP, ) tmgr.build_graph() tmgr.expand_demands() diff --git a/ngraph/failure_policy.py b/ngraph/failure_policy.py index 013f917..edcb800 100644 --- a/ngraph/failure_policy.py +++ b/ngraph/failure_policy.py @@ -1,9 +1,9 @@ from __future__ import annotations +from collections import defaultdict, deque from dataclasses import dataclass, field -from typing import Any, Dict, List, Literal, Optional, Set from random import random, sample -from collections import defaultdict, deque +from typing import Any, Dict, List, Literal, Set @dataclass @@ -126,7 +126,7 @@ def apply_failures( self, network_nodes: Dict[str, Any], network_links: Dict[str, Any], - network_risk_groups: Dict[str, Any] = None, + network_risk_groups: Dict[str, Any] | None = None, ) -> List[str]: """ Identify which entities fail given the defined rules, then optionally diff --git a/ngraph/lib/algorithms/base.py b/ngraph/lib/algorithms/base.py index 0b309c3..893dc88 100644 --- a/ngraph/lib/algorithms/base.py +++ b/ngraph/lib/algorithms/base.py @@ -1,8 +1,9 @@ from __future__ import annotations from enum import IntEnum -from typing import Union, Tuple -from ngraph.lib.graph import NodeID, EdgeID +from typing import Tuple, Union + +from ngraph.lib.graph import EdgeID, NodeID #: Represents numeric cost in the network (e.g. distance, latency, etc.). 
Cost = Union[int, float] diff --git a/ngraph/lib/algorithms/calc_capacity.py b/ngraph/lib/algorithms/calc_capacity.py index e8889c6..34b2fee 100644 --- a/ngraph/lib/algorithms/calc_capacity.py +++ b/ngraph/lib/algorithms/calc_capacity.py @@ -3,8 +3,8 @@ from collections import defaultdict, deque from typing import Deque, Dict, List, Set, Tuple -from ngraph.lib.graph import EdgeID, StrictMultiDiGraph, NodeID from ngraph.lib.algorithms.base import MIN_CAP, MIN_FLOW, FlowPlacement +from ngraph.lib.graph import EdgeID, NodeID, StrictMultiDiGraph def _init_graph_data( diff --git a/ngraph/lib/algorithms/edge_select.py b/ngraph/lib/algorithms/edge_select.py index 842f9c3..ed48e13 100644 --- a/ngraph/lib/algorithms/edge_select.py +++ b/ngraph/lib/algorithms/edge_select.py @@ -1,8 +1,8 @@ from math import isclose from typing import Any, Callable, Dict, List, Optional, Set, Tuple -from ngraph.lib.graph import StrictMultiDiGraph, NodeID, EdgeID, AttrDict -from ngraph.lib.algorithms.base import Cost, MIN_CAP, EdgeSelect +from ngraph.lib.algorithms.base import MIN_CAP, Cost, EdgeSelect +from ngraph.lib.graph import AttrDict, EdgeID, NodeID, StrictMultiDiGraph def edge_select_fabric( diff --git a/ngraph/lib/algorithms/max_flow.py b/ngraph/lib/algorithms/max_flow.py index cec39a3..0662a60 100644 --- a/ngraph/lib/algorithms/max_flow.py +++ b/ngraph/lib/algorithms/max_flow.py @@ -1,8 +1,8 @@ -from ngraph.lib.algorithms.spf import spf -from ngraph.lib.algorithms.place_flow import place_flow_on_graph from ngraph.lib.algorithms.base import EdgeSelect, FlowPlacement -from ngraph.lib.graph import NodeID, StrictMultiDiGraph from ngraph.lib.algorithms.flow_init import init_flow_graph +from ngraph.lib.algorithms.place_flow import place_flow_on_graph +from ngraph.lib.algorithms.spf import spf +from ngraph.lib.graph import NodeID, StrictMultiDiGraph def calc_max_flow( diff --git a/ngraph/lib/algorithms/path_utils.py b/ngraph/lib/algorithms/path_utils.py index 798edfc..8a9c325 100644 --- a/ngraph/lib/algorithms/path_utils.py +++ b/ngraph/lib/algorithms/path_utils.py @@ -1,10 +1,10 @@ from __future__ import annotations from itertools import product -from typing import Dict, Iterator, List +from typing import Any, Dict, Iterator, List -from ngraph.lib.graph import NodeID, EdgeID from ngraph.lib.algorithms.base import PathTuple +from ngraph.lib.graph import EdgeID, NodeID def resolve_to_paths( @@ -31,7 +31,7 @@ def resolve_to_paths( seen = {dst_node} # Each stack entry: [(current_node, tuple_of_edgeIDs), predecessor_index] - stack: List[List[object]] = [[(dst_node, ()), 0]] + stack: List[List[Any]] = [[(dst_node, ()), 0]] top = 0 while top >= 0: diff --git a/ngraph/lib/algorithms/place_flow.py b/ngraph/lib/algorithms/place_flow.py index b1e3d75..a985d0b 100644 --- a/ngraph/lib/algorithms/place_flow.py +++ b/ngraph/lib/algorithms/place_flow.py @@ -3,8 +3,8 @@ from dataclasses import dataclass, field from typing import Dict, Hashable, List, Optional, Set -from ngraph.lib.algorithms.calc_capacity import calc_graph_capacity from ngraph.lib.algorithms.base import FlowPlacement +from ngraph.lib.algorithms.calc_capacity import calc_graph_capacity from ngraph.lib.graph import EdgeID, NodeID, StrictMultiDiGraph @@ -120,9 +120,9 @@ def place_flow_on_graph( edges[eid][3][flows_attr].setdefault( flow_index, 0.0 ) - edges[eid][3][flows_attr][ - flow_index - ] += edge_subflow + edges[eid][3][flows_attr][flow_index] += ( + edge_subflow + ) elif flow_placement == FlowPlacement.EQUAL_BALANCED: # Split equally across all parallel edges 
in edge_list. @@ -153,7 +153,7 @@ def remove_flow_from_graph( flows_attr: The per-flow attribute name on edges. """ edges = flow_graph.get_edges() - for edge_id, (_, _, _, edge_attr) in edges.items(): + for _edge_id, (_, _, _, edge_attr) in edges.items(): if flow_index is not None and flow_index in edge_attr[flows_attr]: # Subtract only the specified flow removed = edge_attr[flows_attr][flow_index] diff --git a/ngraph/lib/algorithms/spf.py b/ngraph/lib/algorithms/spf.py index 5bd988d..aa7500f 100644 --- a/ngraph/lib/algorithms/spf.py +++ b/ngraph/lib/algorithms/spf.py @@ -9,19 +9,19 @@ Tuple, ) -from ngraph.lib.graph import ( - AttrDict, - NodeID, - EdgeID, - StrictMultiDiGraph, -) from ngraph.lib.algorithms.base import ( + MIN_CAP, Cost, EdgeSelect, - MIN_CAP, ) from ngraph.lib.algorithms.edge_select import edge_select_fabric from ngraph.lib.algorithms.path_utils import resolve_to_paths +from ngraph.lib.graph import ( + AttrDict, + EdgeID, + NodeID, + StrictMultiDiGraph, +) def _spf_fast_all_min_cost_dijkstra( @@ -50,7 +50,7 @@ def _spf_fast_all_min_cost_dijkstra( from the predecessor to that node. If multipath=True, there may be multiple predecessors for the same node. """ - outgoing_adjacencies = graph._adj + outgoing_adjacencies = graph._adj # type: ignore[attr-defined] if src_node not in outgoing_adjacencies: raise KeyError(f"Source node '{src_node}' is not in the graph.") @@ -115,7 +115,7 @@ def _spf_fast_all_min_cost_with_cap_remaining_dijkstra( - pred: For each reachable node, a dict of predecessor -> list of edges from the predecessor to that node. """ - outgoing_adjacencies = graph._adj + outgoing_adjacencies = graph._adj # type: ignore[attr-defined] if src_node not in outgoing_adjacencies: raise KeyError(f"Source node '{src_node}' is not in the graph.") @@ -223,7 +223,11 @@ def spf( else: edge_select_func = edge_select_fabric(edge_select) - outgoing_adjacencies = graph._adj + # Ensure edge_select_func is set at this point + if edge_select_func is None: + edge_select_func = edge_select_fabric(edge_select) + + outgoing_adjacencies = graph._adj # type: ignore[attr-defined] if src_node not in outgoing_adjacencies: raise KeyError(f"Source node '{src_node}' is not in the graph.") @@ -284,7 +288,7 @@ def ksp( ] ] = None, max_k: Optional[int] = None, - max_path_cost: Optional[Cost] = float("inf"), + max_path_cost: Cost = float("inf"), max_path_cost_factor: Optional[float] = None, multipath: bool = True, excluded_edges: Optional[Set[EdgeID]] = None, @@ -370,7 +374,7 @@ def ksp( # For each realized path from src->dst in the last SPF for path in resolve_to_paths(src_node, dst_node, root_pred): # Spur node iteration - for idx, (spur_node, edges_list) in enumerate(path[:-1]): + for idx, (spur_node, _edges_list) in enumerate(path[:-1]): # The path up to but not including spur_node root_path = path[:idx] @@ -380,7 +384,7 @@ def ksp( # Remove edges (and possibly nodes) used in previous shortest paths that # share the same root_path - for sp_costs, sp_pred, sp_ex_e, sp_ex_n in shortest_paths: + for _sp_costs, sp_pred, sp_ex_e, sp_ex_n in shortest_paths: for p in resolve_to_paths(src_node, dst_node, sp_pred): if p[:idx] == root_path: excl_e.update(sp_ex_e) diff --git a/ngraph/lib/flow.py b/ngraph/lib/flow.py index 0543ed1..0279e1e 100644 --- a/ngraph/lib/flow.py +++ b/ngraph/lib/flow.py @@ -19,14 +19,15 @@ class FlowIndex(NamedTuple): Attributes: src_node (NodeID): The source node of the flow. dst_node (NodeID): The destination node of the flow. 
- flow_class (int): Integer representing the 'class' of this flow (e.g., traffic class). - flow_id (str): A unique ID for this flow. + flow_class (Hashable): Identifier representing the 'class' of this flow (e.g., traffic class). + Can be int, str, or any hashable type for flexibility. + flow_id (int): A unique ID for this flow. """ src_node: NodeID dst_node: NodeID - flow_class: int - flow_id: str + flow_class: Hashable + flow_id: int class Flow: @@ -42,7 +43,7 @@ class Flow: def __init__( self, path_bundle: PathBundle, - flow_index: Hashable, + flow_index: FlowIndex, excluded_edges: Optional[Set[EdgeID]] = None, excluded_nodes: Optional[Set[NodeID]] = None, ) -> None: @@ -51,12 +52,12 @@ def __init__( Args: path_bundle (PathBundle): The set of paths this flow uses. - flow_index (Hashable): A unique identifier for this flow (e.g., MPLS label, tuple, etc.). + flow_index (FlowIndex): A unique identifier for this flow. excluded_edges (Optional[Set[EdgeID]]): Edges to exclude from usage. excluded_nodes (Optional[Set[NodeID]]): Nodes to exclude from usage. """ self.path_bundle: PathBundle = path_bundle - self.flow_index: Hashable = flow_index + self.flow_index: FlowIndex = flow_index self.excluded_edges: Set[EdgeID] = excluded_edges or set() self.excluded_nodes: Set[NodeID] = excluded_nodes or set() diff --git a/ngraph/lib/flow_policy.py b/ngraph/lib/flow_policy.py index d4dd341..b2a6dcd 100644 --- a/ngraph/lib/flow_policy.py +++ b/ngraph/lib/flow_policy.py @@ -3,12 +3,12 @@ import copy from collections import deque from enum import IntEnum -from typing import Any, Callable, Dict, List, Optional, Set, Tuple +from typing import Any, Callable, Dict, Hashable, List, Optional, Set, Tuple -from ngraph.lib.flow import Flow, FlowIndex -from ngraph.lib.algorithms import spf, base, edge_select +from ngraph.lib.algorithms import base, edge_select, spf from ngraph.lib.algorithms.place_flow import FlowPlacement -from ngraph.lib.graph import AttrDict, NodeID, EdgeID, StrictMultiDiGraph +from ngraph.lib.flow import Flow, FlowIndex +from ngraph.lib.graph import AttrDict, EdgeID, NodeID, StrictMultiDiGraph from ngraph.lib.path_bundle import PathBundle @@ -46,7 +46,14 @@ def __init__( static_paths: Optional[List[PathBundle]] = None, edge_select_func: Optional[ Callable[ - [StrictMultiDiGraph, NodeID, NodeID, Dict[EdgeID, AttrDict]], + [ + StrictMultiDiGraph, + NodeID, + NodeID, + Dict[EdgeID, AttrDict], + Optional[Set[EdgeID]], + Optional[Set[NodeID]], + ], Tuple[base.Cost, List[EdgeID]], ] ] = None, @@ -150,7 +157,7 @@ def _build_flow_index( self, src_node: NodeID, dst_node: NodeID, - flow_class: int, + flow_class: Hashable, flow_id: int, ) -> FlowIndex: """ @@ -241,7 +248,7 @@ def _create_flow( flow_graph: StrictMultiDiGraph, src_node: NodeID, dst_node: NodeID, - flow_class: int, + flow_class: Hashable, min_flow: Optional[float] = None, path_bundle: Optional[PathBundle] = None, excluded_edges: Optional[Set[EdgeID]] = None, @@ -281,7 +288,7 @@ def _create_flows( flow_graph: StrictMultiDiGraph, src_node: NodeID, dst_node: NodeID, - flow_class: int, + flow_class: Hashable, min_flow: Optional[float] = None, ) -> None: """ @@ -387,7 +394,7 @@ def place_demand( flow_graph: StrictMultiDiGraph, src_node: NodeID, dst_node: NodeID, - flow_class: int, + flow_class: Hashable, volume: float, target_flow_volume: Optional[float] = None, min_flow: Optional[float] = None, @@ -474,7 +481,7 @@ def rebalance_demand( flow_graph: StrictMultiDiGraph, src_node: NodeID, dst_node: NodeID, - flow_class: int, + flow_class: Hashable, 
target_flow_volume: float, ) -> Tuple[float, float]: """ diff --git a/ngraph/lib/graph.py b/ngraph/lib/graph.py index 8857c3f..68fcb94 100644 --- a/ngraph/lib/graph.py +++ b/ngraph/lib/graph.py @@ -1,7 +1,7 @@ from __future__ import annotations -import uuid import base64 +import uuid from pickle import dumps, loads from typing import Any, Dict, Hashable, List, Optional, Tuple @@ -61,23 +61,6 @@ def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._edges: Dict[EdgeID, EdgeTuple] = {} - @staticmethod - def new_edge_key(src_node: NodeID, dst_node: NodeID) -> EdgeID: - """ - Generate a unique edge key. - - By default, creates a Base64-encoded UUID. Subclasses may override this - to provide an alternative scheme, such as a numeric counter. - - Args: - src_node (NodeID): The source node of the new edge. - dst_node (NodeID): The target node of the new edge. - - Returns: - EdgeID: The newly generated edge key. - """ - return new_base64_uuid() - def copy(self, as_view: bool = False, pickle: bool = True) -> StrictMultiDiGraph: """ Create a copy of this graph. @@ -95,26 +78,26 @@ def copy(self, as_view: bool = False, pickle: bool = True) -> StrictMultiDiGraph StrictMultiDiGraph: A new instance (or view) of the graph. """ if not pickle: - return super().copy(as_view=as_view) + return super().copy(as_view=as_view) # type: ignore[return-value] return loads(dumps(self)) # # Node management # - def add_node(self, n: NodeID, **attr: Any) -> None: + def add_node(self, node_for_adding: NodeID, **attr: Any) -> None: """ Add a single node, disallowing duplicates. Args: - n (NodeID): The node to add. + node_for_adding (NodeID): The node to add. **attr: Arbitrary attributes for this node. Raises: ValueError: If the node already exists in the graph. """ - if n in self: - raise ValueError(f"Node '{n}' already exists in this graph.") - super().add_node(n, **attr) + if node_for_adding in self: + raise ValueError(f"Node '{node_for_adding}' already exists in this graph.") + super().add_node(node_for_adding, **attr) def remove_node(self, n: NodeID) -> None: """ @@ -173,17 +156,19 @@ def add_edge( raise ValueError(f"Target node '{v_for_edge}' does not exist.") if key is None: - key = self.new_edge_key(u_for_edge, v_for_edge) + key = new_base64_uuid() else: if key in self._edges: raise ValueError(f"Edge with id '{key}' already exists.") super().add_edge(u_for_edge, v_for_edge, key=key, **attr) + # At this point, key is guaranteed to be non-None (either provided or generated) + assert key is not None self._edges[key] = ( u_for_edge, v_for_edge, key, - self[u_for_edge][v_for_edge][key], + self[u_for_edge][v_for_edge][key], # pyright: ignore[reportArgumentType] ) return key diff --git a/ngraph/lib/io.py b/ngraph/lib/io.py index 1f05532..382d543 100644 --- a/ngraph/lib/io.py +++ b/ngraph/lib/io.py @@ -1,8 +1,8 @@ from __future__ import annotations -from typing import Dict, Iterable, List, Optional, Any +from typing import Any, Dict, Iterable, List, Optional -from ngraph.lib.graph import StrictMultiDiGraph, NodeID +from ngraph.lib.graph import NodeID, StrictMultiDiGraph def graph_to_node_link(graph: StrictMultiDiGraph) -> Dict[str, Any]: @@ -148,7 +148,7 @@ def edgelist_to_graph( f"Line '{line}' does not match expected columns {columns} (token count mismatch)." 
) - line_dict = dict(zip(columns, tokens)) + line_dict = dict(zip(columns, tokens, strict=False)) src_id = line_dict[source] dst_id = line_dict[target] edge_key = line_dict.get(key, None) diff --git a/ngraph/lib/path.py b/ngraph/lib/path.py index 826a5c2..bb5bcb5 100644 --- a/ngraph/lib/path.py +++ b/ngraph/lib/path.py @@ -2,10 +2,10 @@ from dataclasses import dataclass, field from functools import cached_property -from typing import Iterator, Set, Tuple, Any +from typing import Any, Iterator, Set, Tuple from ngraph.lib.algorithms.base import Cost, PathTuple -from ngraph.lib.graph import EdgeID, StrictMultiDiGraph, NodeID +from ngraph.lib.graph import EdgeID, NodeID, StrictMultiDiGraph @dataclass diff --git a/ngraph/lib/path_bundle.py b/ngraph/lib/path_bundle.py index 283e123..3032ef8 100644 --- a/ngraph/lib/path_bundle.py +++ b/ngraph/lib/path_bundle.py @@ -6,7 +6,7 @@ from ngraph.lib.algorithms.base import Cost, EdgeSelect from ngraph.lib.algorithms.edge_select import edge_select_fabric from ngraph.lib.algorithms.path_utils import resolve_to_paths -from ngraph.lib.graph import EdgeID, StrictMultiDiGraph, NodeID +from ngraph.lib.graph import AttrDict, EdgeID, NodeID, StrictMultiDiGraph from ngraph.lib.path import Path @@ -86,8 +86,10 @@ def __lt__(self, other: PathBundle) -> bool: """Compare two PathBundles by cost (for sorting).""" return self.cost < other.cost - def __eq__(self, other: PathBundle) -> bool: + def __eq__(self, other: object) -> bool: """Check equality of two PathBundles by (src, dst, cost, edges).""" + if not isinstance(other, PathBundle): + return False return ( self.src_node == other.src_node and self.dst_node == other.dst_node @@ -96,17 +98,12 @@ def __eq__(self, other: PathBundle) -> bool: ) def __hash__(self) -> int: - """Create a unique hash based on (src, dst, cost, sorted edges).""" - return hash( - (self.src_node, self.dst_node, self.cost, tuple(sorted(self.edges))) - ) + """Create a unique hash based on (src, dst, cost, frozenset of edges).""" + return hash((self.src_node, self.dst_node, self.cost, frozenset(self.edges))) def __repr__(self) -> str: """String representation of this PathBundle.""" - return ( - f"PathBundle(" - f"{self.src_node}, {self.dst_node}, {self.pred}, {self.cost})" - ) + return f"PathBundle({self.src_node}, {self.dst_node}, {self.pred}, {self.cost})" def add(self, other: PathBundle) -> PathBundle: """ @@ -177,6 +174,10 @@ def from_path( raise ValueError( "A StrictMultiDiGraph `graph` is required when resolve_edges=True." ) + if edge_select is None: + raise ValueError( + "edge_select must be provided when resolve_edges=True." 
+ ) edge_selector = edge_select_fabric( edge_select, cost_attr=cost_attr, @@ -185,24 +186,32 @@ def from_path( else: edge_selector = None - src_node = path[0][0] - dst_node = path[-1][0] + src_node = path.path_tuple[0][0] + dst_node = path.path_tuple[-1][0] pred_map: Dict[NodeID, Dict[NodeID, List[EdgeID]]] = {src_node: {}} total_cost: Cost = 0 # Build the predecessor map from each hop - for (a_node, a_edges), (z_node, _) in zip(path[:-1], path[1:]): + for (a_node, a_edges), (z_node, _) in zip( + path.path_tuple[:-1], path.path_tuple[1:], strict=True + ): pred_map.setdefault(z_node, {}) # If we're not resolving edges, just copy whatever the path has if not resolve_edges: pred_map[z_node][a_node] = list(a_edges) else: # Re-select edges from a_node to z_node - min_cost, edge_list = edge_selector( - graph, a_node, z_node, graph[a_node][z_node] - ) - pred_map[z_node][a_node] = edge_list - total_cost += min_cost + if edge_selector is not None and graph is not None: + # Convert edges_dict to the expected Dict[EdgeID, AttrDict] format + # Since EdgeID is just Hashable, we can cast the keys directly + typed_edges_dict: Dict[EdgeID, AttrDict] = { + k: v for k, v in graph[a_node][z_node].items() + } + min_cost, edge_list = edge_selector( + graph, a_node, z_node, typed_edges_dict, None, None + ) + pred_map[z_node][a_node] = edge_list + total_cost += min_cost if resolve_edges: return cls(src_node, dst_node, pred_map, total_cost) diff --git a/ngraph/lib/util.py b/ngraph/lib/util.py index 150122e..a6ed848 100644 --- a/ngraph/lib/util.py +++ b/ngraph/lib/util.py @@ -1,7 +1,8 @@ -from typing import Optional, Callable, Any +from typing import Callable, Optional + import networkx as nx -from ngraph.lib.graph import StrictMultiDiGraph, NodeID +from ngraph.lib.graph import NodeID, StrictMultiDiGraph def to_digraph( @@ -31,11 +32,13 @@ def to_digraph( nx_graph = nx.DiGraph() nx_graph.add_nodes_from(graph.get_nodes()) - # Iterate over nodes and their neighbors using the internal _adj attribute. - for u, neighbors in graph._adj.items(): + # Iterate over nodes and their neighbors using the adjacency method. + for u, neighbors in graph.adjacency(): for v, edges in neighbors.items(): + # Convert edges to the expected dict format + typed_edges: dict = dict(edges) if edge_func: - edge_data = edge_func(graph, u, v, edges) + edge_data = edge_func(graph, u, v, typed_edges) nx_graph.add_edge(u, v, **edge_data) else: nx_graph.add_edge(u, v) @@ -44,7 +47,7 @@ def to_digraph( # Store the original multi-edge data in the '_uv_edges' attribute. edge_attr = nx_graph.edges[u, v] edge_attr.setdefault("_uv_edges", []) - edge_attr["_uv_edges"].append((u, v, edges)) + edge_attr["_uv_edges"].append((u, v, typed_edges)) return nx_graph @@ -65,7 +68,7 @@ def from_digraph(nx_graph: nx.DiGraph) -> StrictMultiDiGraph: graph.add_nodes_from(nx_graph.nodes) # Restore original multi-edges from the consolidated edge attribute. - for u, v, data in nx_graph.edges(data=True): + for _u, _v, data in nx_graph.edges(data=True): uv_edges = data.get("_uv_edges", []) for orig_u, orig_v, edges in uv_edges: for edge_id, edge_data in edges.items(): @@ -96,11 +99,13 @@ def to_graph( nx_graph = nx.Graph() nx_graph.add_nodes_from(graph.get_nodes()) - # Iterate over the internal _adj attribute to consolidate edges. - for u, neighbors in graph._adj.items(): + # Iterate over the adjacency to consolidate edges. 
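+    # adjacency() is the public networkx API; it yields (node, {neighbor: {edge_key: attrs}}) pairs.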
+ for u, neighbors in graph.adjacency(): for v, edges in neighbors.items(): + # Convert edges to the expected dict format + typed_edges: dict = dict(edges) if edge_func: - edge_data = edge_func(graph, u, v, edges) + edge_data = edge_func(graph, u, v, typed_edges) nx_graph.add_edge(u, v, **edge_data) else: nx_graph.add_edge(u, v) @@ -108,7 +113,7 @@ def to_graph( if revertible: edge_attr = nx_graph.edges[u, v] edge_attr.setdefault("_uv_edges", []) - edge_attr["_uv_edges"].append((u, v, edges)) + edge_attr["_uv_edges"].append((u, v, typed_edges)) return nx_graph @@ -129,7 +134,7 @@ def from_graph(nx_graph: nx.Graph) -> StrictMultiDiGraph: graph.add_nodes_from(nx_graph.nodes) # Restore multi-edge data from each edge's '_uv_edges' attribute. - for u, v, data in nx_graph.edges(data=True): + for _u, _v, data in nx_graph.edges(data=True): uv_edges = data.get("_uv_edges", []) for orig_u, orig_v, edges in uv_edges: for edge_id, edge_data in edges.items(): diff --git a/ngraph/network.py b/ngraph/network.py index 1459ea7..93b7ff3 100644 --- a/ngraph/network.py +++ b/ngraph/network.py @@ -4,7 +4,7 @@ import re import uuid from dataclasses import dataclass, field -from typing import Any, Dict, List, Optional, Tuple, Set +from typing import Any, Dict, List, Optional, Set, Tuple from ngraph.lib.algorithms.base import FlowPlacement from ngraph.lib.algorithms.max_flow import calc_max_flow @@ -21,7 +21,7 @@ def new_base64_uuid() -> str: return base64.urlsafe_b64encode(uuid.uuid4().bytes).decode("ascii").rstrip("=") -@dataclass(slots=True) +@dataclass class Node: """ Represents a node in the network. @@ -42,7 +42,7 @@ class Node: attrs: Dict[str, Any] = field(default_factory=dict) -@dataclass(slots=True) +@dataclass class Link: """ Represents a directed link between two nodes in the network. @@ -74,7 +74,7 @@ def __post_init__(self) -> None: self.id = f"{self.source}|{self.target}|{new_base64_uuid()}" -@dataclass(slots=True) +@dataclass class RiskGroup: """ Represents a shared-risk or failure domain, which may have nested children. @@ -92,7 +92,7 @@ class RiskGroup: attrs: Dict[str, Any] = field(default_factory=dict) -@dataclass(slots=True) +@dataclass class Network: """ A container for network nodes and links. @@ -312,7 +312,7 @@ def _compute_flow_single_group( sources (List[Node]): List of source nodes. sinks (List[Node]): List of sink nodes. shortest_path (bool): If True, restrict flows to shortest paths only. - flow_placement (FlowPlacement or None): Strategy for placing flow among + flow_placement (Optional[FlowPlacement]): Strategy for placing flow among parallel equal-cost paths. If None, defaults to FlowPlacement.PROPORTIONAL. Returns: @@ -450,8 +450,8 @@ def find_links( Search for links using optional regex patterns for source or target node names. Args: - source_regex (str or None): Regex to match link.source. If None, matches all sources. - target_regex (str or None): Regex to match link.target. If None, matches all targets. + source_regex (Optional[str]): Regex to match link.source. If None, matches all sources. + target_regex (Optional[str]): Regex to match link.target. If None, matches all targets. any_direction (bool): If True, also match reversed source/target. Returns: diff --git a/ngraph/results.py b/ngraph/results.py index 5422045..b2a737d 100644 --- a/ngraph/results.py +++ b/ngraph/results.py @@ -2,7 +2,7 @@ from typing import Any, Dict -@dataclass(slots=True) +@dataclass class Results: """ A container for storing arbitrary key-value data that arises during workflow steps. 
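
The two hunks below convert the Results docstrings from reST (:param:) to Google style. For orientation, a minimal usage sketch of the API they document; the no-argument constructor and empty initial store are assumptions read off the dataclass above, and the step/key names are invented:

    from ngraph.results import Results

    results = Results()  # assumed: default construction yields an empty store
    results.put("build_graph", "total_capacity", 42.0)  # store under (step, key)
    assert results.get("build_graph", "total_capacity") == 42.0
    assert results.get("build_graph", "missing", default=0.0) == 0.0  # fallback
    assert results.get_all("total_capacity") == {"build_graph": 42.0}
    assert results.to_dict() == {"build_graph": {"total_capacity": 42.0}}
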
@@ -23,9 +23,10 @@ def put(self, step_name: str, key: str, value: Any) -> None: Store a value under (step_name, key). If the step_name sub-dict does not exist, it is created. - :param step_name: The workflow step that produced the result. - :param key: A short label describing the data (e.g. "total_capacity"). - :param value: The actual data to store (can be any Python object). + Args: + step_name (str): The workflow step that produced the result. + key (str): A short label describing the data (e.g. "total_capacity"). + value (Any): The actual data to store (can be any Python object). """ if step_name not in self._store: self._store[step_name] = {} @@ -35,10 +36,13 @@ def get(self, step_name: str, key: str, default: Any = None) -> Any: """ Retrieve the value from (step_name, key). If the key is missing, return `default`. - :param step_name: The workflow step name. - :param key: The key under which the data was stored. - :param default: Value to return if the (step_name, key) is not present. - :return: The data, or `default` if not found. + Args: + step_name (str): The workflow step name. + key (str): The key under which the data was stored. + default (Any): Value to return if the (step_name, key) is not present. + + Returns: + Any: The data, or `default` if not found. """ return self._store.get(step_name, {}).get(key, default) @@ -46,8 +50,11 @@ def get_all(self, key: str) -> Dict[str, Any]: """ Retrieve a dictionary of {step_name: value} for all step_names that contain the specified key. - :param key: The key to look up in each step. - :return: A dict mapping step_name -> value for all steps that have stored something under 'key'. + Args: + key (str): The key to look up in each step. + + Returns: + Dict[str, Any]: A dict mapping step_name -> value for all steps that have stored something under 'key'. """ result = {} for step_name, data in self._store.items(): @@ -56,5 +63,10 @@ def get_all(self, key: str) -> Dict[str, Any]: return result def to_dict(self) -> Dict[str, Dict[str, Any]]: - """Return a dictionary representation of all stored results.""" + """ + Return a dictionary representation of all stored results. + + Returns: + Dict[str, Dict[str, Any]]: Dictionary representation of all stored results. + """ return {step: data.copy() for step, data in self._store.items()} diff --git a/ngraph/scenario.py b/ngraph/scenario.py index c43ff53..62dd230 100644 --- a/ngraph/scenario.py +++ b/ngraph/scenario.py @@ -1,23 +1,24 @@ from __future__ import annotations -import yaml from dataclasses import dataclass, field from typing import Any, Dict, List, Optional -from ngraph.network import Network, RiskGroup +import yaml + +from ngraph.blueprints import expand_network_dsl +from ngraph.components import ComponentsLibrary from ngraph.failure_policy import ( + FailureCondition, FailurePolicy, FailureRule, - FailureCondition, ) -from ngraph.traffic_demand import TrafficDemand +from ngraph.network import Network, RiskGroup from ngraph.results import Results -from ngraph.workflow.base import WorkflowStep, WORKFLOW_STEP_REGISTRY -from ngraph.blueprints import expand_network_dsl -from ngraph.components import ComponentsLibrary +from ngraph.traffic_demand import TrafficDemand +from ngraph.workflow.base import WORKFLOW_STEP_REGISTRY, WorkflowStep -@dataclass(slots=True) +@dataclass class Scenario: """ Represents a complete scenario for building and executing network workflows. 
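
Most of the import churn in this file (and throughout the diff) comes from ruff's isort rules, configured later in pyproject.toml (select includes "I", with known-first-party = ["ngraph"]). The enforced order is standard library, then third-party, then first-party, each group alphabetized and blank-line separated. An illustrative excerpt of the scenario.py block above:

    from dataclasses import dataclass, field  # 1) standard library
    from typing import Any, Dict, List, Optional

    import yaml  # 2) third-party

    from ngraph.network import Network, RiskGroup  # 3) first-party ("ngraph")
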
@@ -153,7 +154,7 @@ def from_yaml( if rg.disabled: network_obj.disable_risk_group(rg.name, recursive=True) - return cls( + return Scenario( network=network_obj, failure_policy=failure_policy, traffic_demands=traffic_demands, diff --git a/ngraph/traffic_demand.py b/ngraph/traffic_demand.py index 03c9476..75238ae 100644 --- a/ngraph/traffic_demand.py +++ b/ngraph/traffic_demand.py @@ -1,11 +1,11 @@ from dataclasses import dataclass, field from typing import Any, Dict, Optional -from ngraph.lib.flow_policy import FlowPolicyConfig, FlowPolicy +from ngraph.lib.flow_policy import FlowPolicy, FlowPolicyConfig from ngraph.network import new_base64_uuid -@dataclass(slots=True) +@dataclass class TrafficDemand: """ Represents a single traffic demand in a network. @@ -16,7 +16,7 @@ class TrafficDemand: priority (int): A priority class for this demand (default=0). demand (float): The total demand volume (default=0.0). demand_placed (float): The portion of this demand that has been placed so far. - flow_policy_config ((Optional[FlowPolicyConfig]): The routing/placement policy config. + flow_policy_config (Optional[FlowPolicyConfig]): The routing/placement policy config. flow_policy (Optional[FlowPolicy]): A fully constructed FlowPolicy instance. If provided, it overrides flow_policy_config. mode (str): Expansion mode for generating sub-demands. diff --git a/ngraph/traffic_manager.py b/ngraph/traffic_manager.py index 7420b23..4ca89c6 100644 --- a/ngraph/traffic_manager.py +++ b/ngraph/traffic_manager.py @@ -1,12 +1,12 @@ +import statistics from collections import defaultdict from dataclasses import dataclass, field -import statistics -from typing import Dict, List, Optional, Tuple, Union, NamedTuple +from typing import Dict, List, NamedTuple, Optional, Tuple, Union from ngraph.lib.algorithms import base from ngraph.lib.algorithms.flow_init import init_flow_graph from ngraph.lib.demand import Demand -from ngraph.lib.flow_policy import FlowPolicyConfig, FlowPolicy, get_flow_policy +from ngraph.lib.flow_policy import FlowPolicyConfig, get_flow_policy from ngraph.lib.graph import StrictMultiDiGraph from ngraph.network import Network, Node from ngraph.traffic_demand import TrafficDemand @@ -177,6 +177,13 @@ def place_all_demands( if isinstance(placement_rounds, str) and placement_rounds.lower() == "auto": placement_rounds = self._estimate_rounds() + # Ensure placement_rounds is an int for range() and arithmetic operations + placement_rounds_int = ( + int(placement_rounds) + if isinstance(placement_rounds, str) + else placement_rounds + ) + # Group demands by priority class prio_map: Dict[int, List[Demand]] = defaultdict(list) for dmd in self.demands: @@ -188,9 +195,9 @@ def place_all_demands( for priority_class in sorted_priorities: demands_in_class = prio_map[priority_class] - for round_idx in range(placement_rounds): + for round_idx in range(placement_rounds_int): placed_in_this_round = 0.0 - rounds_left = placement_rounds - round_idx + rounds_left = placement_rounds_int - round_idx for demand in demands_in_class: leftover = demand.volume - demand.placed_demand @@ -257,8 +264,8 @@ def get_flow_details(self) -> Dict[Tuple[int, int], Dict[str, object]]: for i, dmd in enumerate(self.demands): if not dmd.flow_policy: continue - for f_idx, flow_obj in dmd.flow_policy.flows.items(): - details[(i, f_idx)] = { + for j, (_f_idx, flow_obj) in enumerate(dmd.flow_policy.flows.items()): + details[(i, j)] = { "placed_flow": flow_obj.placed_flow, "src_node": flow_obj.src_node, "dst_node": flow_obj.dst_node, @@ -279,7 
+286,7 @@ def summarize_link_usage(self) -> Dict[str, float]: for edge_key, edge_tuple in self.graph.get_edges().items(): attr_dict = edge_tuple[3] - usage[edge_key] = attr_dict.get("flow", 0.0) + usage[str(edge_key)] = attr_dict.get("flow", 0.0) return usage @@ -331,8 +338,8 @@ def get_traffic_results(self, detailed: bool = False) -> List[TrafficResult]: total_volume=total_volume, placed_volume=placed_volume, unplaced_volume=unplaced_volume, - src=dmd.src_node, - dst=dmd.dst_node, + src=str(dmd.src_node), + dst=str(dmd.dst_node), ) ) diff --git a/ngraph/transform/__init__.py b/ngraph/transform/__init__.py index 0251f26..8f20805 100644 --- a/ngraph/transform/__init__.py +++ b/ngraph/transform/__init__.py @@ -1,15 +1,14 @@ from __future__ import annotations from ngraph.transform.base import ( - NetworkTransform, TRANSFORM_REGISTRY, + NetworkTransform, register_transform, ) - -from ngraph.transform.enable_nodes import EnableNodesTransform from ngraph.transform.distribute_external import ( DistributeExternalConnectivity, ) +from ngraph.transform.enable_nodes import EnableNodesTransform __all__ = [ "NetworkTransform", diff --git a/ngraph/transform/base.py b/ngraph/transform/base.py index dc2ff7b..f07a459 100644 --- a/ngraph/transform/base.py +++ b/ngraph/transform/base.py @@ -1,7 +1,7 @@ from __future__ import annotations import abc -from typing import Any, Dict, Type, Self +from typing import Any, Dict, Self, Type from ngraph.scenario import Scenario from ngraph.workflow.base import WorkflowStep, register_workflow_step diff --git a/ngraph/transform/distribute_external.py b/ngraph/transform/distribute_external.py index c434d03..1b18d6c 100644 --- a/ngraph/transform/distribute_external.py +++ b/ngraph/transform/distribute_external.py @@ -23,7 +23,7 @@ from ngraph.transform.base import NetworkTransform, register_transform -@dataclass(slots=True) +@dataclass class _StripeChooser: """Round-robin stripe selection.""" diff --git a/ngraph/transform/enable_nodes.py b/ngraph/transform/enable_nodes.py index b0b45b6..7f91ccd 100644 --- a/ngraph/transform/enable_nodes.py +++ b/ngraph/transform/enable_nodes.py @@ -3,8 +3,8 @@ import itertools from typing import List -from ngraph.transform.base import NetworkTransform, register_transform, Scenario from ngraph.network import Network, Node +from ngraph.transform.base import NetworkTransform, Scenario, register_transform @register_transform("EnableNodes") diff --git a/ngraph/workflow/__init__.py b/ngraph/workflow/__init__.py index 11b24d1..316c567 100644 --- a/ngraph/workflow/__init__.py +++ b/ngraph/workflow/__init__.py @@ -1,3 +1,5 @@ from .base import WorkflowStep, register_workflow_step from .build_graph import BuildGraph from .capacity_probe import CapacityProbe + +__all__ = ["WorkflowStep", "register_workflow_step", "BuildGraph", "CapacityProbe"] diff --git a/ngraph/workflow/base.py b/ngraph/workflow/base.py index 0cc5a3f..8239e7a 100644 --- a/ngraph/workflow/base.py +++ b/ngraph/workflow/base.py @@ -1,7 +1,8 @@ from __future__ import annotations -from dataclasses import dataclass, field + from abc import ABC, abstractmethod -from typing import Dict, Type, TYPE_CHECKING +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, Type if TYPE_CHECKING: # Only imported for type-checking; not at runtime, so no circular import occurs. 
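
The TYPE_CHECKING guard in the context above (and used again in capacity_probe.py below) is the standard recipe for annotating against a module that would otherwise create an import cycle. A minimal sketch of the pattern; the class body is illustrative, not the real WorkflowStep:

    from __future__ import annotations  # annotations stay strings at runtime

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Resolved only by Pyright/mypy; never imported at runtime, so
        # ngraph.workflow.base and ngraph.scenario cannot form a cycle.
        from ngraph.scenario import Scenario

    class WorkflowStep:
        def run(self, scenario: Scenario) -> None:
            raise NotImplementedError  # the real class is abstract (ABC)
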
diff --git a/ngraph/workflow/build_graph.py b/ngraph/workflow/build_graph.py index ea14cac..38123fb 100644 --- a/ngraph/workflow/build_graph.py +++ b/ngraph/workflow/build_graph.py @@ -1,4 +1,5 @@ from __future__ import annotations + from dataclasses import dataclass from typing import TYPE_CHECKING diff --git a/ngraph/workflow/capacity_probe.py b/ngraph/workflow/capacity_probe.py index b83fa6d..0cdd617 100644 --- a/ngraph/workflow/capacity_probe.py +++ b/ngraph/workflow/capacity_probe.py @@ -1,9 +1,10 @@ from __future__ import annotations -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Dict, Tuple, Pattern -from ngraph.workflow.base import WorkflowStep, register_workflow_step +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, Tuple + from ngraph.lib.algorithms.base import FlowPlacement +from ngraph.workflow.base import WorkflowStep, register_workflow_step if TYPE_CHECKING: from ngraph.scenario import Scenario @@ -26,8 +27,8 @@ class CapacityProbe(WorkflowStep): flow_placement (FlowPlacement): Handling strategy for parallel equal cost paths (default PROPORTIONAL). """ - source_path: Pattern[str] = "" - sink_path: Pattern[str] = "" + source_path: str = "" + sink_path: str = "" mode: str = "combine" probe_reverse: bool = False shortest_path: bool = False @@ -42,7 +43,7 @@ def __post_init__(self): raise ValueError( f"Invalid flow_placement '{self.flow_placement}'. " f"Valid values are: {valid_values}" - ) + ) from None def run(self, scenario: Scenario) -> None: """ diff --git a/notebooks/bb_fabric.ipynb b/notebooks/bb_fabric.ipynb index feb61fd..57ce7a6 100644 --- a/notebooks/bb_fabric.ipynb +++ b/notebooks/bb_fabric.ipynb @@ -7,14 +7,8 @@ "metadata": {}, "outputs": [], "source": [ - "from ngraph.scenario import Scenario\n", - "from ngraph.traffic_demand import TrafficDemand\n", - "from ngraph.traffic_manager import TrafficManager\n", - "from ngraph.lib.flow_policy import FlowPolicyConfig, FlowPolicy, FlowPlacement\n", - "from ngraph.lib.algorithms.base import PathAlg, EdgeSelect\n", - "from ngraph.failure_manager import FailureManager\n", - "from ngraph.failure_policy import FailurePolicy, FailureRule, FailureCondition\n", - "from ngraph.explorer import NetworkExplorer" + "from ngraph.explorer import NetworkExplorer\n", + "from ngraph.scenario import Scenario" ] }, { diff --git a/notebooks/lib_examples.ipynb b/notebooks/lib_examples.ipynb index 0388885..808e2dc 100644 --- a/notebooks/lib_examples.ipynb +++ b/notebooks/lib_examples.ipynb @@ -23,8 +23,8 @@ } ], "source": [ - "from ngraph.lib.graph import StrictMultiDiGraph\n", "from ngraph.lib.algorithms.max_flow import calc_max_flow\n", + "from ngraph.lib.graph import StrictMultiDiGraph\n", "\n", "# Create a graph\n", "g = StrictMultiDiGraph()\n", @@ -47,10 +47,14 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "from ngraph.lib.algorithms.base import FlowPlacement\n", + "from ngraph.lib.algorithms.max_flow import calc_max_flow\n", + "from ngraph.lib.graph import StrictMultiDiGraph\n", + "\n", "\"\"\"\n", "Tests max flow calculations on a graph with parallel edges.\n", "\n", @@ -74,10 +78,6 @@ "- Flow placement using an equal-balanced strategy on the shortest paths (expected flow: 2.0)\n", "\"\"\"\n", "\n", - "from ngraph.lib.graph import StrictMultiDiGraph\n", - "from ngraph.lib.algorithms.max_flow import calc_max_flow\n", - "from ngraph.lib.algorithms.base import FlowPlacement\n", - "\n", "g = 
StrictMultiDiGraph()\n", "for node in (\"A\", \"B\", \"C\", \"D\"):\n", " g.add_node(node)\n", @@ -108,10 +108,15 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "from ngraph.lib.algorithms.flow_init import init_flow_graph\n", + "from ngraph.lib.demand import Demand\n", + "from ngraph.lib.flow_policy import FlowPolicyConfig, get_flow_policy\n", + "from ngraph.lib.graph import StrictMultiDiGraph\n", + "\n", "\"\"\"\n", "Demonstrates traffic engineering by placing two demands on a network.\n", "\n", @@ -131,11 +136,6 @@ "- The test verifies that each demand is fully placed at 20 units.\n", "\"\"\"\n", "\n", - "from ngraph.lib.graph import StrictMultiDiGraph\n", - "from ngraph.lib.algorithms.flow_init import init_flow_graph\n", - "from ngraph.lib.flow_policy import FlowPolicyConfig, get_flow_policy\n", - "from ngraph.lib.demand import Demand\n", - "\n", "# Build the graph.\n", "g = StrictMultiDiGraph()\n", "for node in (\"A\", \"B\", \"C\"):\n", @@ -188,7 +188,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.13.1" + "version": "3.13.3" } }, "nbformat": 4, diff --git a/notebooks/scenario_dc.ipynb b/notebooks/scenario_dc.ipynb index 5aa45fd..8747ad4 100644 --- a/notebooks/scenario_dc.ipynb +++ b/notebooks/scenario_dc.ipynb @@ -6,14 +6,8 @@ "metadata": {}, "outputs": [], "source": [ - "from ngraph.scenario import Scenario\n", - "from ngraph.traffic_demand import TrafficDemand\n", - "from ngraph.traffic_manager import TrafficManager\n", - "from ngraph.lib.flow_policy import FlowPolicyConfig, FlowPolicy, FlowPlacement\n", - "from ngraph.lib.algorithms.base import PathAlg, EdgeSelect\n", - "from ngraph.failure_manager import FailureManager\n", - "from ngraph.failure_policy import FailurePolicy, FailureRule, FailureCondition\n", - "from ngraph.explorer import NetworkExplorer" + "from ngraph.explorer import NetworkExplorer\n", + "from ngraph.scenario import Scenario" ] }, { diff --git a/notebooks/simple.ipynb b/notebooks/simple.ipynb index 11ecf47..1c42e37 100644 --- a/notebooks/simple.ipynb +++ b/notebooks/simple.ipynb @@ -7,14 +7,8 @@ "metadata": {}, "outputs": [], "source": [ - "from ngraph.scenario import Scenario\n", - "from ngraph.traffic_demand import TrafficDemand\n", - "from ngraph.traffic_manager import TrafficManager\n", - "from ngraph.lib.flow_policy import FlowPolicyConfig, FlowPolicy, FlowPlacement\n", - "from ngraph.lib.algorithms.base import PathAlg, EdgeSelect\n", - "from ngraph.failure_manager import FailureManager\n", - "from ngraph.failure_policy import FailurePolicy, FailureRule, FailureCondition\n", - "from ngraph.explorer import NetworkExplorer" + "from ngraph.lib.flow_policy import FlowPlacement\n", + "from ngraph.scenario import Scenario" ] }, { diff --git a/notebooks/small_demo.ipynb b/notebooks/small_demo.ipynb index ce1f9a1..15289bb 100644 --- a/notebooks/small_demo.ipynb +++ b/notebooks/small_demo.ipynb @@ -6,13 +6,12 @@ "metadata": {}, "outputs": [], "source": [ + "from ngraph.failure_manager import FailureManager\n", + "from ngraph.failure_policy import FailurePolicy, FailureRule\n", + "from ngraph.lib.flow_policy import FlowPlacement, FlowPolicyConfig\n", "from ngraph.scenario import Scenario\n", "from ngraph.traffic_demand import TrafficDemand\n", - "from ngraph.traffic_manager import TrafficManager\n", - "from ngraph.lib.flow_policy import FlowPolicyConfig, FlowPolicy, FlowPlacement\n", - "from ngraph.lib.algorithms.base import PathAlg, 
EdgeSelect\n", - "from ngraph.failure_manager import FailureManager\n", - "from ngraph.failure_policy import FailurePolicy, FailureRule, FailureCondition" + "from ngraph.traffic_manager import TrafficManager" ] }, { @@ -187,7 +186,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -210,10 +209,11 @@ } ], "source": [ + "from collections import defaultdict\n", + "\n", + "import matplotlib.pyplot as plt\n", "import pandas as pd\n", "import seaborn as sns\n", - "import matplotlib.pyplot as plt\n", - "from collections import defaultdict\n", "\n", "\n", "def plot_priority_cdf(results, complementary: bool = True):\n", @@ -240,7 +240,7 @@ " # 1) Aggregate total placed volume for each iteration & priority\n", " # (similar logic as before, but we'll directly store iteration-level sums).\n", " volume_per_iter_priority = defaultdict(float)\n", - " for (src, dst, priority), data_list in by_src_dst.items():\n", + " for (_src, _dst, priority), data_list in by_src_dst.items():\n", " for entry in data_list:\n", " it = entry[\"iteration\"]\n", " volume_per_iter_priority[(it, priority)] += entry[\"placed_volume\"]\n", diff --git a/pyproject.toml b/pyproject.toml index 74e880b..55c7094 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,10 +11,13 @@ readme = "README.md" authors = [{ name = "Andrey Golovanov" }] license = "MIT" license-files = ["LICENSE"] -requires-python = ">=3.9" +requires-python = ">=3.11" classifiers = [ "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Operating System :: OS Independent", ] @@ -29,30 +32,75 @@ dependencies = [ "seaborn", ] -# Dev/CI extras +# Dev / CI extras [project.optional-dependencies] dev = [ + # testing "pytest>=8", "pytest-cov", "pytest-benchmark", "pytest-mock", - "black", - "isort", - "pylint", + # docs "mkdocs-material", "pdoc", + # style + type checking + "ruff==0.11.13", + "pyright", + # pre-commit hooks + "pre-commit", + # build + "build", + # publishing + "twine", ] + [project.scripts] ngraph = "ngraph.cli:main" - # --------------------------------------------------------------------- # Pytest flags [tool.pytest.ini_options] addopts = "--cov=ngraph --cov-fail-under=85 --cov-report term-missing" # --------------------------------------------------------------------- -# Tell setuptools to package ONLY the 'ngraph' package tree +# Package discovery [tool.setuptools.packages.find] -include = ["ngraph*"] # anything under ngraph/ +include = ["ngraph*"] exclude = ["tests*", "notebooks*", "examples*", "dev*"] + +# --------------------------------------------------------------------- +# Ruff +[tool.ruff] +line-length = 88 +indent-width = 4 + +[tool.ruff.lint] +select = ["E4", "E7", "E9", "F", "B", "I"] # core + Bugbear + isort +ignore = ["E501"] # long lines handled by formatter +fixable = ["ALL"] + +[tool.ruff.lint.isort] +known-first-party = ["ngraph"] + +[tool.ruff.format] +quote-style = "double" +skip-magic-trailing-comma = false + +# --------------------------------------------------------------------- +# Pyright / Pylance +[tool.pyright] +typeCheckingMode = "standard" # balanced level +pythonVersion = "3.11" +exclude = [ + "tests/**", # tests often use dynamic patterns + "**/venv/**", # virtual environments (generic) + "**/*venv/**", # virtual environments (various naming) + "build/**", # build artifacts + "dist/**", # distribution files + "**/__pycache__/**", # Python 
cache files + "**/*.egg-info/**" # egg info directories +] +reportMissingTypeStubs = "warning" +reportUnknownMemberType = "none" +reportUnknownVariableType = "none" +reportUnknownArgumentType = "none" diff --git a/run.sh b/run.sh index fde2539..596d7c0 100755 --- a/run.sh +++ b/run.sh @@ -206,4 +206,4 @@ case "$COMMAND" in printf "%b" "$USAGE" >&2 exit 2 ;; -esac \ No newline at end of file +esac diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..1035f11 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,2 @@ +# Import all fixtures from sample_graphs to make them available to all test files +pytest_plugins = ["tests.lib.algorithms.sample_graphs"] diff --git a/tests/lib/algorithms/test_calc_capacity.py b/tests/lib/algorithms/test_calc_capacity.py index 513c6d0..1eca6a0 100644 --- a/tests/lib/algorithms/test_calc_capacity.py +++ b/tests/lib/algorithms/test_calc_capacity.py @@ -1,10 +1,15 @@ # pylint: disable=protected-access,invalid-name +from typing import Dict, List + import pytest +from ngraph.lib.algorithms.calc_capacity import FlowPlacement, calc_graph_capacity from ngraph.lib.algorithms.flow_init import init_flow_graph from ngraph.lib.algorithms.spf import spf -from ngraph.lib.algorithms.calc_capacity import calc_graph_capacity, FlowPlacement -from tests.lib.algorithms.sample_graphs import * +from ngraph.lib.graph import EdgeID, NodeID, StrictMultiDiGraph + +# Type alias to ensure consistency with library expectations +PredDict = Dict[NodeID, Dict[NodeID, List[EdgeID]]] class TestGraphCapacity: @@ -40,7 +45,7 @@ def test_calc_graph_capacity_no_cap(self): g.add_edge("A", "B", key=0, capacity=0) g.add_edge("B", "C", key=1, capacity=1) r = init_flow_graph(g) - pred = {"A": {}, "B": {"A": [0]}, "C": {"B": [1]}} + pred: PredDict = {"A": {}, "B": {"A": [0]}, "C": {"B": [1]}} # Expected max_flow = 0 because there is no capacity along the path max_flow, flow_dict = calc_graph_capacity( @@ -50,6 +55,7 @@ def test_calc_graph_capacity_no_cap(self): def test_calc_graph_capacity_line1(self, line1): _, pred = spf(line1, "A") + pred: PredDict = pred # Type annotation for clarity r = init_flow_graph(line1) max_flow, flow_dict = calc_graph_capacity( diff --git a/tests/lib/algorithms/test_edge_select.py b/tests/lib/algorithms/test_edge_select.py index 363aa78..1724af6 100644 --- a/tests/lib/algorithms/test_edge_select.py +++ b/tests/lib/algorithms/test_edge_select.py @@ -1,11 +1,12 @@ from math import isclose -import pytest -from unittest.mock import MagicMock from typing import Dict, Set, Tuple +from unittest.mock import MagicMock + +import pytest -from ngraph.lib.graph import StrictMultiDiGraph, NodeID, EdgeID, AttrDict +from ngraph.lib.algorithms.base import Cost from ngraph.lib.algorithms.edge_select import EdgeSelect, edge_select_fabric -from ngraph.lib.algorithms.base import Cost, MIN_CAP +from ngraph.lib.graph import AttrDict, EdgeID, NodeID, StrictMultiDiGraph @pytest.fixture diff --git a/tests/lib/algorithms/test_max_flow.py b/tests/lib/algorithms/test_max_flow.py index e32e745..d831a48 100644 --- a/tests/lib/algorithms/test_max_flow.py +++ b/tests/lib/algorithms/test_max_flow.py @@ -1,14 +1,9 @@ import pytest from pytest import approx -from ngraph.lib.graph import StrictMultiDiGraph from ngraph.lib.algorithms.base import FlowPlacement from ngraph.lib.algorithms.max_flow import calc_max_flow -from tests.lib.algorithms.sample_graphs import ( - line1, - square4, - graph5, -) +from ngraph.lib.graph import StrictMultiDiGraph class TestMaxFlowBasic: diff --git 
a/tests/lib/algorithms/test_path_utils.py b/tests/lib/algorithms/test_path_utils.py index 9f15ae8..8a33012 100644 --- a/tests/lib/algorithms/test_path_utils.py +++ b/tests/lib/algorithms/test_path_utils.py @@ -1,4 +1,3 @@ -import pytest from ngraph.lib.algorithms.path_utils import resolve_to_paths diff --git a/tests/lib/algorithms/test_place_flow.py b/tests/lib/algorithms/test_place_flow.py index 5156985..27570fd 100644 --- a/tests/lib/algorithms/test_place_flow.py +++ b/tests/lib/algorithms/test_place_flow.py @@ -1,13 +1,10 @@ -import pytest +from ngraph.lib.algorithms.calc_capacity import FlowPlacement from ngraph.lib.algorithms.flow_init import init_flow_graph from ngraph.lib.algorithms.place_flow import ( place_flow_on_graph, remove_flow_from_graph, ) -from ngraph.lib.algorithms.calc_capacity import FlowPlacement - from ngraph.lib.algorithms.spf import spf -from tests.lib.algorithms.sample_graphs import * class TestPlaceFlowOnGraph: @@ -144,7 +141,7 @@ def test_place_flow_on_graph_line1_equal(self, line1): assert flow_placement_meta.nodes == {"A", "C", "B"} assert flow_placement_meta.edges == {0, 2, 4} - def test_place_flow_on_graph_line1_proportional(self, line1): + def test_place_flow_on_graph_line1_proportional_partial(self, line1): """ In two steps, place 3 units of flow, then attempt another 3. Check partial flow placement when capacity is partially exhausted. diff --git a/tests/lib/algorithms/test_spf.py b/tests/lib/algorithms/test_spf.py index c967dd4..a990247 100644 --- a/tests/lib/algorithms/test_spf.py +++ b/tests/lib/algorithms/test_spf.py @@ -1,7 +1,5 @@ -import pytest -from ngraph.lib.algorithms.spf import spf, ksp from ngraph.lib.algorithms.edge_select import EdgeSelect, edge_select_fabric -from tests.lib.algorithms.sample_graphs import * +from ngraph.lib.algorithms.spf import ksp, spf class TestSPF: @@ -137,15 +135,14 @@ def test_ksp_5(self, graph5): """ paths = list(ksp(graph5, "A", "B", multipath=True)) visited = set() - for costs, pred in paths: + for _costs, pred in paths: edge_ids = tuple( - sorted( - edge_id - for nbrs in pred.values() - for edge_list in nbrs.values() - for edge_id in edge_list - ) + str(edge_id) + for nbrs in pred.values() + for edge_list in nbrs.values() + for edge_id in edge_list ) + edge_ids = tuple(sorted(edge_ids)) if edge_ids in visited: raise Exception(f"Duplicate path found: {edge_ids}") visited.add(edge_ids) diff --git a/tests/lib/algorithms/test_spf_bench.py b/tests/lib/algorithms/test_spf_bench.py index 5b12c6c..69d5624 100644 --- a/tests/lib/algorithms/test_spf_bench.py +++ b/tests/lib/algorithms/test_spf_bench.py @@ -1,9 +1,10 @@ import random -import pytest + import networkx as nx +import pytest -from ngraph.lib.graph import StrictMultiDiGraph from ngraph.lib.algorithms.spf import spf +from ngraph.lib.graph import StrictMultiDiGraph random.seed(0) diff --git a/tests/lib/test_demand.py b/tests/lib/test_demand.py index c286f21..010f8c1 100644 --- a/tests/lib/test_demand.py +++ b/tests/lib/test_demand.py @@ -1,10 +1,10 @@ import pytest -from ngraph.lib.algorithms.base import EdgeSelect, PathAlg, FlowPlacement + +from ngraph.lib.algorithms.base import EdgeSelect, FlowPlacement, PathAlg from ngraph.lib.algorithms.flow_init import init_flow_graph from ngraph.lib.demand import Demand -from ngraph.lib.flow_policy import FlowPolicy, FlowPolicyConfig, get_flow_policy from ngraph.lib.flow import FlowIndex -from .algorithms.sample_graphs import line1, square1, square2, triangle1, graph3 +from ngraph.lib.flow_policy import FlowPolicy, 
FlowPolicyConfig, get_flow_policy def create_flow_policy( @@ -14,7 +14,7 @@ def create_flow_policy( edge_select: EdgeSelect, multipath: bool, max_flow_count: int = None, - max_path_cost_factor: float = None + max_path_cost_factor: float = None, ) -> FlowPolicy: """Helper to create a FlowPolicy for testing.""" return FlowPolicy( diff --git a/tests/lib/test_flow.py b/tests/lib/test_flow.py index 2b85631..63f1ebe 100644 --- a/tests/lib/test_flow.py +++ b/tests/lib/test_flow.py @@ -1,15 +1,8 @@ -from ngraph.lib.algorithms.base import ( - EdgeSelect, - PathAlg, - FlowPlacement, - MIN_FLOW, -) +from ngraph.lib.algorithms.base import FlowPlacement from ngraph.lib.algorithms.flow_init import init_flow_graph from ngraph.lib.flow import Flow, FlowIndex from ngraph.lib.path_bundle import PathBundle -from .algorithms.sample_graphs import * - class TestFlow: def test_flow_1(self, square1): @@ -17,7 +10,7 @@ def test_flow_1(self, square1): path_bundle = PathBundle( "A", "C", {"A": {}, "C": {"B": [1]}, "B": {"A": [0]}}, 2 ) - flow = Flow(path_bundle, ("A", "C", "test_flow")) + flow = Flow(path_bundle, FlowIndex("A", "C", "test_flow", 0)) placed_flow, remaining_flow = flow.place_flow( flow_graph, 0, flow_placement=FlowPlacement.EQUAL_BALANCED ) @@ -29,7 +22,7 @@ def test_flow_2(self, square1): path_bundle = PathBundle( "A", "C", {"A": {}, "C": {"B": [1]}, "B": {"A": [0]}}, 2 ) - flow = Flow(path_bundle, ("A", "C", "test_flow")) + flow = Flow(path_bundle, FlowIndex("A", "C", "test_flow", 0)) placed_flow, remaining_flow = flow.place_flow( flow_graph, 1, flow_placement=FlowPlacement.EQUAL_BALANCED ) @@ -41,7 +34,7 @@ def test_flow_3(self, square1): path_bundle = PathBundle( "A", "C", {"A": {}, "C": {"B": [1]}, "B": {"A": [0]}}, 2 ) - flow = Flow(path_bundle, ("A", "C", "test_flow")) + flow = Flow(path_bundle, FlowIndex("A", "C", "test_flow", 0)) placed_flow, remaining_flow = flow.place_flow( flow_graph, 1, flow_placement=FlowPlacement.EQUAL_BALANCED ) diff --git a/tests/lib/test_flow_policy.py b/tests/lib/test_flow_policy.py index 09a5340..934ec25 100644 --- a/tests/lib/test_flow_policy.py +++ b/tests/lib/test_flow_policy.py @@ -1,16 +1,16 @@ +import pytest + from ngraph.lib.algorithms.base import ( + MIN_FLOW, EdgeSelect, - PathAlg, FlowPlacement, - MIN_FLOW, + PathAlg, ) from ngraph.lib.algorithms.flow_init import init_flow_graph -from ngraph.lib.flow import Flow, FlowIndex +from ngraph.lib.flow import FlowIndex from ngraph.lib.flow_policy import FlowPolicy from ngraph.lib.path_bundle import PathBundle -from .algorithms.sample_graphs import * - class TestFlowPolicy: def test_flow_policy_1(self): @@ -30,7 +30,8 @@ def test_flow_policy_get_path_bundle_1(self, square1): multipath=True, ) r = init_flow_graph(square1) - path_bundle: PathBundle = flow_policy._get_path_bundle(r, "A", "C") + path_bundle = flow_policy._get_path_bundle(r, "A", "C") + assert path_bundle is not None assert path_bundle.pred == {"A": {}, "C": {"B": [1]}, "B": {"A": [0]}} assert path_bundle.edges == {0, 1} assert path_bundle.nodes == {"A", "B", "C"} @@ -43,7 +44,8 @@ def test_flow_policy_get_path_bundle_2(self, square1): multipath=True, ) r = init_flow_graph(square1) - path_bundle: PathBundle = flow_policy._get_path_bundle(r, "A", "C", 2) + path_bundle = flow_policy._get_path_bundle(r, "A", "C", 2) + assert path_bundle is not None assert path_bundle.pred == {"A": {}, "C": {"D": [3]}, "D": {"A": [2]}} assert path_bundle.edges == {2, 3} assert path_bundle.nodes == {"D", "C", "A"} @@ -57,6 +59,7 @@ def test_flow_policy_create_flow_1(self, 
square1): ) r = init_flow_graph(square1) flow = flow_policy._create_flow(r, "A", "C", "test_flow") + assert flow is not None assert flow.path_bundle.pred == {"A": {}, "C": {"B": [1]}, "B": {"A": [0]}} def test_flow_policy_create_flow_2(self, square1): @@ -68,6 +71,7 @@ def test_flow_policy_create_flow_2(self, square1): ) r = init_flow_graph(square1) flow = flow_policy._create_flow(r, "A", "C", "test_flow", 2) + assert flow is not None assert flow.path_bundle.pred == {"A": {}, "C": {"D": [3]}, "D": {"A": [2]}} def test_flow_policy_create_flow_3(self, square1): @@ -754,7 +758,8 @@ def test_flow_policy_delete_flow(self, square1): flow_policy.place_demand(r, "A", "C", "test_flow", 2) initial_count = len(flow_policy.flows) # Pick any flow_index that was created - flow_index_to_delete = next(iter(flow_policy.flows.keys())) + flow_index_tuple = next(iter(flow_policy.flows.keys())) + flow_index_to_delete = FlowIndex(*flow_index_tuple) flow_policy._delete_flow(r, flow_index_to_delete) assert len(flow_policy.flows) == initial_count - 1 @@ -779,21 +784,23 @@ def test_flow_policy_reoptimize_flow(self, square1): placed_flow, remaining = flow_policy.place_demand(r, "A", "C", "test_flow", 1) assert placed_flow == 1 # We'll pick the first flow index - flow_index_to_reopt = next(iter(flow_policy.flows.keys())) + flow_index_tuple = next(iter(flow_policy.flows.keys())) + flow_index_to_reopt = FlowIndex(*flow_index_tuple) # Reoptimize with additional "headroom" that might force a different path new_flow = flow_policy._reoptimize_flow(r, flow_index_to_reopt, headroom=1) # Because the alternative path has capacity=2, we expect re-optimization to succeed assert new_flow is not None # The old flow index still references the new flow - assert flow_policy.flows[flow_index_to_reopt] == new_flow + assert flow_policy.flows[flow_index_tuple] == new_flow # Now try re-optimizing with very large headroom; no path should be found, so revert - flow_index_to_reopt2 = next(iter(flow_policy.flows.keys())) - flow_before_reopt = flow_policy.flows[flow_index_to_reopt2] + flow_index_tuple2 = next(iter(flow_policy.flows.keys())) + flow_index_to_reopt2 = FlowIndex(*flow_index_tuple2) + flow_before_reopt = flow_policy.flows[flow_index_tuple2] reverted_flow = flow_policy._reoptimize_flow( r, flow_index_to_reopt2, headroom=10 ) # We expect a revert -> None returned assert reverted_flow is None # The flow in the dictionary should still be the same old flow - assert flow_policy.flows[flow_index_to_reopt2] == flow_before_reopt + assert flow_policy.flows[flow_index_tuple2] == flow_before_reopt diff --git a/tests/lib/test_graph.py b/tests/lib/test_graph.py index 736f321..0a1a20f 100644 --- a/tests/lib/test_graph.py +++ b/tests/lib/test_graph.py @@ -1,5 +1,5 @@ -import pytest import networkx as nx +import pytest from ngraph.lib.graph import StrictMultiDiGraph diff --git a/tests/lib/test_io.py b/tests/lib/test_io.py index 7a212ed..8e48bc3 100644 --- a/tests/lib/test_io.py +++ b/tests/lib/test_io.py @@ -1,10 +1,11 @@ import pytest + from ngraph.lib.graph import StrictMultiDiGraph from ngraph.lib.io import ( - graph_to_node_link, - node_link_to_graph, edgelist_to_graph, graph_to_edgelist, + graph_to_node_link, + node_link_to_graph, ) @@ -41,7 +42,7 @@ def test_graph_to_node_link_basic(): # Check one link's structure # For example, find the link with key=e1 - link_e1 = next(l for l in links if l["key"] == e1) + link_e1 = next(link for link in links if link["key"] == e1) assert link_e1["source"] == 0 # "A" => index 0 assert link_e1["target"] == 1 
# "B" => index 1 assert link_e1["attr"] == {"weight": "10"} or {"weight": 10} @@ -139,7 +140,7 @@ def test_edgelist_to_graph_basic(): # Check each edge's attribute e_map = g.get_edges() # We can't assume numeric IDs, just find them by iteration - for eid, (src, dst, _, attrs) in e_map.items(): + for _eid, (src, dst, _, attrs) in e_map.items(): w = attrs["weight"] if src == "A" and dst == "B": assert w == "10" @@ -192,10 +193,10 @@ def test_graph_to_edgelist_basic(): g.add_node("B") g.add_node("C") - e1 = g.add_edge("A", "B", cost=10) - e2 = g.add_edge("B", "C", cost=20) + g.add_edge("A", "B", cost=10) + g.add_edge("B", "C", cost=20) # No custom keys for the rest -> random base64 IDs - e3 = g.add_edge("C", "A", label="X") + g.add_edge("C", "A", label="X") lines = graph_to_edgelist(g) # By default: [src, dst, key] + sorted(attributes) @@ -214,7 +215,7 @@ def test_graph_to_edgelist_basic(): # but for e1, e2 we have "cost" attribute, for e3 we have "label" # Check adjacency edges_seen = set() - for eid, (s, d, _, attrs) in e2_map.items(): + for _eid, (s, d, _, _attrs) in e2_map.items(): edges_seen.add((s, d)) # if there's a "cost" in attrs, it might be "10" or "20" # if there's a "label" in attrs, it's "X" @@ -229,7 +230,7 @@ def test_graph_to_edgelist_columns(): g = StrictMultiDiGraph() g.add_node("A") g.add_node("B") - eAB = g.add_edge("A", "B", cost=10, color="red") + g.add_edge("A", "B", cost=10, color="red") lines = graph_to_edgelist(g, columns=["src", "dst", "cost", "color"], separator=",") # We expect one line: "A,B,10,red" diff --git a/tests/lib/test_path.py b/tests/lib/test_path.py index a6f6347..43a2a8d 100644 --- a/tests/lib/test_path.py +++ b/tests/lib/test_path.py @@ -1,6 +1,7 @@ import pytest -from ngraph.lib.graph import StrictMultiDiGraph, EdgeID + from ngraph.lib.algorithms.base import PathTuple +from ngraph.lib.graph import StrictMultiDiGraph from ngraph.lib.path import Path diff --git a/tests/lib/test_path_bundle.py b/tests/lib/test_path_bundle.py index 09000f0..7c09d66 100644 --- a/tests/lib/test_path_bundle.py +++ b/tests/lib/test_path_bundle.py @@ -1,9 +1,10 @@ -import pytest from typing import List, Set -from ngraph.lib.graph import StrictMultiDiGraph +import pytest + +from ngraph.lib.algorithms.base import EdgeSelect +from ngraph.lib.graph import StrictMultiDiGraph from ngraph.lib.path_bundle import Path, PathBundle -from ngraph.lib.algorithms.base import EdgeSelect, Cost @pytest.fixture diff --git a/tests/lib/test_util.py b/tests/lib/test_util.py index 7ed6fb4..149d6bb 100644 --- a/tests/lib/test_util.py +++ b/tests/lib/test_util.py @@ -1,8 +1,7 @@ -import pytest import networkx as nx from ngraph.lib.graph import StrictMultiDiGraph -from ngraph.lib.util import to_digraph, from_digraph, to_graph, from_graph +from ngraph.lib.util import from_digraph, from_graph, to_digraph, to_graph def create_sample_graph(with_attrs: bool = False) -> StrictMultiDiGraph: diff --git a/tests/scenarios/scenario_3.yaml b/tests/scenarios/scenario_3.yaml index 2eeaeb8..298c381 100644 --- a/tests/scenarios/scenario_3.yaml +++ b/tests/scenarios/scenario_3.yaml @@ -75,7 +75,7 @@ network: attrs: shared_risk_groups: ["SpineSRG"] hw_component: "400G-LR4" - + # Example node overrides that assign SRGs and hardware types node_overrides: - path: my_clos1/b1/t1 @@ -88,7 +88,7 @@ network: shared_risk_groups: ["clos2-b2t1-SRG"] hw_component: "LeafHW-B" - - path: my_clos1/spine/t3.* + - path: my_clos1/spine/t3.* attrs: shared_risk_groups: ["clos1-spine-SRG"] hw_component: "SpineHW" diff --git 
a/tests/scenarios/test_scenario_1.py b/tests/scenarios/test_scenario_1.py index 39d1a1a..a3958db 100644 --- a/tests/scenarios/test_scenario_1.py +++ b/tests/scenarios/test_scenario_1.py @@ -1,9 +1,8 @@ -import pytest from pathlib import Path +from ngraph.failure_policy import FailurePolicy from ngraph.lib.graph import StrictMultiDiGraph from ngraph.scenario import Scenario -from ngraph.failure_policy import FailurePolicy def test_scenario_1_build_graph() -> None: @@ -28,32 +27,32 @@ def test_scenario_1_build_graph() -> None: # 4) Retrieve the graph built by BuildGraph graph = scenario.results.get("build_graph", "graph") - assert isinstance( - graph, StrictMultiDiGraph - ), "Expected a StrictMultiDiGraph in scenario.results under key ('build_graph', 'graph')." + assert isinstance(graph, StrictMultiDiGraph), ( + "Expected a StrictMultiDiGraph in scenario.results under key ('build_graph', 'graph')." + ) # 5) Check the total number of nodes matches what's listed in scenario_1.yaml # For a 6-node scenario, we expect 6 nodes in the final Nx graph. expected_nodes = 6 actual_nodes = len(graph.nodes) - assert ( - actual_nodes == expected_nodes - ), f"Expected {expected_nodes} nodes, found {actual_nodes}" + assert actual_nodes == expected_nodes, ( + f"Expected {expected_nodes} nodes, found {actual_nodes}" + ) # 6) Each physical link from the YAML becomes 2 directed edges in MultiDiGraph. # If the YAML has 10 link definitions, we expect 2 * 10 = 20 directed edges. expected_links = 10 expected_nx_edges = expected_links * 2 actual_edges = len(graph.edges) - assert ( - actual_edges == expected_nx_edges - ), f"Expected {expected_nx_edges} directed edges, found {actual_edges}" + assert actual_edges == expected_nx_edges, ( + f"Expected {expected_nx_edges} directed edges, found {actual_edges}" + ) # 7) Verify the traffic demands. expected_demands = 4 - assert ( - len(scenario.traffic_demands) == expected_demands - ), f"Expected {expected_demands} traffic demands." + assert len(scenario.traffic_demands) == expected_demands, ( + f"Expected {expected_demands} traffic demands." + ) # 8) Check the multi-rule failure policy for "any single link". # This should have exactly 1 rule that picks exactly 1 link from all links. diff --git a/tests/scenarios/test_scenario_2.py b/tests/scenarios/test_scenario_2.py index 60e6bec..956d9c0 100644 --- a/tests/scenarios/test_scenario_2.py +++ b/tests/scenarios/test_scenario_2.py @@ -1,9 +1,8 @@ -import pytest from pathlib import Path +from ngraph.failure_policy import FailurePolicy from ngraph.lib.graph import StrictMultiDiGraph from ngraph.scenario import Scenario -from ngraph.failure_policy import FailurePolicy def test_scenario_2_build_graph() -> None: @@ -29,9 +28,9 @@ def test_scenario_2_build_graph() -> None: # 4) Retrieve the graph built by BuildGraph graph = scenario.results.get("build_graph", "graph") - assert isinstance( - graph, StrictMultiDiGraph - ), "Expected a StrictMultiDiGraph in scenario.results under key ('build_graph', 'graph')." + assert isinstance(graph, StrictMultiDiGraph), ( + "Expected a StrictMultiDiGraph in scenario.results under key ('build_graph', 'graph')." 
+ ) # 5) Verify total node count after blueprint expansion # city_cloud blueprint: (4 leaves + 6 spines + 4 edge_nodes) = 14 @@ -40,9 +39,9 @@ def test_scenario_2_build_graph() -> None: # => 14 + 1 + 4 = 19 total expected_nodes = 19 actual_nodes = len(graph.nodes) - assert ( - actual_nodes == expected_nodes - ), f"Expected {expected_nodes} nodes, found {actual_nodes}" + assert actual_nodes == expected_nodes, ( + f"Expected {expected_nodes} nodes, found {actual_nodes}" + ) # 6) Verify total physical links before direction is applied to Nx # - clos_2tier adjacency: 4 leaf * 6 spine = 24 @@ -60,15 +59,15 @@ def test_scenario_2_build_graph() -> None: expected_links = 56 expected_nx_edges = expected_links * 2 actual_edges = len(graph.edges) - assert ( - actual_edges == expected_nx_edges - ), f"Expected {expected_nx_edges} directed edges, found {actual_edges}" + assert actual_edges == expected_nx_edges, ( + f"Expected {expected_nx_edges} directed edges, found {actual_edges}" + ) # 7) Verify the traffic demands (should have 4) expected_demands = 4 - assert ( - len(scenario.traffic_demands) == expected_demands - ), f"Expected {expected_demands} traffic demands." + assert len(scenario.traffic_demands) == expected_demands, ( + f"Expected {expected_demands} traffic demands." + ) # 8) Check the single-rule failure policy "anySingleLink" policy: FailurePolicy = scenario.failure_policy @@ -88,9 +87,9 @@ def test_scenario_2_build_graph() -> None: # 9) Check presence of key expanded nodes # For example: the overridden spine node "myspine-6" under "SEA/clos_instance/spine" # and the single node blueprint "SFO/single/single-1". - assert ( - "SEA/clos_instance/spine/myspine-6" in scenario.network.nodes - ), "Missing expected overridden spine node (myspine-6) in expanded blueprint." - assert ( - "SFO/single/single-1" in scenario.network.nodes - ), "Missing expected single-node blueprint expansion under SFO." + assert "SEA/clos_instance/spine/myspine-6" in scenario.network.nodes, ( + "Missing expected overridden spine node (myspine-6) in expanded blueprint." + ) + assert "SFO/single/single-1" in scenario.network.nodes, ( + "Missing expected single-node blueprint expansion under SFO." + ) diff --git a/tests/scenarios/test_scenario_3.py b/tests/scenarios/test_scenario_3.py index bd73856..08cfeb0 100644 --- a/tests/scenarios/test_scenario_3.py +++ b/tests/scenarios/test_scenario_3.py @@ -1,9 +1,8 @@ -import pytest from pathlib import Path +from ngraph.failure_policy import FailurePolicy from ngraph.lib.graph import StrictMultiDiGraph from ngraph.scenario import Scenario -from ngraph.failure_policy import FailurePolicy def test_scenario_3_build_graph_and_capacity_probe() -> None: @@ -32,17 +31,17 @@ def test_scenario_3_build_graph_and_capacity_probe() -> None: # 4) Retrieve the graph from the BuildGraph step graph = scenario.results.get("build_graph", "graph") - assert isinstance( - graph, StrictMultiDiGraph - ), "Expected a StrictMultiDiGraph in scenario.results under key ('build_graph', 'graph')." + assert isinstance(graph, StrictMultiDiGraph), ( + "Expected a StrictMultiDiGraph in scenario.results under key ('build_graph', 'graph')." + ) # 5) Verify total node count: # Each 3-tier CLOS instance has 32 nodes -> 2 instances => 64 total. 
expected_nodes = 64 actual_nodes = len(graph.nodes) - assert ( - actual_nodes == expected_nodes - ), f"Expected {expected_nodes} nodes, found {actual_nodes}" + assert actual_nodes == expected_nodes, ( + f"Expected {expected_nodes} nodes, found {actual_nodes}" + ) # 6) Verify total physical links (before direction): # Each 3-tier CLOS has 64 links internally => 2 instances => 128 @@ -51,9 +50,9 @@ def test_scenario_3_build_graph_and_capacity_probe() -> None: expected_links = 144 expected_directed_edges = expected_links * 2 actual_edges = len(graph.edges) - assert ( - actual_edges == expected_directed_edges - ), f"Expected {expected_directed_edges} edges, found {actual_edges}" + assert actual_edges == expected_directed_edges, ( + f"Expected {expected_directed_edges} edges, found {actual_edges}" + ) # 7) Verify no traffic demands in this scenario assert len(scenario.traffic_demands) == 0, "Expected zero traffic demands." @@ -63,24 +62,24 @@ def test_scenario_3_build_graph_and_capacity_probe() -> None: assert policy is None, "Expected no failure policy in this scenario." # 9) Check presence of some expanded nodes - assert ( - "my_clos1/b1/t1/t1-1" in scenario.network.nodes - ), "Missing expected node 'my_clos1/b1/t1/t1-1' in expanded blueprint." - assert ( - "my_clos2/spine/t3-16" in scenario.network.nodes - ), "Missing expected node 'my_clos2/spine/t3-16' in expanded blueprint." + assert "my_clos1/b1/t1/t1-1" in scenario.network.nodes, ( + "Missing expected node 'my_clos1/b1/t1/t1-1' in expanded blueprint." + ) + assert "my_clos2/spine/t3-16" in scenario.network.nodes, ( + "Missing expected node 'my_clos2/spine/t3-16' in expanded blueprint." + ) net = scenario.network # (A) Node attribute checks from node_overrides: # For "my_clos1/b1/t1/t1-1", we expect hw_component="LeafHW-A" and SRG="clos1-b1t1-SRG" node_a1 = net.nodes["my_clos1/b1/t1/t1-1"] - assert ( - node_a1.attrs.get("hw_component") == "LeafHW-A" - ), "Expected hw_component=LeafHW-A for 'my_clos1/b1/t1/t1-1', but not found." - assert node_a1.attrs.get("shared_risk_groups") == [ - "clos1-b1t1-SRG" - ], "Expected shared_risk_group=clos1-b1t1-SRG for 'my_clos1/b1/t1/t1-1'." + assert node_a1.attrs.get("hw_component") == "LeafHW-A", ( + "Expected hw_component=LeafHW-A for 'my_clos1/b1/t1/t1-1', but not found." + ) + assert node_a1.attrs.get("shared_risk_groups") == ["clos1-b1t1-SRG"], ( + "Expected shared_risk_group=clos1-b1t1-SRG for 'my_clos1/b1/t1/t1-1'." + ) # For "my_clos2/b2/t1/t1-1", check hw_component="LeafHW-B" and SRG="clos2-b2t1-SRG" node_b2 = net.nodes["my_clos2/b2/t1/t1-1"] @@ -115,12 +114,12 @@ def test_scenario_3_build_graph_and_capacity_probe() -> None: ) assert link_id_2, "Spine link (t3-2) not found for override check." for link_obj in link_id_2: - assert link_obj.attrs.get("shared_risk_groups") == [ - "SpineSRG" - ], "Expected SRG=SpineSRG on spine<->spine link." - assert ( - link_obj.attrs.get("hw_component") == "400G-LR4" - ), "Expected hw_component=400G-LR4 on spine<->spine link." + assert link_obj.attrs.get("shared_risk_groups") == ["SpineSRG"], ( + "Expected SRG=SpineSRG on spine<->spine link." + ) + assert link_obj.attrs.get("hw_component") == "400G-LR4", ( + "Expected hw_component=400G-LR4 on spine<->spine link." + ) # 10) The capacity probe step computed forward and reverse flows in 'combine' mode # with PROPORTIONAL flow placement. 
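
Almost all of the churn in these three scenario tests is mechanical: ruff's formatter moves the parentheses from the asserted condition to the failure message, where the old black layout wrapped the condition instead. Before/after, with values invented for illustration:

    expected_nodes, actual_nodes = 64, 64

    # old (black) layout
    assert (
        actual_nodes == expected_nodes
    ), f"Expected {expected_nodes} nodes, found {actual_nodes}"

    # new (ruff format) layout
    assert actual_nodes == expected_nodes, (
        f"Expected {expected_nodes} nodes, found {actual_nodes}"
    )
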
diff --git a/tests/test_api_docs.py b/tests/test_api_docs.py
index 2b2dc46..b8ab95f 100644
--- a/tests/test_api_docs.py
+++ b/tests/test_api_docs.py
@@ -5,80 +5,80 @@
 """
 
 import os
-import sys
 import subprocess
+import sys
 from pathlib import Path
+
 import pytest
 
 
 def test_api_doc_generation_imports():
-    """Test that the API documentation generator script exists and is executable."""
-    scripts_dir = Path(__file__).parent.parent / "scripts"
+    """Test that the API documentation generator script can run and generate docs."""
+    project_root = Path(__file__).parent.parent
+    scripts_dir = project_root / "dev"
     generator_script = scripts_dir / "generate_api_docs.py"
 
     # Verify script exists
-    assert (
-        generator_script.exists()
-    ), f"Generator script not found at {generator_script}"
+    assert generator_script.exists(), (
+        f"Generator script not found at {generator_script}"
+    )
 
     # Verify script is executable
     stat_info = generator_script.stat()
     assert stat_info.st_mode & 0o111, "Generator script is not executable"
 
-    # Test that script can be run with --help or similar (quick syntax check)
+    # Test that script can generate documentation to stdout (no file writes)
    try:
         result = subprocess.run(
-            [sys.executable, str(generator_script), "--help"],
+            [sys.executable, str(generator_script)],
             capture_output=True,
-            timeout=10,
-            cwd=scripts_dir.parent,  # Run from project root
+            text=True,
+            timeout=60,
+            cwd=project_root,
         )
-        # Script might not support --help, but it should at least not crash with syntax errors
-        # Exit code 0 (success) or 2 (argument error) are both acceptable
-        assert result.returncode in [
-            0,
-            2,
-        ], f"Script syntax check failed with code {result.returncode}"
-    except subprocess.TimeoutExpired:
-        pytest.fail("Generator script timed out - possible infinite loop")
+        assert result.returncode == 0, (
+            f"Generator script failed with code {result.returncode}:\n"
+            f"STDOUT: {result.stdout[:1000]}...\n"
+            f"STDERR: {result.stderr}"
+        )
 
-def test_api_doc_generation_output():
-    """Test that API documentation generation produces valid output."""
-    scripts_dir = Path(__file__).parent.parent / "scripts"
-    generator_script = scripts_dir / "generate_api_docs.py"
-    project_root = Path(__file__).parent.parent
+        # Verify the output contains expected documentation
+        output = result.stdout
+        assert len(output) > 10000, (
+            f"Generated docs seem too short: {len(output)} characters"
+        )
+        assert "NetGraph API Reference (Auto-Generated)" in output, (
+            "Missing expected header"
+        )
+        assert "ngraph.scenario" in output, "Module ngraph.scenario not found in output"
+        assert "ngraph.network" in output, "Module ngraph.network not found in output"
 
-    # Run the generator script
-    result = subprocess.run(
-        [sys.executable, str(generator_script)],
-        capture_output=True,
-        text=True,
-        timeout=60,
-        cwd=project_root,  # Run from project root (required by generator)
-    )
+    except subprocess.TimeoutExpired:
 
-    # Check that script ran successfully
-    assert (
-        result.returncode == 0
-    ), f"Generator script failed:\nSTDOUT: {result.stdout}\nSTDERR: {result.stderr}"
+        pytest.fail("Generator script timed out - possible infinite loop")
 
-    # Verify the file was created and has content
+
+def test_api_documentation_exists_and_valid():
+    """Test that the existing API documentation file has valid content."""
+    project_root = Path(__file__).parent.parent
     api_doc_path = project_root / "docs" / "reference" / "api-full.md"
-    assert api_doc_path.exists(), f"API documentation not generated at {api_doc_path}"
+    # The file should exist (generated separately via make docs)
+    assert api_doc_path.exists(), (
+        f"API documentation missing at {api_doc_path}. Run 'make docs' to generate it."
+    )
 
     content = api_doc_path.read_text()
 
     # Verify substantial content
-    assert (
-        len(content) > 10000
-    ), f"API documentation seems too short: {len(content)} characters"
+    assert len(content) > 10000, (
+        f"API documentation seems too short: {len(content)} characters"
+    )
 
     # Verify expected header
-    assert (
-        "NetGraph API Reference (Auto-Generated)" in content
-    ), "Missing expected header"
+    assert "NetGraph API Reference (Auto-Generated)" in content, (
+        "Missing expected header"
+    )
 
     # Verify key modules are documented
     expected_modules = [
@@ -96,6 +96,40 @@ def test_api_doc_generation_output():
     assert "**Attributes:**" in content, "No attribute documentation found"
 
 
+def test_api_doc_generator_can_run():
+    """Test that the API documentation generator script can be executed without errors."""
+    project_root = Path(__file__).parent.parent
+
+    # Test dry-run: just check that the script can import its modules without crashing
+    # We'll use Python's -c flag to test imports without actually generating docs
+    test_code = """
+import sys
+sys.path.insert(0, ".")
+try:
+    import ngraph.scenario
+    import ngraph.network
+    import ngraph.components
+    print("SUCCESS: All modules can be imported")
+except Exception as e:
+    print(f"ERROR: {e}")
+    sys.exit(1)
+"""
+
+    result = subprocess.run(
+        [sys.executable, "-c", test_code],
+        capture_output=True,
+        text=True,
+        timeout=30,
+        cwd=project_root,
+    )
+
+    assert result.returncode == 0, (
+        f"API doc generator dependencies failed to import:\n"
+        f"STDOUT: {result.stdout}\nSTDERR: {result.stderr}"
+    )
+    assert "SUCCESS" in result.stdout, "Expected success message not found"
+
+
 def test_documentation_cross_references():
     """Test that documentation files properly cross-reference each other."""
     docs_dir = Path(__file__).parent.parent / "docs" / "reference"
@@ -105,18 +139,18 @@ def test_documentation_cross_references():
     assert api_md.exists(), "Main API documentation missing"
 
     api_content = api_md.read_text()
-    assert (
-        "api-full.md" in api_content
-    ), "Main API guide doesn't reference auto-generated docs"
+    assert "api-full.md" in api_content, (
+        "Main API guide doesn't reference auto-generated docs"
+    )
 
     # Check auto-generated API docs
     api_full_md = docs_dir / "api-full.md"
     assert api_full_md.exists(), "Auto-generated API documentation missing"
 
     api_full_content = api_full_md.read_text()
-    assert (
-        "api.md" in api_full_content
-    ), "Auto-generated docs don't reference main API guide"
+    assert "api.md" in api_full_content, (
+        "Auto-generated docs don't reference main API guide"
+    )
 
 
 @pytest.mark.skipif(
@@ -124,13 +158,13 @@ def test_documentation_cross_references():
     reason="Skip integration test in CI to avoid circular dependency",
 )
 def test_scripts_directory_structure():
-    """Test that scripts directory has expected structure."""
-    scripts_dir = Path(__file__).parent.parent / "scripts"
+    """Test that dev directory has expected structure."""
+    scripts_dir = Path(__file__).parent.parent / "dev"
 
-    assert scripts_dir.exists(), "Scripts directory missing"
-    assert (
-        scripts_dir / "generate_api_docs.py"
-    ).exists(), "API generator script missing"
+    assert scripts_dir.exists(), "Dev directory missing"
+    assert (scripts_dir / "generate_api_docs.py").exists(), (
+        "API generator script missing"
+    )
 
     # Verify script is executable
     generator_script = scripts_dir / "generate_api_docs.py"
diff --git a/tests/test_blueprints.py b/tests/test_blueprints.py
index 3dbab18..490f0a8 100644
--- a/tests/test_blueprints.py
+++ b/tests/test_blueprints.py
@@ -1,24 +1,24 @@
 import pytest
 
-from ngraph.network import Network, Node, Link
 from ngraph.blueprints import (
-    DSLExpansionContext,
     Blueprint,
+    DSLExpansionContext,
     _apply_parameters,
-    _join_paths,
     _create_link,
+    _expand_adjacency,
     _expand_adjacency_pattern,
-    _process_direct_nodes,
-    _process_direct_links,
     _expand_blueprint_adjacency,
-    _expand_adjacency,
     _expand_group,
-    _update_nodes,
-    _update_links,
-    _process_node_overrides,
+    _join_paths,
+    _process_direct_links,
+    _process_direct_nodes,
     _process_link_overrides,
+    _process_node_overrides,
+    _update_links,
+    _update_nodes,
     expand_network_dsl,
 )
+from ngraph.network import Link, Network, Node
 
 
 def test_join_paths():
@@ -130,10 +130,10 @@ def test_expand_adjacency_pattern_one_to_one():
     _expand_adjacency_pattern(ctx, "S", "T", "one_to_one", {"capacity": 10})
     # We expect 2 links: S1->T1, S2->T2
     assert len(ctx_net.links) == 2
-    pairs = {(l.source, l.target) for l in ctx_net.links.values()}
+    pairs = {(link.source, link.target) for link in ctx_net.links.values()}
     assert pairs == {("S1", "T1"), ("S2", "T2")}
-    for l in ctx_net.links.values():
-        assert l.capacity == 10
+    for link in ctx_net.links.values():
+        assert link.capacity == 10
 
 
 def test_expand_adjacency_pattern_one_to_one_wrap():
@@ -151,7 +151,7 @@ def test_expand_adjacency_pattern_one_to_one_wrap():
     _expand_adjacency_pattern(ctx, "S", "T", "one_to_one", {"cost": 99})
     # Expect 4 total links
     assert len(ctx_net.links) == 4
-    pairs = {(l.source, l.target) for l in ctx_net.links.values()}
+    pairs = {(link.source, link.target) for link in ctx_net.links.values()}
     expected = {
         ("S1", "T1"),
         ("S2", "T2"),
@@ -159,8 +159,8 @@ def test_expand_adjacency_pattern_one_to_one_wrap():
         ("S4", "T2"),
     }
     assert pairs == expected
-    for l in ctx_net.links.values():
-        assert l.cost == 99
+    for link in ctx_net.links.values():
+        assert link.cost == 99
 
 
 def test_expand_adjacency_pattern_one_to_one_mismatch():
@@ -853,14 +853,14 @@ def test_adjacency_one_to_one():
     # 4 total nodes => 2 from each group
     assert len(net.nodes) == 4
     # one_to_one => 2 links
-    pairs = {(l.source, l.target) for l in net.links.values()}
+    pairs = {(link.source, link.target) for link in net.links.values()}
     expected = {
         ("GroupA/GroupA-1", "GroupB/GroupB-1"),
         ("GroupA/GroupA-2", "GroupB/GroupB-2"),
     }
     assert pairs == expected
-    for l in net.links.values():
-        assert l.capacity == 99
+    for link in net.links.values():
+        assert link.capacity == 99
 
 
 def test_adjacency_one_to_one_wrap():
@@ -884,7 +884,7 @@ def test_adjacency_one_to_one_wrap():
     assert len(net.nodes) == 6
     # wrap => 4 links
     assert len(net.links) == 4
-    link_pairs = {(l.source, l.target) for l in net.links.values()}
+    link_pairs = {(link.source, link.target) for link in net.links.values()}
     expected = {
         ("Big/Big-1", "Small/Small-1"),
         ("Big/Big-2", "Small/Small-2"),
@@ -892,8 +892,8 @@ def test_adjacency_one_to_one_wrap():
         ("Big/Big-4", "Small/Small-2"),
     }
     assert link_pairs == expected
-    for l in net.links.values():
-        assert l.cost == 555
+    for link in net.links.values():
+        assert link.cost == 555
 
 
 def test_adjacency_mesh():
@@ -1210,7 +1210,7 @@ def test_expand_adjacency_with_variables_zip():
     # (A->B), (B->C), (C->A)
     assert len(net.nodes) == 3
     assert len(net.links) == 3
-    link_pairs = {(l.source, l.target) for l in net.links.values()}
+    link_pairs = {(link.source, link.target) for link in net.links.values()}
     expected = {
("RackA/RackA-1", "RackB/RackB-1"), ("RackB/RackB-1", "RackC/RackC-1"), diff --git a/tests/test_components.py b/tests/test_components.py index 241fa63..bdf6eeb 100644 --- a/tests/test_components.py +++ b/tests/test_components.py @@ -1,6 +1,4 @@ import pytest -from copy import deepcopy -from typing import Dict from ngraph.components import Component, ComponentsLibrary diff --git a/tests/test_dsl_examples.py b/tests/test_dsl_examples.py index 74c14a4..32360ed 100644 --- a/tests/test_dsl_examples.py +++ b/tests/test_dsl_examples.py @@ -263,8 +263,8 @@ def test_failure_policy_example(): scenario = Scenario.from_yaml(yaml_content) assert scenario.failure_policy is not None - assert scenario.failure_policy.fail_shared_risk_groups == True - assert scenario.failure_policy.fail_risk_group_children == False + assert scenario.failure_policy.fail_shared_risk_groups + assert not scenario.failure_policy.fail_risk_group_children assert len(scenario.failure_policy.rules) == 1 rule = scenario.failure_policy.rules[0] assert rule.entity_scope == "node" @@ -313,7 +313,7 @@ def test_node_overrides_example(): groups: my_clos1: use_blueprint: test_bp - + node_overrides: - path: "^my_clos1/switches/switch-(1|3)$" disabled: true @@ -344,7 +344,7 @@ def test_link_overrides_example(): group2: node_count: 2 name_template: "node-{node_num}" - + adjacency: - source: /group1 target: /group2 @@ -352,7 +352,7 @@ def test_link_overrides_example(): link_params: capacity: 100 cost: 10 - + link_overrides: - source: "^group1/node-1$" target: "^group2/node-1$" @@ -386,7 +386,7 @@ def test_variable_expansion(): node_count: 2 name_template: "rack-{node_num}" plane2_rack: - node_count: 2 + node_count: 2 name_template: "rack-{node_num}" spine: node_count: 2 diff --git a/tests/test_explorer.py b/tests/test_explorer.py index a7316d1..93180cf 100644 --- a/tests/test_explorer.py +++ b/tests/test_explorer.py @@ -1,14 +1,13 @@ -import pytest import logging +import pytest + +from ngraph.components import Component, ComponentsLibrary from ngraph.explorer import ( NetworkExplorer, TreeNode, - TreeStats, - ExternalLinkBreakdown, ) -from ngraph.network import Network, Node, Link -from ngraph.components import ComponentsLibrary, Component +from ngraph.network import Link, Network, Node def create_mock_components_library() -> ComponentsLibrary: diff --git a/tests/test_failure_manager.py b/tests/test_failure_manager.py index 2161e99..72729b2 100644 --- a/tests/test_failure_manager.py +++ b/tests/test_failure_manager.py @@ -1,14 +1,15 @@ """Tests for the FailureManager class.""" -import pytest -from unittest.mock import MagicMock from typing import List +from unittest.mock import MagicMock + +import pytest +from ngraph.failure_manager import FailureManager +from ngraph.failure_policy import FailurePolicy from ngraph.network import Network from ngraph.traffic_demand import TrafficDemand -from ngraph.traffic_manager import TrafficManager, TrafficResult -from ngraph.failure_policy import FailurePolicy -from ngraph.failure_manager import FailureManager +from ngraph.traffic_manager import TrafficResult @pytest.fixture diff --git a/tests/test_failure_policy.py b/tests/test_failure_policy.py index 2437a78..b212c3d 100644 --- a/tests/test_failure_policy.py +++ b/tests/test_failure_policy.py @@ -1,11 +1,9 @@ -import pytest from unittest.mock import patch from ngraph.failure_policy import ( + FailureCondition, FailurePolicy, FailureRule, - FailureCondition, - _evaluate_condition, ) diff --git a/tests/test_network.py b/tests/test_network.py index 
daf2e95..81b2814 100644 --- a/tests/test_network.py +++ b/tests/test_network.py @@ -1,12 +1,12 @@ import pytest + from ngraph.network import ( + Link, Network, Node, - Link, RiskGroup, new_base64_uuid, ) -from ngraph.lib.graph import StrictMultiDiGraph def test_new_base64_uuid_length_and_uniqueness(): @@ -593,7 +593,7 @@ def test_find_links(): # No filter => returns all all_links = net.find_links() assert len(all_links) == 2 - assert set(l.id for l in all_links) == {link_a_c.id, link_b_c.id} + assert set(link.id for link in all_links) == {link_a_c.id, link_b_c.id} # Filter by source pattern "srcA" a_links = net.find_links(source_regex="^srcA$") diff --git a/tests/test_readme_examples.py b/tests/test_readme_examples.py index 22588c9..a0aa809 100644 --- a/tests/test_readme_examples.py +++ b/tests/test_readme_examples.py @@ -21,9 +21,9 @@ def test_max_flow_variants(): - The flow along the shortest paths (expected flow: 3.0) - Flow placement using an equal-balanced strategy on the shortest paths (expected flow: 2.0) """ - from ngraph.lib.graph import StrictMultiDiGraph - from ngraph.lib.algorithms.max_flow import calc_max_flow from ngraph.lib.algorithms.base import FlowPlacement + from ngraph.lib.algorithms.max_flow import calc_max_flow + from ngraph.lib.graph import StrictMultiDiGraph g = StrictMultiDiGraph() for node in ("A", "B", "C", "D"): @@ -72,10 +72,10 @@ def test_traffic_engineering_simulation(): - Each demand uses its own FlowPolicy, so the policy's global flow accounting does not overlap. - The test verifies that each demand is fully placed at 20 units. """ - from ngraph.lib.graph import StrictMultiDiGraph from ngraph.lib.algorithms.flow_init import init_flow_graph - from ngraph.lib.flow_policy import FlowPolicyConfig, get_flow_policy from ngraph.lib.demand import Demand + from ngraph.lib.flow_policy import FlowPolicyConfig, get_flow_policy + from ngraph.lib.graph import StrictMultiDiGraph # Build the graph. 
     g = StrictMultiDiGraph()
diff --git a/tests/test_result.py b/tests/test_result.py
index d7d1508..d8d21ca 100644
--- a/tests/test_result.py
+++ b/tests/test_result.py
@@ -1,4 +1,3 @@
-import pytest
 from ngraph.results import Results
 
 
diff --git a/tests/test_scenario.py b/tests/test_scenario.py
index 81f0935..b17bb72 100644
--- a/tests/test_scenario.py
+++ b/tests/test_scenario.py
@@ -1,17 +1,16 @@
-import pytest
-import yaml
-from typing import TYPE_CHECKING
 from dataclasses import dataclass
+from typing import TYPE_CHECKING
 
-from ngraph.scenario import Scenario
+import pytest
+
+from ngraph.failure_policy import FailurePolicy
 from ngraph.network import Network
-from ngraph.failure_policy import FailurePolicy, FailureRule, FailureCondition
-from ngraph.traffic_demand import TrafficDemand
 from ngraph.results import Results
+from ngraph.scenario import Scenario
 from ngraph.workflow.base import (
+    WORKFLOW_STEP_REGISTRY,
     WorkflowStep,
     register_workflow_step,
-    WORKFLOW_STEP_REGISTRY,
 )
 
 if TYPE_CHECKING:
diff --git a/tests/test_traffic_demand.py b/tests/test_traffic_demand.py
index b278e79..a981cbe 100644
--- a/tests/test_traffic_demand.py
+++ b/tests/test_traffic_demand.py
@@ -1,4 +1,3 @@
-import pytest
 from ngraph.traffic_demand import TrafficDemand
 
 
diff --git a/tests/test_traffic_manager.py b/tests/test_traffic_manager.py
index d742087..3d09c66 100644
--- a/tests/test_traffic_manager.py
+++ b/tests/test_traffic_manager.py
@@ -1,12 +1,10 @@
 import pytest
-from ngraph.network import Network, Node, Link
-from ngraph.traffic_demand import TrafficDemand
+from ngraph.lib.algorithms.base import MIN_FLOW
 from ngraph.lib.flow_policy import FlowPolicyConfig
 from ngraph.lib.graph import StrictMultiDiGraph
-from ngraph.lib.algorithms.base import MIN_FLOW
-from ngraph.lib.demand import Demand
-
+from ngraph.network import Link, Network, Node
+from ngraph.traffic_demand import TrafficDemand
 from ngraph.traffic_manager import TrafficManager
 
 
@@ -110,9 +108,9 @@ def test_place_all_demands_simple(small_network):
 
     # Check final placed_demand on each Demand
     for d in tm.demands:
-        assert (
-            abs(d.placed_demand - d.volume) < MIN_FLOW
-        ), "Demand should be fully placed"
+        assert abs(d.placed_demand - d.volume) < MIN_FLOW, (
+            "Demand should be fully placed"
+        )
 
     # Summarize link usage
     usage = tm.summarize_link_usage()
@@ -237,9 +235,9 @@ def test_place_all_demands_auto_rounds(small_network):
     total_placed = tm.place_all_demands(placement_rounds="auto")
     assert total_placed == 25.0, "Should place all traffic under auto rounds"
     for d in tm.demands:
-        assert (
-            abs(d.placed_demand - d.volume) < MIN_FLOW
-        ), "Demand should be fully placed"
+        assert abs(d.placed_demand - d.volume) < MIN_FLOW, (
+            "Demand should be fully placed"
+        )
 
 
 def test_combine_mode_multi_source_sink():
@@ -281,9 +279,9 @@ def test_combine_mode_multi_source_sink():
         for _, (src, dst, _, data) in tm.graph.get_edges().items()
         if src == d.src_node
     ]
-    assert (
-        len(edges_out_of_pseudo_src) == 2
-    ), "2 edges from pseudo-source to real sources"
+    assert len(edges_out_of_pseudo_src) == 2, (
+        "2 edges from pseudo-source to real sources"
+    )
 
     edges_into_pseudo_snk = [
         (src, dst)
@@ -375,9 +373,9 @@ def test_full_mesh_mode_self_pairs():
     # So we expect 2 demands, each with 10.0
     assert len(tm.demands) == 2, "Only N1->N2 and N2->N1 should be created"
     for d in tm.demands:
-        assert (
-            abs(d.volume - 10.0) < MIN_FLOW
-        ), "Volume should be evenly split among 2 pairs"
+        assert abs(d.volume - 10.0) < MIN_FLOW, (
+            "Volume should be evenly split among 2 pairs"
+        )
 
 
 def test_estimate_rounds_no_demands(small_network):
diff --git a/tests/transform/test_base.py b/tests/transform/test_base.py
index 54e824b..a47ff6b 100644
--- a/tests/transform/test_base.py
+++ b/tests/transform/test_base.py
@@ -1,8 +1,9 @@
 import pytest
+
 from ngraph.transform.base import (
     TRANSFORM_REGISTRY,
-    register_transform,
     NetworkTransform,
+    register_transform,
 )
 
 
diff --git a/tests/transform/test_distribute_external.py b/tests/transform/test_distribute_external.py
index 7c51193..875f243 100644
--- a/tests/transform/test_distribute_external.py
+++ b/tests/transform/test_distribute_external.py
@@ -1,10 +1,11 @@
 import pytest
+
+from ngraph.network import Network, Node
+from ngraph.scenario import Scenario
 from ngraph.transform.distribute_external import (
-    _StripeChooser,
     DistributeExternalConnectivity,
+    _StripeChooser,
 )
-from ngraph.network import Network, Node, Link
-from ngraph.scenario import Scenario
 
 
 def make_scenario_with_network(net):
diff --git a/tests/transform/test_enable_nodes.py b/tests/transform/test_enable_nodes.py
index 6cc41c2..8da0fcd 100644
--- a/tests/transform/test_enable_nodes.py
+++ b/tests/transform/test_enable_nodes.py
@@ -1,9 +1,8 @@
-import pytest
+import random
+
 from ngraph.network import Network, Node
 from ngraph.scenario import Scenario
 from ngraph.transform.enable_nodes import EnableNodesTransform
-import ngraph.transform.enable_nodes as en_mod
-import random
 
 
 def make_scenario(nodes):
diff --git a/tests/workflow/test_base.py b/tests/workflow/test_base.py
index 1fb4dd8..396d356 100644
--- a/tests/workflow/test_base.py
+++ b/tests/workflow/test_base.py
@@ -1,10 +1,11 @@
-import pytest
 from unittest.mock import MagicMock
 
+import pytest
+
 from ngraph.workflow.base import (
+    WORKFLOW_STEP_REGISTRY,
     WorkflowStep,
     register_workflow_step,
-    WORKFLOW_STEP_REGISTRY,
 )
 
 
diff --git a/tests/workflow/test_build_graph.py b/tests/workflow/test_build_graph.py
index ce2458e..ee3129a 100644
--- a/tests/workflow/test_build_graph.py
+++ b/tests/workflow/test_build_graph.py
@@ -1,9 +1,10 @@
-import pytest
 from unittest.mock import MagicMock
 
+import pytest
+
 from ngraph.lib.graph import StrictMultiDiGraph
+from ngraph.network import Link, Network, Node
 from ngraph.workflow.build_graph import BuildGraph
-from ngraph.network import Network, Node, Link
 
 
 @pytest.fixture
@@ -57,9 +58,9 @@ def test_build_graph_stores_multidigraph_in_results(mock_scenario):
     assert call_args[0][0] == "MyBuildStep"
     assert call_args[0][1] == "graph"
     created_graph = call_args[0][2]
-    assert isinstance(
-        created_graph, StrictMultiDiGraph
-    ), "Resulting object must be a StrictMultiDiGraph."
+    assert isinstance(created_graph, StrictMultiDiGraph), (
+        "Resulting object must be a StrictMultiDiGraph."
+    )
 
     # Verify the correct nodes were added
     assert set(created_graph.nodes()) == {
@@ -73,9 +74,9 @@ def test_build_graph_stores_multidigraph_in_results(mock_scenario):
     # Verify edges
     # We expect two edges for each link: forward ("L1") and reverse ("L1_rev"), etc.
     # So we should have 4 edges in total (2 from L1, 2 from L2).
-    assert (
-        created_graph.number_of_edges() == 4
-    ), "Should have two edges (forward/reverse) for each link."
+    assert created_graph.number_of_edges() == 4, (
+        "Should have two edges (forward/reverse) for each link."
+    )
 
     # Check forward edge from link 'L1'
     edge_data_l1 = created_graph.get_edge_data("A", "B", key="L1")
@@ -86,12 +87,12 @@ def test_build_graph_stores_multidigraph_in_results(mock_scenario):
 
     # Check reverse edge from link 'L1'
     rev_edge_data_l1 = created_graph.get_edge_data("B", "A", key="L1_rev")
-    assert (
-        rev_edge_data_l1 is not None
-    ), "Reverse edge 'L1_rev' should exist from B to A."
-    assert (
-        rev_edge_data_l1["capacity"] == 100
-    ), "Reverse edge should share the same capacity."
+    assert rev_edge_data_l1 is not None, (
+        "Reverse edge 'L1_rev' should exist from B to A."
+    )
+    assert rev_edge_data_l1["capacity"] == 100, (
+        "Reverse edge should share the same capacity."
+    )
 
     # Check forward edge from link 'L2'
     edge_data_l2 = created_graph.get_edge_data("B", "A", key="L2")
@@ -102,9 +103,9 @@ def test_build_graph_stores_multidigraph_in_results(mock_scenario):
 
     # Check reverse edge from link 'L2'
     rev_edge_data_l2 = created_graph.get_edge_data("A", "B", key="L2_rev")
-    assert (
-        rev_edge_data_l2 is not None
-    ), "Reverse edge 'L2_rev' should exist from A to B."
-    assert (
-        rev_edge_data_l2["capacity"] == 50
-    ), "Reverse edge should share the same capacity."
+    assert rev_edge_data_l2 is not None, (
+        "Reverse edge 'L2_rev' should exist from A to B."
+    )
+    assert rev_edge_data_l2["capacity"] == 50, (
+        "Reverse edge should share the same capacity."
+    )
diff --git a/tests/workflow/test_capacity_probe.py b/tests/workflow/test_capacity_probe.py
index 663eafd..262c42c 100644
--- a/tests/workflow/test_capacity_probe.py
+++ b/tests/workflow/test_capacity_probe.py
@@ -1,9 +1,10 @@
+from unittest.mock import MagicMock
+
 import pytest
-from unittest.mock import MagicMock, call
-from ngraph.network import Network, Node, Link
-from ngraph.workflow.capacity_probe import CapacityProbe
 from ngraph.lib.algorithms.base import FlowPlacement
+from ngraph.network import Link, Network, Node
+from ngraph.workflow.capacity_probe import CapacityProbe
 
 
 @pytest.fixture