diff --git a/.github/workflows/azure-sdk-tools.yml b/.github/workflows/azure-sdk-tools.yml index 4858ff6e9aad..bc9f87afa3da 100644 --- a/.github/workflows/azure-sdk-tools.yml +++ b/.github/workflows/azure-sdk-tools.yml @@ -21,7 +21,7 @@ jobs: - name: Install azure-sdk-tools run: | - python -m pip install -e eng/tools/azure-sdk-tools[build,ghtools,conda] + python -m pip install -e eng/tools/azure-sdk-tools[ghtools,conda] python -m pip freeze shell: bash @@ -43,7 +43,7 @@ jobs: - name: Install azure-sdk-tools run: | - python -m pip install -e eng/tools/azure-sdk-tools[build,ghtools,conda] + python -m pip install -e eng/tools/azure-sdk-tools[ghtools,conda] python -m pip install black==24.4.0 python -m pip freeze shell: bash @@ -70,7 +70,7 @@ jobs: - name: Install azure-sdk-tools on in global uv, discover azpysdk checks run: | - uv pip install --system eng/tools/azure-sdk-tools[build,ghtools,conda] + uv pip install --system eng/tools/azure-sdk-tools[ghtools,conda,systemperf] # Discover available azpysdk commands from the {command1,command2,...} line in help output CHECKS=$(azpysdk -h 2>&1 | \ @@ -92,19 +92,20 @@ jobs: - name: Run all discovered checks against azure-template using uv as package manager run: | - python eng/scripts/dispatch_checks.py --checks "$AZPYSDK_CHECKS" azure-template + sdk_build azure-template -d $(pwd)/wheels --build_id 20250101.1 + python eng/scripts/dispatch_checks.py --checks "$AZPYSDK_CHECKS" --wheel_dir $(pwd)/wheels azure-template shell: bash env: TOX_PIP_IMPL: "uv" - name: Install azure-sdk-tools on global pip env run: | - python -m pip install -e eng/tools/azure-sdk-tools[build,ghtools,conda] + python -m pip install -e eng/tools/azure-sdk-tools[ghtools,conda] shell: bash - name: Run all discovered checks against azure-template using pip as package manager run: | - python eng/scripts/dispatch_checks.py --checks "$AZPYSDK_CHECKS" azure-template + python eng/scripts/dispatch_checks.py --checks "$AZPYSDK_CHECKS" --wheel_dir $(pwd)/wheels azure-template shell: bash dev-setup-and-import: diff --git a/.gitignore b/.gitignore index 84bf07aea48c..4b64cafe2e1f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ # Default Assets restore directory .assets +.assets_distributed # Python cache __pycache__/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e17a4a5472ee..8fade23c59f6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,9 +11,7 @@ If you want to contribute to a file that is generated (header contains `Code gen We utilize a variety of tools to ensure smooth development, testing, and code quality for the Azure Python SDK. Below is a list of key tools and their purpose in the workflow: -- Tox: [Tox](https://tox.wiki/en/latest/) is our primary tool for managing test environments. It allows us to distribute tests to virtual environments, install dependencies, and maintain consistency between local and CI builds. Tox is configured to handle various testing scenarios, including linting, type checks, and running unit tests. - -- Virtualenv: [Virtualenv](https://virtualenv.pypa.io/en/latest/) is leveraged by Tox to create isolated environments for each test suite, ensuring consistent dependencies and reducing conflicts. +- azpysdk: The `azpysdk` CLI is our primary tool for running checks locally and in CI. It is an entrypoint provided by the `eng/tools/azure-sdk-tools` package and abstracts all checks (linting, type checking, tests, doc generation, etc.) behind a single command. 
See the [Tool Usage Guide](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/tool_usage_guide.md) for full details. - UV: [UV](https://docs.astral.sh/uv/) is a fast package manager that can manage Python versions, run and install Python packages, and be used instead of pip, virtualenv, and more. @@ -31,140 +29,65 @@ We utilize a variety of tools to ensure smooth development, testing, and code qu ## Building and Testing -The Azure SDK team's Python CI leverages the tool `tox` to distribute tests to virtual environments, handle test dependency installation, and coordinate tooling reporting during PR/CI builds. This means that a dev working locally can reproduce _exactly_ what the build machine is doing. - -[A Brief Overview of Tox](https://tox.wiki/en/latest/) - -#### A Monorepo and Tox in Harmony - -Traditionally, the `tox.ini` file for a package sits _alongside the setup.py_ in source code. The `azure-sdk-for-python` necessarily does not adhere to this policy. There are over one-hundred packages contained here-in. That's a lot of `tox.ini` files to maintain! - -Instead, the CI system leverages the `--root` argument which is new to `tox4`. The `--root` argument allows `tox` to act as if the `tox.ini` is located in whatever directory you specify! - -#### Tox Environments - -A given `tox.ini` works on the concept of `test environments`. A given test environment is a combination of: - -1. An identifier (or identifiers) -2. A targeted Python version - 1. `tox` will default to base python executing the `tox` command if no Python environment is specified -3. (optionally) an OS platform - -Internally `tox` leverages `virtualenv` to create each test environment's virtual environment. +The Azure SDK team's Python CI leverages the `azpysdk` CLI tool to run checks, tests, and linters during PR/CI builds. This means that a dev working locally can reproduce _exactly_ what the build machine is doing. -This means that once the `tox` workflow is in place, all tests will be executed _within a virtual environment._ +The `azpysdk` entrypoint is provided by the `eng/tools/azure-sdk-tools` package. For full setup instructions and the list of available checks, see the [Tool Usage Guide](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/tool_usage_guide.md). -You can use the command `tox list` to list all the environments provided by a `tox.ini` file. You can either use that command in the -same directory as the file itself, or use the `--conf` argument to specify the path to it directly. 
+### Quick Setup - -Sample output of `tox list`: +From the root of your target package: ``` -sdk-for-python/eng/tox> tox list -default environments: -whl -> Builds a wheel and runs tests -sdist -> Builds a source distribution and runs tests - -additional environments: -pylint -> Lints a package with a pinned version of pylint -next-pylint -> Lints a package with pylint (version 2.15.8) -mypy -> Typechecks a package with mypy (version 1.9.0) -next-mypy -> Typechecks a package with the latest version of mypy -pyright -> Typechecks a package with pyright (version 1.1.287) -next-pyright -> Typechecks a package with the latest version of static type-checker pyright -verifytypes -> Verifies the "type completeness" of a package with pyright -whl_no_aio -> Builds a wheel without aio and runs tests -develop -> Tests a package -sphinx -> Builds a package's documentation with sphinx -depends -> Ensures all modules in a target package can be successfully imported -verifywhl -> Verify directories included in whl and contents in manifest file -verifysdist -> Verify directories included in sdist and contents in manifest file. Also ensures that py.typed configuration is correct within the setup.py -devtest -> Tests a package against dependencies installed from a dev index -latestdependency -> Tests a package against the released, upper-bound versions of its azure dependencies -mindependency -> Tests a package against the released, lower-bound versions of its azure dependencies -apistub -> Generate an api stub of a package ( for https://apiview.dev ) -bandit -> Runs bandit, a tool to find common security issues, against a package -samples -> Runs a package's samples -breaking -> Runs the breaking changes checker against a package +pip install -r dev_requirements.txt ``` -### Example Usage of the common Azure SDK For Python `tox.ini` - -Basic usage of `tox` within this monorepo is: - -1. `pip install "tox<5"` -2. Run `tox run -e ENV_NAME -c path/to/tox.ini --root path/to/python_package` - * **Note**: You can use environment variables to provide defaults for tox config values - * With `TOX_CONFIG_FILE` set to the absolute path of `tox.ini`, you can avoid needing `-c path/to/tox.ini` in your tox invocations - * With `TOX_ROOT_DIR` set to the absolute path to your python package, you can avoid needing `--root path/to/python_package` - -The common `tox.ini` location is `eng/tox/tox.ini` within the repository. - -If at any time you want to blow away the tox created virtual environments and start over, simply append `-r` to any tox invocation! - -#### Example `azure-core` mypy - -1. Run `tox run -e mypy -c ./eng/tox/tox.ini --root sdk/core/azure-core` +This installs `azure-sdk-tools` (which provides `azpysdk`) along with the package's dev dependencies. -#### Example `azure-storage-blob` tests +### Available Checks -2. Execute `tox run -c ./eng/tox/tox.ini --root sdk/storage/azure-storage-blob` - -Note that we didn't provide an `environment` argument for this example. Reason here is that the _default_ environment selected by our common `tox.ini` file is one that runs `pytest`. - -#### `whl` environment -Used for test execution across the spectrum of all the platforms we want to support. Maintained at a `platform specific` level just in case we run into platform-specific bugs. - -* Installs the wheel, runs tests using the wheel +You can discover all available checks by running `azpysdk --help`. Some common checks: ``` -\> tox run -e whl -c --root - +azpysdk pylint . # Lint with pylint +azpysdk mypy . 
# Type check with mypy
+azpysdk pyright . # Type check with pyright
+azpysdk verifytypes . # Verify type completeness
+azpysdk sphinx . # Build documentation
+azpysdk bandit . # Security analysis
+azpysdk black . # Code formatting
+azpysdk verifywhl . # Verify wheel contents
+azpysdk verifysdist . # Verify sdist contents
+azpysdk import_all . # Verify all imports resolve
+azpysdk apistub . # Generate API stub
+azpysdk samples . # Run samples
+azpysdk breaking . # Check for breaking changes
+azpysdk devtest . # Test against dev feed dependencies
```

-#### `sdist` environment
-Used for the local dev loop.
+### Running from the repo root

-* Installs package in editable mode
-* Runs tests using the editable mode installation, not the wheel
+`azpysdk` also supports globbing and comma-separated package names when invoked from the repo root:

```
-
-\> tox run -e sdist -c --root
-
+azure-sdk-for-python> azpysdk import_all azure-storage*
+azure-sdk-for-python> azpysdk pylint azure-storage-blob,azure-core
```

-#### `pylint` environment
-Pylint install and run.
-
-```
-\> tox run -e pylint -c --root
-```
+### Isolated environments

-
-#### `mypy` environment
-Mypy install and run.
-
-```
-\> tox run -e mypy -c --root
-```
-
-#### `sphinx` environment
-Generate sphinx doc for this package.
+To run a check in a completely fresh virtual environment, add `--isolate`:

```
-\> tox run -e sphinx -c --root
+azpysdk pylint . --isolate
```

### Custom Pytest Arguments

-`tox` supports custom arguments, and the defined pytest environments within the common `tox.ini` also allow these. Essentially, separate the arguments you want passed to `pytest` by a `--` in your tox invocation.
-
-[Tox Documentation on Positional Arguments](https://tox.wiki/en/latest/config.html#substitutions-for-positional-arguments-in-commands)
+When running test-related checks, you can pass additional arguments to `pytest` after `--`:

-**Example: Invoke tox, breaking into the debugger on failure**
-`tox run -e whl -c --root -- --pdb`
+```
+azpysdk devtest . -- --pdb
+```

### Performance Testing

@@ -175,7 +98,7 @@ SDK performance testing is supported via the custom `perfstress` framework. For

We maintain an [additional document](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/eng_sys_checks.md) that has a ton of detail as to what is actually _happening_ in these executions.

### Dev Feed
-Daily dev build version of Azure sdk packages for python are available and are uploaded to Azure devops feed daily. We have also created a tox environment to test a package against dev built version of dependent packages. Below is the link to Azure devops feed.
+Daily dev build versions of Azure SDK packages for Python are uploaded to the Azure DevOps feed daily. Below is the link to the feed.
[`https://dev.azure.com/azure-sdk/public/_packaging?_a=feed&feed=azure-sdk-for-python`](https://dev.azure.com/azure-sdk/public/_packaging?_a=feed&feed=azure-sdk-for-python)

##### To install latest dev build version of a package

@@ -191,13 +114,13 @@ pip install azure-appconfiguration==1.0.0b6.dev20191205001 --extra-index-url htt

To test a package being developed against latest dev build version of dependent packages:
a. cd to package root folder
-b. run tox environment devtest
+b. run `azpysdk devtest`

```
-\> tox run -e devtest -c --root
+azpysdk devtest .
```

-This tox test( devtest) will fail if installed dependent packages are not dev build version.
+This check will fail if the installed dependent packages are not dev build versions.
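+
+As a quick sanity check before running it, you can confirm that dev builds were actually installed (a minimal illustrative sketch; the package name and version shown are hypothetical):
+
+```
+pip freeze | grep "\.dev"
+# azure-core==1.30.0.dev20250101001
+```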
## Samples diff --git a/doc/dev/conda-builds.md b/doc/dev/conda-builds.md index 413257320fea..72e845cea1eb 100644 --- a/doc/dev/conda-builds.md +++ b/doc/dev/conda-builds.md @@ -22,7 +22,7 @@ Follow the instructions [here](https://docs.conda.io/projects/conda-build/en/lat ```bash # cd -pip install "eng/tools/azure-sdk-tools[build,conda]" +pip install "eng/tools/azure-sdk-tools[conda]" ``` ### Get the configuration blob diff --git a/doc/dev/dev_setup.md b/doc/dev/dev_setup.md index b88d97cb4f0f..1306904e5387 100644 --- a/doc/dev/dev_setup.md +++ b/doc/dev/dev_setup.md @@ -33,11 +33,10 @@ or execute the various commands available in the toolbox. 4. Setup your development environment - Install the development requirements for a specific library (located in the `dev_requirements.txt` file at the root of the library), [Tox][tox] and an editable install of your library. For example, to install requirements for `azure-ai-formrecognizer`: + Install the development requirements for a specific library (located in the `dev_requirements.txt` file at the root of the library) and an editable install of your library. This will also install `azure-sdk-tools` which provides the `azpysdk` CLI for running checks. For example, to install requirements for `azure-ai-formrecognizer`: ``` azure-sdk-for-python> cd sdk/formrecognizer/azure-ai-formrecognizer azure-sdk-for-python/sdk/formrecognizer/azure-ai-formrecognizer> pip install -r dev_requirements.txt - azure-sdk-for-python/sdk/formrecognizer/azure-ai-formrecognizer> pip install "tox<5" azure-sdk-for-python/sdk/formrecognizer/azure-ai-formrecognizer> pip install -e . ``` @@ -54,5 +53,4 @@ After following the steps above, you'll be able to run recorded SDK tests with ` [python_website]: https://www.python.org/downloads/ [python_312]: https://apps.microsoft.com/detail/9ncvdn91xzqp [tests]: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md -[tox]: https://tox.wiki/en/latest/ [virtual_environment]: https://docs.python.org/3/tutorial/venv.html diff --git a/doc/dev/engineering_assumptions.md b/doc/dev/engineering_assumptions.md index 7b910c9111c5..5debb67a2ae3 100644 --- a/doc/dev/engineering_assumptions.md +++ b/doc/dev/engineering_assumptions.md @@ -13,7 +13,7 @@ universal=1 Build CI for `azure-sdk-for-python` essentially builds and tests packages in one of two methodologies. ### Individual Packages -1. Leverage `tox` to create wheel, install, and execute tests against newly installed wheel +1. Leverage `azpysdk` to create wheel, install, and execute tests against newly installed wheel 2. Tests each package in isolation (outside of dev_requirements.txt dependencies + necessary `pylint` and `mypy`) ### Global Method diff --git a/doc/dev/pylint_checking.md b/doc/dev/pylint_checking.md index 858a53128a9f..7bee34e36590 100644 --- a/doc/dev/pylint_checking.md +++ b/doc/dev/pylint_checking.md @@ -22,11 +22,11 @@ In the Azure SDK for Python repository, in addition to the standard pylint libra ## How to run Pylint? -One way to run pylint is to run at the package level with tox: +The recommended way to run pylint is with `azpysdk` at the package level: - .../azure-sdk-for-python/sdk/eventgrid/azure-eventgrid>tox run -e pylint -c ../../../eng/tox/tox.ini --root . + .../azure-sdk-for-python/sdk/eventgrid/azure-eventgrid> azpysdk pylint . 
-If you don't want to use tox, you can also install and run pylint on its own:
+If you don't want to use `azpysdk`, you can also install and run pylint on its own:

- If taking this approach, in order to run with the pylintrc formatting and the custom pylint checkers you must also install the custom checkers and `SET` the pylintrc path.

@@ -36,8 +36,7 @@ If you don't want to use tox, you can also install and run pylint on its own:

    .../azure-sdk-for-python>SET PYLINTRC="./pylintrc"
    .../azure-sdk-for-python>pylint ./sdk/eventgrid/azure-eventgrid

- Note that you may see different errors if running a different [version of pylint or azure-pylint-guidelines-checker](https://github.com/Azure/azure-sdk-for-python/blob/fdf7c49ea760b1e1698ebbbac48794e8382d8de5/eng/tox/tox.ini#L90) than the one in CI.
-
+ Note that you may see different errors if running a different version of [pylint](https://github.com/Azure/azure-sdk-for-python/blob/main/eng/tools/azure-sdk-tools/azpysdk/pylint.py#L17) or [azure-pylint-guidelines-checker](https://github.com/Azure/azure-sdk-for-python/blob/main/eng/tools/azure-sdk-tools/azpysdk/pylint.py#L61) than the one in CI.

# Ignoring Pylint Checkers

@@ -58,12 +57,12 @@ In addition to being a part of the CI, the custom pylint checkers are also integ

There is now a new step on the CI pipeline called `Run Pylint Next`. This is merely a duplicate of the `Run Pylint` step with the exception that `Run Pylint Next` uses the latest version of pylint and the latest version of the custom pylint checkers.

-This next-pylint environment can also be run locally through tox:
+This next-pylint check can also be run locally:

-    tox run -e next-pylint -c ../../../eng/tox/tox.ini --root
+    azpysdk pylint --next=True .

The errors generated by the `Run Pylint Next` step will not break your weekly test pipelines, but make sure to fix the warnings so that your client library is up to date for the next pylint release.

# How to prepare your SDK for a new pylint update?

Check each client library's `Run Pylint Next` output in the [test-weekly CI pipeline](https://dev.azure.com/azure-sdk/internal/_build?pipelineNameFilter=python%20*%20tests-weekly). If there is no corresponding test-weekly pipeline, run `next-pylint` locally with `azpysdk pylint --next=True .` as described in [How to run Pylint?](#how-to-run-pylint). In order to ensure that the SDK pipeline will not break when pylint is updated, make sure to address all pylint warnings present.
diff --git a/doc/dev/sample_guide.md b/doc/dev/sample_guide.md
index 6a33ec0394c0..ab6ee5b5df88 100644
--- a/doc/dev/sample_guide.md
+++ b/doc/dev/sample_guide.md
@@ -54,17 +54,14 @@ The given `START`/`END` keywords can be used in a [sphinx literalinclude][sphinx

[Literalinclude example][literalinclude]

The rendered code snippets are sensitive to the indentation in the sample file. Sphinx will adjust the dedent accordingly to ensure the sample is captured accurately and not accidentally trimmed.
-You can preview how published reference documentation will look by running either -- [tox][tox]: `tox run -e sphinx -c ../../../eng/tox/tox.ini --root `. -- [azpysdk](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/tool_usage_guide.md): run `azpysdk sphinx .` in the package directory. +You can preview how published reference documentation will look by running +[azpysdk](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/tool_usage_guide.md): `azpysdk sphinx .` in the package directory. ## Test run samples in CI live tests Per the [Python guidelines][snippet_guidelines], sample code and snippets should be test run in CI to ensure they remain functional. Samples should be run in the package's live test pipeline which is scheduled to run daily. To ensure samples do get tested as part of regular CI runs, add these [lines][live_tests] to the package's tests.yml. -You can test this CI step locally first with tox or azpysdk: -- To use [tox][tox], run `tox run -e samples -c ../../../eng/tox/tox.ini --root `. -- To use [azpysdk](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/tool_usage_guide.md), run `azpysdk samples .` in the package directory. +You can test this CI step locally first with [azpysdk](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/tool_usage_guide.md) by running `azpysdk samples .` in the package directory. The `Test Samples` step in CI will rely on the resources provisioned and environment variables used for running the package's tests. @@ -95,7 +92,6 @@ For general how-to with the Python SDK, see the [Azure SDK for Python Overview][ [literalinclude]: https://github.com/Azure/azure-sdk-for-python/blob/7b3dfdca0658f6a4706654556d3142b4bce2b0d1/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_operations/_patch.py#L244-L251 [snippet_guidelines]: https://azure.github.io/azure-sdk/python_design.html#code-snippets [live_tests]: https://github.com/Azure/azure-sdk-for-python/blob/7b3dfdca0658f6a4706654556d3142b4bce2b0d1/sdk/translation/tests.yml#L13-L14 -[tox]: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md#tox [msft_samples]: https://learn.microsoft.com/samples/browse/ [python_guidelines]: https://azure.github.io/azure-sdk/python_design.html [document_sdk]: https://review.learn.microsoft.com/help/platform/reference-document-sdk-client-libraries?branch=main diff --git a/doc/dev/static_type_checking.md b/doc/dev/static_type_checking.md index 3356636918a6..a936e2dea167 100644 --- a/doc/dev/static_type_checking.md +++ b/doc/dev/static_type_checking.md @@ -176,20 +176,18 @@ The versions of mypy and pyright that we run in CI are pinned to specific versio version of the type checker ships. All client libraries in the Python SDK repo are automatically opted in to running type checking. If you need to temporarily opt-out of type checking for your client library, see [How to opt out of type checking](#how-to-opt-out-of-type-checking). The easiest way to install and run the type checkers locally is -with [tox](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md#tox). This reproduces the exact type checking -environment run in CI and brings in the third party stub packages necessary. To begin, first install `tox`: - -`pip install tox<5` +with [azpysdk](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/tool_usage_guide.md). This reproduces the exact type checking +environment run in CI and brings in the third party stub packages necessary. 
To begin, install `azure-sdk-tools` by running `pip install -r dev_requirements.txt` from your package directory. ### Run mypy mypy is currently pinned to version [1.9.0](https://pypi.org/project/mypy/1.9.0/). -To run mypy on your library, run the tox mypy env at the package level: +To run mypy on your library, run `azpysdk mypy` at the package level: -`.../azure-sdk-for-python/sdk/textanalytics/azure-ai-textanalytics>tox run -e mypy -c ../../../eng/tox/tox.ini --root .` +`.../azure-sdk-for-python/sdk/textanalytics/azure-ai-textanalytics> azpysdk mypy .` -If you don't want to use `tox` you can also install and run mypy on its own: +If you don't want to use `azpysdk` you can also install and run mypy on its own: `pip install mypy==1.9.0` @@ -217,11 +215,11 @@ We pin the version of pyright to version [1.1.287](https://github.com/microsoft/ Note that pyright requires that node is installed. The command-line [wrapper package](https://pypi.org/project/pyright/) for pyright will check if node is in the `PATH`, and if not, will download it at runtime. -To run pyright on your library, run the tox pyright env at the package level: +To run pyright on your library, run `azpysdk pyright` at the package level: -`.../azure-sdk-for-python/sdk/textanalytics/azure-ai-textanalytics>tox run -e pyright -c ../../../eng/tox/tox.ini --root .` +`.../azure-sdk-for-python/sdk/textanalytics/azure-ai-textanalytics> azpysdk pyright .` -If you don't want to use `tox` you can also install and run pyright on its own: +If you don't want to use `azpysdk` you can also install and run pyright on its own: `pip install pyright==1.1.287` @@ -249,11 +247,11 @@ The report can be used to view where type hints and docstrings are missing in a verifytypes also reports a type completeness score which is the percentage of known types in the library. This score is used in the CI check to fail if the type completeness of the library worsens from the code in the PR vs. the code in main. 
-To run verifytypes on your library, run the tox verifytypes env at the package level: +To run verifytypes on your library, run `azpysdk verifytypes` at the package level: -`.../azure-sdk-for-python/sdk/textanalytics/azure-ai-textanalytics>tox run -e verifytypes -c ../../../eng/tox/tox.ini --root .` +`.../azure-sdk-for-python/sdk/textanalytics/azure-ai-textanalytics> azpysdk verifytypes .` -If you don't want to use `tox` you can also install and run pyright/verifytypes on its own: +If you don't want to use `azpysdk` you can also install and run pyright/verifytypes on its own: `pip install pyright==1.1.287` diff --git a/doc/dev/tests.md b/doc/dev/tests.md index 8b7470dcd183..9dcccee69ab4 100644 --- a/doc/dev/tests.md +++ b/doc/dev/tests.md @@ -11,7 +11,7 @@ testing infrastructure, and demonstrates how to write and run tests for a servic - [Dependency installation](#dependency-installation) - [Open code in IDE](#open-code-in-ide) - [Integrate with the pytest test framework](#integrate-with-the-pytest-test-framework) - - [Tox](#tox) + - [Running Checks Locally](#running-checks-locally) - [The `devtools_testutils` package](#the-devtools_testutils-package) - [Write or run tests](#write-or-run-tests) - [Set up test resources](#set-up-test-resources) @@ -119,34 +119,33 @@ If you have print statements in your tests for debugging you can add the `-s` fl (env) azure-sdk-for-python\sdk\my-service\my-package> pytest -s ``` -## Tox +## Running Checks Locally -The Python SDK uses the [tox project](https://tox.wiki/en/latest/) to automate releases, run tests, run linters, and build our documentation. The `tox.ini` file is located at `azure-sdk-for-python/eng/tox/tox.ini` for reference. You do not need to make any changes to the tox file for tox to work with your project. Tox will create a directory (`.tox`) in the head of your branch. The first time you run tox commands it may take several moments, but subsequent runs will be quicker. To install tox run the following command from within your virtual environment. -`(env) > pip install "tox<5"`. +The Python SDK uses the `azpysdk` CLI to run linters, type checkers, tests, and build documentation. The `azpysdk` entrypoint is provided by the `eng/tools/azure-sdk-tools` package and is installed as part of each package's `dev_requirements.txt`. For full setup instructions and the list of available checks, see the [Tool Usage Guide](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/tool_usage_guide.md). -To run a tox command from your directory use the following commands: +To run checks from your package directory: ```cmd -(env) azure-sdk-for-python\sdk\my-service\my-package> tox run -e sphinx -c ../../../eng/tox/tox.ini --root . -(env) azure-sdk-for-python\sdk\my-service\my-package> tox run -e pylint -c ../../../eng/tox/tox.ini --root . -(env) azure-sdk-for-python\sdk\my-service\my-package> tox run -e mypy -c ../../../eng/tox/tox.ini --root . -(env) azure-sdk-for-python\sdk\my-service\my-package> tox run -e pyright -c ../../../eng/tox/tox.ini --root . -(env) azure-sdk-for-python\sdk\my-service\my-package> tox run -e verifytypes -c ../../../eng/tox/tox.ini --root . -(env) azure-sdk-for-python\sdk\my-service\my-package> tox run -e whl -c ../../../eng/tox/tox.ini --root . -(env) azure-sdk-for-python\sdk\my-service\my-package> tox run -e sdist -c ../../../eng/tox/tox.ini --root . -(env) azure-sdk-for-python\sdk\my-service\my-package> tox run -e samples -c ../../../eng/tox/tox.ini --root . 
-(env) azure-sdk-for-python\sdk\my-service\my-package> tox run -e apistub -c ../../../eng/tox/tox.ini --root . +(env) azure-sdk-for-python\sdk\my-service\my-package> azpysdk sphinx . +(env) azure-sdk-for-python\sdk\my-service\my-package> azpysdk pylint . +(env) azure-sdk-for-python\sdk\my-service\my-package> azpysdk mypy . +(env) azure-sdk-for-python\sdk\my-service\my-package> azpysdk pyright . +(env) azure-sdk-for-python\sdk\my-service\my-package> azpysdk verifytypes . +(env) azure-sdk-for-python\sdk\my-service\my-package> azpysdk verifywhl . +(env) azure-sdk-for-python\sdk\my-service\my-package> azpysdk verifysdist . +(env) azure-sdk-for-python\sdk\my-service\my-package> azpysdk samples . +(env) azure-sdk-for-python\sdk\my-service\my-package> azpysdk apistub . ``` -A quick description of the nine commands above: +A quick description of the commands above: - sphinx: documentation generation using the inline comments written in our code -- lint: runs pylint to make sure our code adheres to the style guidance +- pylint: runs pylint to make sure our code adheres to the style guidance - mypy: runs the mypy static type checker for Python to make sure that our types are valid. - pyright: runs the pyright static type checker for Python to make sure that our types are valid. - verifytypes: runs pyright's verifytypes tool to verify the type completeness of the library. -- whl: creates a whl package for installing our package -- sdist: creates a zipped distribution of our files that the end user could install with pip +- verifywhl: verifies the wheel contents and manifest +- verifysdist: verifies the sdist contents and manifest - samples: runs all of the samples in the `samples` directory and verifies they are working correctly - apistub: runs the [apistubgenerator](https://github.com/Azure/azure-sdk-tools/tree/main/packages/python-packages/apiview-stub-generator) tool on your code diff --git a/doc/eng_sys_checks.md b/doc/eng_sys_checks.md index 645a1ea48933..2c852817e3dc 100644 --- a/doc/eng_sys_checks.md +++ b/doc/eng_sys_checks.md @@ -2,7 +2,7 @@ - [Azure SDK for Python - Engineering System](#azure-sdk-for-python---engineering-system) - [Targeting a specific package at build queue time](#targeting-a-specific-package-at-build-queue-time) - - [Skipping a tox test environment at build queue time](#skipping-a-tox-test-environment-at-build-queue-time) + - [Skipping a check at build queue time](#skipping-a-check-at-build-queue-time) - [Skipping entire sections of builds](#skipping-entire-sections-of-builds) - [The pyproject.toml](#the-pyprojecttoml) - [Coverage Enforcement](#coverage-enforcement) @@ -22,7 +22,7 @@ - [Running locally](#running-locally) - [Change log verification](#change-log-verification) - [PR Validation Checks](#pr-validation-checks) - - [PR validation tox test environments](#pr-validation-tox-test-environments) + - [PR validation checks](#pr-validation-checks-1) - [whl](#whl) - [sdist](#sdist) - [depends](#depends) @@ -38,7 +38,7 @@ There are various tests currently enabled in Azure pipeline for Python SDK and some of them are enabled only for nightly CI checks. We also run some static analysis tool to verify code completeness, security and lint check. -Check the [contributing guide](https://github.com/Azure/azure-sdk-for-python/blob/main/CONTRIBUTING.md#building-and-testing) for an intro to `tox`. For a deeper dive into the tooling that enables the CI checks below and additional detail on reproducing builds locally please refer to the azure-sdk-tools README.md. 
+Check the [contributing guide](https://github.com/Azure/azure-sdk-for-python/blob/main/CONTRIBUTING.md#building-and-testing) for an intro to `azpysdk`. For a deeper dive into the tooling that enables the CI checks below and additional detail on reproducing builds locally, please refer to the [Tool Usage Guide](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/tool_usage_guide.md).

As a contributor, you will see the build jobs run in two modes: `Nightly Scheduled` and `Pull Request`.

@@ -48,8 +48,8 @@ Example PR build:

![res/job_snippet.png](res/job_snippet.png)

-* `Analyze` tox envs run during the `Analyze job.
-* `Test _` runs PR/Nightly tox envs, depending on context.
+* `Analyze` checks run during the `Analyze` job.
+* `Test _` runs PR/Nightly checks, depending on context.

## Targeting a specific package at build queue time

@@ -60,20 +60,20 @@ In both `public` and `internal` projects, all builds allow a filter to be introd

1. For example, setting filter string `azure-mgmt-*` will filter a build to only management packages. A value of `azure-keyvault-secrets` will result in only building THAT specific package.
3. Once it's set, run the build!

-## Skipping a tox test environment at build queue time
+## Skipping a check at build queue time

-All build definitions allow choice at queue time as to which `tox` environments actually run during the test phase.
+All build definitions allow choice at queue time as to which checks actually run during the test phase.

1. Find your target service `internal` build.
2. Click `Run New`.
-3. Before clicking `run` against `main` or your target commit, click `Variables` and add a variable of name `Run.ToxCustomEnvs`. The value should be a comma separated list of tox environments that you want to run in the test phase.
+3. Before clicking `run` against `main` or your target commit, click `Variables` and add a variable named `ChecksOverride`. The value should be a comma-separated list of checks that you want to run in the test phase.
4. Once it's set, run the build!

-This is an example setting of that narrows the default set from `whl, sdist, depends, latestdependency, minimumdependency`.
+This is an example setting that narrows the default set from `whl, sdist, depends, latestdependency, mindependency`.

![res/queue_time_variable.png](res/queue_time_variable.png)

-Any combination of valid valid tox environments will work. Reference either this document or the file present at `eng/tox/tox.ini` to find what options are available.
+Any combination of valid check names will work. Reference either this document, `azpysdk -h`, or the [Tool Usage Guide](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/tool_usage_guide.md) to find what options are available.

## Skipping entire sections of builds

@@ -126,16 +126,16 @@ We default to **enabling** most of our checks like `pylint`, `mypy`, etc. Due to

Here's an example:

```toml
-# from sdk/core/azure-servicemanagement-legacy/pyproject.toml, which is a legacy package
+# from sdk/core/azure-common/pyproject.toml, which is a legacy package
# as a result, all of these checks are disabled
[tool.azure-sdk-build]
-mypy = false
type_check_samples = false
verifytypes = false
pyright = false
+mypy = false
pylint = false
+regression = false
black = false
-sphinx = false
```

If a package does not yet have a `pyproject.toml`, creating one with just the section `[tool.azure-sdk-build]` will do no harm to the release of the package in question.
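For instance, a minimal sketch of such a file (nothing beyond the table header is required; keys like those in the example above can be added later to disable individual checks):

```toml
# pyproject.toml at the package root
[tool.azure-sdk-build]
# e.g. pylint = false
```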
@@ -154,15 +154,15 @@ After it is implemented, the `relative_cov` key will enable the prevention of **

## Environment variables important to CI

-There are a few differences from a standard local invocation of `tox `. Primarily, these differences adjust the checks to be friendly to parallel invocation. These adjustments are necessary to prevent random CI crashes.
+There are a few differences from a standard local invocation of `azpysdk <check>`. Primarily, these differences adjust the checks to be friendly to parallel invocation. These adjustments are necessary to prevent random CI crashes.

| Environment Variable | Effect on Build |
|---|---|
-| `TF_BUILD` | EngSys uses the presence of any value in this variable as the bit indicating "in CI" or not. The primary effect of this is that all relative dev dependencies will be prebuilt prior to running the tox environments. |
-| `PREBUILT_WHEEL_DIR` | Setting this env variables means that instead of generating a fresh wheel or sdist to test, `tox` will look in this directory for the targeted package. |
+| `TF_BUILD` | EngSys uses the presence of any value in this variable as the bit indicating "in CI" or not. The primary effect of this is that all relative dev dependencies will be prebuilt prior to running the checks. |
+| `PREBUILT_WHEEL_DIR` | Setting this env variable means that instead of generating a fresh wheel or sdist to test, the check will look in this directory for the targeted package. |
| `PIP_INDEX_URL` | Standard `pip` environment variable. During nightly `alpha` builds, this environment variable is set to a public dev feed. |

-The various tooling abstracted by the environments within `eng/tox/tox.ini` take the above variables into account automatically.
+The various tooling abstracted by `azpysdk` takes the above variables into account automatically.

### Atomic Overrides

@@ -177,7 +177,7 @@ To temporarily **override** this restriction, a dev need only set the queue time

This same methodology also applies to _individual checks_ that run during various phases of CI. Developers can use a queue time variable of format `PACKAGE_NAME_CHECK=true/false`.

-The name that you should use is visible based on what the `tox environment` that the check refers to! Here are a few examples of enabling/disabling checks:
+The variable name is derived from the package name and the check name. Here are a few examples of enabling/disabling checks:

- `AZURE_SERVICEBUS_PYRIGHT=true` <-- enable a check that normally is disabled in `pyproject.toml`
- `AZURE_CORE_PYLINT=false` <-- disable a check that normally runs

@@ -188,7 +188,7 @@ You can enable test logging in a pipeline by setting the queue time variable `PY

`PYTEST_LOG_LEVEL=INFO`

-This also works locally with tox by setting the `PYTEST_LOG_LEVEL` environment variable.
+This also works locally by setting the `PYTEST_LOG_LEVEL` environment variable.

Note that if you want DEBUG level logging with sensitive information unredacted in the test logs, then you still must pass `logging_enable=True` into the client(s) being used in tests.

@@ -201,14 +201,14 @@ Analyze job in both nightly CI and pull request validation pipeline runs a set o

[`MyPy`](https://pypi.org/project/mypy/) is a static analysis tool that runs type checking of python package. Following are the steps to run `MyPy` locally for a specific package:

1. Go to root of the package
-2. Execute following command: `tox run -e mypy -c ../../../eng/tox/tox.ini --root .`
+2. 
Execute following command: `azpysdk mypy .` ### Pyright [`Pyright`](https://github.com/microsoft/pyright/) is a static analysis tool that runs type checking of python package. Following are the steps to run `pyright` locally for a specific package: 1. Go to root of the package -2. Execute following command: `tox run -e pyright -c ../../../eng/tox/tox.ini --root .` +2. Execute following command: `azpysdk pyright .` ### Verifytypes @@ -216,7 +216,7 @@ Analyze job in both nightly CI and pull request validation pipeline runs a set o [`Verifytypes`](https://github.com/microsoft/pyright/blob/main/docs/typed-libraries.md#verifying-type-completeness) is a feature of pyright that checks the type completeness of a python package. Following are the steps to run `verifytypes` locally for a specific package: 1. Go to root of the package -2. Execute following command: `tox run -e verifytypes -c ../../../eng/tox/tox.ini --root .` +2. Execute following command: `azpysdk verifytypes .` ### Pylint @@ -224,9 +224,9 @@ Analyze job in both nightly CI and pull request validation pipeline runs a set o [`Pylint`](https://pypi.org/project/pylint/) is a static analysis tool to run lint checking, it is automatically run on all PRs. Following are the steps to run `pylint` locally for a specific package. 1. Go to root of the package. -2. Execute following command: `tox run -e pylint -c ../../../eng/tox/tox.ini --root .` +2. Execute following command: `azpysdk pylint .` -Note that the `pylint` environment is configured to run against the **earliest supported python version**. This means that users **must** have `python 3.7` installed on their machine to run this check locally. +Note that the `pylint` check is configured to run against the **earliest supported python version**. This means that users **must** have `python 3.9` installed on their machine to run this check locally. ### Sphinx and docstring checker @@ -234,14 +234,14 @@ Note that the `pylint` environment is configured to run against the **earliest s fail if docstring are invalid, helping to ensure the resulting documentation will be of high quality. Following are the steps to run `sphinx` locally for a specific package with strict docstring checking: 1. Go to root of the package. -2. Execute following command: `tox run -e sphinx -c ../../../eng/tox/tox.ini --root .` +2. Execute following command: `azpysdk sphinx .` ### Bandit `Bandit` is static security analysis tool. This check is triggered for all Azure SDK package as part of analyze job. Following are the steps to `Bandit` tool locally for a specific package. 1. Got to package root directory. -2. Execute command: `tox run -e bandit -c ../../../eng/tox/tox.ini --root .` +2. Execute command: `azpysdk bandit .` ### ApiStubGen @@ -267,9 +267,7 @@ to opt into the black invocation. #### Running locally 1. Go to package root directory. -2. Execute command: `tox run -e black -c ../../../eng/tox/tox.ini --root . -- .` - -**Tip**: You can provide any arguments that `black` accepts after the `--`. Example: `tox run -e black -c ../../../eng/tox/tox.ini --root . -- path/to/file.py` +2. 
Execute command: `azpysdk black .`

### Change log verification

@@ -281,39 +279,39 @@ Each pull request runs various tests using `pytest` in addition to all the tests

|`Python Version`|`Platform` |
|--|--|
-|2.7|Linux|
-|3.5|Windows|
-|3.8|Linux|
+|3.9|Linux|
+|3.9|Windows|
+|3.13|Linux|

-### PR validation tox test environments
+### PR validation checks

-Tests are executed using tox environment and following are the tox test names that are part of pull request validation
+Tests are executed as part of pull request validation. Following are the checks that run during validation:

#### whl

-This test installs wheel of the package being tested and runs all tests cases in the package using `pytest`. Following is the command to run this test environment locally.
+This test installs the wheel of the package being tested and runs all test cases in the package using `pytest`. Following is the command to run this check locally.

1. Go to package root folder on a command line
2. Run following command
-   `tox run -e whl -c ../../../eng/tox/tox.ini --root .`
+   `azpysdk whl .`

#### sdist

-This test installs sdist of the package being tested and runs all tests cases in the package using `pytest`. Following is the command to run this test environment locally.
+This test installs the sdist of the package being tested and runs all test cases in the package using `pytest`. Following is the command to run this check locally.

1. Go to package root folder on a command line
2. Run following command
-   `tox run -e sdist -c ../../../eng/tox/tox.ini --root .`
+   `azpysdk sdist .`

#### depends

The `depends` check ensures all modules in a target package can be successfully imported. Actually installing and importing will verify that all package requirements are properly set in setup.py and that the `__all__` set for the package is properly defined. This test installs the package and its required packages, then executes `from <package> import *`. For example from `azure-core`, the following would be invoked: `from azure.core import *`.

-Following is the command to run this test environment locally.
+Following is the command to run this check locally.

1. Go to package root folder on a command line
2. Run following command
-   `tox run -e sdist -c ../../../eng/tox/tox.ini --root .`
+   `azpysdk import_all .`

## Nightly CI Checks

@@ -358,10 +356,10 @@ Note: Any dependency mentioned only in dev_requirements are not considered to id

4. Install current package that is being tested
5. Run pytest of all test cases in current package

-Tox name of this test is `latestdependency` and steps to manually run this test locally is as follows.
+Steps to manually run this test locally:

1. Go to package root. For e.g azure-storage-blob or azure-identity
-2. Run command `tox run -e latestdependency -c ../../../eng/tox/tox.ini --root .`
+2. Run command `azpysdk latestdependency .`

#### Minimum Dependency Test

@@ -374,11 +372,11 @@ Note: Any dependency mentioned only in dev_requirements are not considered to id

4. Install current package that is being tested
5. Run pytest of all test cases in current package

-Tox name of this test is `mindependency` and steps to manually run this test locally is as follows.
+Steps to manually run this test locally:

1. Go to package root. For e.g azure-storage-blob or azure-identity
2. 
Run following command -`tox run -e mindependency -c ../../../eng/tox/tox.ini --root .` +`azpysdk mindependency .` #### Regression Test diff --git a/doc/repo_health_status.md b/doc/repo_health_status.md index ff573ec6e171..4bdb81179d4d 100644 --- a/doc/repo_health_status.md +++ b/doc/repo_health_status.md @@ -76,7 +76,7 @@ This is the overall status of your library and indicates whether you can release ### Tests - CI (required check): -[Tests - CI](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/eng_sys_checks.md#pr-validation-tox-test-environments) checks the status of the most recent (python - {service-directory})scheduled build of your library's recorded tests. This is the same CI that will run when triggering a release build. To learn more about tests in our repo, see our [Testing Guide](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md). Possible statuses include: +[Tests - CI](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/eng_sys_checks.md#pr-validation-checks) checks the status of the most recent (python - {service-directory})scheduled build of your library's recorded tests. This is the same CI that will run when triggering a release build. To learn more about tests in our repo, see our [Testing Guide](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md). Possible statuses include: - $${\color{red}FAIL}$$ - The library is failing CI recorded tests. Check the build result and address the errors present. This will block the release of your library and should be fixed immediately. - $${\color{yellow}DISABLED}$$ - The library has its CI disabled due to non-compliance with required checks. Please take action to re-enable and fix all checks highlighted in yellow. Once all checks are fixed, you can remove the `ci_enabled=false` from your library's pyproject.toml file. diff --git a/doc/tool_usage_guide.md b/doc/tool_usage_guide.md index 4d6ecf55f865..ab034e6d4db2 100644 --- a/doc/tool_usage_guide.md +++ b/doc/tool_usage_guide.md @@ -11,7 +11,7 @@ A `tool` in this context is merely a single entrypoint provided by the `azpysdk` ## Available Tools -This repo is currently migrating all checks from a slower `tox`-based framework, to a lightweight implementation that uses `asyncio` to simultaneously run checks. This tools list is the current set that has been migrated from `tox` to the `azpysdk` entrypoint. +The following checks are available via the `azpysdk` entrypoint. 
|tool|description|invocation| |---|---|---| diff --git a/eng/ci_tools.txt b/eng/ci_tools.txt index 460c5e09f757..85779e81f7e9 100644 --- a/eng/ci_tools.txt +++ b/eng/ci_tools.txt @@ -30,4 +30,4 @@ urllib3==2.2.3 six==1.17.0 # local dev packages -./eng/tools/azure-sdk-tools[build] +./eng/tools/azure-sdk-tools diff --git a/eng/dependency_tools.txt b/eng/dependency_tools.txt index 737848994a6b..b879db0430f3 100644 --- a/eng/dependency_tools.txt +++ b/eng/dependency_tools.txt @@ -1,2 +1,2 @@ -../../../eng/tools/azure-sdk-tools[build] +../../../eng/tools/azure-sdk-tools aiohttp>=3.0; python_version >= '3.5' \ No newline at end of file diff --git a/eng/pipelines/templates/jobs/build-conda-dependencies.yml b/eng/pipelines/templates/jobs/build-conda-dependencies.yml index c1cc771994c7..64d13327bfe2 100644 --- a/eng/pipelines/templates/jobs/build-conda-dependencies.yml +++ b/eng/pipelines/templates/jobs/build-conda-dependencies.yml @@ -28,7 +28,7 @@ jobs: - pwsh: | $ErrorActionPreference = 'Stop' $PSNativeCommandUseErrorActionPreference = $true - pip install "eng/tools/azure-sdk-tools[build,conda]" + pip install "eng/tools/azure-sdk-tools[conda]" pip install disutils Invoke-WebRequest "$(VS_INSTALLER_URL)" -OutFile "$(VS_INSTALLER_PATH)" # In order of component appearance in the install command below, these are the names of the components diff --git a/eng/pipelines/templates/jobs/ci.yml b/eng/pipelines/templates/jobs/ci.yml index 8ad87408ae29..34c714d3d358 100644 --- a/eng/pipelines/templates/jobs/ci.yml +++ b/eng/pipelines/templates/jobs/ci.yml @@ -325,7 +325,7 @@ jobs: - pwsh: | $ErrorActionPreference = 'Stop' $PSNativeCommandUseErrorActionPreference = $true - $(PIP_EXE) install "./eng/tools/azure-sdk-tools[build]" + $(PIP_EXE) install "./eng/tools/azure-sdk-tools" displayName: 'Prep Environment' - task: PythonScript@0 displayName: 'Ensure service coverage' diff --git a/eng/pipelines/templates/stages/archetype-python-release.yml b/eng/pipelines/templates/stages/archetype-python-release.yml index 791452af113f..af224ffebe08 100644 --- a/eng/pipelines/templates/stages/archetype-python-release.yml +++ b/eng/pipelines/templates/stages/archetype-python-release.yml @@ -312,7 +312,7 @@ stages: - checkout: self - task: UsePythonVersion@0 - script: | - python -m pip install "./eng/tools/azure-sdk-tools[build]" + python -m pip install "./eng/tools/azure-sdk-tools" displayName: Install versioning tool dependencies - pwsh: | diff --git a/eng/pipelines/templates/steps/analyze.yml b/eng/pipelines/templates/steps/analyze.yml index 491b1a9e5148..fef2d8d1f0cc 100644 --- a/eng/pipelines/templates/steps/analyze.yml +++ b/eng/pipelines/templates/steps/analyze.yml @@ -26,10 +26,10 @@ steps: DevFeedName: ${{ parameters.DevFeedName }} - task: PythonScript@0 - displayName: 'Set Tox Environment Skips' + displayName: 'Set Checks Environment Skips' condition: succeededOrFailed() inputs: - scriptPath: 'scripts/devops_tasks/set_tox_environment.py' + scriptPath: 'eng/scripts/set_checks.py' arguments: '"$(TargetingString)" --team-project="$(System.TeamProject)" --service="${{ parameters.ServiceDirectory }}"' - ${{ if eq(variables['Build.Reason'], 'PullRequest') }}: @@ -48,7 +48,7 @@ steps: Condition: succeededOrFailed() - script: | - $(PIP_EXE) install "./eng/tools/azure-sdk-tools[build]" + $(PIP_EXE) install "./eng/tools/azure-sdk-tools" sdk_find_invalid_versions --always-succeed --service=${{parameters.ServiceDirectory}} displayName: Find Invalid Versions condition: succeededOrFailed() diff --git 
a/eng/pipelines/templates/steps/build-conda-artifacts.yml b/eng/pipelines/templates/steps/build-conda-artifacts.yml index 734f3f67f62a..57a0bb875375 100644 --- a/eng/pipelines/templates/steps/build-conda-artifacts.yml +++ b/eng/pipelines/templates/steps/build-conda-artifacts.yml @@ -18,7 +18,7 @@ steps: - pwsh: | $ErrorActionPreference = 'Stop' $PSNativeCommandUseErrorActionPreference = $true - python -m pip install "eng/tools/azure-sdk-tools[build,conda]" + python -m pip install "eng/tools/azure-sdk-tools[conda]" python -m pip install disutils python -m pip install typing-extensions==4.12.2 displayName: Install build script requirements diff --git a/eng/pipelines/templates/steps/build-test.yml b/eng/pipelines/templates/steps/build-test.yml index ce2181a7b983..4bb6c2377d20 100644 --- a/eng/pipelines/templates/steps/build-test.yml +++ b/eng/pipelines/templates/steps/build-test.yml @@ -50,24 +50,17 @@ steps: displayName: 'Prep Environment' - task: PythonScript@0 - displayName: 'Set Tox Environment' + displayName: 'Set Checks for Run' inputs: - scriptPath: 'scripts/devops_tasks/set_tox_environment.py' + scriptPath: 'eng/scripts/set_checks.py' arguments: >- --unsupported="$(UnsupportedToxEnvironments)" - --override="$(Run.ToxCustomEnvs)" + --override="$(ChecksOverride)" --team-project="$(System.TeamProject)" - - template: /eng/common/testproxy/test-proxy-tool.yml - parameters: - runProxy: false - - ${{ parameters.BeforeTestSteps }} - - template: /eng/pipelines/templates/steps/seed-virtualenv-wheels.yml - - ${{ if eq('true', parameters.UseFederatedAuth) }}: - - task: AzurePowerShell@5 displayName: Run Tests (AzurePowerShell@5) env: @@ -79,6 +72,7 @@ steps: pwsh: true ScriptType: InlineScript Inline: >- + $env:TOX_PIP_IMPL="uv" $account = (Get-AzContext).Account; $env:AZURESUBSCRIPTION_CLIENT_ID = $account.Id; $env:AZURESUBSCRIPTION_TENANT_ID = $account.Tenants; @@ -92,20 +86,20 @@ steps: $markArg = "${{ parameters.TestMarkArgument }}" } - python scripts/devops_tasks/dispatch_tox.py + python eng/scripts/dispatch_checks.py "$(TargetingString)" ${{ parameters.AdditionalTestArgs }} ${{ parameters.CoverageArg }} --mark_arg="$markArg" --service="${{ parameters.ServiceDirectory }}" - --toxenv="${{ parameters.ToxTestEnv }}" + --checks="${{ parameters.ToxTestEnv }}" --injected-packages="${{ parameters.InjectedPackages }}" - --tenvparallel="${{ parameters.ToxEnvParallel }}"; Write-Host "Last exit code: $LASTEXITCODE"; exit $LASTEXITCODE; - ${{ else }}: - pwsh: | + $env:TOX_PIP_IMPL="uv" Write-Host (Get-Command python).Source if ($env:TESTMARKARGUMENT) { @@ -115,14 +109,13 @@ steps: $markArg = "${{ parameters.TestMarkArgument }}" } - python scripts/devops_tasks/dispatch_tox.py "$(TargetingString)" ` + python eng/scripts/dispatch_checks.py "$(TargetingString)" ` ${{ parameters.AdditionalTestArgs }} ` ${{ parameters.CoverageArg }} ` --mark_arg="$markArg" ` --service="${{ parameters.ServiceDirectory }}" ` - --toxenv="${{ parameters.ToxTestEnv }}" ` - --injected-packages="${{ parameters.InjectedPackages }}" ` - --tenvparallel="${{ parameters.ToxEnvParallel }}"; + --checks="${{ parameters.ToxTestEnv }}" ` + --injected-packages="${{ parameters.InjectedPackages }}" exit $LASTEXITCODE; env: ${{ parameters.EnvVars }} displayName: Run Tests @@ -168,24 +161,18 @@ steps: Write-Host (Get-Command python).Source - python scripts/devops_tasks/dispatch_tox.py "$(TargetingString)" ` + python eng/scripts/dispatch_checks.py "$(TargetingString)" ` --service="${{ parameters.ServiceDirectory }}" ` - --toxenv="samples" + 
--checks="samples" Write-Host "Last exit code: $LASTEXITCODE"; exit $LASTEXITCODE; - ${{ else }}: - pwsh: | - if ($IsWindows) { - . $(VENV_LOCATION)/Scripts/Activate.ps1 - } - else { - . $(VENV_LOCATION)/bin/activate.ps1 - } Write-Host (Get-Command python).Source - python scripts/devops_tasks/dispatch_tox.py "$(TargetingString)" ` + python eng/scripts/dispatch_checks.py "$(TargetingString)" ` --service="${{ parameters.ServiceDirectory }}" ` - --toxenv="samples" + --checks="samples" exit $LASTEXITCODE; env: ${{ parameters.EnvVars }} displayName: 'Test Samples' diff --git a/eng/pipelines/templates/steps/run_bandit.yml b/eng/pipelines/templates/steps/run_bandit.yml index 24a605a49846..2086c39fbeea 100644 --- a/eng/pipelines/templates/steps/run_bandit.yml +++ b/eng/pipelines/templates/steps/run_bandit.yml @@ -14,7 +14,6 @@ steps: scriptPath: 'eng/scripts/dispatch_checks.py' arguments: >- "$(TargetingString)" - --mark_arg="${{ parameters.TestMarkArgument }}" --service="${{ parameters.ServiceDirectory }}" --checks="bandit" --disable-compatibility-filter diff --git a/eng/pipelines/templates/steps/run_breaking_changes.yml b/eng/pipelines/templates/steps/run_breaking_changes.yml index 7cb3e496be3d..7154f6da5e56 100644 --- a/eng/pipelines/templates/steps/run_breaking_changes.yml +++ b/eng/pipelines/templates/steps/run_breaking_changes.yml @@ -11,7 +11,6 @@ steps: scriptPath: 'eng/scripts/dispatch_checks.py' arguments: >- "$(TargetingString)" - --mark_arg="${{ parameters.TestMarkArgument }}" --service="${{ parameters.ServiceDirectory }}" --checks="breaking" --disable-compatibility-filter diff --git a/eng/pipelines/templates/steps/set-dev-build.yml b/eng/pipelines/templates/steps/set-dev-build.yml index 4030dc3e0195..4eaddf310ceb 100644 --- a/eng/pipelines/templates/steps/set-dev-build.yml +++ b/eng/pipelines/templates/steps/set-dev-build.yml @@ -9,7 +9,7 @@ steps: - template: /eng/common/pipelines/templates/steps/daily-dev-build-variable.yml - pwsh: | - $(PIP_EXE) install "eng/tools/azure-sdk-tools[build]" + $(PIP_EXE) install "eng/tools/azure-sdk-tools" sdk_set_dev_version "*" --build-id="$(Build.BuildNumber)" displayName: "Update package versions for dev build" condition: and(succeededOrFailed(), eq(variables['SetDevVersion'],'true'), ${{ parameters.Condition }}) diff --git a/eng/pipelines/trigger-ml-sample-pipeline.yml b/eng/pipelines/trigger-ml-sample-pipeline.yml index 6cc793c5e190..8187cda0644c 100644 --- a/eng/pipelines/trigger-ml-sample-pipeline.yml +++ b/eng/pipelines/trigger-ml-sample-pipeline.yml @@ -40,7 +40,7 @@ jobs: versionSpec: $(PythonVersion) - script: | - python -m pip install eng/tools/azure-sdk-tools[build] + python -m pip install eng/tools/azure-sdk-tools python -m pip install azure-identity python -m pip install azure-storage-blob displayName: 'Prep Environment' diff --git a/eng/regression_tools.txt b/eng/regression_tools.txt index ca6d669a1db5..b37ff2dab6a9 100644 --- a/eng/regression_tools.txt +++ b/eng/regression_tools.txt @@ -23,4 +23,4 @@ pytest-cov==4.0.0 coverage==7.2.5 # local dev packages -./eng/tools/azure-sdk-tools[build] +./eng/tools/azure-sdk-tools diff --git a/eng/scripts/Language-Settings.ps1 b/eng/scripts/Language-Settings.ps1 index b1483d52cbf2..0d42ca41f866 100644 --- a/eng/scripts/Language-Settings.ps1 +++ b/eng/scripts/Language-Settings.ps1 @@ -162,7 +162,7 @@ function Get-AllPackageInfoFromRepo ($serviceDirectory) $allPkgPropLines = $null try { - $pathToBuild = (Join-Path $RepoRoot "eng" "tools" "azure-sdk-tools[build]") + $pathToBuild = (Join-Path 
$RepoRoot "eng" "tools" "azure-sdk-tools") # Use ‘uv pip install’ if uv is on PATH, otherwise fall back to python -m pip if (Get-Command uv -ErrorAction SilentlyContinue) { Write-Host "Using uv pip install" @@ -420,9 +420,9 @@ function SetPackageVersion ($PackageName, $Version, $ServiceDirectory, $ReleaseD $ReleaseDate = Get-Date -Format "yyyy-MM-dd" } if (Get-Command uv -ErrorAction SilentlyContinue) { - uv pip install "$RepoRoot/eng/tools/azure-sdk-tools[build]" + uv pip install "$RepoRoot/eng/tools/azure-sdk-tools" } else { - python -m pip install "$RepoRoot/eng/tools/azure-sdk-tools[build]" -q -I + python -m pip install "$RepoRoot/eng/tools/azure-sdk-tools" -q -I } sdk_set_version --package-name $PackageName --new-version $Version ` --service $ServiceDirectory --release-date $ReleaseDate --replace-latest-entry-title $ReplaceLatestEntryTitle diff --git a/eng/scripts/dispatch_checks.py b/eng/scripts/dispatch_checks.py index 498e7194cc9c..1b0d036dab05 100644 --- a/eng/scripts/dispatch_checks.py +++ b/eng/scripts/dispatch_checks.py @@ -5,16 +5,21 @@ import time import signal import shutil +import subprocess +import re from dataclasses import dataclass -from typing import List +from typing import IO, List, Optional from ci_tools.functions import discover_targeted_packages from ci_tools.variables import in_ci from ci_tools.scenario.generation import build_whl_for_req, replace_dev_reqs from ci_tools.logging import configure_logging, logger from ci_tools.environment_exclusions import is_check_enabled, CHECK_DEFAULTS +from devtools_testutils.proxy_startup import prepare_local_tool +from packaging.requirements import Requirement root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) +ISOLATE_DIRS_TO_CLEAN: List[str] = [] @dataclass @@ -27,6 +32,83 @@ class CheckResult: stderr: str +@dataclass +class ProxyProcess: + port: int + process: subprocess.Popen + log_handle: Optional[IO[str]] + + +PROXY_STATUS_SUFFIX = "/Info/Available" +PROXY_STARTUP_TIMEOUT = 60 +BASE_PROXY_PORT = 5050 +# Checks implemented via InstallAndTest all require shared recording restore behavior. 
+INSTALL_AND_TEST_CHECKS = {"whl", "whl_no_aio", "sdist", "devtest", "optional", "latestdependency", "mindependency"} +SHARED_RESTORE_ENV = "__shared_restore__" + + +def _cleanup_isolate_dirs() -> None: + if not ISOLATE_DIRS_TO_CLEAN: + return + + for path in ISOLATE_DIRS_TO_CLEAN: + if not path: + continue + if os.path.exists(path): + try: + shutil.rmtree(path) + except Exception: + logger.warning(f"Failed to remove isolate dir {path}") + ISOLATE_DIRS_TO_CLEAN.clear() + + +def _normalize_newlines(text: str) -> str: + return text.replace("\r\n", "\n").replace("\r", "\n") + + +def _checks_require_recording_restore(checks: List[str]) -> bool: + return any(check in INSTALL_AND_TEST_CHECKS for check in checks) + + +def _compare_req_to_injected_reqs(parsed_req, injected_packages: List[str]) -> bool: + if parsed_req is None: + return False + return any(parsed_req.name in req for req in injected_packages) + + +def _inject_custom_reqs(req_file: str, injected_packages: str, package_dir: str) -> None: + req_lines = [] + injected_list = [p for p in re.split(r"[\s,]", injected_packages) if p] + + if not injected_list: + return + + logger.info(f"Adding custom packages to requirements for {package_dir}") + with open(req_file, "r") as handle: + for line in handle: + logger.info(f"Attempting to parse {line}") + try: + parsed_req = Requirement(line.strip()) + except Exception as exc: + logger.error(exc) + parsed_req = None + req_lines.append((line, parsed_req)) + + if req_lines: + all_adjustments = injected_list + [ + line_tuple[0].strip() + for line_tuple in req_lines + if line_tuple[0].strip() and not _compare_req_to_injected_reqs(line_tuple[1], injected_list) + ] + else: + all_adjustments = injected_list + + logger.info(f"Generated Custom Reqs: {req_lines}") + + with open(req_file, "w") as handle: + handle.write("\n".join(all_adjustments)) + + async def run_check( semaphore: asyncio.Semaphore, package: str, @@ -34,6 +116,8 @@ async def run_check( base_args: List[str], idx: int, total: int, + proxy_port: int, + mark_arg: Optional[str], ) -> CheckResult: """Run a single check (subprocess) within a concurrency semaphore, capturing output and timing. @@ -49,19 +133,30 @@ async def run_check( :type idx: int :param total: Total number of tasks (used for logging progress). :type total: int + :param proxy_port: Dedicated proxy port assigned to this check instance. + :type proxy_port: int :returns: A :class:`CheckResult` describing exit code, duration and captured output. 
:rtype: CheckResult """ async with semaphore: start = time.time() cmd = base_args + [check, "--isolate", package] + if mark_arg: + cmd += ["--mark_arg", mark_arg] logger.info(f"[START {idx}/{total}] {check} :: {package}\nCMD: {' '.join(cmd)}") + env = os.environ.copy() + env["PROXY_URL"] = f"http://localhost:{proxy_port}" + + if in_ci(): + env["PROXY_ASSETS_FOLDER"] = os.path.join(root_dir, ".assets_distributed", str(proxy_port)) try: + logger.info(" ".join(cmd)) proc = await asyncio.create_subprocess_exec( *cmd, cwd=package, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + env=env, ) except Exception as ex: # subprocess failed to launch logger.error(f"Failed to start check {check} for {package}: {ex}") @@ -77,24 +172,28 @@ async def run_check( # Print captured output after completion to avoid interleaving header = f"===== OUTPUT: {check} :: {package} (exit {exit_code}) =====" trailer = "=" * len(header) + if in_ci(): + print(f"##[group]{package} :: {check} :: {exit_code}") + if stdout: print(header) - print(stdout.rstrip()) + print(_normalize_newlines(stdout).rstrip()) print(trailer) if stderr: print(header.replace("OUTPUT", "STDERR")) - print(stderr.rstrip()) + print(_normalize_newlines(stderr).rstrip()) print(trailer) + if in_ci(): + print("##[endgroup]") + # if we have any output collections to complete, do so now here # finally, we need to clean up any temp dirs created by --isolate if in_ci(): - isolate_dir = os.path.join(package, f".venv_{check}") - try: - shutil.rmtree(isolate_dir) - except: - logger.warning(f"Failed to remove isolate dir {isolate_dir} for {package} / {check}") + package_name = os.path.basename(os.path.normpath(package)) + isolate_dir = os.path.join(root_dir, ".venv", package_name, f".venv_{check}") + ISOLATE_DIRS_TO_CLEAN.append(isolate_dir) return CheckResult(package, check, exit_code, duration, stdout, stderr) @@ -125,7 +224,7 @@ def summarize(results: List[CheckResult]) -> int: return worst -async def run_all_checks(packages, checks, max_parallel, wheel_dir): +async def run_all_checks(packages, checks, max_parallel, wheel_dir, mark_arg: Optional[str], injected_packages: str): """Run all checks for all packages concurrently and return the worst exit code. :param packages: Iterable of package paths to run checks against. 
@@ -144,7 +243,7 @@ async def run_all_checks(packages, checks, max_parallel, wheel_dir): tasks = [] semaphore = asyncio.Semaphore(max_parallel) combos = [(p, c) for p in packages for c in checks] - total = len(combos) + scheduled: List[tuple] = [] test_tools_path = os.path.join(root_dir, "eng", "test_tools.txt") dependency_tools_path = os.path.join(root_dir, "eng", "dependency_tools.txt") @@ -156,22 +255,35 @@ async def run_all_checks(packages, checks, max_parallel, wheel_dir): logger.info("Replacing relative requirements in eng/dependency_tools.txt with prebuilt wheels.") replace_dev_reqs(dependency_tools_path, root_dir, wheel_dir) - for pkg in packages: - destination_dev_req = os.path.join(pkg, "dev_requirements.txt") + for pkg in packages: + destination_dev_req = os.path.join(pkg, "dev_requirements.txt") - logger.info(f"Replacing dev requirements w/ path {destination_dev_req}") - if not os.path.exists(destination_dev_req): - logger.info("No dev_requirements present.") - with open(destination_dev_req, "w+") as file: - file.write("\n") + logger.info(f"Replacing dev requirements w/ path {destination_dev_req}") + if not os.path.exists(destination_dev_req): + logger.info("No dev_requirements present.") + with open(destination_dev_req, "w+") as file: + file.write("\n") + if in_ci(): replace_dev_reqs(destination_dev_req, pkg, wheel_dir) - for idx, (package, check) in enumerate(combos, start=1): + _inject_custom_reqs(destination_dev_req, injected_packages, pkg) + + next_proxy_port = BASE_PROXY_PORT + for package, check in combos: if not is_check_enabled(package, check, CHECK_DEFAULTS.get(check, True)): - logger.warning(f"Skipping disabled check {check} ({idx}/{total}) for package {package}") + logger.warning(f"Skipping disabled check {check} for package {package}") continue - tasks.append(asyncio.create_task(run_check(semaphore, package, check, base_args, idx, total))) + logger.info(f"Assigning proxy port {next_proxy_port} to check {check} for package {package}") + scheduled.append((package, check, next_proxy_port)) + next_proxy_port += 1 + + total = len(scheduled) + + for idx, (package, check, proxy_port) in enumerate(scheduled, start=1): + tasks.append( + asyncio.create_task(run_check(semaphore, package, check, base_args, idx, total or 1, proxy_port, mark_arg)) + ) # Handle Ctrl+C gracefully pending = set(tasks) @@ -184,7 +296,7 @@ async def run_all_checks(packages, checks, max_parallel, wheel_dir): raise # Normalize exceptions norm_results: List[CheckResult] = [] - for res, (package, check) in zip(results, combos): + for res, (package, check, _) in zip(results, scheduled): if isinstance(res, CheckResult): norm_results.append(res) elif isinstance(res, Exception): @@ -381,14 +493,36 @@ def handler(signum, frame): logger.error("No valid checks provided via -c/--checks.") sys.exit(2) + # ensure that the proxy exe is available before we start running checks that may need to populate it + if in_ci() and _checks_require_recording_restore(checks): + try: + proxy_executable = prepare_local_tool(root_dir) + except Exception as exc: + logger.error(f"Unable to prepare test proxy executable for recording restore: {exc}") + sys.exit(1) + logger.info( f"Running {len(checks)} check(s) across {len(targeted_packages)} packages (max_parallel={args.max_parallel})." 
    )

    configure_interrupt_handling()
+    proxy_processes: List[ProxyProcess] = []
     try:
-        exit_code = asyncio.run(run_all_checks(targeted_packages, checks, args.max_parallel, temp_wheel_dir))
+        if in_ci():
+            logger.info(f"Ensuring {len(checks)} test proxies are running for requested checks...")
+        exit_code = asyncio.run(
+            run_all_checks(
+                targeted_packages,
+                checks,
+                args.max_parallel,
+                temp_wheel_dir,
+                args.mark_arg,
+                args.injected_packages,
+            )
+        )
     except KeyboardInterrupt:
         logger.error("Aborted by user.")
         exit_code = 130
+    finally:
+        _cleanup_isolate_dirs()
 
     sys.exit(exit_code)
diff --git a/scripts/devops_tasks/set_tox_environment.py b/eng/scripts/set_checks.py
similarity index 89%
rename from scripts/devops_tasks/set_tox_environment.py
rename to eng/scripts/set_checks.py
index fa689640ee55..865edd1b4777 100644
--- a/scripts/devops_tasks/set_tox_environment.py
+++ b/eng/scripts/set_checks.py
@@ -15,7 +15,7 @@
 FULL_BUILD_SET = [
     "whl",
     "sdist",
-    "depends",
+    "import_all",
     "latestdependency",
     "mindependency",
     "whl_no_aio",
@@ -29,6 +29,8 @@ def resolve_devops_variable(var_value: str) -> List[str]:
             return []
         else:
             return [tox_env.strip() for tox_env in var_value.split(",") if tox_env.strip()]
+    else:
+        return []
 
 
 def set_devops_value(resolved_set: List[str]) -> None:
@@ -42,8 +44,8 @@ def remove_unsupported_values(selected_set: List[str], unsupported_values: List[
         selected_set.remove(unsupported_tox_env)
 
 
-def process_ci_skips(glob_string: str, service: str ) -> None:
-    checks_with_global_skip = ["pylint", "verifywhl", "verifysdist" "bandit", "mypy", "pyright", "verifytypes"]
+def process_ci_skips(glob_string: str, service: str) -> None:
+    checks_with_global_skip = ["pylint", "verifywhl", "verifysdist", "bandit", "mypy", "pyright", "verifytypes"]
 
     root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", ".."))
     if service and service != "auto":
@@ -64,15 +66,14 @@ def process_ci_skips(glob_string: str, service: str ) -> None:
         all_packages = set([os.path.basename(pkg) for pkg in targeted_packages])
         set_ci_variable(f"Skip.{check[0].upper()}{check[1:]}", "true")
         output_ci_warning(
-            f"All targeted packages {all_packages} skip the {check} check. Omitting step from build.",
-            "set_tox_environment.py",
+            f"All targeted packages {all_packages} skip the {check} check. Omitting step from build.",
+            "set_checks.py",
         )
 
-
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description="This script is used to resolve a set of arguments (that correspond to devops runtime variables) and determine which tox environments should be run for the current job. "
+        description="This script is used to resolve a set of arguments (that correspond to devops runtime variables) and determine which checks should be run for the current job. "
        + "When running against a specific service directory, attempts to find entire analysis steps that can be skipped. EG if pylint is disabled for every package in a given service directory, that "
        + "step should never actually run."
     )
@@ -92,14 +93,14 @@ def process_ci_skips(glob_string: str, service: str ) -> None:
         "-o",
         "--override",
         dest="override_set",
-        help="If you have a set of tox environments that should override the defaults, provide it here. In CI this is runtime variable $(Run.ToxCustomEnvs). EG: \"whl,sdist\".",
+        help='If you have a set of checks that should override the defaults, provide it here. In CI this is runtime variable $(ChecksOverride). 
EG: "whl,sdist".', ) parser.add_argument( "-u", "--unsupported", dest="unsupported", - help="A list of unsupported environments. EG: \"pylint,sdist\"", + help='A list of unsupported environments. EG: "pylint,sdist"', ) parser.add_argument( diff --git a/eng/test_tools.txt b/eng/test_tools.txt index 2bfe6a1bc62e..979a374ad722 100644 --- a/eng/test_tools.txt +++ b/eng/test_tools.txt @@ -1,4 +1,4 @@ -# requirements leveraged by ci for testing + pytest==8.3.5 pytest-asyncio==0.24.0 pytest-cov==5.0.0 @@ -10,7 +10,7 @@ stevedore==5.4.1 pyproject-api==1.8.0 build==1.2.2.post1 -# locking packages defined as deps from azure-sdk-tools + Jinja2==3.1.6 json-delta==2.0.2 readme_renderer==43.0 diff --git a/eng/tools/azure-sdk-tools/azpysdk/Check.py b/eng/tools/azure-sdk-tools/azpysdk/Check.py index 2bddca212d1a..2c87589f91e4 100644 --- a/eng/tools/azure-sdk-tools/azpysdk/Check.py +++ b/eng/tools/azure-sdk-tools/azpysdk/Check.py @@ -27,6 +27,18 @@ # being called from within a site-packages folder. Due to that, we can't trust the location of __file__ REPO_ROOT = discover_repo_root() +PACKAGING_REQUIREMENTS = [ + "wheel==0.45.1", + "packaging==24.2", + "urllib3==2.2.3", + "tomli==2.2.1", + "build==1.2.2.post1", + "pkginfo==1.12.1.2", +] + +TEST_TOOLS_REQUIREMENTS = os.path.join(REPO_ROOT, "eng/test_tools.txt") +DEPENDENCY_TOOLS_REQUIREMENTS = os.path.join(REPO_ROOT, "eng/dependency_tools.txt") + class Check(abc.ABC): """ @@ -87,9 +99,7 @@ def create_venv(self, isolate: bool, venv_location: str) -> str: f" unable to locate prebuilt azure-sdk-tools within {wheel_dir}" ) else: - install_into_venv( - venv_location, [os.path.join(REPO_ROOT, "eng/tools/azure-sdk-tools[build]")], REPO_ROOT - ) + install_into_venv(venv_location, [os.path.join(REPO_ROOT, "eng/tools/azure-sdk-tools")], REPO_ROOT) venv_python_exe = get_venv_python(venv_location) @@ -100,8 +110,11 @@ def create_venv(self, isolate: bool, venv_location: str) -> str: def get_executable(self, isolate: bool, check_name: str, executable: str, package_folder: str) -> Tuple[str, str]: """Get the Python executable that should be used for this check.""" - venv_location = os.path.join(package_folder, f".venv_{check_name}") - + # Keep venvs under a shared repo-level folder to prevent nested import errors during pytest collection + package_name = os.path.basename(os.path.normpath(package_folder)) + shared_venv_root = os.path.join(REPO_ROOT, ".venv", package_name) + os.makedirs(shared_venv_root, exist_ok=True) + venv_location = os.path.join(shared_venv_root, f".venv_{check_name}") # if isolation is required, the executable we get back will align with the venv # otherwise we'll just get sys.executable and install in current executable = self.create_venv(isolate, venv_location) @@ -117,6 +130,7 @@ def run_venv_command( check: bool = False, append_executable: bool = True, immediately_dump: bool = False, + additional_environment_settings: Optional[dict] = None, ) -> subprocess.CompletedProcess[str]: """Run a command in the given virtual environment. 
- Prepends the virtual environment's bin directory to the PATH environment variable (if one exists) @@ -130,6 +144,8 @@ def run_venv_command( ) env = os.environ.copy() + if additional_environment_settings: + env.update(additional_environment_settings) python_exec = pathlib.Path(executable) if python_exec.exists(): @@ -249,14 +265,16 @@ def pip_freeze(self, executable: str) -> None: logger.error(e.stdout) logger.error(e.stderr) - def _build_pytest_args(self, package_dir: str, args: argparse.Namespace) -> List[str]: - """ - Builds the pytest arguments used for the given package directory. - - :param package_dir: The package directory to build pytest args for. - :param args: The argparse.Namespace object containing command-line arguments. - :return: A list of pytest arguments. - """ + def _build_pytest_args_base( + self, + package_dir: str, + args: argparse.Namespace, + *, + ignore_globs: Optional[List[str]] = None, + extra_args: Optional[List[str]] = None, + test_target: Optional[str] = None, + ) -> List[str]: + """Build common pytest args for a package directory.""" log_level = os.getenv("PYTEST_LOG_LEVEL", "51") junit_path = os.path.join(package_dir, f"test-junit-{args.command}.xml") @@ -268,13 +286,30 @@ def _build_pytest_args(self, package_dir: str, args: argparse.Namespace) -> List "--durations=10", "--ignore=azure", "--ignore=.tox", - "--ignore-glob=.venv*", "--ignore=build", "--ignore=.eggs", "--ignore=samples", f"--log-cli-level={log_level}", ] - additional = args.pytest_args if args.pytest_args else [] + for glob in ignore_globs or [".venv*"]: + default_args.append(f"--ignore-glob={glob}") + + pytest_args = [*default_args] + + if extra_args: + pytest_args.extend(extra_args) + + if getattr(args, "mark_arg", None): + pytest_args.extend(["-m", args.mark_arg]) - return [*default_args, *additional, package_dir] + if getattr(args, "pytest_args", None): + pytest_args.extend(args.pytest_args) + + pytest_args.append(test_target or ".") + + return pytest_args + + def _build_pytest_args(self, package_dir: str, args: argparse.Namespace) -> List[str]: + """Build pytest args for a package directory.""" + return self._build_pytest_args_base(package_dir, args) diff --git a/eng/tools/azure-sdk-tools/azpysdk/dependency_check.py b/eng/tools/azure-sdk-tools/azpysdk/dependency_check.py new file mode 100644 index 000000000000..9acca357a364 --- /dev/null +++ b/eng/tools/azure-sdk-tools/azpysdk/dependency_check.py @@ -0,0 +1,181 @@ +import argparse +import os +import sys +from subprocess import CalledProcessError +from typing import Dict, List, Optional + +from .Check import Check, DEPENDENCY_TOOLS_REQUIREMENTS, PACKAGING_REQUIREMENTS, TEST_TOOLS_REQUIREMENTS +from .proxy_ports import get_proxy_url_for_check + +from ci_tools.functions import install_into_venv, is_error_code_5_allowed +from ci_tools.scenario.generation import create_package_and_install +from ci_tools.scenario.dependency_resolution import install_dependent_packages +from ci_tools.variables import discover_repo_root, set_envvar_defaults +from ci_tools.logging import logger + +REPO_ROOT = discover_repo_root() + + +class DependencyCheck(Check): + """Shared implementation for dependency bound test environments.""" + + def __init__( + self, + *, + dependency_type: str, + proxy_url: Optional[str], + display_name: str, + additional_packages: Optional[List[str]] = None, + ) -> None: + super().__init__() + self.dependency_type = dependency_type + self.display_name = display_name + resolved_proxy = get_proxy_url_for_check(display_name) + if proxy_url 
and proxy_url != resolved_proxy: + logger.debug( + "Overriding provided proxy_url %s with mapping value %s for check %s", + proxy_url, + resolved_proxy, + display_name, + ) + self.proxy_url = resolved_proxy + self.additional_packages = list(additional_packages or []) + + def register( + self, subparsers: "argparse._SubParsersAction", parent_parsers: Optional[List[argparse.ArgumentParser]] = None + ) -> None: + raise NotImplementedError + + def run(self, args: argparse.Namespace) -> int: + logger.info(f"Running {self.display_name} check...") + + env_defaults = self.get_env_defaults() + if env_defaults: + set_envvar_defaults(env_defaults) + + targeted = self.get_targeted_directories(args) + if not targeted: + logger.warning(f"No target packages discovered for {self.display_name} check.") + return 0 + + results: List[int] = [] + + for parsed in targeted: + package_dir = parsed.folder + package_name = parsed.name + + executable, staging_directory = self.get_executable(args.isolate, args.command, sys.executable, package_dir) + logger.info(f"Processing {package_name} using interpreter {executable}") + + try: + self._install_dependency_requirements(executable, package_dir) + except CalledProcessError as exc: + logger.error(f"Failed to install base dependencies for {package_name}: {exc}") + results.append(exc.returncode) + continue + + try: + install_dependent_packages( + setup_py_file_path=package_dir, + dependency_type=self.dependency_type, + temp_dir=staging_directory, + python_executable=executable, + ) + except Exception as exc: # pragma: no cover - defensive logging + logger.error(f"Dependency resolution failed for {package_name}: {exc}") + results.append(1) + continue + + try: + create_package_and_install( + distribution_directory=staging_directory, + target_setup=package_dir, + skip_install=False, + cache_dir=None, + work_dir=staging_directory, + force_create=False, + package_type="wheel", + pre_download_disabled=True, + python_executable=executable, + ) + except CalledProcessError as exc: + logger.error(f"Failed to build/install wheel for {package_name}: {exc}") + results.append(1) + continue + + self.pip_freeze(executable) + + if not self._verify_installed_packages(executable, package_dir, staging_directory): + results.append(1) + continue + + pytest_args = self._build_pytest_args(package_dir, args) + pytest_command = ["pytest", *pytest_args] + pytest_result = self.run_venv_command( + executable, + pytest_command, + cwd=package_dir, + immediately_dump=True, + append_executable=False, + ) + + if pytest_result.returncode != 0: + if pytest_result.returncode == 5 and is_error_code_5_allowed(package_dir, package_name): + logger.info( + "pytest exited with code 5 for %s, which is allowed for management or opt-out packages.", + package_name, + ) + continue + logger.error(f"pytest failed for {package_name} with exit code {pytest_result.returncode}.") + results.append(pytest_result.returncode) + + return max(results) if results else 0 + + def get_env_defaults(self) -> Dict[str, str]: + defaults: Dict[str, str] = {"DEPENDENCY_TYPE": self.dependency_type} + if self.proxy_url: + defaults["PROXY_URL"] = self.proxy_url + return defaults + + def _install_dependency_requirements(self, executable: str, package_dir: str) -> None: + install_into_venv(executable, PACKAGING_REQUIREMENTS, package_dir) + + if os.path.exists(DEPENDENCY_TOOLS_REQUIREMENTS): + install_into_venv(executable, ["-r", DEPENDENCY_TOOLS_REQUIREMENTS], package_dir) + else: + logger.warning(f"Dependency tools requirements file not found 
at {DEPENDENCY_TOOLS_REQUIREMENTS}.") + + if os.path.exists(TEST_TOOLS_REQUIREMENTS): + install_into_venv(executable, ["-r", TEST_TOOLS_REQUIREMENTS], package_dir) + else: + logger.warning(f"Test tools requirements file not found at {TEST_TOOLS_REQUIREMENTS}.") + + if self.additional_packages: + install_into_venv(executable, self.additional_packages, package_dir) + + def _verify_installed_packages(self, executable: str, package_dir: str, staging_directory: str) -> bool: + packages_file = os.path.join(staging_directory, "packages.txt") + if not os.path.exists(packages_file): + logger.error(f"Expected packages.txt not found at {packages_file} for {package_dir}.") + return False + + verify_script = os.path.join(REPO_ROOT, "eng/tox/verify_installed_packages.py") + verify_command = [verify_script, "--packages-file", packages_file] + verify_result = self.run_venv_command(executable, verify_command, cwd=package_dir) + + if verify_result.returncode != 0: + logger.error(f"verify_installed_packages failed for {package_dir} (exit code {verify_result.returncode}).") + if verify_result.stdout: + logger.error(verify_result.stdout) + if verify_result.stderr: + logger.error(verify_result.stderr) + return False + + return True + + def _build_pytest_args(self, package_dir: str, args: argparse.Namespace) -> List[str]: + return self._build_pytest_args_base( + package_dir, + args, + extra_args=["--no-cov"], + ) diff --git a/eng/tools/azure-sdk-tools/azpysdk/devtest.py b/eng/tools/azure-sdk-tools/azpysdk/devtest.py index a45614d77a3f..31ec05c569c3 100644 --- a/eng/tools/azure-sdk-tools/azpysdk/devtest.py +++ b/eng/tools/azure-sdk-tools/azpysdk/devtest.py @@ -1,22 +1,21 @@ import argparse -from subprocess import CalledProcessError import sys import os import glob from typing import Optional, List -from .Check import Check from ci_tools.functions import ( install_into_venv, uninstall_from_venv, - is_error_code_5_allowed, discover_targeted_packages, ) -from ci_tools.scenario.generation import create_package_and_install -from ci_tools.variables import discover_repo_root, set_envvar_defaults +from ci_tools.variables import discover_repo_root from ci_tools.logging import logger +from .install_and_test import InstallAndTest +from .proxy_ports import get_proxy_url_for_check + REPO_ROOT = discover_repo_root() common_task_path = os.path.abspath(os.path.join(REPO_ROOT, "scripts", "devops_tasks")) sys.path.append(common_task_path) @@ -122,9 +121,13 @@ def install_dev_build_packages(executable: str, pkg_name_to_exclude: str, workin install_packages(executable, azure_pkgs, working_directory) -class devtest(Check): +class devtest(InstallAndTest): def __init__(self) -> None: - super().__init__() + super().__init__( + package_type="sdist", + proxy_url=get_proxy_url_for_check("devtest"), + display_name="devtest", + ) def register( self, subparsers: "argparse._SubParsersAction", parent_parsers: Optional[List[argparse.ArgumentParser]] = None @@ -142,80 +145,13 @@ def register( nargs=argparse.REMAINDER, help="Additional arguments forwarded to pytest.", ) + p.add_argument( + "--mark_arg", + dest="mark_arg", + help='Optional pytest marker expression passed as -m "" (e.g. 
"cosmosEmulator").', + ) - def run(self, args: argparse.Namespace) -> int: - """Run the devtest check command.""" - logger.info("Running devtest check...") - - set_envvar_defaults({"PROXY_URL": "http://localhost:5002"}) - targeted = self.get_targeted_directories(args) - - results: List[int] = [] - - for parsed in targeted: - package_dir = parsed.folder - package_name = parsed.name - executable, staging_directory = self.get_executable(args.isolate, args.command, sys.executable, package_dir) - logger.info(f"Processing {package_name} for devtest check") - - # install dependencies - try: - self.install_dev_reqs(executable, args, package_dir) - except CalledProcessError as e: - logger.error(f"Failed to install dev requirements: {e}") - results.append(1) - continue - - try: - create_package_and_install( - distribution_directory=staging_directory, - target_setup=package_dir, - skip_install=False, - cache_dir=None, - work_dir=staging_directory, - force_create=False, - package_type="sdist", - pre_download_disabled=False, - python_executable=executable, - ) - except CalledProcessError as e: - logger.error(f"Failed to create and install package {package_name}: {e}") - results.append(1) - continue - - if os.path.exists(TEST_TOOLS_REQUIREMENTS): - try: - install_into_venv(executable, ["-r", TEST_TOOLS_REQUIREMENTS], package_dir) - except Exception as e: - logger.error(f"Failed to install test tools requirements: {e}") - results.append(1) - continue - else: - logger.warning(f"Test tools requirements file not found at {TEST_TOOLS_REQUIREMENTS}.") - - try: - install_dev_build_packages(executable, package_name, package_dir) - except Exception as e: - logger.error(f"Failed to install dev build packages: {e}") - results.append(1) - continue - - pytest_args = self._build_pytest_args(package_dir, args) - - pytest_result = self.run_venv_command( - executable, ["-m", "pytest", *pytest_args], cwd=package_dir, immediately_dump=True - ) - - if pytest_result.returncode != 0: - if pytest_result.returncode == 5 and is_error_code_5_allowed(package_dir, package_name): - logger.info( - "pytest exited with code 5 for %s, which is allowed for management or opt-out packages.", - package_name, - ) - # Align with tox: skip coverage when tests are skipped entirely - continue - - logger.error(f"pytest failed for {package_name} with exit code {pytest_result.returncode}.") - results.append(pytest_result.returncode) - - return max(results) if results else 0 + def before_pytest( + self, executable: str, package_dir: str, package_name: str, staging_directory: str, args: argparse.Namespace + ) -> None: + install_dev_build_packages(executable, package_name, package_dir) diff --git a/eng/tools/azure-sdk-tools/azpysdk/import_all.py b/eng/tools/azure-sdk-tools/azpysdk/import_all.py index a624512013c8..f1f487fcd1ec 100644 --- a/eng/tools/azure-sdk-tools/azpysdk/import_all.py +++ b/eng/tools/azure-sdk-tools/azpysdk/import_all.py @@ -76,8 +76,14 @@ def run(self, args: argparse.Namespace) -> int: import_script_all = "from {0} import *".format(parsed.namespace) commands = [executable, "-c", import_script_all] - outcomes.append(check_call(commands)) - logger.info("Verified module dependency, no issues found") + outcomes.append(check_call(commands, cwd=staging_directory)) + + if outcomes[-1] == 0: + logger.info("Verified module dependency, no issues found") + else: + logger.error( + f'Dependency issue found when invoking "{import_script_all}" against package {parsed.name}' + ) else: logger.info("Package {} is excluded from dependency 
check".format(parsed.name))
diff --git a/eng/tools/azure-sdk-tools/azpysdk/install_and_test.py b/eng/tools/azure-sdk-tools/azpysdk/install_and_test.py
new file mode 100644
index 000000000000..0d27ff1790a0
--- /dev/null
+++ b/eng/tools/azure-sdk-tools/azpysdk/install_and_test.py
@@ -0,0 +1,198 @@
+import argparse
+import os
+import sys
+from subprocess import CalledProcessError
+from typing import Dict, List, Optional
+
+from .Check import Check, DEPENDENCY_TOOLS_REQUIREMENTS, PACKAGING_REQUIREMENTS, TEST_TOOLS_REQUIREMENTS
+
+from ci_tools.functions import is_error_code_5_allowed, install_into_venv
+from ci_tools.scenario.generation import create_package_and_install
+from ci_tools.variables import discover_repo_root, set_envvar_defaults
+from ci_tools.logging import logger
+
+REPO_ROOT = discover_repo_root()
+
+
+class InstallAndTest(Check):
+    """Shared implementation for build-and-test style checks."""
+
+    def __init__(
+        self,
+        *,
+        package_type: str,
+        proxy_url: Optional[str],
+        display_name: str,
+        additional_pytest_args: Optional[List[str]] = None,
+        coverage_enabled: bool = True,
+    ) -> None:
+        super().__init__()
+        self.package_type = package_type
+        self.proxy_url = proxy_url
+        self.display_name = display_name
+        self.additional_pytest_args = list(additional_pytest_args or [])
+        self.coverage_enabled = coverage_enabled
+
+    def register(
+        self, subparsers: "argparse._SubParsersAction", parent_parsers: Optional[List[argparse.ArgumentParser]] = None
+    ) -> None:
+        raise NotImplementedError
+
+    def run(self, args: argparse.Namespace) -> int:
+        logger.info(f"Running {self.display_name} check...")
+
+        env_defaults = self.get_env_defaults()
+        if env_defaults:
+            set_envvar_defaults(env_defaults)
+
+        targeted = self.get_targeted_directories(args)
+        if not targeted:
+            logger.warning(f"No target packages discovered for {self.display_name} check.")
+            return 0
+
+        results: List[int] = []
+
+        for parsed in targeted:
+            package_dir = parsed.folder
+            package_name = parsed.name
+
+            executable, staging_directory = self.get_executable(args.isolate, args.command, sys.executable, package_dir)
+            logger.info(f"Processing {package_name} using interpreter {executable}")
+
+            install_result = self.install_all_requirements(
+                executable, staging_directory, package_name, package_dir, args
+            )
+            if install_result != 0:
+                results.append(install_result)
+                continue
+
+            # allow subclasses (e.g. devtest, whl_no_aio) to adjust the environment before tests run
+            self.before_pytest(executable, package_dir, package_name, staging_directory, args)
+
+            pytest_args = self._build_pytest_args(package_dir, args)
+            pytest_result = self.run_pytest(executable, staging_directory, package_dir, package_name, pytest_args)
+            if pytest_result != 0:
+                results.append(pytest_result)
+                continue
+
+            if not self.coverage_enabled:
+                continue
+
+            coverage_result = self.check_coverage(executable, package_dir, package_name)
+            if coverage_result != 0:
+                results.append(coverage_result)
+
+        return max(results) if results else 0
+
+    def before_pytest(
+        self, executable: str, package_dir: str, package_name: str, staging_directory: str, args: argparse.Namespace
+    ) -> None:
+        """Hook invoked between package installation and pytest; subclasses override it. No-op by default."""
+        return None
+
+    def check_coverage(self, executable: str, package_dir: str, package_name: str) -> int:
+        coverage_command = [
+            os.path.join(REPO_ROOT, "eng/tox/run_coverage.py"),
+            "-t",
+            package_dir,
+            "-r",
+            REPO_ROOT,
+        ]
+        coverage_result = self.run_venv_command(executable, coverage_command, cwd=package_dir)
+        if coverage_result.returncode != 0:
+            logger.error(f"Coverage generation failed for {package_name} with exit code {coverage_result.returncode}.")
+            if coverage_result.stdout:
+                logger.error(coverage_result.stdout)
+            if coverage_result.stderr:
+                logger.error(coverage_result.stderr)
+            return coverage_result.returncode
+        return 0
+
+    def run_pytest(
+        self, executable: str, staging_directory: str, package_dir: str, package_name: str, pytest_args: List[str], cwd: Optional[str] = None
+    ) -> int:
+        pytest_command = ["pytest", *pytest_args]
+
+        environment = os.environ.copy()
+        environment.update({"PYTHONPYCACHEPREFIX": staging_directory})
+
+        logger.info(f"Running pytest for {package_name} with command: {pytest_command}")
+        logger.debug(f"with environment vars: {environment}")
+
+        pytest_result = self.run_venv_command(
+            executable,
+            pytest_command,
+            cwd=cwd or package_dir,  # callers (e.g. the optional check) may supply an explicit working directory
+            immediately_dump=True,
+            additional_environment_settings=environment,
+            append_executable=False,
+        )
+        if pytest_result.returncode != 0:
+            if pytest_result.returncode == 5 and is_error_code_5_allowed(package_dir, package_name):
+                logger.info(
+                    "pytest exited with code 5 for %s, which is allowed for management or opt-out packages.",
+                    package_name,
+                )
+                # Align with tox: skip coverage when tests are skipped entirely
+                return 0
+            else:
+                logger.error(f"pytest failed for {package_name} with exit code {pytest_result.returncode}.")
+                return pytest_result.returncode
+        return 0
+
+    def install_all_requirements(
+        self, executable: str, staging_directory: str, package_name: str, package_dir: str, args: argparse.Namespace
+    ) -> int:
+        try:
+            self._install_common_requirements(executable, package_dir)
+            if self.should_install_dev_requirements():
+                self.install_dev_reqs(executable, args, package_dir)
+        except CalledProcessError as exc:
+            logger.error(f"Failed to prepare dependencies for {package_name}: {exc}")
+            return exc.returncode or 1
+
+        try:
+            create_package_and_install(
+                distribution_directory=staging_directory,
+                target_setup=package_dir,
+                skip_install=False,
+                cache_dir=None,
+                work_dir=staging_directory,
+                force_create=False,
+                package_type=self.package_type,
+                pre_download_disabled=False,
+                python_executable=executable,
+            )
+        except CalledProcessError as exc:
+            logger.error(f"Failed to build/install {self.package_type} for {package_name}: {exc}")
+            return 1
+        return 0
+
+    def get_env_defaults(self) -> Dict[str, str]:
+        defaults: Dict[str, str] = {}
+
+        if os.getenv("PROXY_URL") is not None:
+            defaults["PROXY_URL"] = str(os.getenv("PROXY_URL"))
+        if self.proxy_url:
+            defaults["PROXY_URL"] = self.proxy_url
+        return defaults
+
+    def should_install_dev_requirements(self) -> bool:
+        return True
+
+    def _install_common_requirements(self, executable: str, package_dir: str) -> None:
+        install_into_venv(executable, PACKAGING_REQUIREMENTS, package_dir)
+
+        if os.path.exists(TEST_TOOLS_REQUIREMENTS):
+            install_into_venv(executable, ["-r", TEST_TOOLS_REQUIREMENTS], package_dir)
+        else:
+            logger.warning(f"Test tools requirements file not found at {TEST_TOOLS_REQUIREMENTS}.")
+
+    def _build_pytest_args(self, package_dir: str, args: argparse.Namespace) -> List[str]:
+        return self._build_pytest_args_base(
+            package_dir,
+            args,
+            ignore_globs=["**/.venv*", "**/.venv*/**"],
+            extra_args=self.additional_pytest_args,
+            test_target=package_dir,
+        )
diff --git a/eng/tools/azure-sdk-tools/azpysdk/latestdependency.py b/eng/tools/azure-sdk-tools/azpysdk/latestdependency.py
new file mode 100644
index 000000000000..68a069cf4887
--- /dev/null
+++ b/eng/tools/azure-sdk-tools/azpysdk/latestdependency.py
@@ -0,0 +1,33 @@
+import argparse
+from typing import List, Optional
+
+from .dependency_check import DependencyCheck
+from .proxy_ports import get_proxy_url_for_check
+
+
+class latestdependency(DependencyCheck):
+    def __init__(self) -> None:
+        super().__init__(
+            dependency_type="Latest",
+            proxy_url=get_proxy_url_for_check("latestdependency"), 
display_name="latestdependency", + ) + + def register( + self, subparsers: "argparse._SubParsersAction", parent_parsers: Optional[List[argparse.ArgumentParser]] = None + ) -> None: + """Register the `latestdependency` check.""" + + parents = parent_parsers or [] + parser = subparsers.add_parser("latestdependency", parents=parents, help="Run the latestdependency check") + parser.set_defaults(func=self.run) + parser.add_argument( + "--pytest-args", + nargs=argparse.REMAINDER, + help="Additional arguments forwarded to pytest.", + ) + parser.add_argument( + "--mark_arg", + dest="mark_arg", + help='Optional pytest marker expression passed as -m "" (e.g. "cosmosEmulator").', + ) diff --git a/eng/tools/azure-sdk-tools/azpysdk/main.py b/eng/tools/azure-sdk-tools/azpysdk/main.py index 95d4ad96115c..f3617c724b51 100644 --- a/eng/tools/azure-sdk-tools/azpysdk/main.py +++ b/eng/tools/azure-sdk-tools/azpysdk/main.py @@ -10,6 +10,7 @@ import argparse import shutil import os +import sys from typing import Sequence, Optional from .import_all import import_all @@ -27,11 +28,15 @@ from .apistub import apistub from .verify_sdist import verify_sdist from .whl import whl +from .sdist import sdist +from .whl_no_aio import whl_no_aio from .verify_whl import verify_whl from .bandit import bandit from .verify_keywords import verify_keywords from .generate import generate from .breaking import breaking +from .mindependency import mindependency +from .latestdependency import latestdependency from .samples import samples from .devtest import devtest from .optional import optional @@ -93,11 +98,15 @@ def build_parser() -> argparse.ArgumentParser: apistub().register(subparsers, [common]) verify_sdist().register(subparsers, [common]) whl().register(subparsers, [common]) + sdist().register(subparsers, [common]) + whl_no_aio().register(subparsers, [common]) verify_whl().register(subparsers, [common]) bandit().register(subparsers, [common]) verify_keywords().register(subparsers, [common]) generate().register(subparsers, [common]) breaking().register(subparsers, [common]) + mindependency().register(subparsers, [common]) + latestdependency().register(subparsers, [common]) samples().register(subparsers, [common]) devtest().register(subparsers, [common]) optional().register(subparsers, [common]) @@ -130,6 +139,7 @@ def main(argv: Optional[Sequence[str]] = None) -> int: try: result = args.func(args) + print(f"{args.command} check completed with exit code {result}") return int(result or 0) except KeyboardInterrupt: logger.error("Interrupted by user") diff --git a/eng/tools/azure-sdk-tools/azpysdk/mindependency.py b/eng/tools/azure-sdk-tools/azpysdk/mindependency.py new file mode 100644 index 000000000000..95e06c222e38 --- /dev/null +++ b/eng/tools/azure-sdk-tools/azpysdk/mindependency.py @@ -0,0 +1,38 @@ +import argparse +from typing import List, Optional + +from .dependency_check import DependencyCheck +from .proxy_ports import get_proxy_url_for_check + + +class mindependency(DependencyCheck): + def __init__(self) -> None: + super().__init__( + dependency_type="Minimum", + proxy_url=get_proxy_url_for_check("mindependency"), + display_name="mindependency", + additional_packages=[ + "azure-mgmt-keyvault<7.0.0", + "azure-mgmt-resource<15.0.0", + "azure-mgmt-storage<15.0.0", + ], + ) + + def register( + self, subparsers: "argparse._SubParsersAction", parent_parsers: Optional[List[argparse.ArgumentParser]] = None + ) -> None: + """Register the `mindependency` check.""" + + parents = parent_parsers or [] + parser = 
subparsers.add_parser("mindependency", parents=parents, help="Run the mindependency check") + parser.set_defaults(func=self.run) + parser.add_argument( + "--pytest-args", + nargs=argparse.REMAINDER, + help="Additional arguments forwarded to pytest.", + ) + parser.add_argument( + "--mark_arg", + dest="mark_arg", + help='Optional pytest marker expression passed as -m "" (e.g. "cosmosEmulator").', + ) diff --git a/eng/tools/azure-sdk-tools/azpysdk/optional.py b/eng/tools/azure-sdk-tools/azpysdk/optional.py index 3a873d234cca..307012666e5c 100644 --- a/eng/tools/azure-sdk-tools/azpysdk/optional.py +++ b/eng/tools/azure-sdk-tools/azpysdk/optional.py @@ -5,24 +5,27 @@ from typing import Optional, List -from .Check import Check +from .install_and_test import InstallAndTest +from .proxy_ports import get_proxy_url_for_check from ci_tools.functions import ( install_into_venv, uninstall_from_venv, - is_error_code_5_allowed, ) -from ci_tools.scenario.generation import create_package_and_install, prepare_environment -from ci_tools.variables import discover_repo_root, in_ci, set_envvar_defaults -from ci_tools.environment_exclusions import is_check_enabled +from ci_tools.scenario.generation import prepare_environment +from ci_tools.variables import discover_repo_root, set_envvar_defaults from ci_tools.parsing import get_config_setting from ci_tools.logging import logger REPO_ROOT = discover_repo_root() -class optional(Check): +class optional(InstallAndTest): def __init__(self) -> None: - super().__init__() + super().__init__( + package_type="sdist", + proxy_url=get_proxy_url_for_check("optional"), + display_name="optional", + ) def register( self, subparsers: "argparse._SubParsersAction", parent_parsers: Optional[List[argparse.ArgumentParser]] = None @@ -43,13 +46,24 @@ def register( help="The target environment. If not provided, all optional environments will be run.", required=False, ) + p.add_argument( + "--mark_arg", + dest="mark_arg", + help='Optional pytest marker expression passed as -m "" (e.g. 
"cosmosEmulator").', + ) def run(self, args: argparse.Namespace) -> int: """Run the optional check command.""" logger.info("Running optional check...") - set_envvar_defaults({"PROXY_URL": "http://localhost:5004"}) + env_defaults = self.get_env_defaults() + if env_defaults: + set_envvar_defaults(env_defaults) + targeted = self.get_targeted_directories(args) + if not targeted: + logger.warning("No target packages discovered for optional check.") + return 0 results: List[int] = [] @@ -57,22 +71,14 @@ def run(self, args: argparse.Namespace) -> int: package_dir = parsed.folder package_name = parsed.name executable, staging_directory = self.get_executable(args.isolate, args.command, sys.executable, package_dir) - logger.info(f"Processing {package_name} for optional check") - - if in_ci(): - if not is_check_enabled(package_dir, "optional", False): - logger.info(f"Package {package_name} opts-out of optional check.") - continue + logger.info(f"Processing {package_name} using interpreter {executable}") try: - self.install_dev_reqs(executable, args, package_dir) - except CalledProcessError as exc: - logger.error(f"Failed to install dependencies for {package_name}: {exc}") - results.append(exc.returncode) - continue - - try: - self.prepare_and_test_optional(package_name, package_dir, staging_directory, args.optional) + result = self.prepare_and_test_optional( + package_name, package_dir, staging_directory, args.optional, args + ) + if result != 0: + results.append(result) except Exception as e: logger.error(f"Optional check for package {package_name} failed with exception: {e}") results.append(1) @@ -83,16 +89,19 @@ def run(self, args: argparse.Namespace) -> int: # TODO copying from generation.py, remove old code later # TODO remove pytest() function from ci_tools.functions as it was only used in the old version of this logic def prepare_and_test_optional( - self, package_name: str, package_dir: str, temp_dir: str, target_env_name: str - ) -> None: + self, package_name: str, package_dir: str, temp_dir: str, target_env_name: str, args: argparse.Namespace + ) -> int: """ Prepare and test the optional environment for the given package. 
""" optional_configs = get_config_setting(package_dir, "optional") + if not isinstance(optional_configs, list): + optional_configs = [] + if len(optional_configs) == 0: logger.info(f"No optional environments detected in pyproject.toml within {package_dir}.") - exit(0) + return 0 config_results = [] @@ -109,26 +118,18 @@ def prepare_and_test_optional( environment_exe = prepare_environment(package_dir, temp_dir, env_name) - create_package_and_install( - distribution_directory=temp_dir, - target_setup=package_dir, - skip_install=False, - cache_dir=None, - work_dir=temp_dir, - force_create=False, - package_type="sdist", - pre_download_disabled=False, - python_executable=environment_exe, - ) - dev_reqs = os.path.join(package_dir, "dev_requirements.txt") - test_tools = os.path.join(REPO_ROOT, "eng", "test_tools.txt") - - # install the dev requirements and test_tools requirements files to ensure tests can run + # install package and testing requirements try: - install_into_venv(environment_exe, ["-r", dev_reqs, "-r", test_tools], package_dir) + install_result = self.install_all_requirements( + environment_exe, temp_dir, package_name, package_dir, args + ) + if install_result != 0: + logger.error(f"Failed to install base requirements for {package_name} in optional env {env_name}.") + config_results.append(False) + break except CalledProcessError as exc: logger.error( - f"Unable to complete installation of dev_requirements.txt and/or test_tools.txt for {package_name}, check command output above." + f"Failed to install base requirements for {package_name} in optional env {env_name}: {exc}" ) config_results.append(False) break @@ -177,34 +178,22 @@ def prepare_and_test_optional( "--ignore=samples", f"--log-cli-level={log_level}", ] + if getattr(args, "mark_arg", None): + pytest_args.extend(["-m", args.mark_arg]) pytest_args.extend(config.get("additional_pytest_args", [])) logger.info(f"Invoking tests for package {package_name} and optional environment {env_name}") - pytest_command = ["-m", "pytest", *pytest_args] - pytest_result = self.run_venv_command( - environment_exe, pytest_command, cwd=package_dir, immediately_dump=True - ) - - if pytest_result.returncode != 0: - if pytest_result.returncode == 5 and is_error_code_5_allowed(package_dir, package_name): - logger.info( - "pytest exited with code 5 for %s, which is allowed for management or opt-out packages.", - package_name, - ) - # Align with tox: skip coverage when tests are skipped entirely - continue - logger.error( - f"pytest failed for {package_name} and optional environment {env_name} with exit code {pytest_result.returncode}." + try: + pytest_result = self.run_pytest( + environment_exe, temp_dir, package_dir, package_name, pytest_args, cwd=package_dir ) + config_results.append(True if pytest_result == 0 else False) + except CalledProcessError as exc: config_results.append(False) - else: - logger.info(f"pytest succeeded for {package_name} and optional environment {env_name}.") - config_results.append(True) if all(config_results): logger.info(f"All optional environment(s) for {package_name} completed successfully.") - exit(0) else: for i, config in enumerate(optional_configs): if i >= len(config_results): @@ -214,4 +203,5 @@ def prepare_and_test_optional( logger.error( f"Optional environment {config_name} for {package_name} completed with non-zero exit-code. Check test results above." 
) - exit(1) + return 1 + return 0 diff --git a/eng/tools/azure-sdk-tools/azpysdk/proxy_ports.py b/eng/tools/azure-sdk-tools/azpysdk/proxy_ports.py new file mode 100644 index 000000000000..8a48be560ff1 --- /dev/null +++ b/eng/tools/azure-sdk-tools/azpysdk/proxy_ports.py @@ -0,0 +1,52 @@ +"""Proxy port assignments for azpysdk checks. + +This mapping mirrors the explicit `PROXY_URL` configuration found in +`eng/tox/tox.ini`. Because `dispatch_checks.py` runs multiple checks in +parallel, each check must bind to its own dedicated test-proxy port to avoid +races. Keeping this data in a single module allows both the CLI and the CI +launcher to share the same source of truth without having to parse the tox +configuration file at runtime. +""" + +from __future__ import annotations + +from typing import Dict, Optional + +DEFAULT_PROXY_PORT = 5000 +DEFAULT_PROXY_URL = f"http://localhost:{DEFAULT_PROXY_PORT}" + +# NOTE: `import_all` shares the same configuration as the legacy `depends` +# tox environment. All other entries match the tox environment names 1:1. +CHECK_PROXY_PORTS: Dict[str, int] = { + "whl": DEFAULT_PROXY_PORT, + "sdist": 5001, + "whl_no_aio": 5002, + "devtest": 5003, + "optional": 5004, + "mindependency": 5005, + "latestdependency": 5006, +} + + +def get_proxy_port_for_check(check_name: Optional[str]) -> int: + """Return the proxy port assigned to the given azpysdk check.""" + + if not check_name: + return DEFAULT_PROXY_PORT + return CHECK_PROXY_PORTS.get(check_name, DEFAULT_PROXY_PORT) + + +def get_proxy_url_for_check(check_name: Optional[str]) -> str: + """Return the proxy URL assigned to the given azpysdk check.""" + + port = get_proxy_port_for_check(check_name) + return f"http://localhost:{port}" + + +__all__ = [ + "CHECK_PROXY_PORTS", + "DEFAULT_PROXY_PORT", + "DEFAULT_PROXY_URL", + "get_proxy_port_for_check", + "get_proxy_url_for_check", +] diff --git a/eng/tools/azure-sdk-tools/azpysdk/samples.py b/eng/tools/azure-sdk-tools/azpysdk/samples.py index 24a476161c52..f80cece0bf5f 100644 --- a/eng/tools/azure-sdk-tools/azpysdk/samples.py +++ b/eng/tools/azure-sdk-tools/azpysdk/samples.py @@ -6,6 +6,7 @@ from typing import Optional, List from .Check import Check +from .proxy_ports import get_proxy_url_for_check from ci_tools.functions import install_into_venv from ci_tools.scenario.generation import create_package_and_install from ci_tools.variables import discover_repo_root, set_envvar_defaults @@ -302,7 +303,7 @@ def run(self, args: argparse.Namespace) -> int: """Run the samples check command.""" logger.info("Running samples check...") - set_envvar_defaults({"PROXY_URL": "http://localhost:5003"}) + set_envvar_defaults({"PROXY_URL": get_proxy_url_for_check(args.command)}) targeted = self.get_targeted_directories(args) results: List[int] = [] diff --git a/eng/tools/azure-sdk-tools/azpysdk/sdist.py b/eng/tools/azure-sdk-tools/azpysdk/sdist.py new file mode 100644 index 000000000000..af092a29e10f --- /dev/null +++ b/eng/tools/azure-sdk-tools/azpysdk/sdist.py @@ -0,0 +1,33 @@ +import argparse +from typing import List, Optional + +from .install_and_test import InstallAndTest +from .proxy_ports import get_proxy_url_for_check + + +class sdist(InstallAndTest): + def __init__(self) -> None: + super().__init__( + package_type="sdist", + proxy_url=get_proxy_url_for_check("sdist"), + display_name="sdist", + ) + + def register( + self, subparsers: "argparse._SubParsersAction", parent_parsers: Optional[List[argparse.ArgumentParser]] = None + ) -> None: + """Register the `sdist` check. 
This builds and installs the source distribution before running pytest.""" + + parents = parent_parsers or [] + parser = subparsers.add_parser("sdist", parents=parents, help="Run the sdist check") + parser.set_defaults(func=self.run) + parser.add_argument( + "--pytest-args", + nargs=argparse.REMAINDER, + help="Additional arguments forwarded to pytest.", + ) + parser.add_argument( + "--mark_arg", + dest="mark_arg", + help='Optional pytest marker expression passed as -m "" (e.g. "cosmosEmulator").', + ) diff --git a/eng/tools/azure-sdk-tools/azpysdk/whl.py b/eng/tools/azure-sdk-tools/azpysdk/whl.py index 515ea0cdf645..f01f6ea17bdd 100644 --- a/eng/tools/azure-sdk-tools/azpysdk/whl.py +++ b/eng/tools/azure-sdk-tools/azpysdk/whl.py @@ -1,134 +1,34 @@ import argparse -import os -import sys -from subprocess import CalledProcessError from typing import List, Optional -from .Check import Check +from .install_and_test import InstallAndTest +from .proxy_ports import get_proxy_url_for_check -from ci_tools.functions import is_error_code_5_allowed, install_into_venv -from ci_tools.scenario.generation import create_package_and_install -from ci_tools.variables import discover_repo_root, set_envvar_defaults -from ci_tools.logging import logger -REPO_ROOT = discover_repo_root() - -PACKAGING_REQUIREMENTS = [ - "wheel==0.45.1", - "packaging==24.2", - "urllib3==2.2.3", - "tomli==2.2.1", - "build==1.2.2.post1", - "pkginfo==1.12.1.2", -] - -TEST_TOOLS_REQUIREMENTS = os.path.join(REPO_ROOT, "eng/test_tools.txt") - - -class whl(Check): +class whl(InstallAndTest): def __init__(self) -> None: - super().__init__() + super().__init__( + package_type="wheel", + proxy_url=get_proxy_url_for_check("whl"), + display_name="whl", + ) def register( self, subparsers: "argparse._SubParsersAction", parent_parsers: Optional[List[argparse.ArgumentParser]] = None ) -> None: - """Register the `whl` check. The `whl` check installs the wheel version of the target package + its dev_requirements.txt, - then invokes pytest. Failures indicate a test issue. - """ + """Register the `whl` check. The `whl` check installs the wheel version of the target package + its + dev requirements, then invokes pytest. 
Failures indicate a test issue.""" + parents = parent_parsers or [] - p = subparsers.add_parser("whl", parents=parents, help="Run the whl check") - p.set_defaults(func=self.run) - p.add_argument( + parser = subparsers.add_parser("whl", parents=parents, help="Run the whl check") + parser.set_defaults(func=self.run) + parser.add_argument( "--pytest-args", nargs=argparse.REMAINDER, help="Additional arguments forwarded to pytest.", ) - - def run(self, args: argparse.Namespace) -> int: - """Run the whl check command.""" - logger.info("Running whl check...") - - set_envvar_defaults({"PROXY_URL": "http://localhost:5001"}) - - targeted = self.get_targeted_directories(args) - if not targeted: - logger.warning("No target packages discovered for whl check.") - return 0 - - overall_result = 0 - - for parsed in targeted: - package_dir = parsed.folder - package_name = parsed.name - - executable, staging_directory = self.get_executable(args.isolate, args.command, sys.executable, package_dir) - logger.info(f"Processing {package_name} using interpreter {executable}") - - try: - self._install_common_requirements(executable, package_dir) - self.install_dev_reqs(executable, args, package_dir) - except CalledProcessError as exc: - logger.error(f"Failed to install dependencies for {package_name}: {exc}") - overall_result = max(overall_result, exc.returncode or 1) - continue - - try: - create_package_and_install( - distribution_directory=staging_directory, - target_setup=package_dir, - skip_install=False, - cache_dir=None, - work_dir=staging_directory, - force_create=False, - package_type="wheel", - pre_download_disabled=False, - python_executable=executable, - ) - except CalledProcessError as exc: - logger.error(f"Failed to build/install wheel for {package_name}: {exc}") - overall_result = max(overall_result, exc.returncode or 1) - continue - - pytest_args = self._build_pytest_args(package_dir, args) - pytest_command = ["-m", "pytest", *pytest_args] - pytest_result = self.run_venv_command(executable, pytest_command, cwd=package_dir, immediately_dump=True) - - if pytest_result.returncode != 0: - if pytest_result.returncode == 5 and is_error_code_5_allowed(package_dir, package_name): - logger.info( - "pytest exited with code 5 for %s, which is allowed for management or opt-out packages.", - package_name, - ) - # Align with tox: skip coverage when tests are skipped entirely - continue - - logger.error(f"pytest failed for {package_name} with exit code {pytest_result.returncode}.") - continue - - coverage_command = [ - os.path.join(REPO_ROOT, "eng/tox/run_coverage.py"), - "-t", - package_dir, - "-r", - REPO_ROOT, - ] - coverage_result = self.run_venv_command(executable, coverage_command, cwd=package_dir) - if coverage_result.returncode != 0: - logger.error( - f"Coverage generation failed for {package_name} with exit code {coverage_result.returncode}." 
- ) - if coverage_result.stdout: - logger.error(coverage_result.stdout) - if coverage_result.stderr: - logger.error(coverage_result.stderr) - overall_result = max(overall_result, coverage_result.returncode) - - return overall_result - - def _install_common_requirements(self, executable: str, package_dir: str) -> None: - install_into_venv(executable, PACKAGING_REQUIREMENTS, package_dir) - - if os.path.exists(TEST_TOOLS_REQUIREMENTS): - install_into_venv(executable, ["-r", TEST_TOOLS_REQUIREMENTS], package_dir) - else: - logger.warning(f"Test tools requirements file not found at {TEST_TOOLS_REQUIREMENTS}.") + parser.add_argument( + "--mark_arg", + dest="mark_arg", + help='Optional pytest marker expression passed as -m "" (e.g. "cosmosEmulator").', + ) diff --git a/eng/tools/azure-sdk-tools/azpysdk/whl_no_aio.py b/eng/tools/azure-sdk-tools/azpysdk/whl_no_aio.py new file mode 100644 index 000000000000..ba7bb0f433d5 --- /dev/null +++ b/eng/tools/azure-sdk-tools/azpysdk/whl_no_aio.py @@ -0,0 +1,50 @@ +import argparse +from typing import List, Optional + +from .install_and_test import InstallAndTest +from .proxy_ports import get_proxy_url_for_check +from ci_tools.logging import logger + + +class whl_no_aio(InstallAndTest): + def __init__(self) -> None: + super().__init__( + package_type="wheel", + proxy_url=get_proxy_url_for_check("whl_no_aio"), + display_name="whl_no_aio", + ) + + def register( + self, subparsers: "argparse._SubParsersAction", parent_parsers: Optional[List[argparse.ArgumentParser]] = None + ) -> None: + """Register the `whl_no_aio` check. Matches the wheel check but ensures aiohttp is absent before pytest.""" + + parents = parent_parsers or [] + parser = subparsers.add_parser("whl_no_aio", parents=parents, help="Run the whl_no_aio check") + parser.set_defaults(func=self.run) + parser.add_argument( + "--pytest-args", + nargs=argparse.REMAINDER, + help="Additional arguments forwarded to pytest.", + ) + parser.add_argument( + "--mark_arg", + dest="mark_arg", + help='Optional pytest marker expression passed as -m "" (e.g. "cosmosEmulator").', + ) + + def before_pytest( + self, executable: str, package_dir: str, package_name: str, staging_directory: str, args: argparse.Namespace + ) -> None: + uninstall_cmd = ["-m", "pip", "uninstall", "aiohttp", "--yes"] + result = self.run_venv_command(executable, uninstall_cmd, cwd=package_dir) + if result.returncode != 0: + logger.warning( + "Failed to uninstall aiohttp prior to pytest for %s. 
Exit code %s.", + package_dir, + result.returncode, + ) + if result.stdout: + logger.warning(result.stdout) + if result.stderr: + logger.warning(result.stderr) diff --git a/eng/tools/azure-sdk-tools/ci_tools/functions.py b/eng/tools/azure-sdk-tools/ci_tools/functions.py index 5e812576d29f..afddff9c9aa2 100644 --- a/eng/tools/azure-sdk-tools/ci_tools/functions.py +++ b/eng/tools/azure-sdk-tools/ci_tools/functions.py @@ -599,8 +599,9 @@ def run_pip_freeze(python_executable: Optional[str] = None) -> List[str]: pip_cmd = get_pip_command(exe) + # we use `freeze` because it is present on both pip and uv out = subprocess.Popen( - pip_cmd + ["list", "--disable-pip-version-check", "--format", "freeze"], + pip_cmd + ["freeze", "--disable-pip-version-check"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) @@ -610,7 +611,7 @@ def run_pip_freeze(python_executable: Optional[str] = None) -> List[str]: collected_output = [] if stdout and (stderr is None): - for line in stdout.decode("utf-8").split(os.linesep): + for line in stdout.decode("utf-8").splitlines(): if line: collected_output.append(line) else: diff --git a/eng/tools/azure-sdk-tools/ci_tools/logging/__init__.py b/eng/tools/azure-sdk-tools/ci_tools/logging/__init__.py index a7491ec3a461..ff6e22e6fe22 100644 --- a/eng/tools/azure-sdk-tools/ci_tools/logging/__init__.py +++ b/eng/tools/azure-sdk-tools/ci_tools/logging/__init__.py @@ -32,7 +32,7 @@ def configure_logging(args: argparse.Namespace, fmt: str = "%(asctime)s [%(level logger.setLevel(numeric_level) # Propagate logger config globally if needed - logging.basicConfig(level=numeric_level, format=fmt) + logging.basicConfig(level=numeric_level, format=fmt, force=True) def now() -> str: diff --git a/eng/tools/azure-sdk-tools/ci_tools/scenario/dependency_resolution.py b/eng/tools/azure-sdk-tools/ci_tools/scenario/dependency_resolution.py new file mode 100644 index 000000000000..5c9859f795f5 --- /dev/null +++ b/eng/tools/azure-sdk-tools/ci_tools/scenario/dependency_resolution.py @@ -0,0 +1,341 @@ +"""Utilities for resolving dependency sets for tox-style checks. + +This module contains the logic previously hosted in ``eng/tox/install_depend_packages.py`` +so that both the legacy tox entry point and the azpysdk checks can share a +single implementation. +""" + +import logging +import os +import re +import subprocess +import sys +from typing import Callable, List, Optional + +from packaging.requirements import Requirement +from packaging.specifiers import SpecifierSet +from packaging.version import Version +from pypi_tools.pypi import PyPIClient + +from ci_tools.functions import ( + compare_python_version, + get_pip_command, + handle_incompatible_minimum_dev_reqs, +) +from ci_tools.parsing import ParsedSetup, parse_require + +logger = logging.getLogger(__name__) + +DEV_REQ_FILE = "dev_requirements.txt" +NEW_DEV_REQ_FILE = "new_dev_requirements.txt" +PKGS_TXT_FILE = "packages.txt" + +# GENERIC_OVERRIDES dictionaries pair a specific dependency with a MINIMUM or MAXIMUM inclusive bound. +# During LATEST and MINIMUM dependency checks, we sometimes need to ignore versions for various compatibility +# reasons. 
+MINIMUM_VERSION_GENERIC_OVERRIDES = {
+    "azure-common": "1.1.10",
+    "msrest": "0.6.10",
+    "typing-extensions": "4.6.0",
+    "opentelemetry-api": "1.3.0",
+    "opentelemetry-sdk": "1.3.0",
+    "azure-core": "1.11.0",
+    "requests": "2.19.0",
+    "six": "1.12.0",
+    "cryptography": "41.0.0",
+    "msal": "1.23.0",
+    "azure-storage-file-datalake": "12.2.0",
+}
+
+MAXIMUM_VERSION_GENERIC_OVERRIDES = {}
+
+# SPECIFIC OVERRIDES provide additional filtering of the upper and lower bounds by
+# binding an override to the specific package being processed. As an example, when
+# processing the latest or minimum deps for "azure-eventhub", the minimum version of "azure-core"
+# will be overridden to 1.25.0.
+MINIMUM_VERSION_SPECIFIC_OVERRIDES = {
+    "azure-eventhub": {"azure-core": "1.25.0"},
+    "azure-eventhub-checkpointstoreblob-aio": {"azure-core": "1.25.0", "azure-eventhub": "5.11.0"},
+    "azure-eventhub-checkpointstoreblob": {"azure-core": "1.25.0", "azure-eventhub": "5.11.0"},
+    "azure-eventhub-checkpointstoretable": {"azure-core": "1.25.0", "azure-eventhub": "5.11.0"},
+    "azure-identity": {"msal": "1.23.0"},
+    "azure-core-tracing-opentelemetry": {"azure-core": "1.28.0"},
+    "azure-storage-file-datalake": {"azure-storage-blob": "12.22.0"},
+    "azure-cosmos": {"azure-core": "1.30.0"},
+    "azure-appconfiguration-provider": {"azure-appconfiguration": "1.7.2"},
+    "azure-ai-evaluation": {"aiohttp": "3.8.6"},
+}
+
+MAXIMUM_VERSION_SPECIFIC_OVERRIDES = {}
+
+# PLATFORM-SPECIFIC OVERRIDES provide additional generic (i.e. not tied to the package whose dependencies are being
+# processed) filtering on a _per platform_ basis. Primarily used to limit certain packages due to platform compatibility.
+PLATFORM_SPECIFIC_MINIMUM_OVERRIDES = {
+    ">=3.14.0": {
+        "typing-extensions": "4.15.0",
+    },
+    ">=3.12.0": {
+        "azure-core": "1.23.1",
+        "aiohttp": "3.9.0",
+        "six": "1.16.0",
+        "requests": "2.30.0",
+    },
+    ">=3.13.0": {
+        "typing-extensions": "4.13.0",
+        "aiohttp": "3.10.6",
+    },
+}
+
+PLATFORM_SPECIFIC_MAXIMUM_OVERRIDES = {}
+
+# SPECIAL_CASE_OVERRIDES is used to actively _add_ requirements to the install set, injecting
+# new requirement specifiers alongside the packages being installed.
+SPECIAL_CASE_OVERRIDES = {
+    # this package has an override
+    "azure-core": {
+        # if the version being installed matches this specifier, add the listed packages to the install list
+        "<1.24.0": ["msrest<0.7.0"],
+    }
+}
+
+__all__ = [
+    "install_dependent_packages",
+    "filter_dev_requirements",
+    "find_released_packages",
+]
+
+
+def install_dependent_packages(
+    setup_py_file_path: str,
+    dependency_type: str,
+    temp_dir: str,
+    python_executable: Optional[str] = None,
+) -> None:
+    """Identify and install the dependency set for a package.
+
+    :param setup_py_file_path: Path to the target package directory.
+    :param dependency_type: Either ``"Latest"`` or ``"Minimum"``.
+    :param temp_dir: Directory where temporary artifacts (e.g. filtered requirements, packages.txt) are written.
+    :param python_executable: Optional interpreter whose environment should receive the installations. Defaults to
+        the current ``sys.executable``.
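+
+    A usage sketch (the package path and temp directory are illustrative):
+
+        install_dependent_packages("sdk/template/azure-template", "Minimum", "/tmp/deps_work")
+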
+ """ + + python_exe = python_executable or sys.executable + + released_packages = find_released_packages(setup_py_file_path, dependency_type) + override_added_packages: List[str] = [] + + for pkg_spec in released_packages: + override_added_packages.extend(check_pkg_against_overrides(pkg_spec)) + + logger.info("%s released packages: %s", dependency_type, released_packages) + + additional_filter_fn: Optional[Callable[[str, List[str], List[Requirement]], List[str]]] = None + if dependency_type == "Minimum": + additional_filter_fn = handle_incompatible_minimum_dev_reqs + + dev_req_file_path = filter_dev_requirements(setup_py_file_path, released_packages, temp_dir, additional_filter_fn) + + if override_added_packages: + logger.info("Expanding the requirement set by the packages %s.", override_added_packages) + + install_set = released_packages + list(set(override_added_packages)) + + if install_set or dev_req_file_path: + install_packages(install_set, dev_req_file_path, python_exe) + + if released_packages: + pkgs_file_path = os.path.join(temp_dir, PKGS_TXT_FILE) + with open(pkgs_file_path, "w", encoding="utf-8") as pkgs_file: + for package in released_packages: + pkgs_file.write(package + "\n") + logger.info("Created file %s to track azure packages found on PyPI", pkgs_file_path) + + +def check_pkg_against_overrides(pkg_specifier: str) -> List[str]: + """Apply ``SPECIAL_CASE_OVERRIDES`` for a resolved package specifier.""" + + additional_installs: List[str] = [] + target_package, target_version = pkg_specifier.split("==") + + target_version_obj = Version(target_version) + if target_package in SPECIAL_CASE_OVERRIDES: + for specifier_set, extras in SPECIAL_CASE_OVERRIDES[target_package].items(): + spec = SpecifierSet(specifier_set) + if target_version_obj in spec: + additional_installs.extend(extras) + + return additional_installs + + +def find_released_packages(setup_py_path: str, dependency_type: str) -> List[str]: + """Resolve the appropriate released dependency versions for a package.""" + + pkg_info = ParsedSetup.from_path(setup_py_path) + requires = [r for r in pkg_info.requires if "-nspkg" not in r] + available_packages = [ + spec for spec in map(lambda req: process_requirement(req, dependency_type, pkg_info.name), requires) if spec + ] + return available_packages + + +def process_bounded_versions(originating_pkg_name: str, pkg_name: str, versions: List[str]) -> List[str]: + """Apply generic, platform, and package-specific bounds to the available versions list.""" + + if pkg_name in MINIMUM_VERSION_GENERIC_OVERRIDES: + versions = [v for v in versions if Version(v) >= Version(MINIMUM_VERSION_GENERIC_OVERRIDES[pkg_name])] + + for platform_bound, restrictions in PLATFORM_SPECIFIC_MINIMUM_OVERRIDES.items(): + if compare_python_version(platform_bound) and pkg_name in restrictions: + versions = [v for v in versions if Version(v) >= Version(restrictions[pkg_name])] + + if ( + originating_pkg_name in MINIMUM_VERSION_SPECIFIC_OVERRIDES + and pkg_name in MINIMUM_VERSION_SPECIFIC_OVERRIDES[originating_pkg_name] + ): + versions = [ + v + for v in versions + if Version(v) >= Version(MINIMUM_VERSION_SPECIFIC_OVERRIDES[originating_pkg_name][pkg_name]) + ] + + if pkg_name in MAXIMUM_VERSION_GENERIC_OVERRIDES: + versions = [v for v in versions if Version(v) <= Version(MAXIMUM_VERSION_GENERIC_OVERRIDES[pkg_name])] + + for platform_bound, restrictions in PLATFORM_SPECIFIC_MAXIMUM_OVERRIDES.items(): + if compare_python_version(platform_bound) and pkg_name in restrictions: + versions = [v for v in versions 
if Version(v) <= Version(restrictions[pkg_name])] + + if ( + originating_pkg_name in MAXIMUM_VERSION_SPECIFIC_OVERRIDES + and pkg_name in MAXIMUM_VERSION_SPECIFIC_OVERRIDES[originating_pkg_name] + ): + versions = [ + v + for v in versions + if Version(v) <= Version(MAXIMUM_VERSION_SPECIFIC_OVERRIDES[originating_pkg_name][pkg_name]) + ] + + return versions + + +def process_requirement(req: str, dependency_type: str, orig_pkg_name: str) -> str: + """Determine the matching version for a requirement based on dependency type.""" + + requirement = parse_require(req) + pkg_name = requirement.name + spec = requirement.specifier if len(requirement.specifier) else None + + if not (requirement.marker is None or requirement.marker.evaluate()): + logger.info( + "Skipping requirement %r. Environment marker %r does not apply to current environment.", + req, + str(requirement.marker), + ) + return "" + + # if the specifier includes preview versions, then we can resolve preview versions + # otherwise, we should filter them out + allows_prereleases = spec is not None and spec.prereleases is True + + client = PyPIClient() + versions = [str(v) for v in client.get_ordered_versions(pkg_name, True)] + logger.info("Versions available on PyPI for %s: %s", pkg_name, versions) + + # prepass filter before choosing a latest or minimum, eliminate prerelease versions if they are not allowed based on the specifier + if not allows_prereleases: + versions = [v for v in versions if not Version(v).is_prerelease] + logger.info( + f"Filtered out pre-release versions for {pkg_name} based on specifier. Remaining versions: {versions}" + ) + + versions = process_bounded_versions(orig_pkg_name, pkg_name, versions) + + if dependency_type == "Latest": + versions.reverse() + + for version in versions: + if spec is None or version in spec: + logger.info( + "Found %s version %s that matches specifier %s", + dependency_type, + version, + spec, + ) + return pkg_name + "==" + version + + logger.error("No version is found on PyPI for package %s that matches specifier %s", pkg_name, spec) + return "" + + +def check_req_against_exclusion(req: str, req_to_exclude: str) -> bool: + """Return ``True`` if the dev requirement matches the package slated for exclusion.""" + + req_id = "" + for char in req: + if re.match(r"[A-Za-z0-9_-]", char): + req_id += char + else: + break + + return req_id == req_to_exclude + + +def filter_dev_requirements( + package_directory: str, + released_packages: List[str], + temp_dir: str, + additional_filter_fn: Optional[Callable[[str, List[str], List[Requirement]], List[str]]] = None, +) -> str: + """Filter dev requirements to avoid reinstalling packages we just resolved.""" + + dev_req_path = os.path.join(package_directory, DEV_REQ_FILE) + with open(dev_req_path, "r", encoding="utf-8") as dev_req_file: + requirements = dev_req_file.readlines() + + released_packages_parsed = [parse_require(p) for p in released_packages] + released_package_names = [p.name for p in released_packages_parsed] + + prebuilt_dev_reqs = [os.path.basename(req.replace("\n", "")) for req in requirements if os.path.sep in req] + req_to_exclude = [req for req in prebuilt_dev_reqs if req.split("-")[0].replace("_", "-") in released_package_names] + req_to_exclude.extend(released_package_names) + + filtered_req = [ + req + for req in requirements + if os.path.basename(req.replace("\n", "")) not in req_to_exclude + and not any(check_req_against_exclusion(req, item) for item in req_to_exclude) + ] + + if additional_filter_fn: + filtered_req = 
additional_filter_fn(package_directory, filtered_req, released_packages_parsed) + + logger.info("Filtered dev requirements: %s", filtered_req) + + new_dev_req_path = "" + if filtered_req: + new_dev_req_path = os.path.join(temp_dir, NEW_DEV_REQ_FILE) + with open(new_dev_req_path, "w", encoding="utf-8") as dev_req_file: + dev_req_file.writelines(line if line.endswith("\n") else line + "\n" for line in filtered_req) + + return new_dev_req_path + + +def install_packages(packages: List[str], req_file: str, python_executable: str) -> None: + """Install resolved packages (and optionally a requirements file) into the target environment.""" + + python_exe = python_executable or sys.executable + commands = get_pip_command(python_exe) + commands.append("install") + + if commands[0] == "uv": + commands.extend(["--python", python_exe]) + + if packages: + commands.extend(packages) + + if req_file: + commands.extend(["-r", req_file]) + + logger.info("Installing packages. Command: %s", commands) + subprocess.check_call(commands) diff --git a/eng/tools/azure-sdk-tools/ci_tools/variables.py b/eng/tools/azure-sdk-tools/ci_tools/variables.py index 89dce671cd8a..5732d3deb9f0 100644 --- a/eng/tools/azure-sdk-tools/ci_tools/variables.py +++ b/eng/tools/azure-sdk-tools/ci_tools/variables.py @@ -99,7 +99,6 @@ def in_analyze_weekly() -> int: DEFAULT_ENVIRONMENT_VARIABLES = { "SPHINX_APIDOC_OPTIONS": "members,undoc-members,inherited-members", - "PROXY_URL": "http://localhost:5000", "VIRTUALENV_WHEEL": "0.45.1", "VIRTUALENV_PIP": "24.0", "VIRTUALENV_SETUPTOOLS": "75.3.2", @@ -117,8 +116,7 @@ def set_environment_from_dictionary(settings: Dict[str, str]) -> None: settings (Dict[str, str]): A dictionary of environment variable names and their default values. """ for key, value in settings.items(): - if key not in os.environ: - os.environ.setdefault(key, value) + os.environ.setdefault(key, value) def set_envvar_defaults(settings: Optional[Dict[str, str]] = None) -> None: diff --git a/eng/tools/azure-sdk-tools/devtools_testutils/config.py b/eng/tools/azure-sdk-tools/devtools_testutils/config.py index af127405b9f3..605844dcb152 100644 --- a/eng/tools/azure-sdk-tools/devtools_testutils/config.py +++ b/eng/tools/azure-sdk-tools/devtools_testutils/config.py @@ -12,10 +12,13 @@ ENV_LIVE_TEST = "AZURE_TEST_RUN_LIVE" -PROXY_URL = os.getenv("PROXY_URL", "http://localhost:5000").rstrip("/") TEST_SETTING_FILENAME = "testsettings_local.cfg" +def PROXY_URL(): + return os.getenv("PROXY_URL", "http://localhost:5000").rstrip("/") + + class TestConfig(object): # pylint: disable=too-few-public-methods def __init__(self, parent_parsers=None, config_file=None): parent_parsers = parent_parsers or [] diff --git a/eng/tools/azure-sdk-tools/devtools_testutils/proxy_startup.py b/eng/tools/azure-sdk-tools/devtools_testutils/proxy_startup.py index ac5f5bdc3429..9dec587773b5 100644 --- a/eng/tools/azure-sdk-tools/devtools_testutils/proxy_startup.py +++ b/eng/tools/azure-sdk-tools/devtools_testutils/proxy_startup.py @@ -19,12 +19,13 @@ from dotenv import load_dotenv, find_dotenv import pytest import subprocess +from urllib.parse import urlparse from urllib3.exceptions import SSLError from ci_tools.variables import in_ci from .config import PROXY_URL -from .fake_credentials import FAKE_ACCESS_TOKEN, FAKE_ID, SERVICEBUS_FAKE_SAS, SANITIZED +from .fake_credentials import SANITIZED from .helpers import get_http_client, is_live_and_not_recording from .sanitizers import ( add_batch_sanitizers, @@ -44,7 +45,7 @@ CONTAINER_STARTUP_TIMEOUT = 60 
 PROXY_MANUALLY_STARTED = os.getenv("PROXY_MANUAL_START", False)
-PROXY_CHECK_URL = PROXY_URL + "/Info/Available"
+PROXY_CHECK_URL = PROXY_URL() + "/Info/Available"
 TOOL_ENV_VAR = "PROXY_PID"
 
 AVAILABLE_TEST_PROXY_BINARIES = {
@@ -97,6 +98,27 @@
 discovered_roots = []
 
 
+def _get_proxy_log_suffix() -> str:
+    """Derive a log suffix based on the configured proxy port."""
+    proxy_url = PROXY_URL()
+    normalized = proxy_url if "://" in proxy_url else f"http://{proxy_url}"
+    try:
+        parsed = urlparse(normalized)
+    except Exception as exc:  # pragma: no cover - defensive parsing guard
+        _LOGGER.debug("Unable to parse PROXY_URL '%s': %s", proxy_url, exc)
+        return "default"
+
+    if parsed.port:
+        return str(parsed.port)
+
+    if parsed.netloc and ":" in parsed.netloc:
+        candidate = parsed.netloc.rsplit(":", 1)[-1]
+        if candidate.isdigit():
+            return candidate
+
+    return "default"
+
+
 def get_target_version(repo_root: str) -> str:
     """Gets the target test-proxy version from the target_version.txt file in /eng/common/testproxy"""
     version_file_location = os.path.relpath("eng/common/testproxy/target_version.txt")
@@ -347,7 +369,7 @@ def start_test_proxy(request) -> None:
     """
     repo_root = ascend_to_root(request.node.items[0].module.__file__)
 
-    requires_https = PROXY_URL.startswith("https://")
+    requires_https = PROXY_URL().startswith("https://")
     if requires_https:
         check_certificate_location(repo_root)
 
@@ -362,19 +384,10 @@
         # If we're in CI, allow for tox environment parallelization and write proxy output to a log file
         log = None
         if in_ci():
-            envname = os.getenv("TOX_ENV_NAME", "default")
-            log = open(os.path.join(root, "_proxy_log_{}.log".format(envname)), "a")
-
-            os.environ["PROXY_ASSETS_FOLDER"] = os.path.join(root, "l", envname)
-            if not os.path.exists(os.environ["PROXY_ASSETS_FOLDER"]):
-                os.makedirs(os.environ["PROXY_ASSETS_FOLDER"])
+            log_suffix = _get_proxy_log_suffix()
+            log = open(os.path.join(root, f"_proxy_log_{log_suffix}.log"), "a")
 
-        if os.getenv("TF_BUILD"):
-            _LOGGER.info("Starting the test proxy tool from dotnet tool cache...")
-            tool_name = "test-proxy"
-        else:
-            _LOGGER.info("Downloading and starting standalone proxy executable...")
-            tool_name = prepare_local_tool(root)
+        tool_name = prepare_local_tool(root)
 
         if requires_https:
             # Always start the proxy with these two defaults set to allow SSL connection
@@ -387,11 +400,17 @@
         else:
             passenv = {}
 
+        # When the proxy is started in the context of a directory, deleting files under that directory crashes the
+        # test-proxy due to how ASP.NET Kestrel loads configuration files. We can disable this behavior by setting this
+        # environment variable for the proxy process, which lets us clean up the --isolate directories without crashing
+        # running proxies.
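+        # (Equivalently, the setting could be exported before a manual proxy start, e.g.
+        #     DOTNET_HOSTBUILDER__RELOADCONFIGONCHANGE=false test-proxy start ...
+        # the double underscore is .NET's separator for nested configuration keys.)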
+ passenv["DOTNET_HOSTBUILDER__RELOADCONFIGONCHANGE"] = "false" + # If they are already set, override what we give the proxy with what is in os.environ passenv.update(os.environ) proc = subprocess.Popen( - shlex.split(f'{tool_name} start --storage-location="{root}" -- --urls "{PROXY_URL}"'), + shlex.split(f'{tool_name} start --storage-location="{root}" -- --urls "{PROXY_URL()}"'), stdout=log or subprocess.DEVNULL, stderr=log or subprocess.STDOUT, env=passenv, diff --git a/eng/tools/azure-sdk-tools/devtools_testutils/proxy_testcase.py b/eng/tools/azure-sdk-tools/devtools_testutils/proxy_testcase.py index 2e31916d9102..a4e109d6f685 100644 --- a/eng/tools/azure-sdk-tools/devtools_testutils/proxy_testcase.py +++ b/eng/tools/azure-sdk-tools/devtools_testutils/proxy_testcase.py @@ -46,10 +46,10 @@ # defaults -RECORDING_START_URL = "{}/record/start".format(PROXY_URL) -RECORDING_STOP_URL = "{}/record/stop".format(PROXY_URL) -PLAYBACK_START_URL = "{}/playback/start".format(PROXY_URL) -PLAYBACK_STOP_URL = "{}/playback/stop".format(PROXY_URL) +RECORDING_START_URL = "{}/record/start".format(PROXY_URL()) +RECORDING_STOP_URL = "{}/record/stop".format(PROXY_URL()) +PLAYBACK_START_URL = "{}/playback/start".format(PROXY_URL()) +PLAYBACK_STOP_URL = "{}/playback/stop".format(PROXY_URL()) class RecordedTransport(str, Enum): @@ -159,7 +159,7 @@ def stop_record_or_playback(test_id: str, recording_id: str, test_variables: "Di def get_proxy_netloc() -> "Dict[str, str]": - parsed_result = url_parse.urlparse(PROXY_URL) + parsed_result = url_parse.urlparse(PROXY_URL()) return {"scheme": parsed_result.scheme, "netloc": parsed_result.netloc} diff --git a/eng/tools/azure-sdk-tools/devtools_testutils/sanitizers.py b/eng/tools/azure-sdk-tools/devtools_testutils/sanitizers.py index 83dbd7be7faa..7bdf6624dbc3 100644 --- a/eng/tools/azure-sdk-tools/devtools_testutils/sanitizers.py +++ b/eng/tools/azure-sdk-tools/devtools_testutils/sanitizers.py @@ -469,7 +469,7 @@ def add_batch_sanitizers(sanitizers: Dict[str, List[Optional[Dict[str, str]]]], http_client = get_http_client() http_client.request( method="POST", - url="{}/Admin/AddSanitizers".format(PROXY_URL), + url="{}/Admin/AddSanitizers".format(PROXY_URL()), headers=headers_to_send, body=json.dumps(data).encode("utf-8"), ) @@ -501,7 +501,7 @@ def remove_batch_sanitizers(sanitizers: List[str], headers: Optional[Dict] = Non http_client = get_http_client() http_client.request( method="POST", - url="{}/Admin/RemoveSanitizers".format(PROXY_URL), + url="{}/Admin/RemoveSanitizers".format(PROXY_URL()), headers=headers_to_send, body=json.dumps(data).encode("utf-8"), ) @@ -718,7 +718,7 @@ def _send_matcher_request(matcher: str, headers: Dict, parameters: Optional[Dict http_client = get_http_client() http_client.request( method="POST", - url=f"{PROXY_URL}/Admin/SetMatcher", + url=f"{PROXY_URL()}/Admin/SetMatcher", headers=headers_to_send, body=json.dumps(parameters).encode("utf-8"), ) @@ -747,7 +747,7 @@ def _send_recording_options_request(parameters: Dict, headers: Optional[Dict] = http_client = get_http_client() http_client.request( method="POST", - url=f"{PROXY_URL}/Admin/SetRecordingOptions", + url=f"{PROXY_URL()}/Admin/SetRecordingOptions", headers=headers_to_send, body=json.dumps(parameters).encode("utf-8"), ) @@ -771,7 +771,7 @@ def _send_reset_request(headers: Dict) -> None: headers_to_send[key] = headers[key] http_client = get_http_client() - http_client.request(method="POST", url=f"{PROXY_URL}/Admin/Reset", headers=headers_to_send) + http_client.request(method="POST", 
url=f"{PROXY_URL()}/Admin/Reset", headers=headers_to_send) def _send_sanitizer_request(sanitizer: str, parameters: Dict, headers: Optional[Dict] = None) -> None: @@ -796,7 +796,7 @@ def _send_sanitizer_request(sanitizer: str, parameters: Dict, headers: Optional[ http_client = get_http_client() http_client.request( method="POST", - url="{}/Admin/AddSanitizer".format(PROXY_URL), + url="{}/Admin/AddSanitizer".format(PROXY_URL()), headers=headers_to_send, body=json.dumps(parameters).encode("utf-8"), ) @@ -823,7 +823,7 @@ def _send_transform_request(transform: str, parameters: Dict, headers: Optional[ http_client = get_http_client() http_client.request( method="POST", - url=f"{PROXY_URL}/Admin/AddTransform", + url=f"{PROXY_URL()}/Admin/AddTransform", headers=headers_to_send, body=json.dumps(parameters).encode("utf-8"), ) diff --git a/eng/tools/azure-sdk-tools/pyproject.toml b/eng/tools/azure-sdk-tools/pyproject.toml index c71c3e2de579..0d97d4835e3e 100644 --- a/eng/tools/azure-sdk-tools/pyproject.toml +++ b/eng/tools/azure-sdk-tools/pyproject.toml @@ -13,6 +13,12 @@ authors = [ urls = { "Homepage" = "https://github.com/Azure/azure-sdk-for-python" } dependencies = [ + "setuptools", + "pyparsing", + "certifi", + "cibuildwheel", + "pkginfo", + "build", "packaging", "wheel", "Jinja2", @@ -53,7 +59,6 @@ systemperf = "devtools_testutils.perfstress_tests:run_system_perfstress_tests_cm azpysdk = "azpysdk.main:main" [project.optional-dependencies] -build = ["setuptools", "pyparsing", "certifi", "cibuildwheel", "pkginfo", "build"] conda = ["beautifulsoup4"] systemperf = ["aiohttp>=3.0", "requests>=2.0", "tornado==6.0.3", "httpx>=0.21", "azure-core"] ghtools = ["GitPython", "PyGithub>=1.59.0", "requests>=2.0"] diff --git a/eng/tools/azure-sdk-tools/tests/integration/scenarios/dev_requirement_samples/relative_requirements.txt b/eng/tools/azure-sdk-tools/tests/integration/scenarios/dev_requirement_samples/relative_requirements.txt index 1c0445b205fd..d448b39d6fda 100644 --- a/eng/tools/azure-sdk-tools/tests/integration/scenarios/dev_requirement_samples/relative_requirements.txt +++ b/eng/tools/azure-sdk-tools/tests/integration/scenarios/dev_requirement_samples/relative_requirements.txt @@ -6,7 +6,7 @@ tests/testserver_tests/coretestserver # random comment that should be ignored ../azure-mgmt-core -e ../azure-mgmt-core -../../../eng/tools/azure-sdk-tools[build] --e ../../../eng/tools/azure-sdk-tools[build] +../../../eng/tools/azure-sdk-tools[conda] +-e ../../../eng/tools/azure-sdk-tools[conda] -e . . 
\ No newline at end of file diff --git a/eng/tools/azure-sdk-tools/tests/test_logging_config.py b/eng/tools/azure-sdk-tools/tests/test_logging_config.py index 9e3db3175fec..c58af719598a 100644 --- a/eng/tools/azure-sdk-tools/tests/test_logging_config.py +++ b/eng/tools/azure-sdk-tools/tests/test_logging_config.py @@ -22,5 +22,5 @@ def test_configure_logging_various_levels(mock_basic_config, cli_args, level_env configure_logging(cli_args) assert logger.level == expected_level mock_basic_config.assert_called_with( - level=expected_level, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s" + level=expected_level, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", force=True ) diff --git a/eng/tools/azure-sdk-tools/tests/test_requirements_parse.py b/eng/tools/azure-sdk-tools/tests/test_requirements_parse.py index 87dd716f24c6..e73dd0cf27a7 100644 --- a/eng/tools/azure-sdk-tools/tests/test_requirements_parse.py +++ b/eng/tools/azure-sdk-tools/tests/test_requirements_parse.py @@ -79,8 +79,8 @@ def test_replace_dev_reqs_relative(tmp_directory_create): os.path.join(expected_output_folder, f"azure_identity-{identity_version}-py3-none-any.whl"), os.path.join(expected_output_folder, f"azure_mgmt_core-{mgmt_core_version}-py3-none-any.whl"), os.path.join(expected_output_folder, f"azure_mgmt_core-{mgmt_core_version}-py3-none-any.whl"), - os.path.join(expected_output_folder, f"azure_sdk_tools-{sdk_tools_version}-py3-none-any.whl[build]"), - os.path.join(expected_output_folder, f"azure_sdk_tools-{sdk_tools_version}-py3-none-any.whl[build]"), + os.path.join(expected_output_folder, f"azure_sdk_tools-{sdk_tools_version}-py3-none-any.whl[conda]"), + os.path.join(expected_output_folder, f"azure_sdk_tools-{sdk_tools_version}-py3-none-any.whl[conda]"), os.path.join(expected_output_folder, f"azure_core-{core_version}-py3-none-any.whl"), os.path.join(expected_output_folder, f"azure_core-{core_version}-py3-none-any.whl"), ] diff --git a/eng/tox/install_depend_packages.py b/eng/tox/install_depend_packages.py index 1f5247280833..469f27a4a1b0 100644 --- a/eng/tox/install_depend_packages.py +++ b/eng/tox/install_depend_packages.py @@ -6,382 +6,16 @@ # -------------------------------------------------------------------------------------------- import argparse +import logging import os import sys -import logging -import re - -from subprocess import check_call -from typing import TYPE_CHECKING, Callable, Optional -from pypi_tools.pypi import PyPIClient -from packaging.specifiers import SpecifierSet -from packaging.version import Version -from packaging.requirements import Requirement -from ci_tools.parsing import ParsedSetup, parse_require -from ci_tools.functions import compare_python_version, handle_incompatible_minimum_dev_reqs, get_pip_command - -from typing import List - -DEV_REQ_FILE = "dev_requirements.txt" -NEW_DEV_REQ_FILE = "new_dev_requirements.txt" -PKGS_TXT_FILE = "packages.txt" +from ci_tools.scenario.dependency_resolution import install_dependent_packages logging.getLogger().setLevel(logging.INFO) -# GENERIC_OVERRIDES dictionaries pair a specific dependency with a MINIMUM or MAXIMUM inclusive bound. -# During LATEST and MINIMUM dependency checks, we sometimes need to ignore versions for various compatibility -# reasons. 
-MINIMUM_VERSION_GENERIC_OVERRIDES = { - "azure-common": "1.1.10", - "msrest": "0.6.10", - "typing-extensions": "4.6.0", - "opentelemetry-api": "1.3.0", - "opentelemetry-sdk": "1.3.0", - "azure-core": "1.11.0", - "requests": "2.19.0", - "six": "1.12.0", - "cryptography": "41.0.0", - "msal": "1.23.0", - "azure-storage-file-datalake": "12.2.0", -} - -MAXIMUM_VERSION_GENERIC_OVERRIDES = {} - -# SPECIFIC OVERRIDES provide additional filtering of upper and lower bound by -# binding an override to the specific package being processed. As an example, when -# processing the latest or minimum deps for "azure-eventhub", the minimum version of "azure-core" -# will be overridden to 1.25.0. -MINIMUM_VERSION_SPECIFIC_OVERRIDES = { - "azure-eventhub": {"azure-core": "1.25.0"}, - "azure-eventhub-checkpointstoreblob-aio": {"azure-core": "1.25.0", "azure-eventhub": "5.11.0"}, - "azure-eventhub-checkpointstoreblob": {"azure-core": "1.25.0", "azure-eventhub": "5.11.0"}, - "azure-eventhub-checkpointstoretable": {"azure-core": "1.25.0", "azure-eventhub": "5.11.0"}, - "azure-identity": {"msal": "1.23.0"}, - "azure-core-tracing-opentelemetry": {"azure-core": "1.28.0"}, - "azure-storage-file-datalake": {"azure-storage-blob": "12.22.0"}, - "azure-cosmos": {"azure-core": "1.30.0"}, - "azure-appconfiguration-provider": {"azure-appconfiguration": "1.7.2"}, - "azure-ai-evaluation": {"aiohttp": "3.8.6"}, -} - -MAXIMUM_VERSION_SPECIFIC_OVERRIDES = {} - -# PLATFORM SPECIFIC OVERRIDES provide additional generic (EG not tied to the package whos dependencies are being processed) -# filtering on a _per platform_ basis. Primarily used to limit certain packages due to platform compat -PLATFORM_SPECIFIC_MINIMUM_OVERRIDES = { - ">=3.14.0": { - "typing-extensions": "4.15.0", - }, - ">=3.12.0": {"azure-core": "1.23.1", "aiohttp": "3.9.0", "six": "1.16.0", "requests": "2.30.0"}, - ">=3.13.0": {"typing-extensions": "4.13.0", "aiohttp": "3.10.6"}, -} - -PLATFORM_SPECIFIC_MAXIMUM_OVERRIDES = {} - -# This is used to actively _add_ requirements to the install set. These are used to actively inject -# a new requirement specifier to the set of packages being installed. -SPECIAL_CASE_OVERRIDES = { - # this package has an override - "azure-core": { - # if the version being installed matches this specifier, add the listed packages to the install list - "<1.24.0": ["msrest<0.7.0"] - } -} - - -def install_dependent_packages(setup_py_file_path, dependency_type, temp_dir): - # This method identifies latest/ minimal version of dependent packages and installs them from pyPI - # dependency type must either be latest or minimum - # Latest dependency will find latest released package that satisfies requires of given package name - # Minimum type will find minimum version on PyPI that satisfies requires of given package name - released_packages = find_released_packages(setup_py_file_path, dependency_type) - override_added_packages = [] - - # new section added to account for difficulties with msrest - for pkg_spec in released_packages: - override_added_packages.extend(check_pkg_against_overrides(pkg_spec)) - - logging.info("%s released packages: %s", dependency_type, released_packages) - - additional_filter_fn = None - if dependency_type == "Minimum": - additional_filter_fn = handle_incompatible_minimum_dev_reqs - - # before september 2024, filter_dev_requirements only would remove any packages present in released_packages from the dev_requirements, - # then create a new file "new_dev_requirements.txt" without the problematic packages. 
- # after september 2024, filter_dev_requirements will also check for **compatibility** with the packages being installed when filtering the dev_requirements. - dev_req_file_path = filter_dev_requirements(setup_py_file_path, released_packages, temp_dir, additional_filter_fn) - - if override_added_packages: - logging.info(f"Expanding the requirement set by the packages {override_added_packages}.") - - install_set = released_packages + list(set(override_added_packages)) - - # install released dependent packages - if released_packages or dev_req_file_path: - install_packages(install_set, dev_req_file_path) - - if released_packages: - # create a file with list of packages and versions found based on minimum or latest check on PyPI - # This file can be used to verify if we have correct version installed - pkgs_file_path = os.path.join(temp_dir, PKGS_TXT_FILE) - with open(pkgs_file_path, "w") as pkgs_file: - for package in released_packages: - pkgs_file.write(package + "\n") - logging.info("Created file %s to track azure packages found on PyPI", pkgs_file_path) - - -def check_pkg_against_overrides(pkg_specifier: str) -> List[str]: - """ - Checks a set of package specifiers of form "[A==1.0.0, B=2.0.0]". Used to inject additional package installations - as indicated by the SPECIAL_CASE_OVERRIDES dictionary. - - :param str pkg_specifier: A specifically targeted package that is about to be passed to install_packages. - """ - additional_installs = [] - target_package, target_version = pkg_specifier.split("==") - - target_version = Version(target_version) - if target_package in SPECIAL_CASE_OVERRIDES: - special_case_specifiers = SPECIAL_CASE_OVERRIDES[target_package] - - for specifier_set in special_case_specifiers.keys(): - spec = SpecifierSet(specifier_set) - if target_version in spec: - additional_installs.extend(special_case_specifiers[specifier_set]) - - return additional_installs - - -def find_released_packages(setup_py_path, dependency_type): - # this method returns list of required available package on PyPI in format == - pkg_info = ParsedSetup.from_path(setup_py_path) - - # parse setup.py and find install requires - requires = [r for r in pkg_info.requires if "-nspkg" not in r] - - # Get available version on PyPI for each required package - avlble_packages = [x for x in map(lambda x: process_requirement(x, dependency_type, pkg_info.name), requires) if x] - - return avlble_packages - - -def process_bounded_versions(originating_pkg_name: str, pkg_name: str, versions: List[str]) -> List[str]: - """ - Processes a target package based on an originating package (target is a dep of originating) and the versions available from pypi for the target package. - - Returns the set of versions AFTER general, platform, and package-specific overrides have been applied. - - :param str originating_pkg_name: The name of the package whos requirements are being processed. - :param str pkg_name: A specific requirement of the originating package being processed. - :param List[str] versions: All the versions available on pypi for pkg_name. 
- """ - - # lower bound general - if pkg_name in MINIMUM_VERSION_GENERIC_OVERRIDES: - versions = [v for v in versions if Version(v) >= Version(MINIMUM_VERSION_GENERIC_OVERRIDES[pkg_name])] - - # lower bound platform-specific - for platform_bound in PLATFORM_SPECIFIC_MINIMUM_OVERRIDES.keys(): - if compare_python_version(platform_bound): - restrictions = PLATFORM_SPECIFIC_MINIMUM_OVERRIDES[platform_bound] - - if pkg_name in restrictions: - versions = [v for v in versions if Version(v) >= Version(restrictions[pkg_name])] - - # lower bound package-specific - if ( - originating_pkg_name in MINIMUM_VERSION_SPECIFIC_OVERRIDES - and pkg_name in MINIMUM_VERSION_SPECIFIC_OVERRIDES[originating_pkg_name] - ): - versions = [ - v - for v in versions - if Version(v) >= Version(MINIMUM_VERSION_SPECIFIC_OVERRIDES[originating_pkg_name][pkg_name]) - ] - - # upper bound general - if pkg_name in MAXIMUM_VERSION_GENERIC_OVERRIDES: - versions = [v for v in versions if Version(v) <= Version(MAXIMUM_VERSION_GENERIC_OVERRIDES[pkg_name])] - # upper bound platform - for platform_bound in PLATFORM_SPECIFIC_MAXIMUM_OVERRIDES.keys(): - if compare_python_version(platform_bound): - restrictions = PLATFORM_SPECIFIC_MAXIMUM_OVERRIDES[platform_bound] - - if pkg_name in restrictions: - versions = [v for v in versions if Version(v) <= Version(restrictions[pkg_name])] - - # upper bound package-specific - if ( - originating_pkg_name in MAXIMUM_VERSION_SPECIFIC_OVERRIDES - and pkg_name in MAXIMUM_VERSION_SPECIFIC_OVERRIDES[originating_pkg_name] - ): - versions = [ - v - for v in versions - if Version(v) <= Version(MAXIMUM_VERSION_SPECIFIC_OVERRIDES[originating_pkg_name][pkg_name]) - ] - - return versions - - -def process_requirement(req, dependency_type, orig_pkg_name): - # this method finds either latest or minimum version of a package that is available on PyPI - - # find package name and requirement specifier from requires - requirement = parse_require(req) - pkg_name = requirement.name - spec = requirement.specifier if len(requirement.specifier) else None - - # Filter out requirements with environment markers that don't match the current environment - # e.g. `; python_version > 3.10` when running on Python3.9 - if not (requirement.marker is None or requirement.marker.evaluate()): - logging.info( - f"Skipping requirement {req!r}. Environment marker {str(requirement.marker)!r} " - + "does not apply to current environment." - ) - return "" - - # if the specifier includes preview versions, then we can resolve preview versions - # otherwise, we should filter them out - allows_prereleases = spec is not None and spec.prereleases is True - - # get available versions on PyPI - client = PyPIClient() - versions = [str(v) for v in client.get_ordered_versions(pkg_name, True)] - logging.info("Versions available on PyPI for %s: %s", pkg_name, versions) - - # prepass filter before choosing a latest or minimum, eliminate prerelease versions if they are not allowed based on the specifier - if not allows_prereleases: - versions = [v for v in versions if not Version(v).is_prerelease] - logging.info( - "Filtered out pre-release versions for %s based on specifier. 
Remaining versions: %s", pkg_name, versions - ) - - # think of the various versions that come back from pypi as the top of a funnel - # We apply generic overrides -> platform specific overrides -> package specific overrides - versions = process_bounded_versions(orig_pkg_name, pkg_name, versions) - - # Search from lowest to latest in case of finding minimum dependency - # Search from latest to lowest in case of finding latest required version - # reverse the list to get latest version first - if dependency_type == "Latest": - versions.reverse() - - # return first version that matches specifier in == format - for version in versions: - # if there IS NO specifier, then we should take the first entry. we have already sorted for latest/minimum. - if spec is None: - return pkg_name + "==" + version - - if version in spec: - logging.info( - "Found %s version %s that matches specifier %s", - dependency_type, - version, - spec, - ) - return pkg_name + "==" + version - - logging.error( - "No version is found on PyPI for package %s that matches specifier %s", - pkg_name, - spec, - ) - return "" - - -def check_req_against_exclusion(req, req_to_exclude): - """ - This function evaluates a requirement from a dev_requirements file against a file name. Returns True - if the requirement is for the same package listed in "req_to_exclude". False otherwise. - - :param req: An incoming "req" looks like a requirement that appears in a dev_requirements file. EG: [ "../../../eng/tools/azure-sdk-tools", - "https://docsupport.blob.core.windows.net/repackaged/cffi-1.14.6-cp310-cp310-win_amd64.whl; sys_platform=='win32' and python_version >= '3.10'", - "msrestazure>=0.4.11", "pytest" ] - - :param req_to_exclude: A valid and complete python package name. No specifiers. - """ - req_id = "" - for c in req: - if re.match(r"[A-Za-z0-9_-]", c): - req_id += c - else: - break - - return req_id == req_to_exclude - - -def filter_dev_requirements( - package_directory, - released_packages, - temp_dir, - additional_filter_fn: Optional[Callable[[str, List[str], List[Requirement]], List[str]]] = None, -): - """ - This function takes an existing package path, a list of specific package specifiers that we have resolved, a temporary directory to write - the modified dev_requirements to, and an optional additional_filter_fn that can be used to further filter the dev_requirements file if necessary. - - The function will filter out any requirements present in the dev_requirements file that are present in the released_packages list (aka are required - by the package). 
- """ - # This method returns list of requirements from dev_requirements by filtering out packages in given list - dev_req_path = os.path.join(package_directory, DEV_REQ_FILE) - requirements = [] - with open(dev_req_path, "r") as dev_req_file: - requirements = dev_req_file.readlines() - - # filter out any package available on PyPI (released_packages) - # include packages without relative reference and packages not available on PyPI - released_packages = [parse_require(p) for p in released_packages] - released_package_names = [p.name for p in released_packages] - # find prebuilt whl paths in dev requiremente - prebuilt_dev_reqs = [os.path.basename(req.replace("\n", "")) for req in requirements if os.path.sep in req] - # filter any req if wheel is for a released package - req_to_exclude = [req for req in prebuilt_dev_reqs if req.split("-")[0].replace("_", "-") in released_package_names] - req_to_exclude.extend(released_package_names) - - filtered_req = [ - req - for req in requirements - if os.path.basename(req.replace("\n", "")) not in req_to_exclude - and not any([check_req_against_exclusion(req, i) for i in req_to_exclude]) - ] - - if additional_filter_fn: - # this filter function handles the case where a dev requirement is incompatible with the current set of targeted packages - filtered_req = additional_filter_fn(package_directory, filtered_req, released_packages) - - logging.info("Filtered dev requirements: %s", filtered_req) - - new_dev_req_path = "" - if filtered_req: - # create new dev requirements file with different name for filtered requirements - new_dev_req_path = os.path.join(temp_dir, NEW_DEV_REQ_FILE) - with open(new_dev_req_path, "w") as dev_req_file: - dev_req_file.writelines(line if line.endswith("\n") else line + "\n" for line in filtered_req) - - return new_dev_req_path - - -def install_packages(packages, req_file): - # install list of given packages from PyPI - commands = get_pip_command() - commands.append("install") - - if packages: - commands.extend(packages) - - if req_file: - commands.extend(["-r", req_file]) - - logging.info("Installing packages. Command: %s", commands) - check_call(commands) - - -if __name__ == "__main__": +def main() -> None: parser = argparse.ArgumentParser(description="Install either latest or minimum version of dependent packages.") parser.add_argument( @@ -411,10 +45,14 @@ def install_packages(packages, req_file): args = parser.parse_args() - setup_path = os.path.join(os.path.abspath(args.target_package)) + setup_path = os.path.abspath(args.target_package) if not (os.path.exists(setup_path) and os.path.exists(args.work_dir)): logging.error("Invalid arguments. 
Please make sure target directory and working directory are valid path") sys.exit(1) - install_dependent_packages(setup_path, args.dependency_type, args.work_dir) + install_dependent_packages(setup_path, args.dependency_type, args.work_dir, python_executable=sys.executable) + + +if __name__ == "__main__": + main() diff --git a/eng/tox/tox.ini b/eng/tox/tox.ini index a5292f184e1d..83572aa08518 100644 --- a/eng/tox/tox.ini +++ b/eng/tox/tox.ini @@ -602,5 +602,5 @@ setenv = {[testenv]setenv} PROXY_URL=http://localhost:5018 commands = - {[tox]pip_command} install {toxinidir}/../../../eng/tools/azure-sdk-tools[build] + {[tox]pip_command} install {toxinidir}/../../../eng/tools/azure-sdk-tools python {repository_root}/eng/tox/run_optional.py -t {toxinidir} --temp={envtmpdir} {posargs} diff --git a/scripts/auto_release/PythonSdkLiveTest.yml b/scripts/auto_release/PythonSdkLiveTest.yml index bebc4c1d587c..6ca6dcd57580 100644 --- a/scripts/auto_release/PythonSdkLiveTest.yml +++ b/scripts/auto_release/PythonSdkLiveTest.yml @@ -97,7 +97,7 @@ jobs: export ISSUE_OWNER=$(ISSUE_OWNER) # install azure-sdk-tools - python -m pip install $root_path/eng/tools/azure-sdk-tools[build,ghtools,sdkgenerator] + python -m pip install $root_path/eng/tools/azure-sdk-tools[ghtools,sdkgenerator] # install requirements python -m pip install -r $script_path/requirement.txt diff --git a/scripts/automation_init.sh b/scripts/automation_init.sh index 308bcf24aee4..6d444c097e1c 100644 --- a/scripts/automation_init.sh +++ b/scripts/automation_init.sh @@ -2,7 +2,7 @@ # init env python -m pip install -U pip > /dev/null -python -m pip install eng/tools/azure-sdk-tools[build,ghtools,sdkgenerator] > /dev/null +python -m pip install eng/tools/azure-sdk-tools[ghtools,sdkgenerator] > /dev/null # install tsp-client echo Install tsp-client diff --git a/scripts/breaking_changes_checker/README.md b/scripts/breaking_changes_checker/README.md index 15aff938e77d..7c32110c0166 100644 --- a/scripts/breaking_changes_checker/README.md +++ b/scripts/breaking_changes_checker/README.md @@ -11,7 +11,7 @@ Add your package name to the `RUN_BREAKING_CHANGES_PACKAGES` found [here](https: **1) Install azpysdk:** -`pip install -e eng/tools/azure-sdk-tools[build]` +`pip install -e eng/tools/azure-sdk-tools` **2) Run the `breaking` check.** diff --git a/sdk/core/azure-core/tests/async_tests/conftest.py b/sdk/core/azure-core/tests/async_tests/conftest.py index 6db03141b52c..59cdfe4a3674 100644 --- a/sdk/core/azure-core/tests/async_tests/conftest.py +++ b/sdk/core/azure-core/tests/async_tests/conftest.py @@ -60,7 +60,7 @@ def start_testserver(): port = get_port() os.environ["FLASK_APP"] = "coretestserver" os.environ["FLASK_PORT"] = str(port) - cmd = "flask run -p {}".format(port) + cmd = f"{sys.executable} -m flask run -p {port}" if os.name == "nt": # On windows, subprocess creation works without being in the shell child_process = subprocess.Popen(cmd, env=dict(os.environ)) else: diff --git a/sdk/core/azure-core/tests/conftest.py b/sdk/core/azure-core/tests/conftest.py index e9a947c796ca..0d9ebb6c8c24 100644 --- a/sdk/core/azure-core/tests/conftest.py +++ b/sdk/core/azure-core/tests/conftest.py @@ -30,6 +30,7 @@ import subprocess import random import platform +import sys import urllib from typing import Generator @@ -74,7 +75,7 @@ def start_testserver(): # to set these additional env vars for pypy os.environ["LC_ALL"] = "C.UTF-8" os.environ["LANG"] = "C.UTF-8" - cmd = "flask run -p {}".format(port) + cmd = f"{sys.executable} -m flask run -p {port}" if os.name == 
"nt": # On windows, subprocess creation works without being in the shell child_process = subprocess.Popen(cmd, env=dict(os.environ)) else: diff --git a/sdk/cosmos/cosmos-emulator-internal-matrix.json b/sdk/cosmos/cosmos-emulator-internal-matrix.json index 71362ef80275..67c575f5c218 100644 --- a/sdk/cosmos/cosmos-emulator-internal-matrix.json +++ b/sdk/cosmos/cosmos-emulator-internal-matrix.json @@ -19,87 +19,87 @@ "PythonVersion": "3.9", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "whl,sdist" + "ChecksOverride": "whl,sdist" }, "Emulator Tests Python 3.10 Standard": { "PythonVersion": "3.10", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "whl,sdist" + "ChecksOverride": "whl,sdist" }, "Emulator Tests Python 3.12 Standard": { "PythonVersion": "3.12", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "whl,sdist" + "ChecksOverride": "whl,sdist" }, "Emulator Tests Python 3.13 Standard": { "PythonVersion": "3.13", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "whl,sdist" + "ChecksOverride": "whl,sdist" }, "Emulator Tests Python 3.9 Special": { "PythonVersion": "3.9", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "depends,whl_no_aio" + "ChecksOverride": "depends,whl_no_aio" }, "Emulator Tests Python 3.10 Special": { "PythonVersion": "3.10", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "depends,whl_no_aio" + "ChecksOverride": "depends,whl_no_aio" }, "Emulator Tests Python 3.12 Special": { "PythonVersion": "3.12", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "depends,whl_no_aio" + "ChecksOverride": "depends,whl_no_aio" }, "Emulator Tests Python 3.13 Special": { "PythonVersion": "3.13", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "depends,whl_no_aio" + "ChecksOverride": "depends,whl_no_aio" }, "Emulator Tests Python 3.14 Special": { "PythonVersion": "3.14", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "depends,whl_no_aio" + "ChecksOverride": "depends,whl_no_aio" }, "Emulator Tests Python 3.9 Dependency Checks": { "PythonVersion": "3.9", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "latestdependency,mindependency" + "ChecksOverride": "latestdependency,mindependency" }, "Emulator Tests Python 3.10 Dependency Checks": { "PythonVersion": "3.10", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "latestdependency,mindependency" + "ChecksOverride": "latestdependency,mindependency" }, "Emulator Tests Python 3.12 Dependency Checks": { "PythonVersion": "3.12", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "latestdependency,mindependency" + "ChecksOverride": "latestdependency,mindependency" }, "Emulator Tests Python 3.13 Dependency Checks": { "PythonVersion": "3.13", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "latestdependency,mindependency" + "ChecksOverride": "latestdependency,mindependency" }, "Emulator Tests Python 3.14 Dependency Checks": { "PythonVersion": "3.14", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "latestdependency,mindependency" + "ChecksOverride": "latestdependency,mindependency" } } } diff --git a/sdk/cosmos/cosmos-emulator-public-matrix.json b/sdk/cosmos/cosmos-emulator-public-matrix.json index f78ca6ef6654..3278fe204843 100644 --- 
a/sdk/cosmos/cosmos-emulator-public-matrix.json +++ b/sdk/cosmos/cosmos-emulator-public-matrix.json @@ -19,50 +19,50 @@ "PythonVersion": "3.10", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "whl,sdist" + "ChecksOverride": "whl,sdist" }, "Emulator Tests Python 3.12 Standard": { "PythonVersion": "3.12", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "whl,sdist" + "ChecksOverride": "whl,sdist" }, "Emulator Tests Python 3.13 Standard": { "PythonVersion": "3.13", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "whl,sdist" + "ChecksOverride": "whl,sdist" }, "Emulator Tests Python 3.14 Standard": { "PythonVersion": "3.14", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "whl,sdist" + "ChecksOverride": "whl,sdist" }, "Emulator Tests Python 3.9 Dependency Checks": { "PythonVersion": "3.9", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "mindependency" + "ChecksOverride": "mindependency" }, "Emulator Tests Python 3.10 Dependency Checks": { "PythonVersion": "3.10", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "mindependency" + "ChecksOverride": "mindependency" }, "Emulator Tests Python 3.12 Dependency Checks": { "PythonVersion": "3.12", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "mindependency" + "ChecksOverride": "mindependency" }, "Emulator Tests Python 3.13 Dependency Checks": { "PythonVersion": "3.13", "CoverageArg": "--disablecov", "TestSamples": "false", - "Run.ToxCustomEnvs": "mindependency" + "ChecksOverride": "mindependency" } } } diff --git a/sdk/ml/azure-ai-ml/dev_requirements.txt b/sdk/ml/azure-ai-ml/dev_requirements.txt index eedda6cf8025..fb3341648e00 100644 --- a/sdk/ml/azure-ai-ml/dev_requirements.txt +++ b/sdk/ml/azure-ai-ml/dev_requirements.txt @@ -23,3 +23,4 @@ pytest-reportlog python-dotenv azureml-dataprep-rslex>=2.22.0; platform_python_implementation == "CPython" and python_version < "3.13" azureml-dataprep-rslex>=2.22.0; platform_python_implementation == "PyPy" and python_version < "3.10" +pip
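A note on the recurring change from the module-level constant `PROXY_URL` to the callable `PROXY_URL()` throughout the diffs above: turning the constant into a function defers the environment lookup from import time to call time, which is what lets each azpysdk check (via `get_proxy_url_for_check`) point its test run at a dedicated proxy port. A minimal, self-contained sketch of the late-binding difference (stdlib only; the port values are illustrative):

    import os

    # Old shape: bound once, when the module is imported.
    PROXY_URL_CONSTANT = os.getenv("PROXY_URL", "http://localhost:5000").rstrip("/")

    # New shape: re-read from the environment on every call.
    def PROXY_URL() -> str:
        return os.getenv("PROXY_URL", "http://localhost:5000").rstrip("/")

    os.environ["PROXY_URL"] = "http://localhost:5004"  # e.g. set by a check before its tests run
    print(PROXY_URL_CONSTANT)  # still "http://localhost:5000"
    print(PROXY_URL())         # "http://localhost:5004"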