diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index ac9a2e75..ff261bad 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,7 +3,7 @@ FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} USER vscode -RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.35.0" RYE_INSTALL_OPTION="--yes" bash +RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.44.0" RYE_INSTALL_OPTION="--yes" bash ENV PATH=/home/vscode/.rye/shims:$PATH -RUN echo "[[ -d .venv ]] && source .venv/bin/activate" >> /home/vscode/.bashrc +RUN echo "[[ -d .venv ]] && source .venv/bin/activate || export PATH=\$PATH" >> /home/vscode/.bashrc diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 66df046a..c17fdc16 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -14,7 +14,7 @@ "extensions": [ "ms-python.python" ], - "settings": { + "settings": { "terminal.integrated.shell.linux": "/bin/bash", "python.pythonPath": ".venv/bin/python", "python.defaultInterpreterPath": ".venv/bin/python", @@ -24,6 +24,9 @@ } } } + }, + "features": { + "ghcr.io/devcontainers/features/node:1": {} } // Features to add to the dev container. More info: https://containers.dev/features. diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 3f946652..f9dbedb4 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,5 +1 @@ -# Each line is a file pattern followed by one or more owners. - -# These owners will be the default owners for everything in -# the repo. Unless a later match takes precedence, -* @ashwinb @yanxi0830 @hardikjshah @dltn @raghotham @dineshyv @vladimirivic @ehhuang @SLR722 @reluctantfuturist +@ehhuang @ashwinb @raghotham @reluctantfuturist @leseb diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml deleted file mode 100644 index 1f7dabb9..00000000 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ /dev/null @@ -1,77 +0,0 @@ -name: 🐛 Bug Report -description: Create a report to help us reproduce and fix the bug - -body: - - type: markdown - attributes: - value: > - #### Before submitting a bug, please make sure the issue hasn't been already addressed by searching through [the - existing and past issues](https://github.com/meta-llama/llama-stack/issues). - - - type: textarea - id: system-info - attributes: - label: System Info - description: | - Please share your system info with us. You can use the following command to capture your environment information - python -m "torch.utils.collect_env" - - placeholder: | - PyTorch version, CUDA version, GPU type, #num of GPUs... - validations: - required: true - - - type: checkboxes - id: information-scripts-examples - attributes: - label: Information - description: 'The problem arises when using:' - options: - - label: "The official example scripts" - - label: "My own modified scripts" - - - type: textarea - id: bug-description - attributes: - label: 🐛 Describe the bug - description: | - Please provide a clear and concise description of what the bug is. - - Please also paste or describe the results you observe instead of the expected results. - placeholder: | - A clear and concise description of what the bug is. - - ```llama stack - # Command that you used for running the examples - ``` - Description of the results - validations: - required: true - - - type: textarea - attributes: - label: Error logs - description: | - If you observe an error, please paste the error message including the **full** traceback of the exception. 
It may be relevant to wrap error messages in ```` ```triple quotes blocks``` ````. - - placeholder: | - ``` - The error message you got, with the full traceback. - ``` - - validations: - required: true - - - - type: textarea - id: expected-behavior - validations: - required: true - attributes: - label: Expected behavior - description: "A clear and concise description of what you would expect to happen." - - - type: markdown - attributes: - value: > - Thanks for contributing 🎉! diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 044518ab..00000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,11 +0,0 @@ -# What does this PR do? -[Provide a short summary of what this PR does and why. Link to relevant issues if applicable.] - -[//]: # (If resolving an issue, uncomment and update the line below) -[//]: # (Closes #[issue-number]) - -## Test Plan -[Describe the tests you ran to verify your changes with result summaries. *Provide clear instructions so the plan can be easily re-executed.*] - -[//]: # (## Documentation) -[//]: # (- [ ] Added a Changelog entry if the change is significant) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..ee914c4e --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,96 @@ +name: CI +on: + push: + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'stl-preview-head/**' + - 'stl-preview-base/**' + pull_request: + branches-ignore: + - 'stl-preview-head/**' + - 'stl-preview-base/**' + +jobs: + lint: + timeout-minutes: 10 + name: lint + runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Install dependencies + run: rye sync --all-features + + - name: Run lints + run: ./scripts/lint + + build: + if: github.repository == 'stainless-sdks/llama-stack-client-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork) + timeout-minutes: 10 + name: build + permissions: + contents: read + id-token: write + runs-on: depot-ubuntu-24.04 + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Install dependencies + run: rye sync --all-features + + - name: Run build + run: rye build + + - name: Get GitHub OIDC Token + id: github-oidc + uses: actions/github-script@v6 + with: + script: core.setOutput('github_token', await core.getIDToken()); + + - name: Upload tarball + env: + URL: https://pkg.stainless.com/s + AUTH: ${{ steps.github-oidc.outputs.github_token }} + SHA: ${{ github.sha }} + run: ./scripts/utils/upload-artifact.sh + + test: + timeout-minutes: 10 + name: test + runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + 
RYE_INSTALL_OPTION: '--yes' + + - name: Bootstrap + run: ./scripts/bootstrap + + - name: Run tests + run: ./scripts/test diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 58853453..8bcc292a 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Python uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: - python-version: '3.11.10' + python-version: '3.12' cache: pip cache-dependency-path: | **/requirements*.txt diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml new file mode 100644 index 00000000..e62e7dd5 --- /dev/null +++ b/.github/workflows/publish-pypi.yml @@ -0,0 +1,31 @@ +# This workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to PyPI in case it failed for some reason. +# You can run this workflow by navigating to https://www.github.com/llamastack/llama-stack-client-python/actions/workflows/publish-pypi.yml +name: Publish PyPI +on: + workflow_dispatch: + + release: + types: [published] + +jobs: + publish: + name: publish + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Publish to PyPI + run: | + bash ./bin/publish-pypi + env: + PYPI_TOKEN: ${{ secrets.LLAMA_STACK_CLIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/publish-to-test-pypi.yml b/.github/workflows/publish-to-test-pypi.yml deleted file mode 100644 index a669c93f..00000000 --- a/.github/workflows/publish-to-test-pypi.yml +++ /dev/null @@ -1,76 +0,0 @@ -name: Publish Python 🐍 distribution 📦 to TestPyPI - -on: - repository_dispatch: # on trigger from llama-stack - types: [build-client-package] - - workflow_dispatch: # Keep manual trigger - inputs: - version: - description: 'Version number (e.g. 
0.0.63.dev20250111)' - required: true - type: string - -jobs: - build: - name: Build distribution 📦 - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - with: - persist-credentials: false - - name: Get date - id: date - run: echo "date=$(date +'%Y%m%d')" >> $GITHUB_OUTPUT - - name: Update version for manual RC - if: github.event_name == 'workflow_dispatch' - run: | - sed -i 's/version = "\([^"]*\)"/version = "${{ inputs.version }}"/' pyproject.toml - sed -i 's/__version__ = "\([^"]*\)"/__version__ = "${{ inputs.version }}"/' src/llama_stack_client/_version.py - - name: Update version for repository_dispatch - if: github.event_name == 'repository_dispatch' && github.event.client_payload.source == 'llama-stack-nightly' - run: | - sed -i 's/version = "\([^"]*\)"/version = "${{ github.event.client_payload.version }}"/' pyproject.toml - sed -i 's/__version__ = "\([^"]*\)"/__version__ = "${{ github.event.client_payload.version }}"/' src/llama_stack_client/_version.py - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - name: Install pypa/build - run: >- - python3 -m - pip install - build - --user - - name: Build a binary wheel and a source tarball - run: python3 -m build - - name: Store the distribution packages - uses: actions/upload-artifact@v4 - with: - name: python-package-distributions - path: dist/ - - publish-to-testpypi: - name: Publish Python 🐍 distribution 📦 to TestPyPI - needs: - - build - runs-on: ubuntu-latest - - environment: - name: testrelease - url: https://test.pypi.org/p/llama-stack-client - - permissions: - id-token: write # IMPORTANT: mandatory for trusted publishing - - steps: - - name: Download all the dists - uses: actions/download-artifact@v4 - with: - name: python-package-distributions - path: dist/ - - name: Publish distribution 📦 to TestPyPI - uses: pypa/gh-action-pypi-publish@release/v1 - with: - repository-url: https://test.pypi.org/legacy/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0571dee6..b55a0a86 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -55,20 +55,6 @@ repos: # - id: markdown-link-check # args: ['--quiet'] -# - repo: local -# hooks: -# - id: distro-codegen -# name: Distribution Template Codegen -# additional_dependencies: -# - rich -# - pydantic -# entry: python -m llama_stack.scripts.distro_codegen -# language: python -# pass_filenames: false -# require_serial: true -# files: ^llama_stack/templates/.*$ -# stages: [manual] - ci: autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks autoupdate_commit_msg: ⬆ [pre-commit.ci] pre-commit autoupdate diff --git a/.release-please-manifest.json b/.release-please-manifest.json new file mode 100644 index 00000000..7cb1a16e --- /dev/null +++ b/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "0.2.18-alpha.2" +} diff --git a/.ruff.toml b/.ruff.toml deleted file mode 100644 index a913ae69..00000000 --- a/.ruff.toml +++ /dev/null @@ -1,37 +0,0 @@ -# Suggested config from pytorch that we can adapt -lint.select = ["B", "C", "E" , "F" , "N", "W", "B9"] - -line-length = 120 - -# C408 ignored because we like the dict keyword argument syntax -# E501 is not flexible enough, we're using B950 instead -# N812 ignored because import torch.nn.functional as F is PyTorch convention -# N817 ignored because importing using acronyms is convention (DistributedDataParallel as DDP) -# E731 allow usage of assigning lambda expressions -# E701 let black auto-format statements on one line -# E704 let black 
auto-format statements on one line -lint.ignore = [ - "E203", "E305", "E402", "E501", "E721", "E741", "F405", "F821", "F841", - "C408", "E302", "W291", "E303", "N812", "N817", "E731", "E701", - # These are the additional ones we started ignoring after moving to ruff. We should look into each one of them later. - "C901", "C405", "C414", "N803", "N999", "C403", "C416", "B028", "C419", "C401", "B023", - # shebang has extra meaning in fbcode lints, so I think it's not worth trying - # to line this up with executable bit - "EXE001", - # random naming hints don't need - "N802", - # these ignores are from flake8-bugbear; please fix! - "B007", "B008" -] - -exclude = [ - "./.git", - "./docs/*", - "./build", - "./scripts", - "./venv", - "*.pyi", - ".pre-commit-config.yaml", - "*.md", - ".flake8" -] diff --git a/.stats.yml b/.stats.yml index 4517049a..1f5340b2 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,4 @@ -configured_endpoints: 51 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/meta%2Fllama-stack-d52e4c19360cc636336d6a60ba6af1db89736fc0a3025c2b1d11870a5f1a1e3d.yml +configured_endpoints: 106 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-7c002d994b96113926e24a0f99ff80a52b937481e383b584496087ecdc2d92d6.yml +openapi_spec_hash: e9c825e9199979fc5f754426a1334499 +config_hash: e67fd054e95c1e82f78f4b834e96bb65 diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..7070ef69 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,235 @@ +# Changelog + +## 0.2.18-alpha.2 (2025-08-12) + +Full Changelog: [v0.2.18-alpha.1...v0.2.18-alpha.2](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.18-alpha.1...v0.2.18-alpha.2) + +### Features + +* **api:** update via SDK Studio ([e8e7433](https://github.com/llamastack/llama-stack-client-python/commit/e8e7433dab536ac6f03e72acfbf82505298fd44d)) + +## 0.2.18-alpha.1 (2025-08-12) + +Full Changelog: [v0.2.18-alpha.1...v0.2.18-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.18-alpha.1...v0.2.18-alpha.1) + +### Features + +* **api:** update via SDK Studio ([db99707](https://github.com/llamastack/llama-stack-client-python/commit/db9970745de255a3718edb6aee8360b55f58592e)) + +## 0.2.18-alpha.1 (2025-08-12) + +Full Changelog: [v0.2.17...v0.2.18-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.17...v0.2.18-alpha.1) + +### Features + +* **api:** update via SDK Studio ([8afae6c](https://github.com/llamastack/llama-stack-client-python/commit/8afae6c1e1a4614cc59db7ae511440693e0479a6)) +* **api:** update via SDK Studio ([143a973](https://github.com/llamastack/llama-stack-client-python/commit/143a973ea9ff81da1d93c421af8c85dbd171ef3c)) +* **api:** update via SDK Studio ([b8e32bb](https://github.com/llamastack/llama-stack-client-python/commit/b8e32bbbf68f8a75c956079119c6b65d7ac165e5)) +* **api:** update via SDK Studio ([1a2c77d](https://github.com/llamastack/llama-stack-client-python/commit/1a2c77df732eb9d0c031e0ff7558176fbf754ad8)) +* **api:** update via SDK Studio ([d66fb5f](https://github.com/llamastack/llama-stack-client-python/commit/d66fb5fe89acb66a55066d82b849bbf4d402db99)) + + +### Chores + +* **internal:** update comment in script ([8d599cd](https://github.com/llamastack/llama-stack-client-python/commit/8d599cd47f98f704f89c9bd979a55cc334895107)) +* update @stainless-api/prism-cli to v5.15.0 
([5f8ae94](https://github.com/llamastack/llama-stack-client-python/commit/5f8ae94955bb3403c0abe89f2999c2d49af97b07)) + +## 0.2.17 (2025-08-06) + +Full Changelog: [v0.2.15...v0.2.17](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.15...v0.2.17) + +### Features + +* **api:** update via SDK Studio ([9c69353](https://github.com/llamastack/llama-stack-client-python/commit/9c693530330ad5e2bb427ccfeb154ac993601e05)) +* **api:** update via SDK Studio ([5f90b04](https://github.com/llamastack/llama-stack-client-python/commit/5f90b04bd0b07cc20729551b88578ff322231723)) +* **api:** update via SDK Studio ([6e26309](https://github.com/llamastack/llama-stack-client-python/commit/6e26309d14cb0b0a0b5d43b7cbab56528b878fd9)) +* **api:** update via SDK Studio ([54ff3c4](https://github.com/llamastack/llama-stack-client-python/commit/54ff3c405af01ce068230990654b75d26967e745)) +* **api:** update via SDK Studio ([a34c823](https://github.com/llamastack/llama-stack-client-python/commit/a34c8230f8a3f6f356c4f990f66bb02eda229819)) +* **api:** update via SDK Studio ([f6b80ca](https://github.com/llamastack/llama-stack-client-python/commit/f6b80caaad58711957b7935f9b6833528ae3bd78)) +* **api:** update via SDK Studio ([2a4296d](https://github.com/llamastack/llama-stack-client-python/commit/2a4296d3df60787b4fc3fe2812d06d6080b0d6db)) +* **api:** update via SDK Studio ([07691ac](https://github.com/llamastack/llama-stack-client-python/commit/07691acac571ff68cd1ff90f9d60ac3e49b1e144)) +* **api:** update via SDK Studio ([585f9ce](https://github.com/llamastack/llama-stack-client-python/commit/585f9ce929e0ac17775febb573fa109d9f3d07ac)) +* **api:** update via SDK Studio ([6d609e3](https://github.com/llamastack/llama-stack-client-python/commit/6d609e3b9e31477fd540dff8c0ecb24bc9d524d1)) +* **api:** update via SDK Studio ([3dbf2a4](https://github.com/llamastack/llama-stack-client-python/commit/3dbf2a4f205d7199cd4d92a7f3f6a2ee5723cb71)) +* **api:** update via SDK Studio ([dd0ae96](https://github.com/llamastack/llama-stack-client-python/commit/dd0ae96300ce6d2940063a7b33c0948d250bbc5e)) +* **api:** update via SDK Studio ([80a2969](https://github.com/llamastack/llama-stack-client-python/commit/80a296977917382fa42b0def0c6bf1a66be45780)) +* **api:** update via SDK Studio ([748e6db](https://github.com/llamastack/llama-stack-client-python/commit/748e6db5002f1ec2c8880414b803d1cfc3ff95ea)) +* **api:** update via SDK Studio ([b6fa2b1](https://github.com/llamastack/llama-stack-client-python/commit/b6fa2b194bc4d66adcc40b5cc07404c45a211cd3)) +* **api:** update via SDK Studio ([e97f870](https://github.com/llamastack/llama-stack-client-python/commit/e97f870b037685af1e65d8d895a063ab2381dc81)) +* **api:** update via SDK Studio ([489b54d](https://github.com/llamastack/llama-stack-client-python/commit/489b54d7acfee41874e2fa253578d3e95f6b111a)) +* **api:** update via SDK Studio ([13cfa4a](https://github.com/llamastack/llama-stack-client-python/commit/13cfa4aa1f12b7369f1bc13c3dff8d4cea46a3f6)) +* **api:** update via SDK Studio ([25c1e49](https://github.com/llamastack/llama-stack-client-python/commit/25c1e49f503e15649e0cdc18b0ac8dd00c2dff7e)) +* **api:** update via SDK Studio ([4a54d61](https://github.com/llamastack/llama-stack-client-python/commit/4a54d613ee0a7ff7a561bc41db626aaea3c00096)) +* **api:** update via SDK Studio ([ac4614a](https://github.com/llamastack/llama-stack-client-python/commit/ac4614a70aa632a7bc55037aa777f0ab40ea908b)) +* **api:** update via SDK Studio 
([a201e22](https://github.com/llamastack/llama-stack-client-python/commit/a201e22e2bad1b2290092784d4e2255eaaf73758)) +* **client:** support file upload requests ([e84459f](https://github.com/llamastack/llama-stack-client-python/commit/e84459fc65a28e68ed185d6dba28b559e6882b99)) +* **client:** support file upload requests ([6c73da7](https://github.com/llamastack/llama-stack-client-python/commit/6c73da7c97a558468296f1e8d6da5ba7ae9ea1c4)) + + +### Bug Fixes + +* **ci:** correct conditional ([d7c2ab8](https://github.com/llamastack/llama-stack-client-python/commit/d7c2ab87065aaade14a143113c90a0082ef35ee4)) +* **ci:** correct conditional ([4368fbd](https://github.com/llamastack/llama-stack-client-python/commit/4368fbd1f733cfda7a2d4273f0c983e44be63fe1)) +* **client:** don't send Content-Type header on GET requests ([d6a80a5](https://github.com/llamastack/llama-stack-client-python/commit/d6a80a5c38305c63494a9f8498e47ba0c0031295)) +* **client:** don't send Content-Type header on GET requests ([c6e0026](https://github.com/llamastack/llama-stack-client-python/commit/c6e0026218d4fde46e23663b55384bdf417fbcbf)) +* helptext for 'inspect version' and 'providers inspect' ([#8](https://github.com/llamastack/llama-stack-client-python/issues/8)) ([d79345e](https://github.com/llamastack/llama-stack-client-python/commit/d79345e42d6a3f3b828396b1ac00e2ecf196c0eb)) +* kill requirements.txt ([a6bd44c](https://github.com/llamastack/llama-stack-client-python/commit/a6bd44c5bdb9415a8cacd53b552b8b43e341d91c)) +* model register missing model-type and not accepting metadata ([#11](https://github.com/llamastack/llama-stack-client-python/issues/11)) ([f3f4515](https://github.com/llamastack/llama-stack-client-python/commit/f3f45155864379f227824d00f6febb1b46ed4839)) +* **parsing:** correctly handle nested discriminated unions ([9f95130](https://github.com/llamastack/llama-stack-client-python/commit/9f95130b77729d2adcf906355ddef41d109999d0)) +* **parsing:** correctly handle nested discriminated unions ([8b7e9ba](https://github.com/llamastack/llama-stack-client-python/commit/8b7e9ba42dbafb89d765f870d7874c86f47b2e7b)) +* **parsing:** ignore empty metadata ([a8a398f](https://github.com/llamastack/llama-stack-client-python/commit/a8a398fb7ca67117d3b7663354a406d1432fd8fb)) +* **parsing:** ignore empty metadata ([264f24c](https://github.com/llamastack/llama-stack-client-python/commit/264f24c9c564a0a5ea862418bfebb6c3cad01cf0)) +* **parsing:** parse extra field types ([f981bdc](https://github.com/llamastack/llama-stack-client-python/commit/f981bdc927411cb3b69febd578d39299dac27670)) +* **parsing:** parse extra field types ([d54c5db](https://github.com/llamastack/llama-stack-client-python/commit/d54c5db3df7b6e5dca66e8e7c855998c67d03250)) +* pre-commit formatting ([a83b1c3](https://github.com/llamastack/llama-stack-client-python/commit/a83b1c36b8acff7d7f762d0eab9d832a3320bcce)) +* update agent event logger ([#10](https://github.com/llamastack/llama-stack-client-python/issues/10)) ([0a10b70](https://github.com/llamastack/llama-stack-client-python/commit/0a10b70f91f28f533710433ae860789f2cb0f70f)) + + +### Chores + +* **ci:** change upload type ([7827103](https://github.com/llamastack/llama-stack-client-python/commit/78271038dcd35ea78fc2addf0676c4cdbea07a0e)) +* **ci:** change upload type ([5febc13](https://github.com/llamastack/llama-stack-client-python/commit/5febc136956ce6ac5af8e638a6fa430a9d0f3dc3)) +* **ci:** only run for pushes and fork pull requests 
([03a7636](https://github.com/llamastack/llama-stack-client-python/commit/03a7636bce1974ef9be709cd6df395d687f0f22b)) +* **ci:** only run for pushes and fork pull requests ([c05df66](https://github.com/llamastack/llama-stack-client-python/commit/c05df6620f31a4860e11c5b94b3d7bf85fc9d197)) +* **ci:** only run for pushes and fork pull requests ([87c9d01](https://github.com/llamastack/llama-stack-client-python/commit/87c9d01fd4f8451882e1b936ba43375e20a56622)) +* **ci:** only run for pushes and fork pull requests ([9d04993](https://github.com/llamastack/llama-stack-client-python/commit/9d04993f6cc133f6ea6ca943d14a59e9b309938a)) +* **ci:** only run for pushes and fork pull requests ([4da7f49](https://github.com/llamastack/llama-stack-client-python/commit/4da7f495eb06d0cb386deeef3825c4876c64cbe2)) +* **ci:** only run for pushes and fork pull requests ([8b37cd3](https://github.com/llamastack/llama-stack-client-python/commit/8b37cd35c06ba045c25be9f6777b854bd9d9dbf8)) +* **ci:** only run for pushes and fork pull requests ([3f0a4b9](https://github.com/llamastack/llama-stack-client-python/commit/3f0a4b9ba82bd9db5ae9f854a2a775781eb75fd0)) +* **ci:** only run for pushes and fork pull requests ([8a1efad](https://github.com/llamastack/llama-stack-client-python/commit/8a1efade982126d1742c912069321ce7bd267bd8)) +* delete unused scripts based on rye ([dae6506](https://github.com/llamastack/llama-stack-client-python/commit/dae65069d31bc4d3e55c15f3f1848d00c35a75ce)) +* **internal:** bump pinned h11 dep ([4a7073f](https://github.com/llamastack/llama-stack-client-python/commit/4a7073f0e60aea8a2b7ec6d72b31fc9554234ef0)) +* **internal:** bump pinned h11 dep ([0568d6d](https://github.com/llamastack/llama-stack-client-python/commit/0568d6d078eab8f65ac191218d6467df9bfa7901)) +* **internal:** codegen related update ([4d4afec](https://github.com/llamastack/llama-stack-client-python/commit/4d4afec936a1e6b2f0bf96a5508fb54620c894e4)) +* **internal:** codegen related update ([7cd543f](https://github.com/llamastack/llama-stack-client-python/commit/7cd543f782490fe6ed5a90474114c1ef084a8b34)) +* **internal:** codegen related update ([3165cad](https://github.com/llamastack/llama-stack-client-python/commit/3165cad3251782f4bfe529d9bdde1f18b5813fc0)) +* **internal:** codegen related update ([c27a701](https://github.com/llamastack/llama-stack-client-python/commit/c27a7015e1627582e00de6c4f6cbc9df9da99c54)) +* **internal:** codegen related update ([aa45ba3](https://github.com/llamastack/llama-stack-client-python/commit/aa45ba35f7107e6278c45134f6130ffaf99eb20e)) +* **internal:** codegen related update ([5d6ccb5](https://github.com/llamastack/llama-stack-client-python/commit/5d6ccb56adf0cdeafd2d027ba2f897fd2f5c7070)) +* **internal:** fix ruff target version ([c50a0e0](https://github.com/llamastack/llama-stack-client-python/commit/c50a0e0ee44f97ee1ac8ac2a9e80860ae7b71a37)) +* **internal:** version bump ([5af7869](https://github.com/llamastack/llama-stack-client-python/commit/5af7869be75f6e577c57509c11e55a6dbbcdc4d8)) +* **internal:** version bump ([148be8d](https://github.com/llamastack/llama-stack-client-python/commit/148be8d37f92a77e553edd599ad4a5981642b40c)) +* **internal:** version bump ([86a0766](https://github.com/llamastack/llama-stack-client-python/commit/86a0766da6a2e282a2185b42530266aaa4c1a9ce)) +* **internal:** version bump ([5d6cc6b](https://github.com/llamastack/llama-stack-client-python/commit/5d6cc6be97ca098140575e65803d3d51ddc1e9ea)) +* **internal:** version bump 
([cc7a519](https://github.com/llamastack/llama-stack-client-python/commit/cc7a51927110f8f4ef7309b9f6c92ace0434b24e)) +* **internal:** version bump ([8f15ef0](https://github.com/llamastack/llama-stack-client-python/commit/8f15ef01b12c88af245e477362f86785586b697f)) +* **internal:** version bump ([f52cb89](https://github.com/llamastack/llama-stack-client-python/commit/f52cb89e8a8d2e2b41155b6b5db2e700d85fcc29)) +* **internal:** version bump ([2e1a629](https://github.com/llamastack/llama-stack-client-python/commit/2e1a629e8d24c37031d8d853ec5e3d9200952934)) +* **internal:** version bump ([da26ed0](https://github.com/llamastack/llama-stack-client-python/commit/da26ed01f5ad7ff77d0b2166a0c282806a6d1aff)) +* **internal:** version bump ([3727fa5](https://github.com/llamastack/llama-stack-client-python/commit/3727fa5703c3e6cfc38fc963650cee1af23c6d68)) +* **internal:** version bump ([443ce02](https://github.com/llamastack/llama-stack-client-python/commit/443ce023733e06e1a83920727630ad4442aa2104)) +* **internal:** version bump ([b2875ec](https://github.com/llamastack/llama-stack-client-python/commit/b2875ecbe69976ccaeeafb7b6216b711a0214edb)) +* **internal:** version bump ([9a4320d](https://github.com/llamastack/llama-stack-client-python/commit/9a4320d7a4a81412a8657f23a9b8e3331770951a)) +* **internal:** version bump ([39155e5](https://github.com/llamastack/llama-stack-client-python/commit/39155e53bff8e0255b5c62e7aa3e9b801c719f96)) +* **internal:** version bump ([607c7be](https://github.com/llamastack/llama-stack-client-python/commit/607c7bea3d8e24d12069fa8a496380319badd71c)) +* **internal:** version bump ([62901e7](https://github.com/llamastack/llama-stack-client-python/commit/62901e7b3bb26956f28b2443508d59ab6bc926b4)) +* **internal:** version bump ([4132af9](https://github.com/llamastack/llama-stack-client-python/commit/4132af981fe9d59864c6f2d23258c893200355c1)) +* **internal:** version bump ([e6ae920](https://github.com/llamastack/llama-stack-client-python/commit/e6ae920385cf6a92f1f0623428a61e0325521e67)) +* **internal:** version bump ([96768dc](https://github.com/llamastack/llama-stack-client-python/commit/96768dc3db60936a960a9a46b9597df292a9e85e)) +* **internal:** version bump ([74f7eda](https://github.com/llamastack/llama-stack-client-python/commit/74f7eda7bf4a5d024bdeaf36a0f228d610134530)) +* **internal:** version bump ([d59862a](https://github.com/llamastack/llama-stack-client-python/commit/d59862a1bca2d31bf0f6cd0138bf2a1d804aad9d)) +* **internal:** version bump ([ce98414](https://github.com/llamastack/llama-stack-client-python/commit/ce98414b294a451ac67b9fcee045f28ecce7b408)) +* **internal:** version bump ([9746774](https://github.com/llamastack/llama-stack-client-python/commit/9746774316aed9a04b5ee161452df14e88f3e62c)) +* **internal:** version bump ([6114dbf](https://github.com/llamastack/llama-stack-client-python/commit/6114dbf530354a56539a16a49a7c314bf643fca7)) +* **internal:** version bump ([02c9953](https://github.com/llamastack/llama-stack-client-python/commit/02c9953a78c22d447d5a93b901a2684cce25ee3d)) +* **internal:** version bump ([16f2953](https://github.com/llamastack/llama-stack-client-python/commit/16f2953d3292c3787e28f5178d1d149d6c808258)) +* **internal:** version bump ([c32029b](https://github.com/llamastack/llama-stack-client-python/commit/c32029b26c4e10bba8378cbb61d6b2d7e6c3d10d)) +* **internal:** version bump ([aef5dee](https://github.com/llamastack/llama-stack-client-python/commit/aef5dee81270b6372479fbeb2257d42f487dfcf3)) +* **internal:** version bump 
([590de6d](https://github.com/llamastack/llama-stack-client-python/commit/590de6d2ac748199b489c00fe8f79d9f8111a283)) +* **internal:** version bump ([072269f](https://github.com/llamastack/llama-stack-client-python/commit/072269f0c2421313a1ba7a9feb372a72cc5f5f0f)) +* **internal:** version bump ([eee6f0b](https://github.com/llamastack/llama-stack-client-python/commit/eee6f0b5cd146fc962d13da371e09e5abd66f05e)) +* **internal:** version bump ([e6a964e](https://github.com/llamastack/llama-stack-client-python/commit/e6a964e9970e5d4bbd9f3bb9dae959ce6488b3bf)) +* **package:** mark python 3.13 as supported ([2afc17b](https://github.com/llamastack/llama-stack-client-python/commit/2afc17ba76b498f6f0c975111bfd9456090d10b5)) +* **package:** mark python 3.13 as supported ([d1a4e40](https://github.com/llamastack/llama-stack-client-python/commit/d1a4e40ba6a6d1b0ecf7b84cff55a79a6c00b925)) +* **project:** add settings file for vscode ([405febd](https://github.com/llamastack/llama-stack-client-python/commit/405febd7158db4c129c854293a735c8c71712bc5)) +* **project:** add settings file for vscode ([1dd3e53](https://github.com/llamastack/llama-stack-client-python/commit/1dd3e5310f668e81d246f929e2bd6b216a4ac9ad)) +* **readme:** fix version rendering on pypi ([ca89c7f](https://github.com/llamastack/llama-stack-client-python/commit/ca89c7fb2e09ef52565f7de34068b3b4bbb575dc)) +* **readme:** fix version rendering on pypi ([193fb64](https://github.com/llamastack/llama-stack-client-python/commit/193fb64864ce57e9a488d9ee874cededeaad1eae)) +* update SDK settings ([2d422f9](https://github.com/llamastack/llama-stack-client-python/commit/2d422f92ee95364dc67c6557beafccde42ea11eb)) +* update SDK settings ([59b933c](https://github.com/llamastack/llama-stack-client-python/commit/59b933ca39e08b9a36669995b3b5424231df84f5)) +* update version ([10ef53e](https://github.com/llamastack/llama-stack-client-python/commit/10ef53e74dbdd72a8dd829957820e61522fbe6ad)) + + +### Build System + +* Bump version to 0.2.14 ([745a94e](https://github.com/llamastack/llama-stack-client-python/commit/745a94e1d2875c8e7b4fac5b1676b890aebf4915)) +* Bump version to 0.2.15 ([8700dc6](https://github.com/llamastack/llama-stack-client-python/commit/8700dc6ed9411d436422ee94af2702f10a96b49e)) +* Bump version to 0.2.15 ([4692024](https://github.com/llamastack/llama-stack-client-python/commit/46920241be5f8b921bbba367e65a7afa3aefd612)) +* Bump version to 0.2.16 ([6ce9b84](https://github.com/llamastack/llama-stack-client-python/commit/6ce9b84007967702f6844679604e1b812df864e4)) +* Bump version to 0.2.17 ([69f67ef](https://github.com/llamastack/llama-stack-client-python/commit/69f67ef77c9ca6ffc089a6d24261272aa2fee36f)) + +## 0.1.0-alpha.4 (2025-06-27) + +Full Changelog: [v0.1.0-alpha.3...v0.1.0-alpha.4](https://github.com/llamastack/llama-stack-client-python/compare/v0.1.0-alpha.3...v0.1.0-alpha.4) + +### Features + +* **api:** update via SDK Studio ([4333cb0](https://github.com/llamastack/llama-stack-client-python/commit/4333cb0307fd99654e53e8f87b3b2951be027b44)) + + +### Bug Fixes + +* **ci:** update pyproject.toml to use uv and remove broken CI ([#5](https://github.com/llamastack/llama-stack-client-python/issues/5)) ([7bc925c](https://github.com/llamastack/llama-stack-client-python/commit/7bc925c00401799d8f3345a4873f1b0028cb45ea)) + + +### Chores + +* **internal:** version bump ([867ea24](https://github.com/llamastack/llama-stack-client-python/commit/867ea24344fd71fc9787807a47144af5e3de82f8)) + +## 0.1.0-alpha.3 (2025-06-27) + +Full Changelog: 
[v0.1.0-alpha.2...v0.1.0-alpha.3](https://github.com/llamastack/llama-stack-client-python/compare/v0.1.0-alpha.2...v0.1.0-alpha.3) + +### Features + +* **api:** update via SDK Studio ([e87f225](https://github.com/llamastack/llama-stack-client-python/commit/e87f2257b00a287dd34dc95f4d39661728075891)) +* make custom code changes ([#3](https://github.com/llamastack/llama-stack-client-python/issues/3)) ([83fa371](https://github.com/llamastack/llama-stack-client-python/commit/83fa37124133aab73bf2bbbdcd39338b9a192475)) + +## 0.1.0-alpha.2 (2025-06-27) + +Full Changelog: [v0.1.0-alpha.1...v0.1.0-alpha.2](https://github.com/llamastack/llama-stack-client-python/compare/v0.1.0-alpha.1...v0.1.0-alpha.2) + +### Features + +* **api:** update via SDK Studio ([f568f65](https://github.com/llamastack/llama-stack-client-python/commit/f568f6508002eff7eae4a6b0a1cc13aba6fab98e)) + +## 0.1.0-alpha.1 (2025-06-27) + +Full Changelog: [v0.0.1-alpha.0...v0.1.0-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.0.1-alpha.0...v0.1.0-alpha.1) + +### Features + +* **client:** add follow_redirects request option ([a77a9ee](https://github.com/llamastack/llama-stack-client-python/commit/a77a9eed9038782ba6b93ce0d3147ee4a6b8a3b7)) +* **client:** add support for aiohttp ([d78982b](https://github.com/llamastack/llama-stack-client-python/commit/d78982b197c5e0a0fb67afcb44e9644fd8d931be)) + + +### Bug Fixes + +* **ci:** release-doctor — report correct token name ([6f3a4e2](https://github.com/llamastack/llama-stack-client-python/commit/6f3a4e24d8b357d7dc01adb0d9f736989fa9517d)) +* **client:** correctly parse binary response | stream ([85d6bbd](https://github.com/llamastack/llama-stack-client-python/commit/85d6bbd97efac7509cbff0bb2d461a80d09b5e61)) +* **package:** support direct resource imports ([a862d55](https://github.com/llamastack/llama-stack-client-python/commit/a862d551553aac41573306ce39480e1eb16ea3d3)) +* **tests:** fix: tests which call HTTP endpoints directly with the example parameters ([347a4bf](https://github.com/llamastack/llama-stack-client-python/commit/347a4bffa920f5727a4c02eba18bd207001698b5)) + + +### Chores + +* change publish docs url ([fdd7a07](https://github.com/llamastack/llama-stack-client-python/commit/fdd7a075564ac206e91b2d06bf130c4de9473838)) +* **ci:** enable for pull requests ([c9b6347](https://github.com/llamastack/llama-stack-client-python/commit/c9b6347f084acb1566b8e8283cf0bcfde7f6562c)) +* **ci:** fix installation instructions ([40d9854](https://github.com/llamastack/llama-stack-client-python/commit/40d9854bd2630a471f1ca93d249e4d44b73fa864)) +* **ci:** upload sdks to package manager ([2d2282b](https://github.com/llamastack/llama-stack-client-python/commit/2d2282bb49d58daef1f32fa0f1e5a356abf8df0d)) +* **docs:** grammar improvements ([6f57b13](https://github.com/llamastack/llama-stack-client-python/commit/6f57b1363367de7ed5035fd1d6ba1a071eee67ba)) +* **docs:** remove reference to rye shell ([bcf315a](https://github.com/llamastack/llama-stack-client-python/commit/bcf315ae00c458f89dfa3684bcc7abdb732b6c5f)) +* **docs:** remove unnecessary param examples ([60ec829](https://github.com/llamastack/llama-stack-client-python/commit/60ec829e809156217cf2f911b3cac6b23a06baad)) +* **internal:** avoid errors for isinstance checks on proxies ([758a188](https://github.com/llamastack/llama-stack-client-python/commit/758a188dbfaa284a13b70816689c99917a05d16c)) +* **internal:** codegen related update 
([ab9f05c](https://github.com/llamastack/llama-stack-client-python/commit/ab9f05cc1da5b21afceacdf9c8eb54b6e59eed01)) +* **internal:** update conftest.py ([218e172](https://github.com/llamastack/llama-stack-client-python/commit/218e172c16014dad41a7c189c5620077955d6bdf)) +* **readme:** update badges ([9b63e1b](https://github.com/llamastack/llama-stack-client-python/commit/9b63e1b7dbbbd7556d046a2a4224a8385bbea24c)) +* **tests:** add tests for httpx client instantiation & proxies ([b27b11b](https://github.com/llamastack/llama-stack-client-python/commit/b27b11bbe0a9c5778b757733c11828d9603307ea)) +* **tests:** run tests in parallel ([1287a3c](https://github.com/llamastack/llama-stack-client-python/commit/1287a3c11f668d916c8c7af534a48523e2e69140)) +* **tests:** skip some failing tests on the latest python versions ([73b5705](https://github.com/llamastack/llama-stack-client-python/commit/73b57051c48d2ec42b844a288ffc9b5e3bbe6f2b)) +* update SDK settings ([e54ba91](https://github.com/llamastack/llama-stack-client-python/commit/e54ba9163792ab80362a189acb825bcd00e5384b)) + + +### Documentation + +* **client:** fix httpx.Timeout documentation reference ([497f2a1](https://github.com/llamastack/llama-stack-client-python/commit/497f2a198140f73525a880497bf1c51b5749c1f3)) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7cdb9833..e4d657d0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,16 +1,36 @@ ## Setting up the environment -### With UV +### With Rye -We use [UV](https://docs.astral.sh/uv/) to manage dependencies so we highly recommend [installing it](https://docs.astral.sh/uv/installation/) as it will automatically provision a Python environment with the expected Python version. +We use [Rye](https://rye.astral.sh/) to manage dependencies because it will automatically provision a Python environment with the expected Python version. To set it up, run: -After installing UV, you'll just have to run this command: +```sh +$ ./scripts/bootstrap +``` -```bash -uv sync +Or [install Rye manually](https://rye.astral.sh/guide/installation/) and run: + +```sh +$ rye sync --all-features ``` +You can then run scripts using `rye run python script.py` or by activating the virtual environment: + +```sh +# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work +$ source .venv/bin/activate + +# now you can omit the `rye run` prefix +$ python script.py +``` +### Without Rye + +Alternatively if you don't want to install `Rye`, you can stick with the standard `pip` setup by ensuring you have the Python version specified in `.python-version`, create a virtual environment however you desire and then install dependencies using this command: + +```sh +$ pip install -r requirements-dev.lock +``` ## Modifying/Adding code @@ -18,14 +38,31 @@ Most of the SDK is generated code. Modifications to code will be persisted betwe result in merge conflicts between manual patches and changes from the generator. The generator will never modify the contents of the `src/llama_stack_client/lib/` and `examples/` directories. +## Adding and running examples + +All files in the `examples/` directory are not modified by the generator and can be freely edited or added to. 
+ +```py +# add an example to examples/<your-example>.py + +#!/usr/bin/env -S rye run python +… +``` + +```sh +$ chmod +x examples/<your-example>.py +# run the example against your api +$ ./examples/<your-example>.py +``` + ## Using the repository from source If you’d like to use the repository from source, you can either install from git or link to a cloned repository: To install via git: -```bash -uv pip install git+ssh://git@github.com/stainless-sdks/llama-stack-python.git +```sh +$ pip install git+ssh://git@github.com/llamastack/llama-stack-client-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -34,37 +71,46 @@ Building this package will create two files in the `dist/` directory, a `.tar.gz To create a distributable version of the library, all you have to do is run this command: -```bash -uv build +```sh +$ rye build +# or +$ python -m build ``` Then to install: ```sh -uv pip install ./path-to-wheel-file.whl +$ pip install ./path-to-wheel-file.whl ``` ## Running tests Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests. -```bash # you will need npm installed -npx prism mock path/to/your/openapi.yml +```sh # you will need npm installed +$ npx prism mock path/to/your/openapi.yml ``` -```bash -uv run pytest +```sh +$ ./scripts/test ``` ## Linting and formatting -There is a pre-commit hook that will run ruff and black on the code. +This repository uses [ruff](https://github.com/astral-sh/ruff) and +[black](https://github.com/psf/black) to format the code in the repository. -To run the pre-commit hook: +To lint: -```bash -uv run pre-commit +```sh +$ ./scripts/lint +``` + +To format and fix all ruff issues automatically: + +```sh +$ ./scripts/format ``` ## Publishing and releases @@ -74,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/stainless-sdks/llama-stack-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/llamastack/llama-stack-client-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. ### Publish manually diff --git a/README.md b/README.md index 2f2694b0..cb5c6237 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ # Llama Stack Client Python API library + [![PyPI version](https://img.shields.io/pypi/v/llama_stack_client.svg)](https://pypi.org/project/llama_stack_client/) [![PyPI - Downloads](https://img.shields.io/pypi/dm/llama-stack-client)](https://pypi.org/project/llama-stack-client/) [![Discord](https://img.shields.io/discord/1257833999603335178)](https://discord.gg/llama-stack) @@ -11,9 +12,9 @@ It is generated with [Stainless](https://www.stainlessapi.com/). ## Documentation -For starting up a Llama Stack server, please checkout our guides in our [llama-stack](https://github.com/meta-llama/llama-stack/blob/main/docs/resources/llama-stack-spec.html) repo. +For starting up a Llama Stack server, please checkout our [Quickstart guide to start a Llama Stack server](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html) -The REST API documentation can be found on our [llama-stack OpenAPI spec](https://github.com/meta-llama/llama-stack/blob/main/docs/resources/llama-stack-spec.html).
The full API of this library can be found in [api.md](api.md). +The REST API documentation can be found on [llama-stack API Reference](https://llama-stack.readthedocs.io/en/latest/references/api_reference/index.html). The full API of this library can be found in [api.md](api.md). You can find more example apps with client SDKs to talk with the Llama Stack server in our [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main) repo. @@ -42,6 +43,8 @@ response = client.chat.completions.create( print(response) ``` +While you can provide an `api_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) to add `LLAMA_STACK_CLIENT_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control. + After installing the `llama-stack-client` package, you can also use the [`llama-stack-client` CLI](https://github.com/meta-llama/llama-stack/tree/main/llama-stack-client) to interact with the Llama Stack server. ```bash llama-stack-client inference chat-completion --message "hello, what model are you" diff --git a/SECURITY.md b/SECURITY.md index 0117165c..1b5f3a4d 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,9 +2,9 @@ ## Reporting Security Issues -This SDK is generated by [Stainless Software Inc](http://stainlessapi.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. +This SDK is generated by [Stainless Software Inc](http://stainless.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. -To report a security issue, please contact the Stainless team at security@stainlessapi.com. +To report a security issue, please contact the Stainless team at security@stainless.com. ## Responsible Disclosure @@ -16,11 +16,11 @@ before making any information public. ## Reporting Non-SDK Related Security Issues If you encounter security issues that are not directly related to SDKs but pertain to the services -or products provided by Llama Stack Client please follow the respective company's security reporting guidelines. +or products provided by Llama Stack Client, please follow the respective company's security reporting guidelines. ### Llama Stack Client Terms and Policies -Please contact dev-feedback@llama-stack-client.com for any questions or concerns regarding security of our services. +Please contact llamastack@meta.com for any questions or concerns regarding the security of our services. 
--- diff --git a/api.md b/api.md index b7a863dd..4056f4a6 100644 --- a/api.md +++ b/api.md @@ -2,29 +2,106 @@ ```python from llama_stack_client.types import ( - Attachment, + AgentConfig, BatchCompletion, + ChatCompletionResponse, CompletionMessage, + ContentDelta, + Document, + InterleavedContent, + InterleavedContentItem, + Message, + ParamType, + QueryConfig, + QueryGeneratorConfig, + QueryResult, + ResponseFormat, + ReturnType, + SafetyViolation, SamplingParams, + ScoringResult, SystemMessage, ToolCall, + ToolCallOrString, + ToolParamDefinition, ToolResponseMessage, UserMessage, ) ``` -# Telemetry +# Toolgroups + +Types: + +```python +from llama_stack_client.types import ListToolGroupsResponse, ToolGroup, ToolgroupListResponse +``` + +Methods: + +- client.toolgroups.list() -> ToolgroupListResponse +- client.toolgroups.get(toolgroup_id) -> ToolGroup +- client.toolgroups.register(\*\*params) -> None +- client.toolgroups.unregister(toolgroup_id) -> None + +# Tools + +Types: + +```python +from llama_stack_client.types import ListToolsResponse, Tool, ToolListResponse +``` + +Methods: + +- client.tools.list(\*\*params) -> ToolListResponse +- client.tools.get(tool_name) -> Tool + +# ToolRuntime + +Types: + +```python +from llama_stack_client.types import ToolDef, ToolInvocationResult, ToolRuntimeListToolsResponse +``` + +Methods: + +- client.tool_runtime.invoke_tool(\*\*params) -> ToolInvocationResult +- client.tool_runtime.list_tools(\*\*params) -> ToolRuntimeListToolsResponse + +## RagTool + +Methods: + +- client.tool_runtime.rag_tool.insert(\*\*params) -> None +- client.tool_runtime.rag_tool.query(\*\*params) -> QueryResult + +# Responses + +Types: + +```python +from llama_stack_client.types import ResponseObject, ResponseObjectStream, ResponseListResponse +``` + +Methods: + +- client.responses.create(\*\*params) -> ResponseObject +- client.responses.retrieve(response_id) -> ResponseObject +- client.responses.list(\*\*params) -> SyncOpenAICursorPage[ResponseListResponse] + +## InputItems Types: ```python -from llama_stack_client.types import TelemetryGetTraceResponse +from llama_stack_client.types.responses import InputItemListResponse ``` Methods: -- client.telemetry.get_trace(\*\*params) -> TelemetryGetTraceResponse -- client.telemetry.log(\*\*params) -> None +- client.responses.input_items.list(response_id, \*\*params) -> InputItemListResponse # Agents @@ -34,313 +111,483 @@ Types: from llama_stack_client.types import ( InferenceStep, MemoryRetrievalStep, - RestAPIExecutionConfig, ShieldCallStep, ToolExecutionStep, - ToolParamDefinition, + ToolResponse, AgentCreateResponse, + AgentRetrieveResponse, + AgentListResponse, ) ``` Methods: -- client.agents.create(\*\*params) -> AgentCreateResponse -- client.agents.delete(\*\*params) -> None +- client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.retrieve(agent_id) -> AgentRetrieveResponse +- client.agents.list(\*\*params) -> AgentListResponse +- client.agents.delete(agent_id) -> None -## Sessions +## Session Types: ```python -from llama_stack_client.types.agents import Session, SessionCreateResponse +from llama_stack_client.types.agents import Session, SessionCreateResponse, SessionListResponse ``` Methods: -- client.agents.sessions.create(\*\*params) -> SessionCreateResponse -- client.agents.sessions.retrieve(\*\*params) -> Session -- client.agents.sessions.delete(\*\*params) -> None +- client.agents.session.create(agent_id, \*\*params) -> SessionCreateResponse +- client.agents.session.retrieve(session_id, \*, agent_id, 
\*\*params) -> Session +- client.agents.session.list(agent_id, \*\*params) -> SessionListResponse +- client.agents.session.delete(session_id, \*, agent_id) -> None ## Steps Types: ```python -from llama_stack_client.types.agents import AgentsStep +from llama_stack_client.types.agents import StepRetrieveResponse ``` Methods: -- client.agents.steps.retrieve(\*\*params) -> AgentsStep +- client.agents.steps.retrieve(step_id, \*, agent_id, session_id, turn_id) -> StepRetrieveResponse -## Turns +## Turn Types: ```python -from llama_stack_client.types.agents import AgentsTurnStreamChunk, Turn, TurnStreamEvent +from llama_stack_client.types.agents import ( + AgentTurnResponseStreamChunk, + Turn, + TurnResponseEvent, + TurnResponseEventPayload, +) ``` Methods: -- client.agents.turns.create(\*\*params) -> AgentsTurnStreamChunk -- client.agents.turns.retrieve(\*\*params) -> Turn +- client.agents.turn.create(session_id, \*, agent_id, \*\*params) -> Turn +- client.agents.turn.retrieve(turn_id, \*, agent_id, session_id) -> Turn +- client.agents.turn.resume(turn_id, \*, agent_id, session_id, \*\*params) -> Turn # Datasets Types: ```python -from llama_stack_client.types import TrainEvalDataset +from llama_stack_client.types import ( + ListDatasetsResponse, + DatasetRetrieveResponse, + DatasetListResponse, + DatasetIterrowsResponse, + DatasetRegisterResponse, +) ``` Methods: -- client.datasets.create(\*\*params) -> None -- client.datasets.delete(\*\*params) -> None -- client.datasets.get(\*\*params) -> TrainEvalDataset +- client.datasets.retrieve(dataset_id) -> DatasetRetrieveResponse +- client.datasets.list() -> DatasetListResponse +- client.datasets.appendrows(dataset_id, \*\*params) -> None +- client.datasets.iterrows(dataset_id, \*\*params) -> DatasetIterrowsResponse +- client.datasets.register(\*\*params) -> DatasetRegisterResponse +- client.datasets.unregister(dataset_id) -> None -# Evaluate +# Eval Types: ```python -from llama_stack_client.types import EvaluationJob +from llama_stack_client.types import BenchmarkConfig, EvalCandidate, EvaluateResponse, Job ``` +Methods: + +- client.eval.evaluate_rows(benchmark_id, \*\*params) -> EvaluateResponse +- client.eval.evaluate_rows_alpha(benchmark_id, \*\*params) -> EvaluateResponse +- client.eval.run_eval(benchmark_id, \*\*params) -> Job +- client.eval.run_eval_alpha(benchmark_id, \*\*params) -> Job + ## Jobs +Methods: + +- client.eval.jobs.retrieve(job_id, \*, benchmark_id) -> EvaluateResponse +- client.eval.jobs.cancel(job_id, \*, benchmark_id) -> None +- client.eval.jobs.status(job_id, \*, benchmark_id) -> Job + +# Inspect + Types: ```python -from llama_stack_client.types.evaluate import ( - EvaluationJobArtifacts, - EvaluationJobLogStream, - EvaluationJobStatus, +from llama_stack_client.types import HealthInfo, ProviderInfo, RouteInfo, VersionInfo +``` + +Methods: + +- client.inspect.health() -> HealthInfo +- client.inspect.version() -> VersionInfo + +# Inference + +Types: + +```python +from llama_stack_client.types import ( + ChatCompletionResponseStreamChunk, + CompletionResponse, + EmbeddingsResponse, + TokenLogProbs, + InferenceBatchChatCompletionResponse, ) ``` Methods: -- client.evaluate.jobs.list() -> EvaluationJob -- client.evaluate.jobs.cancel(\*\*params) -> None +- client.inference.batch_chat_completion(\*\*params) -> InferenceBatchChatCompletionResponse +- client.inference.batch_completion(\*\*params) -> BatchCompletion +- client.inference.chat_completion(\*\*params) -> ChatCompletionResponse +- client.inference.completion(\*\*params) -> 
CompletionResponse +- client.inference.embeddings(\*\*params) -> EmbeddingsResponse -### Artifacts +# Embeddings + +Types: + +```python +from llama_stack_client.types import CreateEmbeddingsResponse +``` Methods: -- client.evaluate.jobs.artifacts.list(\*\*params) -> EvaluationJobArtifacts +- client.embeddings.create(\*\*params) -> CreateEmbeddingsResponse -### Logs +# Chat + +Types: + +```python +from llama_stack_client.types import ChatCompletionChunk +``` + +## Completions + +Types: + +```python +from llama_stack_client.types.chat import ( + CompletionCreateResponse, + CompletionRetrieveResponse, + CompletionListResponse, +) +``` Methods: -- client.evaluate.jobs.logs.list(\*\*params) -> EvaluationJobLogStream +- client.chat.completions.create(\*\*params) -> CompletionCreateResponse +- client.chat.completions.retrieve(completion_id) -> CompletionRetrieveResponse +- client.chat.completions.list(\*\*params) -> SyncOpenAICursorPage[CompletionListResponse] -### Status +# Completions + +Types: + +```python +from llama_stack_client.types import CompletionCreateResponse +``` Methods: -- client.evaluate.jobs.status.list(\*\*params) -> EvaluationJobStatus +- client.completions.create(\*\*params) -> CompletionCreateResponse -## QuestionAnswering +# VectorIo + +Types: + +```python +from llama_stack_client.types import QueryChunksResponse +``` Methods: -- client.evaluate.question_answering.create(\*\*params) -> EvaluationJob +- client.vector_io.insert(\*\*params) -> None +- client.vector_io.query(\*\*params) -> QueryChunksResponse -# Evaluations +# VectorDBs + +Types: + +```python +from llama_stack_client.types import ( + ListVectorDBsResponse, + VectorDBRetrieveResponse, + VectorDBListResponse, + VectorDBRegisterResponse, +) +``` Methods: -- client.evaluations.summarization(\*\*params) -> EvaluationJob -- client.evaluations.text_generation(\*\*params) -> EvaluationJob +- client.vector_dbs.retrieve(vector_db_id) -> VectorDBRetrieveResponse +- client.vector_dbs.list() -> VectorDBListResponse +- client.vector_dbs.register(\*\*params) -> VectorDBRegisterResponse +- client.vector_dbs.unregister(vector_db_id) -> None -# Inference +# VectorStores Types: ```python from llama_stack_client.types import ( - ChatCompletionStreamChunk, - CompletionStreamChunk, - TokenLogProbs, - InferenceChatCompletionResponse, - InferenceCompletionResponse, + ListVectorStoresResponse, + VectorStore, + VectorStoreDeleteResponse, + VectorStoreSearchResponse, ) ``` Methods: -- client.inference.chat_completion(\*\*params) -> InferenceChatCompletionResponse -- client.inference.completion(\*\*params) -> InferenceCompletionResponse +- client.vector_stores.create(\*\*params) -> VectorStore +- client.vector_stores.retrieve(vector_store_id) -> VectorStore +- client.vector_stores.update(vector_store_id, \*\*params) -> VectorStore +- client.vector_stores.list(\*\*params) -> SyncOpenAICursorPage[VectorStore] +- client.vector_stores.delete(vector_store_id) -> VectorStoreDeleteResponse +- client.vector_stores.search(vector_store_id, \*\*params) -> VectorStoreSearchResponse -## Embeddings +## Files Types: ```python -from llama_stack_client.types.inference import Embeddings +from llama_stack_client.types.vector_stores import ( + VectorStoreFile, + FileDeleteResponse, + FileContentResponse, +) ``` Methods: -- client.inference.embeddings.create(\*\*params) -> Embeddings +- client.vector_stores.files.create(vector_store_id, \*\*params) -> VectorStoreFile +- client.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFile +- 
client.vector_stores.files.update(file_id, \*, vector_store_id, \*\*params) -> VectorStoreFile +- client.vector_stores.files.list(vector_store_id, \*\*params) -> SyncOpenAICursorPage[VectorStoreFile] +- client.vector_stores.files.delete(file_id, \*, vector_store_id) -> FileDeleteResponse +- client.vector_stores.files.content(file_id, \*, vector_store_id) -> FileContentResponse -# Safety +# Models Types: ```python -from llama_stack_client.types import RunSheidResponse +from llama_stack_client.types import ListModelsResponse, Model, ModelListResponse ``` Methods: -- client.safety.run_shield(\*\*params) -> RunSheidResponse +- client.models.retrieve(model_id) -> Model +- client.models.list() -> ModelListResponse +- client.models.register(\*\*params) -> Model +- client.models.unregister(model_id) -> None -# Memory +# PostTraining Types: ```python -from llama_stack_client.types import ( - QueryDocuments, - MemoryCreateResponse, - MemoryRetrieveResponse, - MemoryListResponse, - MemoryDropResponse, +from llama_stack_client.types import AlgorithmConfig, ListPostTrainingJobsResponse, PostTrainingJob +``` + +Methods: + +- client.post_training.preference_optimize(\*\*params) -> PostTrainingJob +- client.post_training.supervised_fine_tune(\*\*params) -> PostTrainingJob + +## Job + +Types: + +```python +from llama_stack_client.types.post_training import ( + JobListResponse, + JobArtifactsResponse, + JobStatusResponse, ) ``` Methods: -- client.memory.create(\*\*params) -> object -- client.memory.retrieve(\*\*params) -> object -- client.memory.update(\*\*params) -> None -- client.memory.list() -> object -- client.memory.drop(\*\*params) -> str -- client.memory.insert(\*\*params) -> None -- client.memory.query(\*\*params) -> QueryDocuments +- client.post_training.job.list() -> List[Data] +- client.post_training.job.artifacts(\*\*params) -> JobArtifactsResponse +- client.post_training.job.cancel(\*\*params) -> None +- client.post_training.job.status(\*\*params) -> JobStatusResponse -## Documents +# Providers Types: ```python -from llama_stack_client.types.memory import DocumentRetrieveResponse +from llama_stack_client.types import ListProvidersResponse, ProviderListResponse ``` Methods: -- client.memory.documents.retrieve(\*\*params) -> DocumentRetrieveResponse -- client.memory.documents.delete(\*\*params) -> None +- client.providers.retrieve(provider_id) -> ProviderInfo +- client.providers.list() -> ProviderListResponse -# PostTraining +# Routes Types: ```python -from llama_stack_client.types import PostTrainingJob +from llama_stack_client.types import ListRoutesResponse, RouteListResponse ``` Methods: -- client.post_training.preference_optimize(\*\*params) -> PostTrainingJob -- client.post_training.supervised_fine_tune(\*\*params) -> PostTrainingJob +- client.routes.list() -> RouteListResponse -## Jobs +# Moderations Types: ```python -from llama_stack_client.types.post_training import ( - PostTrainingJobArtifacts, - PostTrainingJobLogStream, - PostTrainingJobStatus, -) +from llama_stack_client.types import CreateResponse +``` + +Methods: + +- client.moderations.create(\*\*params) -> CreateResponse + +# Safety + +Types: + +```python +from llama_stack_client.types import RunShieldResponse ``` Methods: -- client.post_training.jobs.list() -> PostTrainingJob -- client.post_training.jobs.artifacts(\*\*params) -> PostTrainingJobArtifacts -- client.post_training.jobs.cancel(\*\*params) -> None -- client.post_training.jobs.logs(\*\*params) -> PostTrainingJobLogStream -- client.post_training.jobs.status(\*\*params) 
-> PostTrainingJobStatus +- client.safety.run_shield(\*\*params) -> RunShieldResponse -# RewardScoring +# Shields Types: ```python -from llama_stack_client.types import RewardScoring, ScoredDialogGenerations +from llama_stack_client.types import ListShieldsResponse, Shield, ShieldListResponse ``` Methods: -- client.reward_scoring.score(\*\*params) -> RewardScoring +- client.shields.retrieve(identifier) -> Shield +- client.shields.list() -> ShieldListResponse +- client.shields.register(\*\*params) -> Shield # SyntheticDataGeneration Types: ```python -from llama_stack_client.types import SyntheticDataGeneration +from llama_stack_client.types import SyntheticDataGenerationResponse ``` Methods: -- client.synthetic_data_generation.generate(\*\*params) -> SyntheticDataGeneration +- client.synthetic_data_generation.generate(\*\*params) -> SyntheticDataGenerationResponse -# BatchInference +# Telemetry Types: ```python -from llama_stack_client.types import BatchChatCompletion +from llama_stack_client.types import ( + Event, + QueryCondition, + QuerySpansResponse, + SpanWithStatus, + Trace, + TelemetryGetSpanResponse, + TelemetryGetSpanTreeResponse, + TelemetryQuerySpansResponse, + TelemetryQueryTracesResponse, +) ``` Methods: -- client.batch_inference.chat_completion(\*\*params) -> BatchChatCompletion -- client.batch_inference.completion(\*\*params) -> BatchCompletion +- client.telemetry.get_span(span_id, \*, trace_id) -> TelemetryGetSpanResponse +- client.telemetry.get_span_tree(span_id, \*\*params) -> TelemetryGetSpanTreeResponse +- client.telemetry.get_trace(trace_id) -> Trace +- client.telemetry.log_event(\*\*params) -> None +- client.telemetry.query_spans(\*\*params) -> TelemetryQuerySpansResponse +- client.telemetry.query_traces(\*\*params) -> TelemetryQueryTracesResponse +- client.telemetry.save_spans_to_dataset(\*\*params) -> None -# Models +# Scoring Types: ```python -from llama_stack_client.types import ModelServingSpec +from llama_stack_client.types import ScoringScoreResponse, ScoringScoreBatchResponse ``` Methods: -- client.models.list() -> ModelServingSpec -- client.models.get(\*\*params) -> Optional +- client.scoring.score(\*\*params) -> ScoringScoreResponse +- client.scoring.score_batch(\*\*params) -> ScoringScoreBatchResponse -# MemoryBanks +# ScoringFunctions Types: ```python -from llama_stack_client.types import MemoryBankSpec +from llama_stack_client.types import ( + ListScoringFunctionsResponse, + ScoringFn, + ScoringFnParams, + ScoringFunctionListResponse, +) ``` Methods: -- client.memory_banks.list() -> MemoryBankSpec -- client.memory_banks.get(\*\*params) -> Optional +- client.scoring_functions.retrieve(scoring_fn_id) -> ScoringFn +- client.scoring_functions.list() -> ScoringFunctionListResponse +- client.scoring_functions.register(\*\*params) -> None -# Shields +# Benchmarks + +Types: + +```python +from llama_stack_client.types import Benchmark, ListBenchmarksResponse, BenchmarkListResponse +``` + +Methods: + +- client.benchmarks.retrieve(benchmark_id) -> Benchmark +- client.benchmarks.list() -> BenchmarkListResponse +- client.benchmarks.register(\*\*params) -> None + +# Files Types: ```python -from llama_stack_client.types import ShieldSpec +from llama_stack_client.types import DeleteFileResponse, File, ListFilesResponse ``` Methods: -- client.shields.list() -> ShieldSpec -- client.shields.get(\*\*params) -> Optional +- client.files.create(\*\*params) -> File +- client.files.retrieve(file_id) -> File +- client.files.list(\*\*params) -> SyncOpenAICursorPage[File] +- 
client.files.delete(file_id) -> DeleteFileResponse +- client.files.content(file_id) -> object diff --git a/bin/check-release-environment b/bin/check-release-environment new file mode 100644 index 00000000..b845b0f4 --- /dev/null +++ b/bin/check-release-environment @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +errors=() + +if [ -z "${PYPI_TOKEN}" ]; then + errors+=("The PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") +fi + +lenErrors=${#errors[@]} + +if [[ lenErrors -gt 0 ]]; then + echo -e "Found the following errors in the release environment:\n" + + for error in "${errors[@]}"; do + echo -e "- $error\n" + done + + exit 1 +fi + +echo "The environment is ready to push releases!" diff --git a/bin/publish-pypi b/bin/publish-pypi index 05bfccbb..826054e9 100644 --- a/bin/publish-pypi +++ b/bin/publish-pypi @@ -3,7 +3,4 @@ set -eux mkdir -p dist rye build --clean -# Patching importlib-metadata version until upstream library version is updated -# https://github.com/pypa/twine/issues/977#issuecomment-2189800841 -"$HOME/.rye/self/bin/python3" -m pip install 'importlib-metadata==7.2.1' rye publish --yes --token=$PYPI_TOKEN diff --git a/docs/cli_reference.md b/docs/cli_reference.md deleted file mode 100644 index 48f36323..00000000 --- a/docs/cli_reference.md +++ /dev/null @@ -1,918 +0,0 @@ -# CLI Reference - -Welcome to the llama-stack-client CLI - a command-line interface for interacting with Llama Stack - -``` -Usage: llama-stack-client [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--version**: Show the version and exit. [default: False] - -* **--endpoint**: Llama Stack distribution endpoint [default: ] - -* **--api-key**: Llama Stack distribution API key [default: ] - -* **--config**: Path to config file - -**Commands** - -* **configure**: Configure Llama Stack Client CLI. - -* **datasets**: Manage datasets. - -* **eval**: Run evaluation tasks. - -* **eval_tasks**: Manage evaluation tasks. - -* **inference**: Inference (chat). - -* **inspect**: Inspect server configuration. - -* **models**: Manage GenAI models. - -* **post_training**: Post-training. - -* **providers**: Manage API providers. - -* **scoring_functions**: Manage scoring functions. - -* **shields**: Manage safety shield services. - -* **toolgroups**: Manage available tool groups. - -* **vector_dbs**: Manage vector databases. - - - -## configure - -Configure Llama Stack Client CLI. - -``` -Usage: llama-stack-client configure [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--endpoint**: Llama Stack distribution endpoint [default: ] - -* **--api-key**: Llama Stack distribution API key [default: ] - - - -## datasets - -Manage datasets. - -``` -Usage: llama-stack-client datasets [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Commands** - -* **list**: Show available datasets on distribution endpoint - -* **register**: Create a new dataset - -* **unregister**: Remove a dataset - - - -### list - -Show available datasets on distribution endpoint - -``` -Usage: llama-stack-client datasets list [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - - - -### register - -Create a new dataset - -``` -Usage: llama-stack-client datasets register [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. 
[default: False] - -* **--dataset-id**: Id of the dataset - -* **--purpose**: Purpose of the dataset - -* **--metadata**: Metadata of the dataset - -* **--url**: URL of the dataset - -* **--dataset-path**: Local file path to the dataset. If specified, upload dataset via URL - - - -### unregister - -Remove a dataset - -``` -Usage: llama-stack-client datasets unregister [OPTIONS] DATASET_ID -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Arguments** - -* **DATASET_ID** - - - -## eval - -Run evaluation tasks. - -``` -Usage: llama-stack-client eval [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Commands** - -* **run-benchmark**: Run a evaluation benchmark task - -* **run-scoring**: Run scoring from application datasets - - - -### run-benchmark - -Run a evaluation benchmark task - -``` -Usage: llama-stack-client eval run-benchmark [OPTIONS] BENCHMARK_IDS... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--model-id**: model id to run the benchmark eval on - -* **--output-dir**: Path to the dump eval results output directory - -* **--num-examples**: Number of examples to evaluate on, useful for debugging - -* **--temperature**: temperature in the sampling params to run generation [default: 0.0] - -* **--max-tokens**: max-tokens in the sampling params to run generation [default: 4096] - -* **--top-p**: top-p in the sampling params to run generation [default: 0.9] - -* **--repeat-penalty**: repeat-penalty in the sampling params to run generation [default: 1.0] - -* **--visualize**: Visualize evaluation results after completion [default: False] - -**Arguments** - -* **BENCHMARK_IDS** - - - -### run-scoring - -Run scoring from application datasets - -``` -Usage: llama-stack-client eval run-scoring [OPTIONS] SCORING_FUNCTION_IDS... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--dataset-id**: Pre-registered dataset_id to score (from llama-stack-client datasets list) - -* **--dataset-path**: Path to the dataset file to score - -* **--scoring-params-config**: Path to the scoring params config file in JSON format - -* **--num-examples**: Number of examples to evaluate on, useful for debugging - -* **--output-dir**: Path to the dump eval results output directory - -* **--visualize**: Visualize evaluation results after completion [default: False] - -**Arguments** - -* **SCORING_FUNCTION_IDS** - - - -## eval-tasks - -Manage evaluation tasks. - -``` -Usage: llama-stack-client eval-tasks [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Commands** - -* **list**: Show available eval tasks on distribution endpoint - -* **register**: Register a new eval task - - - -### list - -Show available eval tasks on distribution endpoint - -``` -Usage: llama-stack-client eval-tasks list [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - - - -### register - -Register a new eval task - -``` -Usage: llama-stack-client eval-tasks register [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. 
[default: False] - -* **--eval-task-id**: ID of the eval task - -* **--dataset-id**: ID of the dataset to evaluate - -* **--scoring-functions**: Scoring functions to use for evaluation - -* **--provider-id**: Provider ID for the eval task - -* **--provider-eval-task-id**: Provider's eval task ID - -* **--metadata**: Metadata for the eval task in JSON format - - - -## inference - -Inference (chat). - -``` -Usage: llama-stack-client inference [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Commands** - -* **chat-completion**: Show available inference chat completion endpoints on distribution endpoint - - - -### chat-completion - -Show available inference chat completion endpoints on distribution endpoint - -``` -Usage: llama-stack-client inference chat-completion [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--message**: Message - -* **--stream**: Streaming [default: False] - -* **--session**: Start a Chat Session [default: False] - -* **--model-id**: Model ID - - - -## inspect - -Inspect server configuration. - -``` -Usage: llama-stack-client inspect [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Commands** - -* **version**: Show available providers on distribution endpoint - - - -### version - -Show available providers on distribution endpoint - -``` -Usage: llama-stack-client inspect version [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - - - -## models - -Manage GenAI models. - -``` -Usage: llama-stack-client models [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Commands** - -* **get**: Show details of a specific model at the distribution endpoint - -* **list**: Show available llama models at distribution endpoint - -* **register**: Register a new model at distribution endpoint - -* **unregister**: Unregister a model from distribution endpoint - - - -### get - -Show details of a specific model at the distribution endpoint - -``` -Usage: llama-stack-client models get [OPTIONS] MODEL_ID -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Arguments** - -* **MODEL_ID** - - - -### list - -Show available llama models at distribution endpoint - -``` -Usage: llama-stack-client models list [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - - - -### register - -Register a new model at distribution endpoint - -``` -Usage: llama-stack-client models register [OPTIONS] MODEL_ID -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--provider-id**: Provider ID for the model - -* **--provider-model-id**: Provider's model ID - -* **--metadata**: JSON metadata for the model - -**Arguments** - -* **MODEL_ID** - - - -### unregister - -Unregister a model from distribution endpoint - -``` -Usage: llama-stack-client models unregister [OPTIONS] MODEL_ID -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Arguments** - -* **MODEL_ID** - - - -## post-training - -Post-training. - -``` -Usage: llama-stack-client post-training [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. 
[default: False] - -**Commands** - -* **artifacts**: Get the training artifacts of a specific post training job - -* **cancel**: Cancel the training job - -* **list**: Show the list of available post training jobs - -* **status**: Show the status of a specific post training job - -* **supervised_fine_tune**: Kick off a supervised fine tune job - - - -### artifacts - -Get the training artifacts of a specific post training job - -``` -Usage: llama-stack-client post-training artifacts [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--job-uuid**: Job UUID - - - -### cancel - -Cancel the training job - -``` -Usage: llama-stack-client post-training cancel [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--job-uuid**: Job UUID - - - -### list - -Show the list of available post training jobs - -``` -Usage: llama-stack-client post-training list [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - - - -### status - -Show the status of a specific post training job - -``` -Usage: llama-stack-client post-training status [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--job-uuid**: Job UUID - - - -### supervised_fine_tune - -Kick off a supervised fine tune job - -``` -Usage: llama-stack-client post-training supervised_fine_tune - [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--job-uuid**: Job UUID - -* **--model**: Model ID - -* **--algorithm-config**: Algorithm Config - -* **--training-config**: Training Config - -* **--checkpoint-dir**: Checkpoint Config - - - -## providers - -Manage API providers. - -``` -Usage: llama-stack-client providers [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Commands** - -* **inspect**: Show available providers on distribution endpoint - -* **list**: Show available providers on distribution endpoint - - - -### inspect - -Show available providers on distribution endpoint - -``` -Usage: llama-stack-client providers inspect [OPTIONS] PROVIDER_ID -``` - -**Options** - -* **--help**: Show this message and exit. [default: False] - -**Arguments** - -* **PROVIDER_ID** - - - -### list - -Show available providers on distribution endpoint - -``` -Usage: llama-stack-client providers list [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - - - -## scoring-functions - -Manage scoring functions. - -``` -Usage: llama-stack-client scoring-functions [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Commands** - -* **list**: Show available scoring functions on distribution endpoint - -* **register**: Register a new scoring function - - - -### list - -Show available scoring functions on distribution endpoint - -``` -Usage: llama-stack-client scoring-functions list [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - - - -### register - -Register a new scoring function - -``` -Usage: llama-stack-client scoring-functions register [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. 
[default: False] - -* **--scoring-fn-id**: Id of the scoring function - -* **--description**: Description of the scoring function - -* **--return-type**: Return type of the scoring function - -* **--provider-id**: Provider ID for the scoring function - -* **--provider-scoring-fn-id**: Provider's scoring function ID - -* **--params**: Parameters for the scoring function in JSON format - - - -## shields - -Manage safety shield services. - -``` -Usage: llama-stack-client shields [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Commands** - -* **list**: Show available safety shields on distribution endpoint - -* **register**: Register a new safety shield - - - -### list - -Show available safety shields on distribution endpoint - -``` -Usage: llama-stack-client shields list [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - - - -### register - -Register a new safety shield - -``` -Usage: llama-stack-client shields register [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--shield-id**: Id of the shield - -* **--provider-id**: Provider ID for the shield - -* **--provider-shield-id**: Provider's shield ID - -* **--params**: JSON configuration parameters for the shield - - - -## toolgroups - -Manage available tool groups. - -``` -Usage: llama-stack-client toolgroups [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Commands** - -* **get**: Show available llama toolgroups at distribution endpoint - -* **list**: Show available llama toolgroups at distribution endpoint - -* **register**: Register a new toolgroup at distribution endpoint - -* **unregister**: Unregister a toolgroup from distribution endpoint - - - -### get - -Show available llama toolgroups at distribution endpoint - -``` -Usage: llama-stack-client toolgroups get [OPTIONS] TOOLGROUP_ID -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Arguments** - -* **TOOLGROUP_ID** - - - -### list - -Show available llama toolgroups at distribution endpoint - -``` -Usage: llama-stack-client toolgroups list [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - - - -### register - -Register a new toolgroup at distribution endpoint - -``` -Usage: llama-stack-client toolgroups register [OPTIONS] TOOLGROUP_ID -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--provider-id**: Provider ID for the toolgroup - -* **--provider-toolgroup-id**: Provider's toolgroup ID - -* **--mcp-config**: JSON mcp_config for the toolgroup - -* **--args**: JSON args for the toolgroup - -**Arguments** - -* **TOOLGROUP_ID** - - - -### unregister - -Unregister a toolgroup from distribution endpoint - -``` -Usage: llama-stack-client toolgroups unregister [OPTIONS] TOOLGROUP_ID -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Arguments** - -* **TOOLGROUP_ID** - - - -## vector-dbs - -Manage vector databases. - -``` -Usage: llama-stack-client vector-dbs [OPTIONS] COMMAND [ARGS]... -``` - -**Options** - -* **-h, --help**: Show this message and exit. 
[default: False] - -**Commands** - -* **list**: Show available vector dbs on distribution endpoint - -* **register**: Create a new vector db - -* **unregister**: Delete a vector db - - - -### list - -Show available vector dbs on distribution endpoint - -``` -Usage: llama-stack-client vector-dbs list [OPTIONS] -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - - - -### register - -Create a new vector db - -``` -Usage: llama-stack-client vector-dbs register [OPTIONS] VECTOR_DB_ID -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -* **--provider-id**: Provider ID for the vector db - -* **--provider-vector-db-id**: Provider's vector db ID - -* **--embedding-model**: Embedding model (for vector type) [default: all-MiniLM-L6-v2] - -* **--embedding-dimension**: Embedding dimension (for vector type) [default: 384] - -**Arguments** - -* **VECTOR_DB_ID** - - - -### unregister - -Delete a vector db - -``` -Usage: llama-stack-client vector-dbs unregister [OPTIONS] VECTOR_DB_ID -``` - -**Options** - -* **-h, --help**: Show this message and exit. [default: False] - -**Arguments** - -* **VECTOR_DB_ID** diff --git a/examples/mcp_agent.py b/examples/mcp_agent.py deleted file mode 100644 index 8dfd2f69..00000000 --- a/examples/mcp_agent.py +++ /dev/null @@ -1,136 +0,0 @@ -import json -import logging -from urllib.parse import urlparse - -import fire -import httpx -from llama_stack_client import Agent, AgentEventLogger, LlamaStackClient -from llama_stack_client.lib import get_oauth_token_for_mcp_server -from rich import print as rprint - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -import tempfile -from pathlib import Path - -TMP_DIR = Path(tempfile.gettempdir()) / "llama-stack" -TMP_DIR.mkdir(parents=True, exist_ok=True) - -CACHE_FILE = TMP_DIR / "mcp_tokens.json" - - -def main(model_id: str, mcp_servers: str = "https://mcp.asana.com/sse", llama_stack_url: str = "http://localhost:8321"): - """Run an MCP agent with the specified model and servers. - - Args: - model_id: The model to use for the agent. - mcp_servers: Comma-separated list of MCP servers to use for the agent. - llama_stack_url: The URL of the Llama Stack server to use. - - Examples: - python mcp_agent.py "meta-llama/Llama-4-Scout-17B-16E-Instruct" \ - -m "https://mcp.asana.com/sse" \ - -l "http://localhost:8321" - """ - client = LlamaStackClient(base_url=llama_stack_url) - if not check_model_exists(client, model_id): - return - - servers = [s.strip() for s in mcp_servers.split(",")] - mcp_headers = get_and_cache_mcp_headers(servers) - - toolgroup_ids = [] - for server in servers: - # we cannot use "/" in the toolgroup_id because we have some tech debt from earlier which uses - # "/" as a separator for toolgroup_id and tool_name. We should fix this in the future. 
- group_id = urlparse(server).netloc - toolgroup_ids.append(group_id) - client.toolgroups.register( - toolgroup_id=group_id, mcp_endpoint=dict(uri=server), provider_id="model-context-protocol" - ) - - agent = Agent( - client=client, - model=model_id, - instructions="You are a helpful assistant who can use tools when necessary to answer questions.", - tools=toolgroup_ids, - extra_headers={ - "X-LlamaStack-Provider-Data": json.dumps( - { - "mcp_headers": mcp_headers, - } - ), - }, - ) - - session_id = agent.create_session("test-session") - - while True: - user_input = input("Enter a question: ") - if user_input.lower() in ("q", "quit", "exit", "bye", ""): - print("Exiting...") - break - response = agent.create_turn( - session_id=session_id, - messages=[{"role": "user", "content": user_input}], - stream=True, - ) - for log in AgentEventLogger().log(response): - log.print() - - -def check_model_exists(client: LlamaStackClient, model_id: str) -> bool: - models = [m for m in client.models.list() if m.model_type == "llm"] - if model_id not in [m.identifier for m in models]: - rprint(f"[red]Model {model_id} not found[/red]") - rprint("[yellow]Available models:[/yellow]") - for model in models: - rprint(f" - {model.identifier}") - return False - return True - - -def get_and_cache_mcp_headers(servers: list[str]) -> dict[str, dict[str, str]]: - mcp_headers = {} - - logger.info(f"Using cache file: {CACHE_FILE} for MCP tokens") - tokens = {} - if CACHE_FILE.exists(): - with open(CACHE_FILE, "r") as f: - tokens = json.load(f) - for server, token in tokens.items(): - mcp_headers[server] = { - "Authorization": f"Bearer {token}", - } - - for server in servers: - with httpx.Client() as http_client: - headers = mcp_headers.get(server, {}) - try: - response = http_client.get(server, headers=headers, timeout=1.0) - except httpx.TimeoutException: - # timeout means success since we did not get an immediate 40X - continue - - if response.status_code in (401, 403): - logger.info(f"Server {server} requires authentication, getting token") - token = get_oauth_token_for_mcp_server(server) - if not token: - logger.error(f"No token obtained for {server}") - return - - tokens[server] = token - mcp_headers[server] = { - "Authorization": f"Bearer {token}", - } - - with open(CACHE_FILE, "w") as f: - json.dump(tokens, f, indent=2) - - return mcp_headers - - -if __name__ == "__main__": - fire.Fire(main) diff --git a/examples/post_training/supervised_fine_tune_client.py b/examples/post_training/supervised_fine_tune_client.py deleted file mode 100644 index 1aca6ee1..00000000 --- a/examples/post_training/supervised_fine_tune_client.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. -# Copyright (c) Meta Platforms, Inc. and affiliates. -# This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement. 
- -import asyncio -from typing import Optional - -import fire -from llama_stack_client import LlamaStackClient - -from llama_stack_client.types.post_training_supervised_fine_tune_params import ( - AlgorithmConfigLoraFinetuningConfig, - TrainingConfig, - TrainingConfigDataConfig, - TrainingConfigEfficiencyConfig, - TrainingConfigOptimizerConfig, -) - - -async def run_main( - host: str, - port: int, - job_uuid: str, - model: str, - use_https: bool = False, - checkpoint_dir: Optional[str] = None, - cert_path: Optional[str] = None, -): - # Construct the base URL with the appropriate protocol - protocol = "https" if use_https else "http" - base_url = f"{protocol}://{host}:{port}" - - # Configure client with SSL certificate if provided - client_kwargs = {"base_url": base_url} - if use_https and cert_path: - client_kwargs["verify"] = cert_path - - client = LlamaStackClient(**client_kwargs) - - algorithm_config = AlgorithmConfigLoraFinetuningConfig( - type="LoRA", - lora_attn_modules=["q_proj", "v_proj", "output_proj"], - apply_lora_to_mlp=True, - apply_lora_to_output=False, - rank=8, - alpha=16, - ) - - data_config = TrainingConfigDataConfig( - dataset_id="alpaca", - validation_dataset_id="alpaca", - batch_size=1, - shuffle=False, - ) - - optimizer_config = TrainingConfigOptimizerConfig( - optimizer_type="adamw", - lr=3e-4, - weight_decay=0.1, - num_warmup_steps=100, - ) - - effiency_config = TrainingConfigEfficiencyConfig( - enable_activation_checkpointing=True, - ) - - training_config = TrainingConfig( - n_epochs=1, - data_config=data_config, - efficiency_config=effiency_config, - optimizer_config=optimizer_config, - max_steps_per_epoch=30, - gradient_accumulation_steps=1, - ) - - training_job = client.post_training.supervised_fine_tune( - job_uuid=job_uuid, - model=model, - algorithm_config=algorithm_config, - training_config=training_config, - checkpoint_dir=checkpoint_dir, - # logger_config and hyperparam_search_config haven't been used yet - logger_config={}, - hyperparam_search_config={}, - ) - - print(f"finished the training job: {training_job.job_uuid}") - - -def main( - host: str, - port: int, - job_uuid: str, - model: str, - use_https: bool = False, - checkpoint_dir: Optional[str] = "null", - cert_path: Optional[str] = None, -): - job_uuid = str(job_uuid) - asyncio.run(run_main(host, port, job_uuid, model, use_https, checkpoint_dir, cert_path)) - - -if __name__ == "__main__": - fire.Fire(main) diff --git a/mypy.ini b/mypy.ini index 50e57de6..1865499c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -5,7 +5,10 @@ show_error_codes = True # Exclude _files.py because mypy isn't smart enough to apply # the correct type narrowing and as this is an internal module # it's fine to just use Pyright. -exclude = ^(src/llama_stack_client/_files\.py|_dev/.*\.py)$ +# +# We also exclude our `tests` as mypy doesn't always infer +# types correctly and Pyright will still catch any type errors. +exclude = ^(src/llama_stack_client/_files\.py|_dev/.*\.py|tests/.*)$ strict_equality = True implicit_reexport = True @@ -38,7 +41,7 @@ cache_fine_grained = True # ``` # Changing this codegen to make mypy happy would increase complexity # and would not be worth it. 
-disable_error_code = func-returns-value +disable_error_code = func-returns-value,overload-cannot-match # https://github.com/python/mypy/issues/12162 [mypy.overrides] diff --git a/pyproject.toml b/pyproject.toml index e97ef0b4..c25d53e6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,9 @@ [project] name = "llama_stack_client" -version = "0.2.13" +version = "0.2.18-alpha.2" description = "The official Python library for the llama-stack-client API" dynamic = ["readme"] -license = "Apache-2.0" +license = "MIT" authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] dependencies = [ "httpx>=0.23.0, <1", @@ -27,13 +27,14 @@ classifiers = [ "Typing :: Typed", "Intended Audience :: Developers", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: MacOS", "Operating System :: POSIX :: Linux", "Operating System :: Microsoft :: Windows", "Topic :: Software Development :: Libraries :: Python Modules", - "License :: OSI Approved :: Apache Software License" + "License :: OSI Approved :: MIT License" ] [dependency-groups] @@ -49,13 +50,11 @@ dev = [ ] [project.urls] -Homepage = "https://github.com/meta-llama/llama-stack-client-python" -Repository = "https://github.com/meta-llama/llama-stack-client-python" - - +Homepage = "https://github.com/llamastack/llama-stack-client-python" +Repository = "https://github.com/llamastack/llama-stack-client-python" [build-system] -requires = ["hatchling", "hatch-fancy-pypi-readme"] +requires = ["hatchling==1.26.3", "hatch-fancy-pypi-readme"] build-backend = "hatchling.build" [tool.hatch.build] @@ -90,7 +89,17 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/meta-llama/llama-stack-client-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/llamastack/llama-stack-client-python/tree/main/\g<2>)' + +[tool.pytest.ini_options] +testpaths = ["tests"] +addopts = "--tb=short -n auto" +xfail_strict = true +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "session" +filterwarnings = [ + "error" +] [tool.ruff] line-length = 120 diff --git a/release-please-config.json b/release-please-config.json new file mode 100644 index 00000000..04870019 --- /dev/null +++ b/release-please-config.json @@ -0,0 +1,66 @@ +{ + "packages": { + ".": {} + }, + "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json", + "include-v-in-tag": true, + "include-component-in-tag": false, + "versioning": "prerelease", + "prerelease": true, + "bump-minor-pre-major": true, + "bump-patch-for-minor-pre-major": false, + "pull-request-header": "Automated Release PR", + "pull-request-title-pattern": "release: ${version}", + "changelog-sections": [ + { + "type": "feat", + "section": "Features" + }, + { + "type": "fix", + "section": "Bug Fixes" + }, + { + "type": "perf", + "section": "Performance Improvements" + }, + { + "type": "revert", + "section": "Reverts" + }, + { + "type": "chore", + "section": "Chores" + }, + { + "type": "docs", + "section": "Documentation" + }, + { + "type": "style", + "section": "Styles" + }, + { + "type": "refactor", + "section": "Refactors" + }, + { + "type": "test", + "section": "Tests", + "hidden": true + }, + { + "type": "build", + "section": "Build System" + }, + { + "type": "ci", + "section": "Continuous Integration", 
+ "hidden": true + } + ], + "release-type": "python", + "extra-files": [ + "src/llama_stack_client/_version.py" + ] +} diff --git a/requirements-dev.lock b/requirements-dev.lock new file mode 100644 index 00000000..e1a5175a --- /dev/null +++ b/requirements-dev.lock @@ -0,0 +1,135 @@ +# generated by rye +# use `rye lock` or `rye sync` to update this lockfile +# +# last locked with the following flags: +# pre: false +# features: [] +# all-features: true +# with-sources: false +# generate-hashes: false +# universal: false + +-e file:. +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.12.8 + # via httpx-aiohttp + # via llama-stack-client +aiosignal==1.3.2 + # via aiohttp +annotated-types==0.6.0 + # via pydantic +anyio==4.4.0 + # via httpx + # via llama-stack-client +argcomplete==3.1.2 + # via nox +async-timeout==5.0.1 + # via aiohttp +attrs==25.3.0 + # via aiohttp +certifi==2023.7.22 + # via httpcore + # via httpx +colorlog==6.7.0 + # via nox +dirty-equals==0.6.0 +distlib==0.3.7 + # via virtualenv +distro==1.8.0 + # via llama-stack-client +exceptiongroup==1.2.2 + # via anyio + # via pytest +execnet==2.1.1 + # via pytest-xdist +filelock==3.12.4 + # via virtualenv +frozenlist==1.6.2 + # via aiohttp + # via aiosignal +h11==0.16.0 + # via httpcore +httpcore==1.0.9 + # via httpx +httpx==0.28.1 + # via httpx-aiohttp + # via llama-stack-client + # via respx +httpx-aiohttp==0.1.8 + # via llama-stack-client +idna==3.4 + # via anyio + # via httpx + # via yarl +importlib-metadata==7.0.0 +iniconfig==2.0.0 + # via pytest +markdown-it-py==3.0.0 + # via rich +mdurl==0.1.2 + # via markdown-it-py +multidict==6.4.4 + # via aiohttp + # via yarl +mypy==1.14.1 +mypy-extensions==1.0.0 + # via mypy +nest-asyncio==1.6.0 +nodeenv==1.8.0 + # via pyright +nox==2023.4.22 +packaging==23.2 + # via nox + # via pytest +platformdirs==3.11.0 + # via virtualenv +pluggy==1.5.0 + # via pytest +propcache==0.3.1 + # via aiohttp + # via yarl +pydantic==2.10.3 + # via llama-stack-client +pydantic-core==2.27.1 + # via pydantic +pygments==2.18.0 + # via rich +pyright==1.1.399 +pytest==8.3.3 + # via pytest-asyncio + # via pytest-xdist +pytest-asyncio==0.24.0 +pytest-xdist==3.7.0 +python-dateutil==2.8.2 + # via time-machine +pytz==2023.3.post1 + # via dirty-equals +respx==0.22.0 +rich==13.7.1 +ruff==0.9.4 +setuptools==68.2.2 + # via nodeenv +six==1.16.0 + # via python-dateutil +sniffio==1.3.0 + # via anyio + # via llama-stack-client +time-machine==2.9.0 +tomli==2.0.2 + # via mypy + # via pytest +typing-extensions==4.12.2 + # via anyio + # via llama-stack-client + # via multidict + # via mypy + # via pydantic + # via pydantic-core + # via pyright +virtualenv==20.24.5 + # via nox +yarl==1.20.0 + # via aiohttp +zipp==3.17.0 + # via importlib-metadata diff --git a/requirements.lock b/requirements.lock new file mode 100644 index 00000000..098354a7 --- /dev/null +++ b/requirements.lock @@ -0,0 +1,72 @@ +# generated by rye +# use `rye lock` or `rye sync` to update this lockfile +# +# last locked with the following flags: +# pre: false +# features: [] +# all-features: true +# with-sources: false +# generate-hashes: false +# universal: false + +-e file:. 
+aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.12.8 + # via httpx-aiohttp + # via llama-stack-client +aiosignal==1.3.2 + # via aiohttp +annotated-types==0.6.0 + # via pydantic +anyio==4.4.0 + # via httpx + # via llama-stack-client +async-timeout==5.0.1 + # via aiohttp +attrs==25.3.0 + # via aiohttp +certifi==2023.7.22 + # via httpcore + # via httpx +distro==1.8.0 + # via llama-stack-client +exceptiongroup==1.2.2 + # via anyio +frozenlist==1.6.2 + # via aiohttp + # via aiosignal +h11==0.16.0 + # via httpcore +httpcore==1.0.9 + # via httpx +httpx==0.28.1 + # via httpx-aiohttp + # via llama-stack-client +httpx-aiohttp==0.1.8 + # via llama-stack-client +idna==3.4 + # via anyio + # via httpx + # via yarl +multidict==6.4.4 + # via aiohttp + # via yarl +propcache==0.3.1 + # via aiohttp + # via yarl +pydantic==2.10.3 + # via llama-stack-client +pydantic-core==2.27.1 + # via pydantic +sniffio==1.3.0 + # via anyio + # via llama-stack-client +typing-extensions==4.12.2 + # via anyio + # via llama-stack-client + # via multidict + # via pydantic + # via pydantic-core +yarl==1.20.0 + # via aiohttp diff --git a/scripts/bootstrap b/scripts/bootstrap index 8c5c60eb..e84fe62c 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -4,7 +4,7 @@ set -e cd "$(dirname "$0")/.." -if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then +if ! command -v rye >/dev/null 2>&1 && [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then brew bundle check >/dev/null 2>&1 || { echo "==> Installing Homebrew dependencies…" brew bundle diff --git a/scripts/gen_cli_doc.py b/scripts/gen_cli_doc.py deleted file mode 100644 index 53aea424..00000000 --- a/scripts/gen_cli_doc.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -import os -from pathlib import Path - -import click -from llama_stack_client.lib.cli.llama_stack_client import llama_stack_client - - -def generate_markdown_docs(command, parent=None, level=1): - """Generate markdown documentation for a click command.""" - ctx = click.Context(command, info_name=command.name, parent=parent) - - # Start with the command name as a header - prefix = "#" * level - if level == 1: - doc = [f"{prefix} CLI Reference\n"] - else: - doc = [f"{prefix} {command.name}\n"] - - # Add command help docstring - if command.help: - doc.append(f"{command.help}\n") - - # Add usage - doc.append(f"```\n{command.get_usage(ctx)}\n```\n") - - # Add options if present - has_options = False - for param in command.get_params(ctx): - if isinstance(param, click.Option): - if not has_options: - doc.append("**Options**\n") - has_options = True - opts = ", ".join(param.opts) - help_text = param.help or "" - default = f" [default: {param.default}]" if param.default is not None else "" - doc.append(f"* **{opts}**: {help_text}{default}\n") - - # Add arguments if present - has_arguments = False - for param in command.get_params(ctx): - if isinstance(param, click.Argument): - if not has_arguments: - doc.append("**Arguments**\n") - has_arguments = True - name = param.name.upper() - doc.append(f"* **{name}**\n") - - # If this is a group with commands, add subcommands - if isinstance(command, click.Group): - doc.append("**Commands**\n") - for cmd_name in command.list_commands(ctx): - cmd = command.get_command(ctx, cmd_name) - cmd_help = cmd.get_short_help_str(limit=80) if cmd else "" - doc.append(f"* **{cmd_name}**: {cmd_help}\n") - - # Add detailed subcommand documentation - for cmd_name in command.list_commands(ctx): - cmd = command.get_command(ctx, cmd_name) - if cmd: - doc.append("\n") - doc.extend(generate_markdown_docs(cmd, ctx, level + 1)) - - return doc - - -if __name__ == "__main__": - # Generate the docs - markdown_lines = generate_markdown_docs(llama_stack_client) - markdown = "\n".join(markdown_lines) - - # Write to file - file_path = Path(__file__).parent.parent / "docs" / "cli_reference.md" - with open(file_path, "w") as f: - f.write(markdown) - - print(f"Documentation generated in {file_path}") diff --git a/scripts/lint b/scripts/lint deleted file mode 100755 index 9a7fc869..00000000 --- a/scripts/lint +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - -# set -e - -# cd "$(dirname "$0")/.." - -# echo "==> Running lints" -# rye run lint - -# echo "==> Making sure it imports" -# rye run python -c 'import llama_stack_client' diff --git a/scripts/mock b/scripts/mock index d2814ae6..0b28f6ea 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log & # Wait for server to come online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" fi diff --git a/scripts/test b/scripts/test index e9e543c7..dbeda2d2 100755 --- a/scripts/test +++ b/scripts/test @@ -1,59 +1,61 @@ #!/usr/bin/env bash -# set -e - -# cd "$(dirname "$0")/.." 
- -# RED='\033[0;31m' -# GREEN='\033[0;32m' -# YELLOW='\033[0;33m' -# NC='\033[0m' # No Color - -# function prism_is_running() { -# curl --silent "http://localhost:4010" >/dev/null 2>&1 -# } - -# kill_server_on_port() { -# pids=$(lsof -t -i tcp:"$1" || echo "") -# if [ "$pids" != "" ]; then -# kill "$pids" -# echo "Stopped $pids." -# fi -# } - -# function is_overriding_api_base_url() { -# [ -n "$TEST_API_BASE_URL" ] -# } - -# if ! is_overriding_api_base_url && ! prism_is_running ; then -# # When we exit this script, make sure to kill the background mock server process -# trap 'kill_server_on_port 4010' EXIT - -# # Start the dev server -# ./scripts/mock --daemon -# fi - -# if is_overriding_api_base_url ; then -# echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" -# echo -# elif ! prism_is_running ; then -# echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" -# echo -e "running against your OpenAPI spec." -# echo -# echo -e "To run the server, pass in the path or url of your OpenAPI" -# echo -e "spec to the prism command:" -# echo -# echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}" -# echo - -# exit 1 -# else -# echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" -# echo -# fi - -# echo "==> Running tests" -# rye run pytest "$@" - -# echo "==> Running Pydantic v1 tests" -# rye run nox -s test-pydantic-v1 -- "$@" +set -e + +cd "$(dirname "$0")/.." + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +NC='\033[0m' # No Color + +function prism_is_running() { + curl --silent "http://localhost:4010" >/dev/null 2>&1 +} + +kill_server_on_port() { + pids=$(lsof -t -i tcp:"$1" || echo "") + if [ "$pids" != "" ]; then + kill "$pids" + echo "Stopped $pids." + fi +} + +function is_overriding_api_base_url() { + [ -n "$TEST_API_BASE_URL" ] +} + +if ! is_overriding_api_base_url && ! prism_is_running ; then + # When we exit this script, make sure to kill the background mock server process + trap 'kill_server_on_port 4010' EXIT + + # Start the dev server + ./scripts/mock --daemon +fi + +if is_overriding_api_base_url ; then + echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" + echo +elif ! prism_is_running ; then + echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" + echo -e "running against your OpenAPI spec." 
+ echo + echo -e "To run the server, pass in the path or url of your OpenAPI" + echo -e "spec to the prism command:" + echo + echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}" + echo + + exit 1 +else + echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" + echo +fi + +export DEFER_PYDANTIC_BUILD=false + +echo "==> Running tests" +rye run pytest "$@" + +echo "==> Running Pydantic v1 tests" +rye run nox -s test-pydantic-v1 -- "$@" diff --git a/scripts/utils/ruffen-docs.py b/scripts/utils/ruffen-docs.py index 37b3d94f..0cf2bd2f 100644 --- a/scripts/utils/ruffen-docs.py +++ b/scripts/utils/ruffen-docs.py @@ -47,7 +47,7 @@ def _md_match(match: Match[str]) -> str: with _collect_error(match): code = format_code_block(code) code = textwrap.indent(code, match["indent"]) - return f'{match["before"]}{code}{match["after"]}' + return f"{match['before']}{code}{match['after']}" def _pycon_match(match: Match[str]) -> str: code = "" @@ -97,7 +97,7 @@ def finish_fragment() -> None: def _md_pycon_match(match: Match[str]) -> str: code = _pycon_match(match) code = textwrap.indent(code, match["indent"]) - return f'{match["before"]}{code}{match["after"]}' + return f"{match['before']}{code}{match['after']}" src = MD_RE.sub(_md_match, src) src = MD_PYCON_RE.sub(_md_pycon_match, src) diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh new file mode 100755 index 00000000..8593351a --- /dev/null +++ b/scripts/utils/upload-artifact.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +set -exuo pipefail + +FILENAME=$(basename dist/*.whl) + +RESPONSE=$(curl -X POST "$URL?filename=$FILENAME" \ + -H "Authorization: Bearer $AUTH" \ + -H "Content-Type: application/json") + +SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url') + +if [[ "$SIGNED_URL" == "null" ]]; then + echo -e "\033[31mFailed to get signed URL.\033[0m" + exit 1 +fi + +UPLOAD_RESPONSE=$(curl -v -X PUT \ + -H "Content-Type: binary/octet-stream" \ + --data-binary "@dist/$FILENAME" "$SIGNED_URL" 2>&1) + +if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then + echo -e "\033[32mUploaded build to Stainless storage.\033[0m" + echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/llama-stack-client-python/$SHA/$FILENAME'\033[0m" +else + echo -e "\033[31mFailed to upload artifact.\033[0m" + exit 1 +fi diff --git a/src/llama_stack_client/_base_client.py b/src/llama_stack_client/_base_client.py index a0bbc468..b5e326e9 100644 --- a/src/llama_stack_client/_base_client.py +++ b/src/llama_stack_client/_base_client.py @@ -529,6 +529,18 @@ def _build_request( # work around https://github.com/encode/httpx/discussions/2880 kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")} + is_body_allowed = options.method.lower() != "get" + + if is_body_allowed: + if isinstance(json_data, bytes): + kwargs["content"] = json_data + else: + kwargs["json"] = json_data if is_given(json_data) else None + kwargs["files"] = files + else: + headers.pop("Content-Type", None) + kwargs.pop("data", None) + # TODO: report this error to httpx return self._client.build_request( # pyright: ignore[reportUnknownMemberType] headers=headers, @@ -540,8 +552,6 @@ def _build_request( # so that passing a `TypedDict` doesn't cause an error. 
# https://github.com/microsoft/pyright/issues/3526#event-6715453066 params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None, - json=json_data if is_given(json_data) else None, - files=files, **kwargs, ) diff --git a/src/llama_stack_client/_client.py b/src/llama_stack_client/_client.py index 409d8f5c..a479a9b3 100644 --- a/src/llama_stack_client/_client.py +++ b/src/llama_stack_client/_client.py @@ -41,6 +41,7 @@ toolgroups, vector_dbs, completions, + moderations, scoring_functions, synthetic_data_generation, ) @@ -91,6 +92,7 @@ class LlamaStackClient(SyncAPIClient): post_training: post_training.PostTrainingResource providers: providers.ProvidersResource routes: routes.RoutesResource + moderations: moderations.ModerationsResource safety: safety.SafetyResource shields: shields.ShieldsResource synthetic_data_generation: synthetic_data_generation.SyntheticDataGenerationResource @@ -131,14 +133,14 @@ def __init__( ) -> None: """Construct a new synchronous LlamaStackClient client instance. - This automatically infers the `api_key` argument from the `LLAMA_STACK_API_KEY` environment variable if it is not provided. + This automatically infers the `api_key` argument from the `LLAMA_STACK_CLIENT_API_KEY` environment variable if it is not provided. """ if api_key is None: - api_key = os.environ.get("LLAMA_STACK_API_KEY") + api_key = os.environ.get("LLAMA_STACK_CLIENT_API_KEY") self.api_key = api_key if base_url is None: - base_url = os.environ.get("LLAMA_STACK_BASE_URL") + base_url = os.environ.get("LLAMA_STACK_CLIENT_BASE_URL") if base_url is None: base_url = f"http://any-hosted-llama-stack.com" @@ -177,6 +179,7 @@ def __init__( self.post_training = post_training.PostTrainingResource(self) self.providers = providers.ProvidersResource(self) self.routes = routes.RoutesResource(self) + self.moderations = moderations.ModerationsResource(self) self.safety = safety.SafetyResource(self) self.shields = shields.ShieldsResource(self) self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResource(self) @@ -315,6 +318,7 @@ class AsyncLlamaStackClient(AsyncAPIClient): post_training: post_training.AsyncPostTrainingResource providers: providers.AsyncProvidersResource routes: routes.AsyncRoutesResource + moderations: moderations.AsyncModerationsResource safety: safety.AsyncSafetyResource shields: shields.AsyncShieldsResource synthetic_data_generation: synthetic_data_generation.AsyncSyntheticDataGenerationResource @@ -355,14 +359,14 @@ def __init__( ) -> None: """Construct a new async AsyncLlamaStackClient client instance. - This automatically infers the `api_key` argument from the `LLAMA_STACK_API_KEY` environment variable if it is not provided. + This automatically infers the `api_key` argument from the `LLAMA_STACK_CLIENT_API_KEY` environment variable if it is not provided. 
""" if api_key is None: - api_key = os.environ.get("LLAMA_STACK_API_KEY") + api_key = os.environ.get("LLAMA_STACK_CLIENT_API_KEY") self.api_key = api_key if base_url is None: - base_url = os.environ.get("LLAMA_STACK_BASE_URL") + base_url = os.environ.get("LLAMA_STACK_CLIENT_BASE_URL") if base_url is None: base_url = f"http://any-hosted-llama-stack.com" @@ -401,6 +405,7 @@ def __init__( self.post_training = post_training.AsyncPostTrainingResource(self) self.providers = providers.AsyncProvidersResource(self) self.routes = routes.AsyncRoutesResource(self) + self.moderations = moderations.AsyncModerationsResource(self) self.safety = safety.AsyncSafetyResource(self) self.shields = shields.AsyncShieldsResource(self) self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResource(self) @@ -540,6 +545,7 @@ def __init__(self, client: LlamaStackClient) -> None: self.post_training = post_training.PostTrainingResourceWithRawResponse(client.post_training) self.providers = providers.ProvidersResourceWithRawResponse(client.providers) self.routes = routes.RoutesResourceWithRawResponse(client.routes) + self.moderations = moderations.ModerationsResourceWithRawResponse(client.moderations) self.safety = safety.SafetyResourceWithRawResponse(client.safety) self.shields = shields.ShieldsResourceWithRawResponse(client.shields) self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResourceWithRawResponse( @@ -573,6 +579,7 @@ def __init__(self, client: AsyncLlamaStackClient) -> None: self.post_training = post_training.AsyncPostTrainingResourceWithRawResponse(client.post_training) self.providers = providers.AsyncProvidersResourceWithRawResponse(client.providers) self.routes = routes.AsyncRoutesResourceWithRawResponse(client.routes) + self.moderations = moderations.AsyncModerationsResourceWithRawResponse(client.moderations) self.safety = safety.AsyncSafetyResourceWithRawResponse(client.safety) self.shields = shields.AsyncShieldsResourceWithRawResponse(client.shields) self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithRawResponse( @@ -608,6 +615,7 @@ def __init__(self, client: LlamaStackClient) -> None: self.post_training = post_training.PostTrainingResourceWithStreamingResponse(client.post_training) self.providers = providers.ProvidersResourceWithStreamingResponse(client.providers) self.routes = routes.RoutesResourceWithStreamingResponse(client.routes) + self.moderations = moderations.ModerationsResourceWithStreamingResponse(client.moderations) self.safety = safety.SafetyResourceWithStreamingResponse(client.safety) self.shields = shields.ShieldsResourceWithStreamingResponse(client.shields) self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResourceWithStreamingResponse( @@ -643,6 +651,7 @@ def __init__(self, client: AsyncLlamaStackClient) -> None: self.post_training = post_training.AsyncPostTrainingResourceWithStreamingResponse(client.post_training) self.providers = providers.AsyncProvidersResourceWithStreamingResponse(client.providers) self.routes = routes.AsyncRoutesResourceWithStreamingResponse(client.routes) + self.moderations = moderations.AsyncModerationsResourceWithStreamingResponse(client.moderations) self.safety = safety.AsyncSafetyResourceWithStreamingResponse(client.safety) self.shields = shields.AsyncShieldsResourceWithStreamingResponse(client.shields) self.synthetic_data_generation = ( diff --git a/src/llama_stack_client/_files.py b/src/llama_stack_client/_files.py 
index 45f57c0a..035a1144 100644 --- a/src/llama_stack_client/_files.py +++ b/src/llama_stack_client/_files.py @@ -69,12 +69,12 @@ def _transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], _read_file_content(file[1]), *file[2:]) + return (file[0], read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -def _read_file_content(file: FileContent) -> HttpxFileContent: +def read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return pathlib.Path(file).read_bytes() return file @@ -111,12 +111,12 @@ async def _async_transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], await _async_read_file_content(file[1]), *file[2:]) + return (file[0], await async_read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -async def _async_read_file_content(file: FileContent) -> HttpxFileContent: +async def async_read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return await anyio.Path(file).read_bytes() diff --git a/src/llama_stack_client/_models.py b/src/llama_stack_client/_models.py index 4f214980..b8387ce9 100644 --- a/src/llama_stack_client/_models.py +++ b/src/llama_stack_client/_models.py @@ -2,9 +2,10 @@ import os import inspect -from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast +from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast from datetime import date, datetime from typing_extensions import ( + List, Unpack, Literal, ClassVar, @@ -207,14 +208,18 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] else: fields_values[name] = field_get_default(field) + extra_field_type = _get_extra_fields_type(__cls) + _extra = {} for key, value in values.items(): if key not in model_fields: + parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value + if PYDANTIC_V2: - _extra[key] = value + _extra[key] = parsed else: _fields_set.add(key) - fields_values[key] = value + fields_values[key] = parsed object.__setattr__(m, "__dict__", fields_values) @@ -366,7 +371,24 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: if type_ is None: raise RuntimeError(f"Unexpected field type is None for {key}") - return construct_type(value=value, type_=type_) + return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None)) + + +def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None: + if not PYDANTIC_V2: + # TODO + return None + + schema = cls.__pydantic_core_schema__ + if schema["type"] == "model": + fields = schema["schema"] + if fields["type"] == "model-fields": + extras = fields.get("extras_schema") + if extras and "cls" in extras: + # mypy can't narrow the type + return extras["cls"] # type: ignore[no-any-return] + + return None def is_basemodel(type_: type) -> bool: @@ -420,7 +442,7 @@ def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: return cast(_T, construct_type(value=value, type_=type_)) -def construct_type(*, value: object, type_: object) -> object: +def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object: """Loose coercion to the expected type with construction of nested values. 
If the given value does not match the expected type then it is returned as-is. @@ -438,8 +460,10 @@ def construct_type(*, value: object, type_: object) -> object: type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(type_): - meta: tuple[Any, ...] = get_args(type_)[1:] + if metadata is not None and len(metadata) > 0: + meta: tuple[Any, ...] = tuple(metadata) + elif is_annotated_type(type_): + meta = get_args(type_)[1:] type_ = extract_type_arg(type_, 0) else: meta = tuple() diff --git a/src/llama_stack_client/_utils/_logs.py b/src/llama_stack_client/_utils/_logs.py index 77e8dc24..c9aae87e 100644 --- a/src/llama_stack_client/_utils/_logs.py +++ b/src/llama_stack_client/_utils/_logs.py @@ -16,7 +16,7 @@ def _basic_config() -> None: def setup_logging() -> None: - env = os.environ.get("LLAMA_STACK_LOG") + env = os.environ.get("LLAMA_STACK_CLIENT_LOG") if env == "debug": _basic_config() logger.setLevel(logging.DEBUG) diff --git a/src/llama_stack_client/_version.py b/src/llama_stack_client/_version.py index c320dee5..7b20b568 100644 --- a/src/llama_stack_client/_version.py +++ b/src/llama_stack_client/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "llama_stack_client" -__version__ = "0.2.13" +__version__ = "0.2.17" diff --git a/src/llama_stack_client/lib/agents/agent.py b/src/llama_stack_client/lib/agents/agent.py index ebdc4abd..5dc2f236 100644 --- a/src/llama_stack_client/lib/agents/agent.py +++ b/src/llama_stack_client/lib/agents/agent.py @@ -79,6 +79,7 @@ def get_agent_config( output_shields: Optional[List[str]] = None, response_format: Optional[ResponseFormat] = None, enable_session_persistence: Optional[bool] = None, + name: str | None = None, ) -> AgentConfig: # Create a minimal valid AgentConfig with required fields if model is None or instructions is None: @@ -106,6 +107,8 @@ def get_agent_config( agent_config["sampling_params"] = sampling_params if tool_config is not None: agent_config["tool_config"] = tool_config + if name is not None: + agent_config["name"] = name if tools is not None: toolgroups: List[Toolgroup] = [] for tool in tools: @@ -139,6 +142,7 @@ def __init__( response_format: Optional[ResponseFormat] = None, enable_session_persistence: Optional[bool] = None, extra_headers: Headers | None = None, + name: str | None = None, ): """Construct an Agent with the given parameters. @@ -164,6 +168,7 @@ def __init__( :param response_format: The response format for the agent. :param enable_session_persistence: Whether to enable session persistence. :param extra_headers: Extra headers to add to all requests sent by the agent. + :param name: Optional name for the agent, used in telemetry and identification. """ self.client = client @@ -185,6 +190,7 @@ def __init__( output_shields=output_shields, response_format=response_format, enable_session_persistence=enable_session_persistence, + name=name, ) client_tools = AgentUtils.get_client_tools(tools) @@ -389,6 +395,7 @@ def __init__( response_format: Optional[ResponseFormat] = None, enable_session_persistence: Optional[bool] = None, extra_headers: Headers | None = None, + name: str | None = None, ): """Construct an Agent with the given parameters. @@ -414,6 +421,7 @@ def __init__( :param response_format: The response format for the agent. :param enable_session_persistence: Whether to enable session persistence. :param extra_headers: Extra headers to add to all requests sent by the agent. 
+ :param name: Optional name for the agent, used in telemetry and identification. """ self.client = client @@ -435,6 +443,7 @@ def __init__( output_shields=output_shields, response_format=response_format, enable_session_persistence=enable_session_persistence, + name=name, ) client_tools = AgentUtils.get_client_tools(tools) diff --git a/src/llama_stack_client/lib/agents/event_logger.py b/src/llama_stack_client/lib/agents/event_logger.py index 731c7b2f..b4e1a219 100644 --- a/src/llama_stack_client/lib/agents/event_logger.py +++ b/src/llama_stack_client/lib/agents/event_logger.py @@ -4,7 +4,7 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from typing import Any, Iterator, Optional, Tuple +from typing import Any, Iterator, Optional from termcolor import cprint @@ -55,20 +55,11 @@ def print(self, flush: bool = True) -> None: class TurnStreamEventPrinter: - def __init__(self) -> None: - self.previous_event_type: Optional[str] = None - self.previous_step_type: Optional[str] = None - def yield_printable_events(self, chunk: Any) -> Iterator[TurnStreamPrintableEvent]: - for printable_event in self._yield_printable_events(chunk, self.previous_event_type, self.previous_step_type): + for printable_event in self._yield_printable_events(chunk): yield printable_event - if not hasattr(chunk, "error"): - self.previous_event_type, self.previous_step_type = self._get_event_type_step_type(chunk) - - def _yield_printable_events( - self, chunk: Any, previous_event_type: Optional[str] = None, previous_step_type: Optional[str] = None - ) -> Iterator[TurnStreamPrintableEvent]: + def _yield_printable_events(self, chunk: Any) -> Iterator[TurnStreamPrintableEvent]: if hasattr(chunk, "error"): yield TurnStreamPrintableEvent(role=None, content=chunk.error["message"], color="red") return @@ -146,17 +137,6 @@ def _yield_printable_events( color="green", ) - def _get_event_type_step_type(self, chunk: Any) -> Tuple[Optional[str], Optional[str]]: - if hasattr(chunk, "event"): - previous_event_type = chunk.event.payload.event_type if hasattr(chunk, "event") else None - previous_step_type = ( - chunk.event.payload.step_type - if previous_event_type not in {"turn_start", "turn_complete", "turn_awaiting_input"} - else None - ) - return previous_event_type, previous_step_type - return None, None - class EventLogger: def log(self, event_generator: Iterator[Any]) -> Iterator[TurnStreamPrintableEvent]: diff --git a/src/llama_stack_client/lib/agents/react/agent.py b/src/llama_stack_client/lib/agents/react/agent.py index 2719a7dd..95e400ed 100644 --- a/src/llama_stack_client/lib/agents/react/agent.py +++ b/src/llama_stack_client/lib/agents/react/agent.py @@ -13,6 +13,7 @@ from llama_stack_client.types.shared_params.response_format import ResponseFormat from llama_stack_client.types.shared_params.sampling_params import SamplingParams +from ..._types import Headers from ..agent import Agent, AgentUtils from ..client_tool import ClientTool from ..tool_parser import ToolParser @@ -128,6 +129,7 @@ def __init__( builtin_toolgroups: Tuple[str] = (), # DEPRECATED client_tools: Tuple[ClientTool] = (), # DEPRECATED custom_agent_config: Optional[AgentConfig] = None, # DEPRECATED + extra_headers: Headers | None = None, ): """Construct an Agent with the given parameters. @@ -156,6 +158,7 @@ def __init__( :param enable_session_persistence: Whether to enable session persistence. 
:param json_response_format: Whether to use the json response format with default ReAct output schema. ::deprecated: use response_format instead + :param extra_headers: Extra headers to add to all requests sent by the agent. """ use_deprecated_params = False if custom_agent_config is not None: @@ -181,6 +184,7 @@ def __init__( agent_config=agent_config, client_tools=client_tools, tool_parser=tool_parser, + extra_headers=extra_headers, ) else: @@ -220,4 +224,5 @@ def __init__( output_shields=output_shields, response_format=response_format, enable_session_persistence=enable_session_persistence, + extra_headers=extra_headers, ) diff --git a/src/llama_stack_client/lib/cli/eval/run_scoring.py b/src/llama_stack_client/lib/cli/eval/run_scoring.py index 78560a0a..a9b29bbb 100644 --- a/src/llama_stack_client/lib/cli/eval/run_scoring.py +++ b/src/llama_stack_client/lib/cli/eval/run_scoring.py @@ -61,7 +61,6 @@ def run_scoring( scoring_params_config: Optional[str], num_examples: Optional[int], output_dir: str, - visualize: bool, ): """Run scoring from application datasets""" # one of dataset_id or dataset_path is required diff --git a/src/llama_stack_client/lib/cli/inspect/version.py b/src/llama_stack_client/lib/cli/inspect/version.py index 212b9f9d..0a32195d 100644 --- a/src/llama_stack_client/lib/cli/inspect/version.py +++ b/src/llama_stack_client/lib/cli/inspect/version.py @@ -9,7 +9,7 @@ @click.pass_context @handle_client_errors("inspect version") def inspect_version(ctx): - """Show available providers on distribution endpoint""" + """Show Llama Stack version on distribution endpoint""" client = ctx.obj["client"] console = Console() version_response = client.inspect.version() diff --git a/src/llama_stack_client/lib/cli/models/models.py b/src/llama_stack_client/lib/cli/models/models.py index c724e5d5..24662a55 100644 --- a/src/llama_stack_client/lib/cli/models/models.py +++ b/src/llama_stack_client/lib/cli/models/models.py @@ -4,6 +4,7 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
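A hedged sketch of the new `name` and `extra_headers` arguments on the agent wrappers; the model id and header below are placeholders, not values from this diff:

from llama_stack_client import LlamaStackClient
from llama_stack_client.lib.agents.agent import Agent

client = LlamaStackClient()

agent = Agent(
    client,
    model="meta-llama/Llama-3.3-70B-Instruct",   # placeholder model id
    instructions="You are a helpful assistant.",
    name="docs-demo-agent",                      # new: used for telemetry and identification
    extra_headers={"X-Trace-Id": "demo"},        # placeholder header, forwarded on every agent request
)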
+import json from typing import Optional import click @@ -93,12 +94,23 @@ def get_model(ctx, model_id: str): console.print(table) +class JSONParamType(click.ParamType): + name = "json" + + def convert(self, value, param, ctx): + try: + return json.loads(value) + except json.JSONDecodeError as e: + self.fail(f"Invalid JSON: {e}", param, ctx) + + @click.command(name="register", help="Register a new model at distribution endpoint") @click.help_option("-h", "--help") @click.argument("model_id") @click.option("--provider-id", help="Provider ID for the model", default=None) @click.option("--provider-model-id", help="Provider's model ID", default=None) -@click.option("--metadata", help="JSON metadata for the model", default=None) +@click.option("--metadata", type=JSONParamType(), help="JSON metadata for the model", default=None) +@click.option("--model-type", type=click.Choice(["llm", "embedding"]), default="llm", help="Model type: llm, embedding") @click.pass_context @handle_client_errors("register model") def register_model( @@ -107,6 +119,7 @@ def register_model( provider_id: Optional[str], provider_model_id: Optional[str], metadata: Optional[str], + model_type: Optional[str], ): """Register a new model at distribution endpoint""" client = ctx.obj["client"] @@ -117,6 +130,7 @@ def register_model( provider_id=provider_id, provider_model_id=provider_model_id, metadata=metadata, + model_type=model_type, ) if response: console.print(f"[green]Successfully registered model {model_id}[/green]") diff --git a/src/llama_stack_client/lib/cli/providers/inspect.py b/src/llama_stack_client/lib/cli/providers/inspect.py index fc03d00d..7902849b 100644 --- a/src/llama_stack_client/lib/cli/providers/inspect.py +++ b/src/llama_stack_client/lib/cli/providers/inspect.py @@ -10,7 +10,7 @@ @click.pass_context @handle_client_errors("inspect providers") def inspect_provider(ctx, provider_id): - """Show available providers on distribution endpoint""" + """Show specific provider configuration on distribution endpoint""" client = ctx.obj["client"] console = Console() diff --git a/src/llama_stack_client/lib/tools/mcp_oauth.py b/src/llama_stack_client/lib/tools/mcp_oauth.py index a3c03416..503b9c69 100644 --- a/src/llama_stack_client/lib/tools/mcp_oauth.py +++ b/src/llama_stack_client/lib/tools/mcp_oauth.py @@ -253,7 +253,7 @@ def do_GET(self): self.send_response(404) self.end_headers() - def log_message(self, format, *args): + def log_message(self): """Override to suppress HTTP server logs.""" return diff --git a/src/llama_stack_client/pagination.py b/src/llama_stack_client/pagination.py index c2f7fe80..9122ff46 100644 --- a/src/llama_stack_client/pagination.py +++ b/src/llama_stack_client/pagination.py @@ -5,7 +5,7 @@ from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage -__all__ = ["SyncDatasetsIterrows", "AsyncDatasetsIterrows"] +__all__ = ["SyncDatasetsIterrows", "AsyncDatasetsIterrows", "SyncOpenAICursorPage", "AsyncOpenAICursorPage"] _T = TypeVar("_T") @@ -48,3 +48,61 @@ def next_page_info(self) -> Optional[PageInfo]: return None return PageInfo(params={"start_index": next_index}) + + +class SyncOpenAICursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): + data: List[_T] + has_more: Optional[bool] = None + last_id: Optional[str] = None + + @override + def _get_page_items(self) -> List[_T]: + data = self.data + if not data: + return [] + return data + + @override + def has_next_page(self) -> bool: + has_more = self.has_more + if has_more is not None and has_more is False: + return False + + 
return super().has_next_page() + + @override + def next_page_info(self) -> Optional[PageInfo]: + last_id = self.last_id + if not last_id: + return None + + return PageInfo(params={"after": last_id}) + + +class AsyncOpenAICursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]): + data: List[_T] + has_more: Optional[bool] = None + last_id: Optional[str] = None + + @override + def _get_page_items(self) -> List[_T]: + data = self.data + if not data: + return [] + return data + + @override + def has_next_page(self) -> bool: + has_more = self.has_more + if has_more is not None and has_more is False: + return False + + return super().has_next_page() + + @override + def next_page_info(self) -> Optional[PageInfo]: + last_id = self.last_id + if not last_id: + return None + + return PageInfo(params={"after": last_id}) diff --git a/src/llama_stack_client/resources/__init__.py b/src/llama_stack_client/resources/__init__.py index 23f61be1..01e17f57 100644 --- a/src/llama_stack_client/resources/__init__.py +++ b/src/llama_stack_client/resources/__init__.py @@ -176,6 +176,14 @@ CompletionsResourceWithStreamingResponse, AsyncCompletionsResourceWithStreamingResponse, ) +from .moderations import ( + ModerationsResource, + AsyncModerationsResource, + ModerationsResourceWithRawResponse, + AsyncModerationsResourceWithRawResponse, + ModerationsResourceWithStreamingResponse, + AsyncModerationsResourceWithStreamingResponse, +) from .tool_runtime import ( ToolRuntimeResource, AsyncToolRuntimeResource, @@ -332,6 +340,12 @@ "AsyncRoutesResourceWithRawResponse", "RoutesResourceWithStreamingResponse", "AsyncRoutesResourceWithStreamingResponse", + "ModerationsResource", + "AsyncModerationsResource", + "ModerationsResourceWithRawResponse", + "AsyncModerationsResourceWithRawResponse", + "ModerationsResourceWithStreamingResponse", + "AsyncModerationsResourceWithStreamingResponse", "SafetyResource", "AsyncSafetyResource", "SafetyResourceWithRawResponse", diff --git a/src/llama_stack_client/resources/agents/agents.py b/src/llama_stack_client/resources/agents/agents.py index 5b34cea8..6a4ffe85 100644 --- a/src/llama_stack_client/resources/agents/agents.py +++ b/src/llama_stack_client/resources/agents/agents.py @@ -20,7 +20,7 @@ StepsResourceWithStreamingResponse, AsyncStepsResourceWithStreamingResponse, ) -from ...types import agent_create_params +from ...types import agent_list_params, agent_create_params from .session import ( SessionResource, AsyncSessionResource, @@ -40,7 +40,9 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options +from ...types.agent_list_response import AgentListResponse from ...types.agent_create_response import AgentCreateResponse +from ...types.agent_retrieve_response import AgentRetrieveResponse from ...types.shared_params.agent_config import AgentConfig __all__ = ["AgentsResource", "AsyncAgentsResource"] @@ -112,6 +114,85 @@ def create( cast_to=AgentCreateResponse, ) + def retrieve( + self, + agent_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentRetrieveResponse: + """ + Describe an agent by its ID. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + return self._get( + f"/v1/agents/{agent_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentRetrieveResponse, + ) + + def list( + self, + *, + limit: int | NotGiven = NOT_GIVEN, + start_index: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentListResponse: + """ + List all agents. + + Args: + limit: The number of agents to return. + + start_index: The index to start the pagination from. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v1/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "limit": limit, + "start_index": start_index, + }, + agent_list_params.AgentListParams, + ), + ), + cast_to=AgentListResponse, + ) + def delete( self, agent_id: str, @@ -213,6 +294,85 @@ async def create( cast_to=AgentCreateResponse, ) + async def retrieve( + self, + agent_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentRetrieveResponse: + """ + Describe an agent by its ID. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + return await self._get( + f"/v1/agents/{agent_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentRetrieveResponse, + ) + + async def list( + self, + *, + limit: int | NotGiven = NOT_GIVEN, + start_index: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentListResponse: + """ + List all agents. + + Args: + limit: The number of agents to return. + + start_index: The index to start the pagination from. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v1/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "limit": limit, + "start_index": start_index, + }, + agent_list_params.AgentListParams, + ), + ), + cast_to=AgentListResponse, + ) + async def delete( self, agent_id: str, @@ -255,6 +415,12 @@ def __init__(self, agents: AgentsResource) -> None: self.create = to_raw_response_wrapper( agents.create, ) + self.retrieve = to_raw_response_wrapper( + agents.retrieve, + ) + self.list = to_raw_response_wrapper( + agents.list, + ) self.delete = to_raw_response_wrapper( agents.delete, ) @@ -279,6 +445,12 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self.create = async_to_raw_response_wrapper( agents.create, ) + self.retrieve = async_to_raw_response_wrapper( + agents.retrieve, + ) + self.list = async_to_raw_response_wrapper( + agents.list, + ) self.delete = async_to_raw_response_wrapper( agents.delete, ) @@ -303,6 +475,12 @@ def __init__(self, agents: AgentsResource) -> None: self.create = to_streamed_response_wrapper( agents.create, ) + self.retrieve = to_streamed_response_wrapper( + agents.retrieve, + ) + self.list = to_streamed_response_wrapper( + agents.list, + ) self.delete = to_streamed_response_wrapper( agents.delete, ) @@ -327,6 +505,12 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self.create = async_to_streamed_response_wrapper( agents.create, ) + self.retrieve = async_to_streamed_response_wrapper( + agents.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + agents.list, + ) self.delete = async_to_streamed_response_wrapper( agents.delete, ) diff --git a/src/llama_stack_client/resources/agents/session.py b/src/llama_stack_client/resources/agents/session.py index ccefeb0b..4e1704d5 100644 --- a/src/llama_stack_client/resources/agents/session.py +++ b/src/llama_stack_client/resources/agents/session.py @@ -17,8 +17,9 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import session_create_params, session_retrieve_params +from ...types.agents import session_list_params, session_create_params, session_retrieve_params from ...types.agents.session import Session +from ...types.agents.session_list_response import SessionListResponse from ...types.agents.session_create_response import SessionCreateResponse __all__ = ["SessionResource", "AsyncSessionResource"] @@ -124,6 +125,55 @@ def retrieve( cast_to=Session, ) + def list( + self, + agent_id: str, + *, + limit: int | NotGiven = NOT_GIVEN, + start_index: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SessionListResponse: + """ + List all session(s) of a given agent. + + Args: + limit: The number of sessions to return. + + start_index: The index to start the pagination from. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + return self._get( + f"/v1/agents/{agent_id}/sessions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "limit": limit, + "start_index": start_index, + }, + session_list_params.SessionListParams, + ), + ), + cast_to=SessionListResponse, + ) + def delete( self, session_id: str, @@ -264,6 +314,55 @@ async def retrieve( cast_to=Session, ) + async def list( + self, + agent_id: str, + *, + limit: int | NotGiven = NOT_GIVEN, + start_index: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SessionListResponse: + """ + List all session(s) of a given agent. + + Args: + limit: The number of sessions to return. + + start_index: The index to start the pagination from. 
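A rough sketch of the agent and session listing endpoints added here; the agent id is a placeholder and the exact response shapes are not spelled out in this hunk:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient()

agents = client.agents.list(limit=10, start_index=0)          # GET /v1/agents
agent = client.agents.retrieve("agent-1234")                  # GET /v1/agents/{agent_id}; placeholder id
sessions = client.agents.session.list("agent-1234", limit=5)  # GET /v1/agents/{agent_id}/sessions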
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + return await self._get( + f"/v1/agents/{agent_id}/sessions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "limit": limit, + "start_index": start_index, + }, + session_list_params.SessionListParams, + ), + ), + cast_to=SessionListResponse, + ) + async def delete( self, session_id: str, @@ -312,6 +411,9 @@ def __init__(self, session: SessionResource) -> None: self.retrieve = to_raw_response_wrapper( session.retrieve, ) + self.list = to_raw_response_wrapper( + session.list, + ) self.delete = to_raw_response_wrapper( session.delete, ) @@ -327,6 +429,9 @@ def __init__(self, session: AsyncSessionResource) -> None: self.retrieve = async_to_raw_response_wrapper( session.retrieve, ) + self.list = async_to_raw_response_wrapper( + session.list, + ) self.delete = async_to_raw_response_wrapper( session.delete, ) @@ -342,6 +447,9 @@ def __init__(self, session: SessionResource) -> None: self.retrieve = to_streamed_response_wrapper( session.retrieve, ) + self.list = to_streamed_response_wrapper( + session.list, + ) self.delete = to_streamed_response_wrapper( session.delete, ) @@ -357,6 +465,9 @@ def __init__(self, session: AsyncSessionResource) -> None: self.retrieve = async_to_streamed_response_wrapper( session.retrieve, ) + self.list = async_to_streamed_response_wrapper( + session.list, + ) self.delete = async_to_streamed_response_wrapper( session.delete, ) diff --git a/src/llama_stack_client/resources/chat/completions.py b/src/llama_stack_client/resources/chat/completions.py index 1355f97a..ccf2cba9 100644 --- a/src/llama_stack_client/resources/chat/completions.py +++ b/src/llama_stack_client/resources/chat/completions.py @@ -18,8 +18,9 @@ async_to_streamed_response_wrapper, ) from ..._streaming import Stream, AsyncStream +from ...pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage from ...types.chat import completion_list_params, completion_create_params -from ..._base_client import make_request_options +from ..._base_client import AsyncPaginator, make_request_options from ...types.chat_completion_chunk import ChatCompletionChunk from ...types.chat.completion_list_response import CompletionListResponse from ...types.chat.completion_create_response import CompletionCreateResponse @@ -466,7 +467,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionListResponse: + ) -> SyncOpenAICursorPage[CompletionListResponse]: """ List all chat completions. 
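Since chat completion listing now returns a cursor page rather than a plain response object, iterating the page is the natural way to consume it; a sketch under the assumption that auto-pagination follows the `after` cursor as in other Stainless-generated pages:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient()

page = client.chat.completions.list(limit=20)  # SyncOpenAICursorPage[CompletionListResponse]
for completion in page:                        # iterating fetches follow-up pages via the `after` cursor
    print(completion)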
@@ -487,8 +488,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - return self._get( + return self._get_api_list( "/v1/openai/v1/chat/completions", + page=SyncOpenAICursorPage[CompletionListResponse], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -504,7 +506,7 @@ def list( completion_list_params.CompletionListParams, ), ), - cast_to=CompletionListResponse, + model=CompletionListResponse, ) @@ -933,7 +935,7 @@ async def retrieve( cast_to=CompletionRetrieveResponse, ) - async def list( + def list( self, *, after: str | NotGiven = NOT_GIVEN, @@ -946,7 +948,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionListResponse: + ) -> AsyncPaginator[CompletionListResponse, AsyncOpenAICursorPage[CompletionListResponse]]: """ List all chat completions. @@ -967,14 +969,15 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - return await self._get( + return self._get_api_list( "/v1/openai/v1/chat/completions", + page=AsyncOpenAICursorPage[CompletionListResponse], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=await async_maybe_transform( + query=maybe_transform( { "after": after, "limit": limit, @@ -984,7 +987,7 @@ async def list( completion_list_params.CompletionListParams, ), ), - cast_to=CompletionListResponse, + model=CompletionListResponse, ) diff --git a/src/llama_stack_client/resources/datasets.py b/src/llama_stack_client/resources/datasets.py index 45dcaeba..e2f0a149 100644 --- a/src/llama_stack_client/resources/datasets.py +++ b/src/llama_stack_client/resources/datasets.py @@ -7,7 +7,7 @@ import httpx -from ..types import dataset_iterrows_params, dataset_register_params +from ..types import dataset_iterrows_params, dataset_register_params, dataset_appendrows_params from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven from .._utils import maybe_transform, async_maybe_transform from .._compat import cached_property @@ -104,6 +104,44 @@ def list( cast_to=cast(Type[DatasetListResponse], DataWrapper[DatasetListResponse]), ) + def appendrows( + self, + dataset_id: str, + *, + rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Append rows to a dataset. + + Args: + rows: The rows to append to the dataset. 
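A minimal sketch of the new append-rows call; the dataset id and row keys are placeholders:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient()

client.datasets.appendrows(
    dataset_id="my-eval-dataset",                           # placeholder dataset id
    rows=[{"input": "2 + 2 = ?", "expected_answer": "4"}],  # placeholder row schema
)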
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not dataset_id: + raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v1/datasetio/append-rows/{dataset_id}", + body=maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + def iterrows( self, dataset_id: str, @@ -342,6 +380,44 @@ async def list( cast_to=cast(Type[DatasetListResponse], DataWrapper[DatasetListResponse]), ) + async def appendrows( + self, + dataset_id: str, + *, + rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Append rows to a dataset. + + Args: + rows: The rows to append to the dataset. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not dataset_id: + raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v1/datasetio/append-rows/{dataset_id}", + body=await async_maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + async def iterrows( self, dataset_id: str, @@ -514,6 +590,9 @@ def __init__(self, datasets: DatasetsResource) -> None: self.list = to_raw_response_wrapper( datasets.list, ) + self.appendrows = to_raw_response_wrapper( + datasets.appendrows, + ) self.iterrows = to_raw_response_wrapper( datasets.iterrows, ) @@ -535,6 +614,9 @@ def __init__(self, datasets: AsyncDatasetsResource) -> None: self.list = async_to_raw_response_wrapper( datasets.list, ) + self.appendrows = async_to_raw_response_wrapper( + datasets.appendrows, + ) self.iterrows = async_to_raw_response_wrapper( datasets.iterrows, ) @@ -556,6 +638,9 @@ def __init__(self, datasets: DatasetsResource) -> None: self.list = to_streamed_response_wrapper( datasets.list, ) + self.appendrows = to_streamed_response_wrapper( + datasets.appendrows, + ) self.iterrows = to_streamed_response_wrapper( datasets.iterrows, ) @@ -577,6 +662,9 @@ def __init__(self, datasets: AsyncDatasetsResource) -> None: self.list = async_to_streamed_response_wrapper( datasets.list, ) + self.appendrows = async_to_streamed_response_wrapper( + datasets.appendrows, + ) self.iterrows = async_to_streamed_response_wrapper( datasets.iterrows, ) diff --git a/src/llama_stack_client/resources/files.py 
b/src/llama_stack_client/resources/files.py index 3eac6486..96c5c871 100644 --- a/src/llama_stack_client/resources/files.py +++ b/src/llama_stack_client/resources/files.py @@ -18,9 +18,9 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) +from ..pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage from ..types.file import File -from .._base_client import make_request_options -from ..types.list_files_response import ListFilesResponse +from .._base_client import AsyncPaginator, make_request_options from ..types.delete_file_response import DeleteFileResponse __all__ = ["FilesResource", "AsyncFilesResource"] @@ -144,7 +144,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListFilesResponse: + ) -> SyncOpenAICursorPage[File]: """ Returns a list of files that belong to the user's organization. @@ -170,8 +170,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - return self._get( + return self._get_api_list( "/v1/openai/v1/files", + page=SyncOpenAICursorPage[File], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -187,7 +188,7 @@ def list( file_list_params.FileListParams, ), ), - cast_to=ListFilesResponse, + model=File, ) def delete( @@ -362,7 +363,7 @@ async def retrieve( cast_to=File, ) - async def list( + def list( self, *, after: str | NotGiven = NOT_GIVEN, @@ -375,7 +376,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListFilesResponse: + ) -> AsyncPaginator[File, AsyncOpenAICursorPage[File]]: """ Returns a list of files that belong to the user's organization. @@ -401,14 +402,15 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - return await self._get( + return self._get_api_list( "/v1/openai/v1/files", + page=AsyncOpenAICursorPage[File], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=await async_maybe_transform( + query=maybe_transform( { "after": after, "limit": limit, @@ -418,7 +420,7 @@ async def list( file_list_params.FileListParams, ), ), - cast_to=ListFilesResponse, + model=File, ) async def delete( diff --git a/src/llama_stack_client/resources/inference.py b/src/llama_stack_client/resources/inference.py index 84a8dd96..7aec2dbd 100644 --- a/src/llama_stack_client/resources/inference.py +++ b/src/llama_stack_client/resources/inference.py @@ -2,6 +2,7 @@ from __future__ import annotations +import typing_extensions from typing import List, Union, Iterable from typing_extensions import Literal, overload @@ -183,6 +184,9 @@ def batch_completion( cast_to=BatchCompletion, ) + @typing_extensions.deprecated( + "/v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions." + ) @overload def chat_completion( self, @@ -251,6 +255,9 @@ def chat_completion( """ ... + @typing_extensions.deprecated( + "/v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions." + ) @overload def chat_completion( self, @@ -319,6 +326,9 @@ def chat_completion( """ ... + @typing_extensions.deprecated( + "/v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions." + ) @overload def chat_completion( self, @@ -387,6 +397,9 @@ def chat_completion( """ ... 
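The deprecation decorators point callers at the OpenAI-compatible endpoints; a hedged migration sketch, where the model id is a placeholder and the field access assumes the OpenAI-style chat completion shape:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient()

# Instead of the deprecated client.inference.chat_completion(...):
response = client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct",      # placeholder model id
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)          # assumes the OpenAI-style response shape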
+ @typing_extensions.deprecated( + "/v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions." + ) @required_args(["messages", "model_id"], ["messages", "model_id", "stream"]) def chat_completion( self, @@ -437,6 +450,7 @@ def chat_completion( stream_cls=Stream[ChatCompletionResponseStreamChunk], ) + @typing_extensions.deprecated("/v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.") @overload def completion( self, @@ -483,6 +497,7 @@ def completion( """ ... + @typing_extensions.deprecated("/v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.") @overload def completion( self, @@ -529,6 +544,7 @@ def completion( """ ... + @typing_extensions.deprecated("/v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.") @overload def completion( self, @@ -575,6 +591,7 @@ def completion( """ ... + @typing_extensions.deprecated("/v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.") @required_args(["content", "model_id"], ["content", "model_id", "stream"]) def completion( self, @@ -617,6 +634,7 @@ def completion( stream_cls=Stream[CompletionResponse], ) + @typing_extensions.deprecated("/v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings.") def embeddings( self, *, @@ -821,6 +839,9 @@ async def batch_completion( cast_to=BatchCompletion, ) + @typing_extensions.deprecated( + "/v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions." + ) @overload async def chat_completion( self, @@ -889,6 +910,9 @@ async def chat_completion( """ ... + @typing_extensions.deprecated( + "/v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions." + ) @overload async def chat_completion( self, @@ -957,6 +981,9 @@ async def chat_completion( """ ... + @typing_extensions.deprecated( + "/v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions." + ) @overload async def chat_completion( self, @@ -1025,6 +1052,9 @@ async def chat_completion( """ ... + @typing_extensions.deprecated( + "/v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions." + ) @required_args(["messages", "model_id"], ["messages", "model_id", "stream"]) async def chat_completion( self, @@ -1075,6 +1105,7 @@ async def chat_completion( stream_cls=AsyncStream[ChatCompletionResponseStreamChunk], ) + @typing_extensions.deprecated("/v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.") @overload async def completion( self, @@ -1121,6 +1152,7 @@ async def completion( """ ... + @typing_extensions.deprecated("/v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.") @overload async def completion( self, @@ -1167,6 +1199,7 @@ async def completion( """ ... + @typing_extensions.deprecated("/v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.") @overload async def completion( self, @@ -1213,6 +1246,7 @@ async def completion( """ ... + @typing_extensions.deprecated("/v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.") @required_args(["content", "model_id"], ["content", "model_id", "stream"]) async def completion( self, @@ -1255,6 +1289,7 @@ async def completion( stream_cls=AsyncStream[CompletionResponse], ) + @typing_extensions.deprecated("/v1/inference/embeddings is deprecated. 
Please use /v1/openai/v1/embeddings.") async def embeddings( self, *, @@ -1327,14 +1362,20 @@ def __init__(self, inference: InferenceResource) -> None: self.batch_completion = to_raw_response_wrapper( inference.batch_completion, ) - self.chat_completion = to_raw_response_wrapper( - inference.chat_completion, + self.chat_completion = ( # pyright: ignore[reportDeprecated] + to_raw_response_wrapper( + inference.chat_completion # pyright: ignore[reportDeprecated], + ) ) - self.completion = to_raw_response_wrapper( - inference.completion, + self.completion = ( # pyright: ignore[reportDeprecated] + to_raw_response_wrapper( + inference.completion # pyright: ignore[reportDeprecated], + ) ) - self.embeddings = to_raw_response_wrapper( - inference.embeddings, + self.embeddings = ( # pyright: ignore[reportDeprecated] + to_raw_response_wrapper( + inference.embeddings # pyright: ignore[reportDeprecated], + ) ) @@ -1348,14 +1389,20 @@ def __init__(self, inference: AsyncInferenceResource) -> None: self.batch_completion = async_to_raw_response_wrapper( inference.batch_completion, ) - self.chat_completion = async_to_raw_response_wrapper( - inference.chat_completion, + self.chat_completion = ( # pyright: ignore[reportDeprecated] + async_to_raw_response_wrapper( + inference.chat_completion # pyright: ignore[reportDeprecated], + ) ) - self.completion = async_to_raw_response_wrapper( - inference.completion, + self.completion = ( # pyright: ignore[reportDeprecated] + async_to_raw_response_wrapper( + inference.completion # pyright: ignore[reportDeprecated], + ) ) - self.embeddings = async_to_raw_response_wrapper( - inference.embeddings, + self.embeddings = ( # pyright: ignore[reportDeprecated] + async_to_raw_response_wrapper( + inference.embeddings # pyright: ignore[reportDeprecated], + ) ) @@ -1369,14 +1416,20 @@ def __init__(self, inference: InferenceResource) -> None: self.batch_completion = to_streamed_response_wrapper( inference.batch_completion, ) - self.chat_completion = to_streamed_response_wrapper( - inference.chat_completion, + self.chat_completion = ( # pyright: ignore[reportDeprecated] + to_streamed_response_wrapper( + inference.chat_completion # pyright: ignore[reportDeprecated], + ) ) - self.completion = to_streamed_response_wrapper( - inference.completion, + self.completion = ( # pyright: ignore[reportDeprecated] + to_streamed_response_wrapper( + inference.completion # pyright: ignore[reportDeprecated], + ) ) - self.embeddings = to_streamed_response_wrapper( - inference.embeddings, + self.embeddings = ( # pyright: ignore[reportDeprecated] + to_streamed_response_wrapper( + inference.embeddings # pyright: ignore[reportDeprecated], + ) ) @@ -1390,12 +1443,18 @@ def __init__(self, inference: AsyncInferenceResource) -> None: self.batch_completion = async_to_streamed_response_wrapper( inference.batch_completion, ) - self.chat_completion = async_to_streamed_response_wrapper( - inference.chat_completion, + self.chat_completion = ( # pyright: ignore[reportDeprecated] + async_to_streamed_response_wrapper( + inference.chat_completion # pyright: ignore[reportDeprecated], + ) ) - self.completion = async_to_streamed_response_wrapper( - inference.completion, + self.completion = ( # pyright: ignore[reportDeprecated] + async_to_streamed_response_wrapper( + inference.completion # pyright: ignore[reportDeprecated], + ) ) - self.embeddings = async_to_streamed_response_wrapper( - inference.embeddings, + self.embeddings = ( # pyright: ignore[reportDeprecated] + async_to_streamed_response_wrapper( + inference.embeddings 
# pyright: ignore[reportDeprecated], + ) ) diff --git a/src/llama_stack_client/resources/inspect.py b/src/llama_stack_client/resources/inspect.py index eb028c16..bd67ae96 100644 --- a/src/llama_stack_client/resources/inspect.py +++ b/src/llama_stack_client/resources/inspect.py @@ -50,7 +50,7 @@ def health( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> HealthInfo: - """Get the health of the service.""" + """Get the current health status of the service.""" return self._get( "/v1/health", options=make_request_options( @@ -109,7 +109,7 @@ async def health( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> HealthInfo: - """Get the health of the service.""" + """Get the current health status of the service.""" return await self._get( "/v1/health", options=make_request_options( diff --git a/src/llama_stack_client/resources/moderations.py b/src/llama_stack_client/resources/moderations.py new file mode 100644 index 00000000..165f3ce3 --- /dev/null +++ b/src/llama_stack_client/resources/moderations.py @@ -0,0 +1,189 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union + +import httpx + +from ..types import moderation_create_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.create_response import CreateResponse + +__all__ = ["ModerationsResource", "AsyncModerationsResource"] + + +class ModerationsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ModerationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers + """ + return ModerationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ModerationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response + """ + return ModerationsResourceWithStreamingResponse(self) + + def create( + self, + *, + input: Union[str, List[str]], + model: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CreateResponse: + """ + Classifies if text and/or image inputs are potentially harmful. + + Args: + input: Input (or inputs) to classify. Can be a single string, an array of strings, or + an array of multi-modal input objects similar to other models. + + model: The content moderation model you would like to use. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v1/openai/v1/moderations", + body=maybe_transform( + { + "input": input, + "model": model, + }, + moderation_create_params.ModerationCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CreateResponse, + ) + + +class AsyncModerationsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncModerationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers + """ + return AsyncModerationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncModerationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response + """ + return AsyncModerationsResourceWithStreamingResponse(self) + + async def create( + self, + *, + input: Union[str, List[str]], + model: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CreateResponse: + """ + Classifies if text and/or image inputs are potentially harmful. + + Args: + input: Input (or inputs) to classify. Can be a single string, an array of strings, or + an array of multi-modal input objects similar to other models. + + model: The content moderation model you would like to use. 
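A short sketch of the new moderations resource; the model identifier is a placeholder for whatever moderation or shield model the distribution actually serves:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient()

result = client.moderations.create(
    input="Is this text safe to show to users?",
    model="llama-guard",   # placeholder moderation model id
)
print(result)              # CreateResponse, mirroring the OpenAI moderations shape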
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v1/openai/v1/moderations", + body=await async_maybe_transform( + { + "input": input, + "model": model, + }, + moderation_create_params.ModerationCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CreateResponse, + ) + + +class ModerationsResourceWithRawResponse: + def __init__(self, moderations: ModerationsResource) -> None: + self._moderations = moderations + + self.create = to_raw_response_wrapper( + moderations.create, + ) + + +class AsyncModerationsResourceWithRawResponse: + def __init__(self, moderations: AsyncModerationsResource) -> None: + self._moderations = moderations + + self.create = async_to_raw_response_wrapper( + moderations.create, + ) + + +class ModerationsResourceWithStreamingResponse: + def __init__(self, moderations: ModerationsResource) -> None: + self._moderations = moderations + + self.create = to_streamed_response_wrapper( + moderations.create, + ) + + +class AsyncModerationsResourceWithStreamingResponse: + def __init__(self, moderations: AsyncModerationsResource) -> None: + self._moderations = moderations + + self.create = async_to_streamed_response_wrapper( + moderations.create, + ) diff --git a/src/llama_stack_client/resources/responses/responses.py b/src/llama_stack_client/resources/responses/responses.py index fa05f7ed..b73be85f 100644 --- a/src/llama_stack_client/resources/responses/responses.py +++ b/src/llama_stack_client/resources/responses/responses.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Union, Iterable +from typing import List, Union, Iterable from typing_extensions import Literal, overload import httpx @@ -27,7 +27,8 @@ AsyncInputItemsResourceWithStreamingResponse, ) from ..._streaming import Stream, AsyncStream -from ..._base_client import make_request_options +from ...pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage +from ..._base_client import AsyncPaginator, make_request_options from ...types.response_object import ResponseObject from ...types.response_list_response import ResponseListResponse from ...types.response_object_stream import ResponseObjectStream @@ -65,6 +66,7 @@ def create( *, input: Union[str, Iterable[response_create_params.InputUnionMember1]], model: str, + include: List[str] | NotGiven = NOT_GIVEN, instructions: str | NotGiven = NOT_GIVEN, max_infer_iters: int | NotGiven = NOT_GIVEN, previous_response_id: str | NotGiven = NOT_GIVEN, @@ -88,10 +90,14 @@ def create( model: The underlying LLM used for completions. + include: (Optional) Additional fields to include in the response. + previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses. + text: Text response configuration for OpenAI responses. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -109,6 +115,7 @@ def create( input: Union[str, Iterable[response_create_params.InputUnionMember1]], model: str, stream: Literal[True], + include: List[str] | NotGiven = NOT_GIVEN, instructions: str | NotGiven = NOT_GIVEN, max_infer_iters: int | NotGiven = NOT_GIVEN, previous_response_id: str | NotGiven = NOT_GIVEN, @@ -131,10 +138,14 @@ def create( model: The underlying LLM used for completions. + include: (Optional) Additional fields to include in the response. + previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses. + text: Text response configuration for OpenAI responses. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -152,6 +163,7 @@ def create( input: Union[str, Iterable[response_create_params.InputUnionMember1]], model: str, stream: bool, + include: List[str] | NotGiven = NOT_GIVEN, instructions: str | NotGiven = NOT_GIVEN, max_infer_iters: int | NotGiven = NOT_GIVEN, previous_response_id: str | NotGiven = NOT_GIVEN, @@ -174,10 +186,14 @@ def create( model: The underlying LLM used for completions. + include: (Optional) Additional fields to include in the response. + previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses. + text: Text response configuration for OpenAI responses. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -194,6 +210,7 @@ def create( *, input: Union[str, Iterable[response_create_params.InputUnionMember1]], model: str, + include: List[str] | NotGiven = NOT_GIVEN, instructions: str | NotGiven = NOT_GIVEN, max_infer_iters: int | NotGiven = NOT_GIVEN, previous_response_id: str | NotGiven = NOT_GIVEN, @@ -215,6 +232,7 @@ def create( { "input": input, "model": model, + "include": include, "instructions": instructions, "max_infer_iters": max_infer_iters, "previous_response_id": previous_response_id, @@ -282,7 +300,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ResponseListResponse: + ) -> SyncOpenAICursorPage[ResponseListResponse]: """ List all OpenAI responses. @@ -303,8 +321,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - return self._get( + return self._get_api_list( "/v1/openai/v1/responses", + page=SyncOpenAICursorPage[ResponseListResponse], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -320,7 +339,7 @@ def list( response_list_params.ResponseListParams, ), ), - cast_to=ResponseListResponse, + model=ResponseListResponse, ) @@ -354,6 +373,7 @@ async def create( *, input: Union[str, Iterable[response_create_params.InputUnionMember1]], model: str, + include: List[str] | NotGiven = NOT_GIVEN, instructions: str | NotGiven = NOT_GIVEN, max_infer_iters: int | NotGiven = NOT_GIVEN, previous_response_id: str | NotGiven = NOT_GIVEN, @@ -377,10 +397,14 @@ async def create( model: The underlying LLM used for completions. + include: (Optional) Additional fields to include in the response. + previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. 
This can be used to easily fork-off new responses from existing responses. + text: Text response configuration for OpenAI responses. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -398,6 +422,7 @@ async def create( input: Union[str, Iterable[response_create_params.InputUnionMember1]], model: str, stream: Literal[True], + include: List[str] | NotGiven = NOT_GIVEN, instructions: str | NotGiven = NOT_GIVEN, max_infer_iters: int | NotGiven = NOT_GIVEN, previous_response_id: str | NotGiven = NOT_GIVEN, @@ -420,10 +445,14 @@ async def create( model: The underlying LLM used for completions. + include: (Optional) Additional fields to include in the response. + previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses. + text: Text response configuration for OpenAI responses. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -441,6 +470,7 @@ async def create( input: Union[str, Iterable[response_create_params.InputUnionMember1]], model: str, stream: bool, + include: List[str] | NotGiven = NOT_GIVEN, instructions: str | NotGiven = NOT_GIVEN, max_infer_iters: int | NotGiven = NOT_GIVEN, previous_response_id: str | NotGiven = NOT_GIVEN, @@ -463,10 +493,14 @@ async def create( model: The underlying LLM used for completions. + include: (Optional) Additional fields to include in the response. + previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses. + text: Text response configuration for OpenAI responses. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -483,6 +517,7 @@ async def create( *, input: Union[str, Iterable[response_create_params.InputUnionMember1]], model: str, + include: List[str] | NotGiven = NOT_GIVEN, instructions: str | NotGiven = NOT_GIVEN, max_infer_iters: int | NotGiven = NOT_GIVEN, previous_response_id: str | NotGiven = NOT_GIVEN, @@ -504,6 +539,7 @@ async def create( { "input": input, "model": model, + "include": include, "instructions": instructions, "max_infer_iters": max_infer_iters, "previous_response_id": previous_response_id, @@ -558,7 +594,7 @@ async def retrieve( cast_to=ResponseObject, ) - async def list( + def list( self, *, after: str | NotGiven = NOT_GIVEN, @@ -571,7 +607,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ResponseListResponse: + ) -> AsyncPaginator[ResponseListResponse, AsyncOpenAICursorPage[ResponseListResponse]]: """ List all OpenAI responses. 
@@ -592,14 +628,15 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - return await self._get( + return self._get_api_list( "/v1/openai/v1/responses", + page=AsyncOpenAICursorPage[ResponseListResponse], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=await async_maybe_transform( + query=maybe_transform( { "after": after, "limit": limit, @@ -609,7 +646,7 @@ async def list( response_list_params.ResponseListParams, ), ), - cast_to=ResponseListResponse, + model=ResponseListResponse, ) diff --git a/src/llama_stack_client/resources/routes.py b/src/llama_stack_client/resources/routes.py index a95b5e06..7d544c0e 100644 --- a/src/llama_stack_client/resources/routes.py +++ b/src/llama_stack_client/resources/routes.py @@ -52,7 +52,7 @@ def list( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RouteListResponse: - """List all routes.""" + """List all available API routes with their methods and implementing providers.""" return self._get( "/v1/inspect/routes", options=make_request_options( @@ -96,7 +96,7 @@ async def list( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RouteListResponse: - """List all routes.""" + """List all available API routes with their methods and implementing providers.""" return await self._get( "/v1/inspect/routes", options=make_request_options( diff --git a/src/llama_stack_client/resources/synthetic_data_generation.py b/src/llama_stack_client/resources/synthetic_data_generation.py index 6e4e5a08..0843eafe 100644 --- a/src/llama_stack_client/resources/synthetic_data_generation.py +++ b/src/llama_stack_client/resources/synthetic_data_generation.py @@ -59,8 +59,15 @@ def generate( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyntheticDataGenerationResponse: """ + Generate synthetic data based on input dialogs and apply filtering. + Args: - filtering_function: The type of filtering function. + dialogs: List of conversation messages to use as input for synthetic data generation + + filtering_function: Type of filtering to apply to generated synthetic data samples + + model: (Optional) The identifier of the model to use. The model must be registered with + Llama Stack and available via the /models endpoint extra_headers: Send extra headers @@ -121,8 +128,15 @@ async def generate( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyntheticDataGenerationResponse: """ + Generate synthetic data based on input dialogs and apply filtering. + Args: - filtering_function: The type of filtering function. + dialogs: List of conversation messages to use as input for synthetic data generation + + filtering_function: Type of filtering to apply to generated synthetic data samples + + model: (Optional) The identifier of the model to use. 
The model must be registered with + Llama Stack and available via the /models endpoint extra_headers: Send extra headers diff --git a/src/llama_stack_client/resources/tool_runtime/rag_tool.py b/src/llama_stack_client/resources/tool_runtime/rag_tool.py index 65ef0463..3ff25968 100644 --- a/src/llama_stack_client/resources/tool_runtime/rag_tool.py +++ b/src/llama_stack_client/resources/tool_runtime/rag_tool.py @@ -60,9 +60,15 @@ def insert( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> None: """ - Index documents so they can be used by the RAG system + Index documents so they can be used by the RAG system. Args: + chunk_size_in_tokens: (Optional) Size in tokens for document chunking during indexing + + documents: List of documents to index in the RAG system + + vector_db_id: ID of the vector database to store the document embeddings + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -102,12 +108,14 @@ def query( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> QueryResult: """ - Query the RAG system for context; typically invoked by the agent + Query the RAG system for context; typically invoked by the agent. Args: - content: A image content item + content: The query content to search for in the indexed documents + + vector_db_ids: List of vector database IDs to search within - query_config: Configuration for the RAG query generation. + query_config: (Optional) Configuration parameters for the query operation extra_headers: Send extra headers @@ -168,9 +176,15 @@ async def insert( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> None: """ - Index documents so they can be used by the RAG system + Index documents so they can be used by the RAG system. Args: + chunk_size_in_tokens: (Optional) Size in tokens for document chunking during indexing + + documents: List of documents to index in the RAG system + + vector_db_id: ID of the vector database to store the document embeddings + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -210,12 +224,14 @@ async def query( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> QueryResult: """ - Query the RAG system for context; typically invoked by the agent + Query the RAG system for context; typically invoked by the agent. Args: - content: A image content item + content: The query content to search for in the indexed documents + + vector_db_ids: List of vector database IDs to search within - query_config: Configuration for the RAG query generation. + query_config: (Optional) Configuration parameters for the query operation extra_headers: Send extra headers diff --git a/src/llama_stack_client/resources/vector_dbs.py b/src/llama_stack_client/resources/vector_dbs.py index 3838c38e..ab62fa6a 100644 --- a/src/llama_stack_client/resources/vector_dbs.py +++ b/src/llama_stack_client/resources/vector_dbs.py @@ -110,6 +110,7 @@ def register( embedding_dimension: int | NotGiven = NOT_GIVEN, provider_id: str | NotGiven = NOT_GIVEN, provider_vector_db_id: str | NotGiven = NOT_GIVEN, + vector_db_name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -131,6 +132,8 @@ def register( provider_vector_db_id: The identifier of the vector database in the provider. 
+ vector_db_name: The name of the vector database. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -148,6 +151,7 @@ def register( "embedding_dimension": embedding_dimension, "provider_id": provider_id, "provider_vector_db_id": provider_vector_db_id, + "vector_db_name": vector_db_name, }, vector_db_register_params.VectorDBRegisterParams, ), @@ -276,6 +280,7 @@ async def register( embedding_dimension: int | NotGiven = NOT_GIVEN, provider_id: str | NotGiven = NOT_GIVEN, provider_vector_db_id: str | NotGiven = NOT_GIVEN, + vector_db_name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -297,6 +302,8 @@ async def register( provider_vector_db_id: The identifier of the vector database in the provider. + vector_db_name: The name of the vector database. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -314,6 +321,7 @@ async def register( "embedding_dimension": embedding_dimension, "provider_id": provider_id, "provider_vector_db_id": provider_vector_db_id, + "vector_db_name": vector_db_name, }, vector_db_register_params.VectorDBRegisterParams, ), diff --git a/src/llama_stack_client/resources/vector_stores/files.py b/src/llama_stack_client/resources/vector_stores/files.py index 1ef48084..8589ebc6 100644 --- a/src/llama_stack_client/resources/vector_stores/files.py +++ b/src/llama_stack_client/resources/vector_stores/files.py @@ -3,6 +3,7 @@ from __future__ import annotations from typing import Dict, Union, Iterable +from typing_extensions import Literal import httpx @@ -16,9 +17,12 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..._base_client import make_request_options -from ...types.vector_stores import file_create_params +from ...pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage +from ..._base_client import AsyncPaginator, make_request_options +from ...types.vector_stores import file_list_params, file_create_params, file_update_params from ...types.vector_stores.vector_store_file import VectorStoreFile +from ...types.vector_stores.file_delete_response import FileDeleteResponse +from ...types.vector_stores.file_content_response import FileContentResponse __all__ = ["FilesResource", "AsyncFilesResource"] @@ -93,6 +97,220 @@ def create( cast_to=VectorStoreFile, ) + def retrieve( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFile: + """ + Retrieves a vector store file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._get( + f"/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFile, + ) + + def update( + self, + file_id: str, + *, + vector_store_id: str, + attributes: Dict[str, Union[bool, float, str, Iterable[object], object, None]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFile: + """ + Updates a vector store file. + + Args: + attributes: The updated key-value attributes to store with the file. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._post( + f"/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}", + body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFile, + ) + + def list( + self, + vector_store_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + filter: Literal["completed", "in_progress", "cancelled", "failed"] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncOpenAICursorPage[VectorStoreFile]: + """ + List files in a vector store. + + Args: + after: (Optional) A cursor for use in pagination. `after` is an object ID that defines + your place in the list. + + before: (Optional) A cursor for use in pagination. `before` is an object ID that defines + your place in the list. + + filter: (Optional) Filter by file status to only return files with the specified status. + + limit: (Optional) A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. 
+ + order: (Optional) Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return self._get_api_list( + f"/v1/openai/v1/vector_stores/{vector_store_id}/files", + page=SyncOpenAICursorPage[VectorStoreFile], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "filter": filter, + "limit": limit, + "order": order, + }, + file_list_params.FileListParams, + ), + ), + model=VectorStoreFile, + ) + + def delete( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileDeleteResponse: + """ + Delete a vector store file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._delete( + f"/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileDeleteResponse, + ) + + def content( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileContentResponse: + """ + Retrieves the contents of a vector store file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._get( + f"/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}/content", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileContentResponse, + ) + class AsyncFilesResource(AsyncAPIResource): @cached_property @@ -164,6 +382,220 @@ async def create( cast_to=VectorStoreFile, ) + async def retrieve( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFile: + """ + Retrieves a vector store file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._get( + f"/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFile, + ) + + async def update( + self, + file_id: str, + *, + vector_store_id: str, + attributes: Dict[str, Union[bool, float, str, Iterable[object], object, None]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFile: + """ + Updates a vector store file. + + Args: + attributes: The updated key-value attributes to store with the file. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._post( + f"/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}", + body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFile, + ) + + def list( + self, + vector_store_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + filter: Literal["completed", "in_progress", "cancelled", "failed"] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[VectorStoreFile, AsyncOpenAICursorPage[VectorStoreFile]]: + """ + List files in a vector store. + + Args: + after: (Optional) A cursor for use in pagination. `after` is an object ID that defines + your place in the list. + + before: (Optional) A cursor for use in pagination. `before` is an object ID that defines + your place in the list. + + filter: (Optional) Filter by file status to only return files with the specified status. + + limit: (Optional) A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + + order: (Optional) Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return self._get_api_list( + f"/v1/openai/v1/vector_stores/{vector_store_id}/files", + page=AsyncOpenAICursorPage[VectorStoreFile], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "filter": filter, + "limit": limit, + "order": order, + }, + file_list_params.FileListParams, + ), + ), + model=VectorStoreFile, + ) + + async def delete( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileDeleteResponse: + """ + Delete a vector store file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._delete( + f"/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileDeleteResponse, + ) + + async def content( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileContentResponse: + """ + Retrieves the contents of a vector store file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._get( + f"/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}/content", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileContentResponse, + ) + class FilesResourceWithRawResponse: def __init__(self, files: FilesResource) -> None: @@ -172,6 +604,21 @@ def __init__(self, files: FilesResource) -> None: self.create = to_raw_response_wrapper( files.create, ) + self.retrieve = to_raw_response_wrapper( + files.retrieve, + ) + self.update = to_raw_response_wrapper( + files.update, + ) + self.list = to_raw_response_wrapper( + files.list, + ) + self.delete = to_raw_response_wrapper( + files.delete, + ) + self.content = to_raw_response_wrapper( + files.content, + ) class AsyncFilesResourceWithRawResponse: @@ -181,6 +628,21 @@ def __init__(self, files: AsyncFilesResource) -> None: self.create = async_to_raw_response_wrapper( files.create, ) + self.retrieve = async_to_raw_response_wrapper( + files.retrieve, + ) + self.update = async_to_raw_response_wrapper( + files.update, + ) + self.list = async_to_raw_response_wrapper( + files.list, + ) + self.delete = async_to_raw_response_wrapper( + files.delete, + ) + self.content = async_to_raw_response_wrapper( + files.content, + ) class FilesResourceWithStreamingResponse: @@ -190,6 +652,21 @@ def __init__(self, files: FilesResource) -> None: self.create = to_streamed_response_wrapper( files.create, ) + self.retrieve 
= to_streamed_response_wrapper( + files.retrieve, + ) + self.update = to_streamed_response_wrapper( + files.update, + ) + self.list = to_streamed_response_wrapper( + files.list, + ) + self.delete = to_streamed_response_wrapper( + files.delete, + ) + self.content = to_streamed_response_wrapper( + files.content, + ) class AsyncFilesResourceWithStreamingResponse: @@ -199,3 +676,18 @@ def __init__(self, files: AsyncFilesResource) -> None: self.create = async_to_streamed_response_wrapper( files.create, ) + self.retrieve = async_to_streamed_response_wrapper( + files.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + files.update, + ) + self.list = async_to_streamed_response_wrapper( + files.list, + ) + self.delete = async_to_streamed_response_wrapper( + files.delete, + ) + self.content = async_to_streamed_response_wrapper( + files.content, + ) diff --git a/src/llama_stack_client/resources/vector_stores/vector_stores.py b/src/llama_stack_client/resources/vector_stores/vector_stores.py index 7985cee9..bdc38e19 100644 --- a/src/llama_stack_client/resources/vector_stores/vector_stores.py +++ b/src/llama_stack_client/resources/vector_stores/vector_stores.py @@ -30,9 +30,9 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..._base_client import make_request_options +from ...pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage +from ..._base_client import AsyncPaginator, make_request_options from ...types.vector_store import VectorStore -from ...types.list_vector_stores_response import ListVectorStoresResponse from ...types.vector_store_delete_response import VectorStoreDeleteResponse from ...types.vector_store_search_response import VectorStoreSearchResponse @@ -66,15 +66,14 @@ def with_streaming_response(self) -> VectorStoresResourceWithStreamingResponse: def create( self, *, - name: str, chunking_strategy: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | NotGiven = NOT_GIVEN, embedding_dimension: int | NotGiven = NOT_GIVEN, embedding_model: str | NotGiven = NOT_GIVEN, expires_after: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, provider_id: str | NotGiven = NOT_GIVEN, - provider_vector_db_id: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -86,8 +85,6 @@ def create( Creates a vector store. Args: - name: A name for the vector store. - chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. @@ -102,9 +99,9 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. - provider_id: The ID of the provider to use for this vector store. + name: A name for the vector store. - provider_vector_db_id: The provider-specific vector database ID. + provider_id: The ID of the provider to use for this vector store. 
extra_headers: Send extra headers @@ -118,15 +115,14 @@ def create( "/v1/openai/v1/vector_stores", body=maybe_transform( { - "name": name, "chunking_strategy": chunking_strategy, "embedding_dimension": embedding_dimension, "embedding_model": embedding_model, "expires_after": expires_after, "file_ids": file_ids, "metadata": metadata, + "name": name, "provider_id": provider_id, - "provider_vector_db_id": provider_vector_db_id, }, vector_store_create_params.VectorStoreCreateParams, ), @@ -232,7 +228,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListVectorStoresResponse: + ) -> SyncOpenAICursorPage[VectorStore]: """Returns a list of vector stores. Args: @@ -258,8 +254,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - return self._get( + return self._get_api_list( "/v1/openai/v1/vector_stores", + page=SyncOpenAICursorPage[VectorStore], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -275,7 +272,7 @@ def list( vector_store_list_params.VectorStoreListParams, ), ), - cast_to=ListVectorStoresResponse, + model=VectorStore, ) def delete( @@ -403,15 +400,14 @@ def with_streaming_response(self) -> AsyncVectorStoresResourceWithStreamingRespo async def create( self, *, - name: str, chunking_strategy: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | NotGiven = NOT_GIVEN, embedding_dimension: int | NotGiven = NOT_GIVEN, embedding_model: str | NotGiven = NOT_GIVEN, expires_after: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, provider_id: str | NotGiven = NOT_GIVEN, - provider_vector_db_id: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -423,8 +419,6 @@ async def create( Creates a vector store. Args: - name: A name for the vector store. - chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. @@ -439,9 +433,9 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. - provider_id: The ID of the provider to use for this vector store. + name: A name for the vector store. - provider_vector_db_id: The provider-specific vector database ID. + provider_id: The ID of the provider to use for this vector store. 
extra_headers: Send extra headers @@ -455,15 +449,14 @@ async def create( "/v1/openai/v1/vector_stores", body=await async_maybe_transform( { - "name": name, "chunking_strategy": chunking_strategy, "embedding_dimension": embedding_dimension, "embedding_model": embedding_model, "expires_after": expires_after, "file_ids": file_ids, "metadata": metadata, + "name": name, "provider_id": provider_id, - "provider_vector_db_id": provider_vector_db_id, }, vector_store_create_params.VectorStoreCreateParams, ), @@ -556,7 +549,7 @@ async def update( cast_to=VectorStore, ) - async def list( + def list( self, *, after: str | NotGiven = NOT_GIVEN, @@ -569,7 +562,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListVectorStoresResponse: + ) -> AsyncPaginator[VectorStore, AsyncOpenAICursorPage[VectorStore]]: """Returns a list of vector stores. Args: @@ -595,14 +588,15 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - return await self._get( + return self._get_api_list( "/v1/openai/v1/vector_stores", + page=AsyncOpenAICursorPage[VectorStore], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=await async_maybe_transform( + query=maybe_transform( { "after": after, "before": before, @@ -612,7 +606,7 @@ async def list( vector_store_list_params.VectorStoreListParams, ), ), - cast_to=ListVectorStoresResponse, + model=VectorStore, ) async def delete( diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py index 7f742ba5..887f1706 100644 --- a/src/llama_stack_client/types/__init__.py +++ b/src/llama_stack_client/types/__init__.py @@ -47,18 +47,21 @@ from .tool_response import ToolResponse as ToolResponse from .inference_step import InferenceStep as InferenceStep from .tool_def_param import ToolDefParam as ToolDefParam +from .create_response import CreateResponse as CreateResponse from .response_object import ResponseObject as ResponseObject from .token_log_probs import TokenLogProbs as TokenLogProbs from .file_list_params import FileListParams as FileListParams from .shield_call_step import ShieldCallStep as ShieldCallStep from .span_with_status import SpanWithStatus as SpanWithStatus from .tool_list_params import ToolListParams as ToolListParams +from .agent_list_params import AgentListParams as AgentListParams from .evaluate_response import EvaluateResponse as EvaluateResponse from .post_training_job import PostTrainingJob as PostTrainingJob from .scoring_fn_params import ScoringFnParams as ScoringFnParams from .file_create_params import FileCreateParams as FileCreateParams from .tool_list_response import ToolListResponse as ToolListResponse from .agent_create_params import AgentCreateParams as AgentCreateParams +from .agent_list_response import AgentListResponse as AgentListResponse from .completion_response import CompletionResponse as CompletionResponse from .embeddings_response import EmbeddingsResponse as EmbeddingsResponse from .list_files_response import ListFilesResponse as ListFilesResponse @@ -96,6 +99,7 @@ from .shield_register_params import ShieldRegisterParams as ShieldRegisterParams from .tool_invocation_result import ToolInvocationResult as ToolInvocationResult from .vector_io_query_params import VectorIoQueryParams as VectorIoQueryParams +from .agent_retrieve_response import AgentRetrieveResponse as 
AgentRetrieveResponse from .benchmark_list_response import BenchmarkListResponse as BenchmarkListResponse from .dataset_iterrows_params import DatasetIterrowsParams as DatasetIterrowsParams from .dataset_register_params import DatasetRegisterParams as DatasetRegisterParams @@ -108,9 +112,11 @@ from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .list_benchmarks_response import ListBenchmarksResponse as ListBenchmarksResponse from .list_vector_dbs_response import ListVectorDBsResponse as ListVectorDBsResponse +from .moderation_create_params import ModerationCreateParams as ModerationCreateParams from .safety_run_shield_params import SafetyRunShieldParams as SafetyRunShieldParams from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams from .benchmark_register_params import BenchmarkRegisterParams as BenchmarkRegisterParams +from .dataset_appendrows_params import DatasetAppendrowsParams as DatasetAppendrowsParams from .dataset_iterrows_response import DatasetIterrowsResponse as DatasetIterrowsResponse from .dataset_register_response import DatasetRegisterResponse as DatasetRegisterResponse from .dataset_retrieve_response import DatasetRetrieveResponse as DatasetRetrieveResponse diff --git a/src/llama_stack_client/types/agent_create_response.py b/src/llama_stack_client/types/agent_create_response.py index 93651cb6..24fe864e 100644 --- a/src/llama_stack_client/types/agent_create_response.py +++ b/src/llama_stack_client/types/agent_create_response.py @@ -7,3 +7,4 @@ class AgentCreateResponse(BaseModel): agent_id: str + """Unique identifier for the created agent""" diff --git a/src/llama_stack_client/types/agent_list_params.py b/src/llama_stack_client/types/agent_list_params.py new file mode 100644 index 00000000..15da545b --- /dev/null +++ b/src/llama_stack_client/types/agent_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AgentListParams"] + + +class AgentListParams(TypedDict, total=False): + limit: int + """The number of agents to return.""" + + start_index: int + """The index to start the pagination from.""" diff --git a/src/llama_stack_client/types/agent_list_response.py b/src/llama_stack_client/types/agent_list_response.py new file mode 100644 index 00000000..d0640e21 --- /dev/null +++ b/src/llama_stack_client/types/agent_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional + +from .._models import BaseModel + +__all__ = ["AgentListResponse"] + + +class AgentListResponse(BaseModel): + data: List[Dict[str, Union[bool, float, str, List[object], object, None]]] + """The list of items for the current page""" + + has_more: bool + """Whether there are more items available after this set""" + + url: Optional[str] = None + """The URL for accessing this list""" diff --git a/src/llama_stack_client/types/agent_retrieve_response.py b/src/llama_stack_client/types/agent_retrieve_response.py new file mode 100644 index 00000000..1671a9fc --- /dev/null +++ b/src/llama_stack_client/types/agent_retrieve_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from datetime import datetime + +from .._models import BaseModel +from .shared.agent_config import AgentConfig + +__all__ = ["AgentRetrieveResponse"] + + +class AgentRetrieveResponse(BaseModel): + agent_config: AgentConfig + """Configuration settings for the agent""" + + agent_id: str + """Unique identifier for the agent""" + + created_at: datetime + """Timestamp when the agent was created""" diff --git a/src/llama_stack_client/types/agents/__init__.py b/src/llama_stack_client/types/agents/__init__.py index 30355cbf..f4f48353 100644 --- a/src/llama_stack_client/types/agents/__init__.py +++ b/src/llama_stack_client/types/agents/__init__.py @@ -6,8 +6,10 @@ from .session import Session as Session from .turn_create_params import TurnCreateParams as TurnCreateParams from .turn_resume_params import TurnResumeParams as TurnResumeParams +from .session_list_params import SessionListParams as SessionListParams from .turn_response_event import TurnResponseEvent as TurnResponseEvent from .session_create_params import SessionCreateParams as SessionCreateParams +from .session_list_response import SessionListResponse as SessionListResponse from .step_retrieve_response import StepRetrieveResponse as StepRetrieveResponse from .session_create_response import SessionCreateResponse as SessionCreateResponse from .session_retrieve_params import SessionRetrieveParams as SessionRetrieveParams diff --git a/src/llama_stack_client/types/agents/agent_turn_response_stream_chunk.py b/src/llama_stack_client/types/agents/agent_turn_response_stream_chunk.py index c488ba81..1ce1b8a7 100644 --- a/src/llama_stack_client/types/agents/agent_turn_response_stream_chunk.py +++ b/src/llama_stack_client/types/agents/agent_turn_response_stream_chunk.py @@ -8,3 +8,4 @@ class AgentTurnResponseStreamChunk(BaseModel): event: TurnResponseEvent + """Individual event in the agent turn response stream""" diff --git a/src/llama_stack_client/types/agents/session.py b/src/llama_stack_client/types/agents/session.py index 707c4cbf..1d3d697e 100644 --- a/src/llama_stack_client/types/agents/session.py +++ b/src/llama_stack_client/types/agents/session.py @@ -11,9 +11,13 @@ class Session(BaseModel): session_id: str + """Unique identifier for the conversation session""" session_name: str + """Human-readable name for the session""" started_at: datetime + """Timestamp when the session was created""" turns: List[Turn] + """List of all turns that have occurred in this session""" diff --git a/src/llama_stack_client/types/agents/session_create_response.py b/src/llama_stack_client/types/agents/session_create_response.py index abf18665..e7fe2a06 100644 --- a/src/llama_stack_client/types/agents/session_create_response.py +++ b/src/llama_stack_client/types/agents/session_create_response.py @@ -7,3 +7,4 @@ class SessionCreateResponse(BaseModel): session_id: str + """Unique identifier for the created session""" diff --git a/src/llama_stack_client/types/agents/session_list_params.py b/src/llama_stack_client/types/agents/session_list_params.py new file mode 100644 index 00000000..0644d1ae --- /dev/null +++ b/src/llama_stack_client/types/agents/session_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["SessionListParams"] + + +class SessionListParams(TypedDict, total=False): + limit: int + """The number of sessions to return.""" + + start_index: int + """The index to start the pagination from.""" diff --git a/src/llama_stack_client/types/agents/session_list_response.py b/src/llama_stack_client/types/agents/session_list_response.py new file mode 100644 index 00000000..e70ecc46 --- /dev/null +++ b/src/llama_stack_client/types/agents/session_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional + +from ..._models import BaseModel + +__all__ = ["SessionListResponse"] + + +class SessionListResponse(BaseModel): + data: List[Dict[str, Union[bool, float, str, List[object], object, None]]] + """The list of items for the current page""" + + has_more: bool + """Whether there are more items available after this set""" + + url: Optional[str] = None + """The URL for accessing this list""" diff --git a/src/llama_stack_client/types/agents/step_retrieve_response.py b/src/llama_stack_client/types/agents/step_retrieve_response.py index fcf2044b..10fc13d2 100644 --- a/src/llama_stack_client/types/agents/step_retrieve_response.py +++ b/src/llama_stack_client/types/agents/step_retrieve_response.py @@ -20,4 +20,4 @@ class StepRetrieveResponse(BaseModel): step: Step - """An inference step in an agent turn.""" + """The complete step data and execution details""" diff --git a/src/llama_stack_client/types/agents/turn.py b/src/llama_stack_client/types/agents/turn.py index aa8eeefe..386d7f78 100644 --- a/src/llama_stack_client/types/agents/turn.py +++ b/src/llama_stack_client/types/agents/turn.py @@ -38,6 +38,7 @@ class OutputAttachmentContentImageContentItemImageURL(BaseModel): uri: str + """The URL string pointing to the resource""" class OutputAttachmentContentImageContentItemImage(BaseModel): @@ -69,6 +70,7 @@ class OutputAttachmentContentTextContentItem(BaseModel): class OutputAttachmentContentURL(BaseModel): uri: str + """The URL string pointing to the resource""" OutputAttachmentContent: TypeAlias = Union[ @@ -90,18 +92,25 @@ class OutputAttachment(BaseModel): class Turn(BaseModel): input_messages: List[InputMessage] + """List of messages that initiated this turn""" output_message: CompletionMessage - """A message containing the model's (assistant) response in a chat conversation.""" + """The model's generated response containing content and metadata""" session_id: str + """Unique identifier for the conversation session""" started_at: datetime + """Timestamp when the turn began""" steps: List[Step] + """Ordered list of processing steps executed during this turn""" turn_id: str + """Unique identifier for the turn within a session""" completed_at: Optional[datetime] = None + """(Optional) Timestamp when the turn finished, if completed""" output_attachments: Optional[List[OutputAttachment]] = None + """(Optional) Files or media attached to the agent's response""" diff --git a/src/llama_stack_client/types/agents/turn_create_params.py b/src/llama_stack_client/types/agents/turn_create_params.py index 01e0f64b..fbb8de8e 100644 --- a/src/llama_stack_client/types/agents/turn_create_params.py +++ b/src/llama_stack_client/types/agents/turn_create_params.py @@ -54,6 +54,7 @@ class TurnCreateParamsBase(TypedDict, total=False): class DocumentContentImageContentItemImageURL(TypedDict, total=False): uri: 
Required[str] + """The URL string pointing to the resource""" class DocumentContentImageContentItemImage(TypedDict, total=False): @@ -85,6 +86,7 @@ class DocumentContentTextContentItem(TypedDict, total=False): class DocumentContentURL(TypedDict, total=False): uri: Required[str] + """The URL string pointing to the resource""" DocumentContent: TypeAlias = Union[ diff --git a/src/llama_stack_client/types/agents/turn_response_event.py b/src/llama_stack_client/types/agents/turn_response_event.py index c6a42d75..df213246 100644 --- a/src/llama_stack_client/types/agents/turn_response_event.py +++ b/src/llama_stack_client/types/agents/turn_response_event.py @@ -8,3 +8,4 @@ class TurnResponseEvent(BaseModel): payload: TurnResponseEventPayload + """Event-specific payload containing event data""" diff --git a/src/llama_stack_client/types/agents/turn_response_event_payload.py b/src/llama_stack_client/types/agents/turn_response_event_payload.py index 345a7ec4..1844c61e 100644 --- a/src/llama_stack_client/types/agents/turn_response_event_payload.py +++ b/src/llama_stack_client/types/agents/turn_response_event_payload.py @@ -26,24 +26,30 @@ class AgentTurnResponseStepStartPayload(BaseModel): event_type: Literal["step_start"] + """Type of event being reported""" step_id: str + """Unique identifier for the step within a turn""" step_type: Literal["inference", "tool_execution", "shield_call", "memory_retrieval"] - """Type of the step in an agent turn.""" + """Type of step being executed""" metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Additional metadata for the step""" class AgentTurnResponseStepProgressPayload(BaseModel): delta: ContentDelta + """Incremental content changes during step execution""" event_type: Literal["step_progress"] + """Type of event being reported""" step_id: str + """Unique identifier for the step within a turn""" step_type: Literal["inference", "tool_execution", "shield_call", "memory_retrieval"] - """Type of the step in an agent turn.""" + """Type of step being executed""" AgentTurnResponseStepCompletePayloadStepDetails: TypeAlias = Annotated[ @@ -54,34 +60,40 @@ class AgentTurnResponseStepProgressPayload(BaseModel): class AgentTurnResponseStepCompletePayload(BaseModel): event_type: Literal["step_complete"] + """Type of event being reported""" step_details: AgentTurnResponseStepCompletePayloadStepDetails - """An inference step in an agent turn.""" + """Complete details of the executed step""" step_id: str + """Unique identifier for the step within a turn""" step_type: Literal["inference", "tool_execution", "shield_call", "memory_retrieval"] - """Type of the step in an agent turn.""" + """Type of step being executed""" class AgentTurnResponseTurnStartPayload(BaseModel): event_type: Literal["turn_start"] + """Type of event being reported""" turn_id: str + """Unique identifier for the turn within a session""" class AgentTurnResponseTurnCompletePayload(BaseModel): event_type: Literal["turn_complete"] + """Type of event being reported""" turn: Turn - """A single turn in an interaction with an Agentic System.""" + """Complete turn data including all steps and results""" class AgentTurnResponseTurnAwaitingInputPayload(BaseModel): event_type: Literal["turn_awaiting_input"] + """Type of event being reported""" turn: Turn - """A single turn in an interaction with an Agentic System.""" + """Turn data when waiting for external tool responses""" TurnResponseEventPayload: TypeAlias = Annotated[ diff --git 
a/src/llama_stack_client/types/algorithm_config_param.py b/src/llama_stack_client/types/algorithm_config_param.py index 3f3c0cac..f2856526 100644 --- a/src/llama_stack_client/types/algorithm_config_param.py +++ b/src/llama_stack_client/types/algorithm_config_param.py @@ -10,28 +10,39 @@ class LoraFinetuningConfig(TypedDict, total=False): alpha: Required[int] + """LoRA scaling parameter that controls adaptation strength""" apply_lora_to_mlp: Required[bool] + """Whether to apply LoRA to MLP layers""" apply_lora_to_output: Required[bool] + """Whether to apply LoRA to output projection layers""" lora_attn_modules: Required[List[str]] + """List of attention module names to apply LoRA to""" rank: Required[int] + """Rank of the LoRA adaptation (lower rank = fewer parameters)""" type: Required[Literal["LoRA"]] + """Algorithm type identifier, always "LoRA" """ quantize_base: bool + """(Optional) Whether to quantize the base model weights""" use_dora: bool + """(Optional) Whether to use DoRA (Weight-Decomposed Low-Rank Adaptation)""" class QatFinetuningConfig(TypedDict, total=False): group_size: Required[int] + """Size of groups for grouped quantization""" quantizer_name: Required[str] + """Name of the quantization algorithm to use""" type: Required[Literal["QAT"]] + """Algorithm type identifier, always "QAT" """ AlgorithmConfigParam: TypeAlias = Union[LoraFinetuningConfig, QatFinetuningConfig] diff --git a/src/llama_stack_client/types/benchmark.py b/src/llama_stack_client/types/benchmark.py index e0b1ce9e..eb6dde75 100644 --- a/src/llama_stack_client/types/benchmark.py +++ b/src/llama_stack_client/types/benchmark.py @@ -10,15 +10,19 @@ class Benchmark(BaseModel): dataset_id: str + """Identifier of the dataset to use for the benchmark evaluation""" identifier: str metadata: Dict[str, Union[bool, float, str, List[object], object, None]] + """Metadata for this evaluation task""" provider_id: str scoring_functions: List[str] + """List of scoring function identifiers to apply during evaluation""" type: Literal["benchmark"] + """The resource type, always benchmark""" provider_resource_id: Optional[str] = None diff --git a/src/llama_stack_client/types/chat/completion_create_params.py b/src/llama_stack_client/types/chat/completion_create_params.py index 2c9d26f7..263c1c78 100644 --- a/src/llama_stack_client/types/chat/completion_create_params.py +++ b/src/llama_stack_client/types/chat/completion_create_params.py @@ -13,28 +13,18 @@ "MessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", "MessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", "MessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", + "MessageOpenAIUserMessageParamContentUnionMember1OpenAIFile", + "MessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile", "MessageOpenAISystemMessageParam", "MessageOpenAISystemMessageParamContentUnionMember1", - "MessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "MessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "MessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "MessageOpenAIAssistantMessageParam", "MessageOpenAIAssistantMessageParamContentUnionMember1", - "MessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - 
"MessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "MessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "MessageOpenAIAssistantMessageParamToolCall", "MessageOpenAIAssistantMessageParamToolCallFunction", "MessageOpenAIToolMessageParam", "MessageOpenAIToolMessageParamContentUnionMember1", - "MessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "MessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "MessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "MessageOpenAIDeveloperMessageParam", "MessageOpenAIDeveloperMessageParamContentUnionMember1", - "MessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "MessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "MessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "ResponseFormat", "ResponseFormatOpenAIResponseFormatText", "ResponseFormatOpenAIResponseFormatJsonSchema", @@ -119,29 +109,53 @@ class CompletionCreateParamsBase(TypedDict, total=False): class MessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(TypedDict, total=False): text: Required[str] + """The text content of the message""" type: Required[Literal["text"]] + """Must be "text" to identify this as text content""" class MessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( TypedDict, total=False ): url: Required[str] + """URL of the image to include in the message""" detail: str + """(Optional) Level of detail for image processing. 
+ + Can be "low", "high", or "auto" + """ class MessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(TypedDict, total=False): image_url: Required[ MessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL ] + """Image URL specification and processing details""" type: Required[Literal["image_url"]] + """Must be "image_url" to identify this as image content""" + + +class MessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile(TypedDict, total=False): + file_data: str + + file_id: str + + filename: str + + +class MessageOpenAIUserMessageParamContentUnionMember1OpenAIFile(TypedDict, total=False): + file: Required[MessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile] + + type: Required[Literal["file"]] MessageOpenAIUserMessageParamContentUnionMember1: TypeAlias = Union[ MessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, MessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, + MessageOpenAIUserMessageParamContentUnionMember1OpenAIFile, ] @@ -156,36 +170,12 @@ class MessageOpenAIUserMessageParam(TypedDict, total=False): """(Optional) The name of the user message participant.""" -class MessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam( - TypedDict, total=False -): +class MessageOpenAISystemMessageParamContentUnionMember1(TypedDict, total=False): text: Required[str] + """The text content of the message""" type: Required[Literal["text"]] - - -class MessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - TypedDict, total=False -): - url: Required[str] - - detail: str - - -class MessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - TypedDict, total=False -): - image_url: Required[ - MessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ] - - type: Required[Literal["image_url"]] - - -MessageOpenAISystemMessageParamContentUnionMember1: TypeAlias = Union[ - MessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - MessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, -] + """Must be "text" to identify this as text content""" class MessageOpenAISystemMessageParam(TypedDict, total=False): @@ -204,52 +194,34 @@ class MessageOpenAISystemMessageParam(TypedDict, total=False): """(Optional) The name of the system message participant.""" -class MessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam( - TypedDict, total=False -): +class MessageOpenAIAssistantMessageParamContentUnionMember1(TypedDict, total=False): text: Required[str] + """The text content of the message""" type: Required[Literal["text"]] - - -class MessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - TypedDict, total=False -): - url: Required[str] - - detail: str - - -class MessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - TypedDict, total=False -): - image_url: Required[ - MessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ] - - type: Required[Literal["image_url"]] - - -MessageOpenAIAssistantMessageParamContentUnionMember1: TypeAlias = Union[ - MessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - 
MessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, -] + """Must be "text" to identify this as text content""" class MessageOpenAIAssistantMessageParamToolCallFunction(TypedDict, total=False): arguments: str + """(Optional) Arguments to pass to the function as a JSON string""" name: str + """(Optional) Name of the function to call""" class MessageOpenAIAssistantMessageParamToolCall(TypedDict, total=False): type: Required[Literal["function"]] + """Must be "function" to identify this as a function call""" id: str + """(Optional) Unique identifier for the tool call""" function: MessageOpenAIAssistantMessageParamToolCallFunction + """(Optional) Function call details""" index: int + """(Optional) Index of the tool call in the list""" class MessageOpenAIAssistantMessageParam(TypedDict, total=False): @@ -266,32 +238,12 @@ class MessageOpenAIAssistantMessageParam(TypedDict, total=False): """List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object.""" -class MessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(TypedDict, total=False): +class MessageOpenAIToolMessageParamContentUnionMember1(TypedDict, total=False): text: Required[str] + """The text content of the message""" type: Required[Literal["text"]] - - -class MessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - TypedDict, total=False -): - url: Required[str] - - detail: str - - -class MessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(TypedDict, total=False): - image_url: Required[ - MessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ] - - type: Required[Literal["image_url"]] - - -MessageOpenAIToolMessageParamContentUnionMember1: TypeAlias = Union[ - MessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - MessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, -] + """Must be "text" to identify this as text content""" class MessageOpenAIToolMessageParam(TypedDict, total=False): @@ -305,36 +257,12 @@ class MessageOpenAIToolMessageParam(TypedDict, total=False): """Unique identifier for the tool call this response is for""" -class MessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam( - TypedDict, total=False -): +class MessageOpenAIDeveloperMessageParamContentUnionMember1(TypedDict, total=False): text: Required[str] + """The text content of the message""" type: Required[Literal["text"]] - - -class MessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - TypedDict, total=False -): - url: Required[str] - - detail: str - - -class MessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - TypedDict, total=False -): - image_url: Required[ - MessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ] - - type: Required[Literal["image_url"]] - - -MessageOpenAIDeveloperMessageParamContentUnionMember1: TypeAlias = Union[ - MessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - MessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, -] + """Must be "text" to identify this as text content""" class MessageOpenAIDeveloperMessageParam(TypedDict, total=False): @@ -359,26 +287,34 @@ class 
MessageOpenAIDeveloperMessageParam(TypedDict, total=False): class ResponseFormatOpenAIResponseFormatText(TypedDict, total=False): type: Required[Literal["text"]] + """Must be "text" to indicate plain text response format""" class ResponseFormatOpenAIResponseFormatJsonSchemaJsonSchema(TypedDict, total=False): name: Required[str] + """Name of the schema""" description: str + """(Optional) Description of the schema""" schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]] + """(Optional) The JSON schema definition""" strict: bool + """(Optional) Whether to enforce strict adherence to the schema""" class ResponseFormatOpenAIResponseFormatJsonSchema(TypedDict, total=False): json_schema: Required[ResponseFormatOpenAIResponseFormatJsonSchemaJsonSchema] + """The JSON schema specification for the response""" type: Required[Literal["json_schema"]] + """Must be "json_schema" to indicate structured JSON response format""" class ResponseFormatOpenAIResponseFormatJsonObject(TypedDict, total=False): type: Required[Literal["json_object"]] + """Must be "json_object" to indicate generic JSON object response format""" ResponseFormat: TypeAlias = Union[ diff --git a/src/llama_stack_client/types/chat/completion_create_response.py b/src/llama_stack_client/types/chat/completion_create_response.py index 5c8eb51c..7c6b2299 100644 --- a/src/llama_stack_client/types/chat/completion_create_response.py +++ b/src/llama_stack_client/types/chat/completion_create_response.py @@ -17,28 +17,18 @@ "OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", "OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", "OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", + "OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile", + "OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile", "OpenAIChatCompletionChoiceMessageOpenAISystemMessageParam", "OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1", - "OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParam", "OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1", - "OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamToolCall", "OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamToolCallFunction", "OpenAIChatCompletionChoiceMessageOpenAIToolMessageParam", "OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1", - "OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - 
"OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParam", "OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1", - "OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "OpenAIChatCompletionChoiceLogprobs", "OpenAIChatCompletionChoiceLogprobsContent", "OpenAIChatCompletionChoiceLogprobsContentTopLogprob", @@ -51,30 +41,54 @@ class OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1 BaseModel ): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" class OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( BaseModel ): url: str + """URL of the image to include in the message""" detail: Optional[str] = None + """(Optional) Level of detail for image processing. + + Can be "low", "high", or "auto" + """ class OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( BaseModel ): image_url: OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL + """Image URL specification and processing details""" type: Literal["image_url"] + """Must be "image_url" to identify this as image content""" + + +class OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile(BaseModel): + file_data: Optional[str] = None + + file_id: Optional[str] = None + + filename: Optional[str] = None + + +class OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile(BaseModel): + file: OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile + + type: Literal["file"] OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1: TypeAlias = Annotated[ Union[ OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, + OpenAIChatCompletionChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile, ], PropertyInfo(discriminator="type"), ] @@ -91,37 +105,12 @@ class OpenAIChatCompletionChoiceMessageOpenAIUserMessageParam(BaseModel): """(Optional) The name of the user message participant.""" -class OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam( - BaseModel -): +class OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class 
OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - BaseModel -): - image_url: OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - - type: Literal["image_url"] - - -OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - OpenAIChatCompletionChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class OpenAIChatCompletionChoiceMessageOpenAISystemMessageParam(BaseModel): @@ -140,53 +129,34 @@ class OpenAIChatCompletionChoiceMessageOpenAISystemMessageParam(BaseModel): """(Optional) The name of the system message participant.""" -class OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam( - BaseModel -): +class OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - BaseModel -): - image_url: OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - - type: Literal["image_url"] - - -OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamToolCallFunction(BaseModel): arguments: Optional[str] = None + """(Optional) Arguments to pass to the function as a JSON string""" name: Optional[str] = None + """(Optional) Name of the function to call""" class OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamToolCall(BaseModel): type: Literal["function"] + """Must be "function" to identify this as a function call""" id: Optional[str] = None + """(Optional) Unique identifier for the tool call""" function: Optional[OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParamToolCallFunction] = None + """(Optional) Function call details""" index: Optional[int] = None + """(Optional) Index of the tool call in the list""" class OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParam(BaseModel): @@ -205,37 +175,12 @@ class OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParam(BaseModel): """List of tool calls. 
Each tool call is an OpenAIChatCompletionToolCall object.""" -class OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam( - BaseModel -): +class OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - BaseModel -): - image_url: OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - - type: Literal["image_url"] - - -OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - OpenAIChatCompletionChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class OpenAIChatCompletionChoiceMessageOpenAIToolMessageParam(BaseModel): @@ -249,37 +194,12 @@ class OpenAIChatCompletionChoiceMessageOpenAIToolMessageParam(BaseModel): """Unique identifier for the tool call this response is for""" -class OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam( - BaseModel -): +class OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - BaseModel -): - image_url: OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - - type: Literal["image_url"] - - -OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class OpenAIChatCompletionChoiceMessageOpenAIDeveloperMessageParam(BaseModel): diff --git a/src/llama_stack_client/types/chat/completion_list_response.py b/src/llama_stack_client/types/chat/completion_list_response.py index d3b580a1..e448e35c 100644 --- a/src/llama_stack_client/types/chat/completion_list_response.py +++ b/src/llama_stack_client/types/chat/completion_list_response.py @@ -8,105 +8,106 @@ __all__ = [ "CompletionListResponse", - "Data", - "DataChoice", - "DataChoiceMessage", - "DataChoiceMessageOpenAIUserMessageParam", - "DataChoiceMessageOpenAIUserMessageParamContentUnionMember1", - 
"DataChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "DataChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "DataChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", - "DataChoiceMessageOpenAISystemMessageParam", - "DataChoiceMessageOpenAISystemMessageParamContentUnionMember1", - "DataChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "DataChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "DataChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", - "DataChoiceMessageOpenAIAssistantMessageParam", - "DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1", - "DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", - "DataChoiceMessageOpenAIAssistantMessageParamToolCall", - "DataChoiceMessageOpenAIAssistantMessageParamToolCallFunction", - "DataChoiceMessageOpenAIToolMessageParam", - "DataChoiceMessageOpenAIToolMessageParamContentUnionMember1", - "DataChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "DataChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "DataChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", - "DataChoiceMessageOpenAIDeveloperMessageParam", - "DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1", - "DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", - "DataChoiceLogprobs", - "DataChoiceLogprobsContent", - "DataChoiceLogprobsContentTopLogprob", - "DataChoiceLogprobsRefusal", - "DataChoiceLogprobsRefusalTopLogprob", - "DataInputMessage", - "DataInputMessageOpenAIUserMessageParam", - "DataInputMessageOpenAIUserMessageParamContentUnionMember1", - "DataInputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "DataInputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "DataInputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", - "DataInputMessageOpenAISystemMessageParam", - "DataInputMessageOpenAISystemMessageParamContentUnionMember1", - "DataInputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "DataInputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "DataInputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", - "DataInputMessageOpenAIAssistantMessageParam", - "DataInputMessageOpenAIAssistantMessageParamContentUnionMember1", - "DataInputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - 
"DataInputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "DataInputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", - "DataInputMessageOpenAIAssistantMessageParamToolCall", - "DataInputMessageOpenAIAssistantMessageParamToolCallFunction", - "DataInputMessageOpenAIToolMessageParam", - "DataInputMessageOpenAIToolMessageParamContentUnionMember1", - "DataInputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "DataInputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "DataInputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", - "DataInputMessageOpenAIDeveloperMessageParam", - "DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1", - "DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", + "Choice", + "ChoiceMessage", + "ChoiceMessageOpenAIUserMessageParam", + "ChoiceMessageOpenAIUserMessageParamContentUnionMember1", + "ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", + "ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", + "ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", + "ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile", + "ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile", + "ChoiceMessageOpenAISystemMessageParam", + "ChoiceMessageOpenAISystemMessageParamContentUnionMember1", + "ChoiceMessageOpenAIAssistantMessageParam", + "ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1", + "ChoiceMessageOpenAIAssistantMessageParamToolCall", + "ChoiceMessageOpenAIAssistantMessageParamToolCallFunction", + "ChoiceMessageOpenAIToolMessageParam", + "ChoiceMessageOpenAIToolMessageParamContentUnionMember1", + "ChoiceMessageOpenAIDeveloperMessageParam", + "ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1", + "ChoiceLogprobs", + "ChoiceLogprobsContent", + "ChoiceLogprobsContentTopLogprob", + "ChoiceLogprobsRefusal", + "ChoiceLogprobsRefusalTopLogprob", + "InputMessage", + "InputMessageOpenAIUserMessageParam", + "InputMessageOpenAIUserMessageParamContentUnionMember1", + "InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", + "InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", + "InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", + "InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile", + "InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile", + "InputMessageOpenAISystemMessageParam", + "InputMessageOpenAISystemMessageParamContentUnionMember1", + "InputMessageOpenAIAssistantMessageParam", + "InputMessageOpenAIAssistantMessageParamContentUnionMember1", + "InputMessageOpenAIAssistantMessageParamToolCall", + "InputMessageOpenAIAssistantMessageParamToolCallFunction", + "InputMessageOpenAIToolMessageParam", + "InputMessageOpenAIToolMessageParamContentUnionMember1", + "InputMessageOpenAIDeveloperMessageParam", + 
"InputMessageOpenAIDeveloperMessageParamContentUnionMember1", ] -class DataChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" -class DataChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( +class ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( BaseModel ): url: str + """URL of the image to include in the message""" detail: Optional[str] = None + """(Optional) Level of detail for image processing. + + Can be "low", "high", or "auto" + """ -class DataChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ( - DataChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) +class ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): + image_url: ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL + """Image URL specification and processing details""" type: Literal["image_url"] + """Must be "image_url" to identify this as image content""" + + +class ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile(BaseModel): + file_data: Optional[str] = None + + file_id: Optional[str] = None + + filename: Optional[str] = None -DataChoiceMessageOpenAIUserMessageParamContentUnionMember1: TypeAlias = Annotated[ +class ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile(BaseModel): + file: ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile + + type: Literal["file"] + + +ChoiceMessageOpenAIUserMessageParamContentUnionMember1: TypeAlias = Annotated[ Union[ - DataChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - DataChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, + ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, + ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, + ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile, ], PropertyInfo(discriminator="type"), ] -class DataChoiceMessageOpenAIUserMessageParam(BaseModel): - content: Union[str, List[DataChoiceMessageOpenAIUserMessageParamContentUnionMember1]] +class ChoiceMessageOpenAIUserMessageParam(BaseModel): + content: Union[str, List[ChoiceMessageOpenAIUserMessageParamContentUnionMember1]] """The content of the message, which can include text and other media""" role: Literal["user"] @@ -116,39 +117,16 @@ class DataChoiceMessageOpenAIUserMessageParam(BaseModel): """(Optional) The name of the user message participant.""" -class DataChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class ChoiceMessageOpenAISystemMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" -class DataChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: 
Optional[str] = None - - -class DataChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ( - DataChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -DataChoiceMessageOpenAISystemMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - DataChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - DataChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] - - -class DataChoiceMessageOpenAISystemMessageParam(BaseModel): - content: Union[str, List[DataChoiceMessageOpenAISystemMessageParamContentUnionMember1]] +class ChoiceMessageOpenAISystemMessageParam(BaseModel): + content: Union[str, List[ChoiceMessageOpenAISystemMessageParamContentUnionMember1]] """The content of the "system prompt". If multiple system messages are provided, they are concatenated. The underlying @@ -163,104 +141,60 @@ class DataChoiceMessageOpenAISystemMessageParam(BaseModel): """(Optional) The name of the system message participant.""" -class DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam( - BaseModel -): +class ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" -class DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - BaseModel -): - image_url: ( - DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] - - -class DataChoiceMessageOpenAIAssistantMessageParamToolCallFunction(BaseModel): +class ChoiceMessageOpenAIAssistantMessageParamToolCallFunction(BaseModel): arguments: Optional[str] = None + """(Optional) Arguments to pass to the function as a JSON string""" name: Optional[str] = None + """(Optional) Name of the function to call""" -class DataChoiceMessageOpenAIAssistantMessageParamToolCall(BaseModel): +class ChoiceMessageOpenAIAssistantMessageParamToolCall(BaseModel): type: Literal["function"] + """Must be "function" to identify this as a function call""" id: Optional[str] = None + """(Optional) Unique identifier for the tool call""" - function: Optional[DataChoiceMessageOpenAIAssistantMessageParamToolCallFunction] = None + function: Optional[ChoiceMessageOpenAIAssistantMessageParamToolCallFunction] = None + """(Optional) Function call details""" index: Optional[int] = None + """(Optional) Index of the tool call in the list""" -class DataChoiceMessageOpenAIAssistantMessageParam(BaseModel): +class ChoiceMessageOpenAIAssistantMessageParam(BaseModel): role: Literal["assistant"] """Must be "assistant" to identify this as the model's 
response""" - content: Union[str, List[DataChoiceMessageOpenAIAssistantMessageParamContentUnionMember1], None] = None + content: Union[str, List[ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1], None] = None """The content of the model's response""" name: Optional[str] = None """(Optional) The name of the assistant message participant.""" - tool_calls: Optional[List[DataChoiceMessageOpenAIAssistantMessageParamToolCall]] = None + tool_calls: Optional[List[ChoiceMessageOpenAIAssistantMessageParamToolCall]] = None """List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object.""" -class DataChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class ChoiceMessageOpenAIToolMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" -class DataChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class DataChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ( - DataChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -DataChoiceMessageOpenAIToolMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - DataChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - DataChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] - - -class DataChoiceMessageOpenAIToolMessageParam(BaseModel): - content: Union[str, List[DataChoiceMessageOpenAIToolMessageParamContentUnionMember1]] +class ChoiceMessageOpenAIToolMessageParam(BaseModel): + content: Union[str, List[ChoiceMessageOpenAIToolMessageParamContentUnionMember1]] """The response content from the tool""" role: Literal["tool"] @@ -270,43 +204,16 @@ class DataChoiceMessageOpenAIToolMessageParam(BaseModel): """Unique identifier for the tool call this response is for""" -class DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam( - BaseModel -): +class ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" -class DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - BaseModel -): - image_url: ( - DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] - - -class DataChoiceMessageOpenAIDeveloperMessageParam(BaseModel): - content: Union[str, 
List[DataChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1]] +class ChoiceMessageOpenAIDeveloperMessageParam(BaseModel): + content: Union[str, List[ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1]] """The content of the developer message""" role: Literal["developer"] @@ -316,19 +223,19 @@ class DataChoiceMessageOpenAIDeveloperMessageParam(BaseModel): """(Optional) The name of the developer message participant.""" -DataChoiceMessage: TypeAlias = Annotated[ +ChoiceMessage: TypeAlias = Annotated[ Union[ - DataChoiceMessageOpenAIUserMessageParam, - DataChoiceMessageOpenAISystemMessageParam, - DataChoiceMessageOpenAIAssistantMessageParam, - DataChoiceMessageOpenAIToolMessageParam, - DataChoiceMessageOpenAIDeveloperMessageParam, + ChoiceMessageOpenAIUserMessageParam, + ChoiceMessageOpenAISystemMessageParam, + ChoiceMessageOpenAIAssistantMessageParam, + ChoiceMessageOpenAIToolMessageParam, + ChoiceMessageOpenAIDeveloperMessageParam, ], PropertyInfo(discriminator="role"), ] -class DataChoiceLogprobsContentTopLogprob(BaseModel): +class ChoiceLogprobsContentTopLogprob(BaseModel): token: str logprob: float @@ -336,17 +243,17 @@ class DataChoiceLogprobsContentTopLogprob(BaseModel): bytes: Optional[List[int]] = None -class DataChoiceLogprobsContent(BaseModel): +class ChoiceLogprobsContent(BaseModel): token: str logprob: float - top_logprobs: List[DataChoiceLogprobsContentTopLogprob] + top_logprobs: List[ChoiceLogprobsContentTopLogprob] bytes: Optional[List[int]] = None -class DataChoiceLogprobsRefusalTopLogprob(BaseModel): +class ChoiceLogprobsRefusalTopLogprob(BaseModel): token: str logprob: float @@ -354,71 +261,91 @@ class DataChoiceLogprobsRefusalTopLogprob(BaseModel): bytes: Optional[List[int]] = None -class DataChoiceLogprobsRefusal(BaseModel): +class ChoiceLogprobsRefusal(BaseModel): token: str logprob: float - top_logprobs: List[DataChoiceLogprobsRefusalTopLogprob] + top_logprobs: List[ChoiceLogprobsRefusalTopLogprob] bytes: Optional[List[int]] = None -class DataChoiceLogprobs(BaseModel): - content: Optional[List[DataChoiceLogprobsContent]] = None +class ChoiceLogprobs(BaseModel): + content: Optional[List[ChoiceLogprobsContent]] = None """(Optional) The log probabilities for the tokens in the message""" - refusal: Optional[List[DataChoiceLogprobsRefusal]] = None + refusal: Optional[List[ChoiceLogprobsRefusal]] = None """(Optional) The log probabilities for the tokens in the message""" -class DataChoice(BaseModel): +class Choice(BaseModel): finish_reason: str """The reason the model stopped generating""" index: int """The index of the choice""" - message: DataChoiceMessage + message: ChoiceMessage """The message from the model""" - logprobs: Optional[DataChoiceLogprobs] = None + logprobs: Optional[ChoiceLogprobs] = None """(Optional) The log probabilities for the tokens in the message""" -class DataInputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" -class DataInputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): +class InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL(BaseModel): url: str + """URL of the image to include in the message""" detail: Optional[str] = None 
+ """(Optional) Level of detail for image processing. + + Can be "low", "high", or "auto" + """ -class DataInputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ( - DataInputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) +class InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): + image_url: InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL + """Image URL specification and processing details""" type: Literal["image_url"] + """Must be "image_url" to identify this as image content""" + + +class InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile(BaseModel): + file_data: Optional[str] = None + + file_id: Optional[str] = None + + filename: Optional[str] = None + +class InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile(BaseModel): + file: InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile -DataInputMessageOpenAIUserMessageParamContentUnionMember1: TypeAlias = Annotated[ + type: Literal["file"] + + +InputMessageOpenAIUserMessageParamContentUnionMember1: TypeAlias = Annotated[ Union[ - DataInputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - DataInputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, + InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, + InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, + InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile, ], PropertyInfo(discriminator="type"), ] -class DataInputMessageOpenAIUserMessageParam(BaseModel): - content: Union[str, List[DataInputMessageOpenAIUserMessageParamContentUnionMember1]] +class InputMessageOpenAIUserMessageParam(BaseModel): + content: Union[str, List[InputMessageOpenAIUserMessageParamContentUnionMember1]] """The content of the message, which can include text and other media""" role: Literal["user"] @@ -428,39 +355,16 @@ class DataInputMessageOpenAIUserMessageParam(BaseModel): """(Optional) The name of the user message participant.""" -class DataInputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class InputMessageOpenAISystemMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" -class DataInputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class DataInputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ( - DataInputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -DataInputMessageOpenAISystemMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - DataInputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - DataInputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] - - -class DataInputMessageOpenAISystemMessageParam(BaseModel): - content: Union[str, 
List[DataInputMessageOpenAISystemMessageParamContentUnionMember1]] +class InputMessageOpenAISystemMessageParam(BaseModel): + content: Union[str, List[InputMessageOpenAISystemMessageParamContentUnionMember1]] """The content of the "system prompt". If multiple system messages are provided, they are concatenated. The underlying @@ -475,102 +379,60 @@ class DataInputMessageOpenAISystemMessageParam(BaseModel): """(Optional) The name of the system message participant.""" -class DataInputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class InputMessageOpenAIAssistantMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" -class DataInputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class DataInputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - BaseModel -): - image_url: ( - DataInputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -DataInputMessageOpenAIAssistantMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - DataInputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - DataInputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] - - -class DataInputMessageOpenAIAssistantMessageParamToolCallFunction(BaseModel): +class InputMessageOpenAIAssistantMessageParamToolCallFunction(BaseModel): arguments: Optional[str] = None + """(Optional) Arguments to pass to the function as a JSON string""" name: Optional[str] = None + """(Optional) Name of the function to call""" -class DataInputMessageOpenAIAssistantMessageParamToolCall(BaseModel): +class InputMessageOpenAIAssistantMessageParamToolCall(BaseModel): type: Literal["function"] + """Must be "function" to identify this as a function call""" id: Optional[str] = None + """(Optional) Unique identifier for the tool call""" - function: Optional[DataInputMessageOpenAIAssistantMessageParamToolCallFunction] = None + function: Optional[InputMessageOpenAIAssistantMessageParamToolCallFunction] = None + """(Optional) Function call details""" index: Optional[int] = None + """(Optional) Index of the tool call in the list""" -class DataInputMessageOpenAIAssistantMessageParam(BaseModel): +class InputMessageOpenAIAssistantMessageParam(BaseModel): role: Literal["assistant"] """Must be "assistant" to identify this as the model's response""" - content: Union[str, List[DataInputMessageOpenAIAssistantMessageParamContentUnionMember1], None] = None + content: Union[str, List[InputMessageOpenAIAssistantMessageParamContentUnionMember1], None] = None """The content of the model's response""" name: Optional[str] = None """(Optional) The name of the assistant message participant.""" - tool_calls: Optional[List[DataInputMessageOpenAIAssistantMessageParamToolCall]] = None + tool_calls: Optional[List[InputMessageOpenAIAssistantMessageParamToolCall]] = None """List of tool calls. 
Each tool call is an OpenAIChatCompletionToolCall object.""" -class DataInputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class InputMessageOpenAIToolMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" -class DataInputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class DataInputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ( - DataInputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -DataInputMessageOpenAIToolMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - DataInputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - DataInputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] - - -class DataInputMessageOpenAIToolMessageParam(BaseModel): - content: Union[str, List[DataInputMessageOpenAIToolMessageParamContentUnionMember1]] +class InputMessageOpenAIToolMessageParam(BaseModel): + content: Union[str, List[InputMessageOpenAIToolMessageParamContentUnionMember1]] """The response content from the tool""" role: Literal["tool"] @@ -580,41 +442,16 @@ class DataInputMessageOpenAIToolMessageParam(BaseModel): """Unique identifier for the tool call this response is for""" -class DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class InputMessageOpenAIDeveloperMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" -class DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam( - BaseModel -): - image_url: ( - DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] - - -class DataInputMessageOpenAIDeveloperMessageParam(BaseModel): - content: Union[str, List[DataInputMessageOpenAIDeveloperMessageParamContentUnionMember1]] +class InputMessageOpenAIDeveloperMessageParam(BaseModel): + content: Union[str, List[InputMessageOpenAIDeveloperMessageParamContentUnionMember1]] """The content of the developer message""" role: Literal["developer"] @@ -624,44 +461,32 @@ class DataInputMessageOpenAIDeveloperMessageParam(BaseModel): """(Optional) The name of the developer message participant.""" -DataInputMessage: TypeAlias = Annotated[ +InputMessage: TypeAlias = Annotated[ Union[ - DataInputMessageOpenAIUserMessageParam, - DataInputMessageOpenAISystemMessageParam, - 
DataInputMessageOpenAIAssistantMessageParam, - DataInputMessageOpenAIToolMessageParam, - DataInputMessageOpenAIDeveloperMessageParam, + InputMessageOpenAIUserMessageParam, + InputMessageOpenAISystemMessageParam, + InputMessageOpenAIAssistantMessageParam, + InputMessageOpenAIToolMessageParam, + InputMessageOpenAIDeveloperMessageParam, ], PropertyInfo(discriminator="role"), ] -class Data(BaseModel): +class CompletionListResponse(BaseModel): id: str """The ID of the chat completion""" - choices: List[DataChoice] + choices: List[Choice] """List of choices""" created: int """The Unix timestamp in seconds when the chat completion was created""" - input_messages: List[DataInputMessage] + input_messages: List[InputMessage] model: str """The model that was used to generate the chat completion""" object: Literal["chat.completion"] """The object type, which will be "chat.completion" """ - - -class CompletionListResponse(BaseModel): - data: List[Data] - - first_id: str - - has_more: bool - - last_id: str - - object: Literal["list"] diff --git a/src/llama_stack_client/types/chat/completion_retrieve_response.py b/src/llama_stack_client/types/chat/completion_retrieve_response.py index 330c752d..74b60c35 100644 --- a/src/llama_stack_client/types/chat/completion_retrieve_response.py +++ b/src/llama_stack_client/types/chat/completion_retrieve_response.py @@ -15,28 +15,18 @@ "ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", "ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", "ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", + "ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile", + "ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile", "ChoiceMessageOpenAISystemMessageParam", "ChoiceMessageOpenAISystemMessageParamContentUnionMember1", - "ChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "ChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "ChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "ChoiceMessageOpenAIAssistantMessageParam", "ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1", - "ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "ChoiceMessageOpenAIAssistantMessageParamToolCall", "ChoiceMessageOpenAIAssistantMessageParamToolCallFunction", "ChoiceMessageOpenAIToolMessageParam", "ChoiceMessageOpenAIToolMessageParamContentUnionMember1", - "ChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "ChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "ChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "ChoiceMessageOpenAIDeveloperMessageParam", "ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1", - "ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - 
"ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "ChoiceLogprobs", "ChoiceLogprobsContent", "ChoiceLogprobsContentTopLogprob", @@ -48,55 +38,69 @@ "InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", "InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", "InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", + "InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile", + "InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile", "InputMessageOpenAISystemMessageParam", "InputMessageOpenAISystemMessageParamContentUnionMember1", - "InputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "InputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "InputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "InputMessageOpenAIAssistantMessageParam", "InputMessageOpenAIAssistantMessageParamContentUnionMember1", - "InputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "InputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "InputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "InputMessageOpenAIAssistantMessageParamToolCall", "InputMessageOpenAIAssistantMessageParamToolCallFunction", "InputMessageOpenAIToolMessageParam", "InputMessageOpenAIToolMessageParamContentUnionMember1", - "InputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "InputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "InputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", "InputMessageOpenAIDeveloperMessageParam", "InputMessageOpenAIDeveloperMessageParamContentUnionMember1", - "InputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam", - "InputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam", - "InputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL", ] class ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" class ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( BaseModel ): url: str + """URL of the image to include in the message""" detail: Optional[str] = None + """(Optional) Level of detail for image processing. 
+ + Can be "low", "high", or "auto" + """ class ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): image_url: ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL + """Image URL specification and processing details""" type: Literal["image_url"] + """Must be "image_url" to identify this as image content""" + + +class ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile(BaseModel): + file_data: Optional[str] = None + + file_id: Optional[str] = None + + filename: Optional[str] = None + + +class ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile(BaseModel): + file: ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile + + type: Literal["file"] ChoiceMessageOpenAIUserMessageParamContentUnionMember1: TypeAlias = Annotated[ Union[ ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, + ChoiceMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile, ], PropertyInfo(discriminator="type"), ] @@ -113,33 +117,12 @@ class ChoiceMessageOpenAIUserMessageParam(BaseModel): """(Optional) The name of the user message participant.""" -class ChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class ChoiceMessageOpenAISystemMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class ChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class ChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - - type: Literal["image_url"] - - -ChoiceMessageOpenAISystemMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - ChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - ChoiceMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class ChoiceMessageOpenAISystemMessageParam(BaseModel): @@ -158,51 +141,34 @@ class ChoiceMessageOpenAISystemMessageParam(BaseModel): """(Optional) The name of the system message participant.""" -class ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ( - ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - 
ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - ChoiceMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class ChoiceMessageOpenAIAssistantMessageParamToolCallFunction(BaseModel): arguments: Optional[str] = None + """(Optional) Arguments to pass to the function as a JSON string""" name: Optional[str] = None + """(Optional) Name of the function to call""" class ChoiceMessageOpenAIAssistantMessageParamToolCall(BaseModel): type: Literal["function"] + """Must be "function" to identify this as a function call""" id: Optional[str] = None + """(Optional) Unique identifier for the tool call""" function: Optional[ChoiceMessageOpenAIAssistantMessageParamToolCallFunction] = None + """(Optional) Function call details""" index: Optional[int] = None + """(Optional) Index of the tool call in the list""" class ChoiceMessageOpenAIAssistantMessageParam(BaseModel): @@ -219,33 +185,12 @@ class ChoiceMessageOpenAIAssistantMessageParam(BaseModel): """List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object.""" -class ChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class ChoiceMessageOpenAIToolMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class ChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class ChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - - type: Literal["image_url"] - - -ChoiceMessageOpenAIToolMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - ChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - ChoiceMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class ChoiceMessageOpenAIToolMessageParam(BaseModel): @@ -259,35 +204,12 @@ class ChoiceMessageOpenAIToolMessageParam(BaseModel): """Unique identifier for the tool call this response is for""" -class ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ( - ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - ChoiceMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, 
- ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class ChoiceMessageOpenAIDeveloperMessageParam(BaseModel): @@ -373,26 +295,50 @@ class Choice(BaseModel): class InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): text: str + """The text content of the message""" type: Literal["text"] + """Must be "text" to identify this as text content""" class InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL(BaseModel): url: str + """URL of the image to include in the message""" detail: Optional[str] = None + """(Optional) Level of detail for image processing. + + Can be "low", "high", or "auto" + """ class InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): image_url: InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL + """Image URL specification and processing details""" type: Literal["image_url"] + """Must be "image_url" to identify this as image content""" + + +class InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile(BaseModel): + file_data: Optional[str] = None + + file_id: Optional[str] = None + + filename: Optional[str] = None + + +class InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile(BaseModel): + file: InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFileFile + + type: Literal["file"] InputMessageOpenAIUserMessageParamContentUnionMember1: TypeAlias = Annotated[ Union[ InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, + InputMessageOpenAIUserMessageParamContentUnionMember1OpenAIFile, ], PropertyInfo(discriminator="type"), ] @@ -409,33 +355,12 @@ class InputMessageOpenAIUserMessageParam(BaseModel): """(Optional) The name of the user message participant.""" -class InputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class InputMessageOpenAISystemMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class InputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class InputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: InputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - - type: Literal["image_url"] - - -InputMessageOpenAISystemMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - InputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - InputMessageOpenAISystemMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class InputMessageOpenAISystemMessageParam(BaseModel): @@ -454,51 +379,34 @@ class InputMessageOpenAISystemMessageParam(BaseModel): """(Optional) The name of the system message participant.""" -class InputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class InputMessageOpenAIAssistantMessageParamContentUnionMember1(BaseModel): text: str + """The text content of 
the message""" type: Literal["text"] - - -class InputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class InputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ( - InputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -InputMessageOpenAIAssistantMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - InputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - InputMessageOpenAIAssistantMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class InputMessageOpenAIAssistantMessageParamToolCallFunction(BaseModel): arguments: Optional[str] = None + """(Optional) Arguments to pass to the function as a JSON string""" name: Optional[str] = None + """(Optional) Name of the function to call""" class InputMessageOpenAIAssistantMessageParamToolCall(BaseModel): type: Literal["function"] + """Must be "function" to identify this as a function call""" id: Optional[str] = None + """(Optional) Unique identifier for the tool call""" function: Optional[InputMessageOpenAIAssistantMessageParamToolCallFunction] = None + """(Optional) Function call details""" index: Optional[int] = None + """(Optional) Index of the tool call in the list""" class InputMessageOpenAIAssistantMessageParam(BaseModel): @@ -515,31 +423,12 @@ class InputMessageOpenAIAssistantMessageParam(BaseModel): """List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object.""" -class InputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class InputMessageOpenAIToolMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class InputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL(BaseModel): - url: str - - detail: Optional[str] = None - - -class InputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: InputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - - type: Literal["image_url"] - - -InputMessageOpenAIToolMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - InputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - InputMessageOpenAIToolMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class InputMessageOpenAIToolMessageParam(BaseModel): @@ -553,35 +442,12 @@ class InputMessageOpenAIToolMessageParam(BaseModel): """Unique identifier for the tool call this response is for""" -class InputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam(BaseModel): +class InputMessageOpenAIDeveloperMessageParamContentUnionMember1(BaseModel): text: str + """The text content of the message""" type: Literal["text"] - - -class InputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL( - BaseModel -): - url: str - - detail: Optional[str] = None - - -class 
InputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam(BaseModel): - image_url: ( - InputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParamImageURL - ) - - type: Literal["image_url"] - - -InputMessageOpenAIDeveloperMessageParamContentUnionMember1: TypeAlias = Annotated[ - Union[ - InputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartTextParam, - InputMessageOpenAIDeveloperMessageParamContentUnionMember1OpenAIChatCompletionContentPartImageParam, - ], - PropertyInfo(discriminator="type"), -] + """Must be "text" to identify this as text content""" class InputMessageOpenAIDeveloperMessageParam(BaseModel): diff --git a/src/llama_stack_client/types/chat_completion_chunk.py b/src/llama_stack_client/types/chat_completion_chunk.py index 7d74663a..788a34ed 100644 --- a/src/llama_stack_client/types/chat_completion_chunk.py +++ b/src/llama_stack_client/types/chat_completion_chunk.py @@ -21,18 +21,24 @@ class ChoiceDeltaToolCallFunction(BaseModel): arguments: Optional[str] = None + """(Optional) Arguments to pass to the function as a JSON string""" name: Optional[str] = None + """(Optional) Name of the function to call""" class ChoiceDeltaToolCall(BaseModel): type: Literal["function"] + """Must be "function" to identify this as a function call""" id: Optional[str] = None + """(Optional) Unique identifier for the tool call""" function: Optional[ChoiceDeltaToolCallFunction] = None + """(Optional) Function call details""" index: Optional[int] = None + """(Optional) Index of the tool call in the list""" class ChoiceDelta(BaseModel): diff --git a/src/llama_stack_client/types/chat_completion_response_stream_chunk.py b/src/llama_stack_client/types/chat_completion_response_stream_chunk.py index 3c236fd4..2b94eb18 100644 --- a/src/llama_stack_client/types/chat_completion_response_stream_chunk.py +++ b/src/llama_stack_client/types/chat_completion_response_stream_chunk.py @@ -29,10 +29,13 @@ class Event(BaseModel): class Metric(BaseModel): metric: str + """The name of the metric""" value: float + """The numeric value of the metric""" unit: Optional[str] = None + """(Optional) The unit of measurement for the metric value""" class ChatCompletionResponseStreamChunk(BaseModel): @@ -40,3 +43,4 @@ class ChatCompletionResponseStreamChunk(BaseModel): """The event containing the new content""" metrics: Optional[List[Metric]] = None + """(Optional) List of metrics associated with the API response""" diff --git a/src/llama_stack_client/types/completion_response.py b/src/llama_stack_client/types/completion_response.py index 78254b28..51772801 100644 --- a/src/llama_stack_client/types/completion_response.py +++ b/src/llama_stack_client/types/completion_response.py @@ -11,10 +11,13 @@ class Metric(BaseModel): metric: str + """The name of the metric""" value: float + """The numeric value of the metric""" unit: Optional[str] = None + """(Optional) The unit of measurement for the metric value""" class CompletionResponse(BaseModel): @@ -28,3 +31,4 @@ class CompletionResponse(BaseModel): """Optional log probabilities for generated tokens""" metrics: Optional[List[Metric]] = None + """(Optional) List of metrics associated with the API response""" diff --git a/src/llama_stack_client/types/create_response.py b/src/llama_stack_client/types/create_response.py new file mode 100644 index 00000000..b0eaf3e5 --- /dev/null +++ b/src/llama_stack_client/types/create_response.py @@ -0,0 +1,44 @@ +# File generated from our 
OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional + +from .._models import BaseModel + +__all__ = ["CreateResponse", "Result"] + + +class Result(BaseModel): + flagged: bool + """Whether any of the below categories are flagged.""" + + metadata: Dict[str, Union[bool, float, str, List[object], object, None]] + + categories: Optional[Dict[str, bool]] = None + """A list of the categories, and whether they are flagged or not.""" + + category_applied_input_types: Optional[Dict[str, List[str]]] = None + """ + A list of the categories along with the input type(s) that the score applies to. + """ + + category_scores: Optional[Dict[str, float]] = None + """A list of the categories along with their scores as predicted by model. + + Required set of categories that need to be in response - violence - + violence/graphic - harassment - harassment/threatening - hate - + hate/threatening - illicit - illicit/violent - sexual - sexual/minors - + self-harm - self-harm/intent - self-harm/instructions + """ + + user_message: Optional[str] = None + + +class CreateResponse(BaseModel): + id: str + """The unique identifier for the moderation request.""" + + model: str + """The model used to generate the moderation results.""" + + results: List[Result] + """A list of moderation objects""" diff --git a/src/llama_stack_client/types/dataset_appendrows_params.py b/src/llama_stack_client/types/dataset_appendrows_params.py new file mode 100644 index 00000000..2e96e124 --- /dev/null +++ b/src/llama_stack_client/types/dataset_appendrows_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Iterable +from typing_extensions import Required, TypedDict + +__all__ = ["DatasetAppendrowsParams"] + + +class DatasetAppendrowsParams(TypedDict, total=False): + rows: Required[Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]] + """The rows to append to the dataset.""" diff --git a/src/llama_stack_client/types/dataset_list_response.py b/src/llama_stack_client/types/dataset_list_response.py index 42b27ab4..7080e589 100644 --- a/src/llama_stack_client/types/dataset_list_response.py +++ b/src/llama_stack_client/types/dataset_list_response.py @@ -47,16 +47,18 @@ class DatasetListResponseItem(BaseModel): identifier: str metadata: Dict[str, Union[bool, float, str, List[object], object, None]] + """Additional metadata for the dataset""" provider_id: str purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"] - """Purpose of the dataset. 
Each purpose has a required input data schema.""" + """Purpose of the dataset indicating its intended use""" source: DatasetListResponseItemSource - """A dataset that can be obtained from a URI.""" + """Data source configuration for the dataset""" type: Literal["dataset"] + """Type of resource, always 'dataset' for datasets""" provider_resource_id: Optional[str] = None diff --git a/src/llama_stack_client/types/dataset_register_response.py b/src/llama_stack_client/types/dataset_register_response.py index a79367bb..8da590b8 100644 --- a/src/llama_stack_client/types/dataset_register_response.py +++ b/src/llama_stack_client/types/dataset_register_response.py @@ -38,15 +38,17 @@ class DatasetRegisterResponse(BaseModel): identifier: str metadata: Dict[str, Union[bool, float, str, List[object], object, None]] + """Additional metadata for the dataset""" provider_id: str purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"] - """Purpose of the dataset. Each purpose has a required input data schema.""" + """Purpose of the dataset indicating its intended use""" source: Source - """A dataset that can be obtained from a URI.""" + """Data source configuration for the dataset""" type: Literal["dataset"] + """Type of resource, always 'dataset' for datasets""" provider_resource_id: Optional[str] = None diff --git a/src/llama_stack_client/types/dataset_retrieve_response.py b/src/llama_stack_client/types/dataset_retrieve_response.py index ab96c387..6cda0a42 100644 --- a/src/llama_stack_client/types/dataset_retrieve_response.py +++ b/src/llama_stack_client/types/dataset_retrieve_response.py @@ -38,15 +38,17 @@ class DatasetRetrieveResponse(BaseModel): identifier: str metadata: Dict[str, Union[bool, float, str, List[object], object, None]] + """Additional metadata for the dataset""" provider_id: str purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"] - """Purpose of the dataset. 
Each purpose has a required input data schema.""" + """Purpose of the dataset indicating its intended use""" source: Source - """A dataset that can be obtained from a URI.""" + """Data source configuration for the dataset""" type: Literal["dataset"] + """Type of resource, always 'dataset' for datasets""" provider_resource_id: Optional[str] = None diff --git a/src/llama_stack_client/types/event_param.py b/src/llama_stack_client/types/event_param.py index 500e4a24..b26f2916 100644 --- a/src/llama_stack_client/types/event_param.py +++ b/src/llama_stack_client/types/event_param.py @@ -21,50 +21,70 @@ class UnstructuredLogEvent(TypedDict, total=False): message: Required[str] + """The log message text""" severity: Required[Literal["verbose", "debug", "info", "warn", "error", "critical"]] + """The severity level of the log message""" span_id: Required[str] + """Unique identifier for the span this event belongs to""" timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]] + """Timestamp when the event occurred""" trace_id: Required[str] + """Unique identifier for the trace this event belongs to""" type: Required[Literal["unstructured_log"]] + """Event type identifier set to UNSTRUCTURED_LOG""" attributes: Dict[str, Union[str, float, bool, None]] + """(Optional) Key-value pairs containing additional metadata about the event""" class MetricEvent(TypedDict, total=False): metric: Required[str] + """The name of the metric being measured""" span_id: Required[str] + """Unique identifier for the span this event belongs to""" timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]] + """Timestamp when the event occurred""" trace_id: Required[str] + """Unique identifier for the trace this event belongs to""" type: Required[Literal["metric"]] + """Event type identifier set to METRIC""" unit: Required[str] + """The unit of measurement for the metric value""" value: Required[float] + """The numeric value of the metric measurement""" attributes: Dict[str, Union[str, float, bool, None]] + """(Optional) Key-value pairs containing additional metadata about the event""" class StructuredLogEventPayloadSpanStartPayload(TypedDict, total=False): name: Required[str] + """Human-readable name describing the operation this span represents""" type: Required[Literal["span_start"]] + """Payload type identifier set to SPAN_START""" parent_span_id: str + """(Optional) Unique identifier for the parent span, if this is a child span""" class StructuredLogEventPayloadSpanEndPayload(TypedDict, total=False): status: Required[Literal["ok", "error"]] + """The final status of the span indicating success or failure""" type: Required[Literal["span_end"]] + """Payload type identifier set to SPAN_END""" StructuredLogEventPayload: TypeAlias = Union[ @@ -74,16 +94,22 @@ class StructuredLogEventPayloadSpanEndPayload(TypedDict, total=False): class StructuredLogEvent(TypedDict, total=False): payload: Required[StructuredLogEventPayload] + """The structured payload data for the log event""" span_id: Required[str] + """Unique identifier for the span this event belongs to""" timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]] + """Timestamp when the event occurred""" trace_id: Required[str] + """Unique identifier for the trace this event belongs to""" type: Required[Literal["structured_log"]] + """Event type identifier set to STRUCTURED_LOG""" attributes: Dict[str, Union[str, float, bool, None]] + """(Optional) Key-value pairs containing additional metadata 
about the event""" EventParam: TypeAlias = Union[UnstructuredLogEvent, MetricEvent, StructuredLogEvent] diff --git a/src/llama_stack_client/types/health_info.py b/src/llama_stack_client/types/health_info.py index 3441ddd1..86410ed0 100644 --- a/src/llama_stack_client/types/health_info.py +++ b/src/llama_stack_client/types/health_info.py @@ -9,3 +9,4 @@ class HealthInfo(BaseModel): status: Literal["OK", "Error", "Not Implemented"] + """Current health status of the service""" diff --git a/src/llama_stack_client/types/inference_batch_chat_completion_response.py b/src/llama_stack_client/types/inference_batch_chat_completion_response.py index 84d6c425..ed24908d 100644 --- a/src/llama_stack_client/types/inference_batch_chat_completion_response.py +++ b/src/llama_stack_client/types/inference_batch_chat_completion_response.py @@ -10,3 +10,4 @@ class InferenceBatchChatCompletionResponse(BaseModel): batch: List[ChatCompletionResponse] + """List of chat completion responses, one for each conversation in the batch""" diff --git a/src/llama_stack_client/types/job.py b/src/llama_stack_client/types/job.py index 4953b3bf..9635de38 100644 --- a/src/llama_stack_client/types/job.py +++ b/src/llama_stack_client/types/job.py @@ -9,5 +9,7 @@ class Job(BaseModel): job_id: str + """Unique identifier for the job""" status: Literal["completed", "in_progress", "failed", "scheduled", "cancelled"] + """Current execution status of the job""" diff --git a/src/llama_stack_client/types/list_datasets_response.py b/src/llama_stack_client/types/list_datasets_response.py index 5a897f78..21c4b72a 100644 --- a/src/llama_stack_client/types/list_datasets_response.py +++ b/src/llama_stack_client/types/list_datasets_response.py @@ -8,3 +8,4 @@ class ListDatasetsResponse(BaseModel): data: DatasetListResponse + """List of datasets""" diff --git a/src/llama_stack_client/types/list_files_response.py b/src/llama_stack_client/types/list_files_response.py index cbb7d514..fb42f298 100644 --- a/src/llama_stack_client/types/list_files_response.py +++ b/src/llama_stack_client/types/list_files_response.py @@ -14,10 +14,13 @@ class ListFilesResponse(BaseModel): """List of file objects""" first_id: str + """ID of the first file in the list for pagination""" has_more: bool + """Whether there are more files available beyond this page""" last_id: str + """ID of the last file in the list for pagination""" object: Literal["list"] """The object type, which is always "list" """ diff --git a/src/llama_stack_client/types/list_providers_response.py b/src/llama_stack_client/types/list_providers_response.py index 4904c0b1..c75b6880 100644 --- a/src/llama_stack_client/types/list_providers_response.py +++ b/src/llama_stack_client/types/list_providers_response.py @@ -8,3 +8,4 @@ class ListProvidersResponse(BaseModel): data: ProviderListResponse + """List of provider information objects""" diff --git a/src/llama_stack_client/types/list_routes_response.py b/src/llama_stack_client/types/list_routes_response.py index 59e8392b..d038fe6d 100644 --- a/src/llama_stack_client/types/list_routes_response.py +++ b/src/llama_stack_client/types/list_routes_response.py @@ -8,3 +8,4 @@ class ListRoutesResponse(BaseModel): data: RouteListResponse + """List of available route information objects""" diff --git a/src/llama_stack_client/types/list_tool_groups_response.py b/src/llama_stack_client/types/list_tool_groups_response.py index 6433b164..94c5c145 100644 --- a/src/llama_stack_client/types/list_tool_groups_response.py +++ 
b/src/llama_stack_client/types/list_tool_groups_response.py @@ -8,3 +8,4 @@ class ListToolGroupsResponse(BaseModel): data: ToolgroupListResponse + """List of tool groups""" diff --git a/src/llama_stack_client/types/list_tools_response.py b/src/llama_stack_client/types/list_tools_response.py index c9b4ec6b..47f040b5 100644 --- a/src/llama_stack_client/types/list_tools_response.py +++ b/src/llama_stack_client/types/list_tools_response.py @@ -8,3 +8,4 @@ class ListToolsResponse(BaseModel): data: ToolListResponse + """List of tools""" diff --git a/src/llama_stack_client/types/list_vector_dbs_response.py b/src/llama_stack_client/types/list_vector_dbs_response.py index fede6c42..5ff7d5e5 100644 --- a/src/llama_stack_client/types/list_vector_dbs_response.py +++ b/src/llama_stack_client/types/list_vector_dbs_response.py @@ -8,3 +8,4 @@ class ListVectorDBsResponse(BaseModel): data: VectorDBListResponse + """List of vector databases""" diff --git a/src/llama_stack_client/types/list_vector_stores_response.py b/src/llama_stack_client/types/list_vector_stores_response.py index c79fd895..d4960217 100644 --- a/src/llama_stack_client/types/list_vector_stores_response.py +++ b/src/llama_stack_client/types/list_vector_stores_response.py @@ -10,11 +10,16 @@ class ListVectorStoresResponse(BaseModel): data: List[VectorStore] + """List of vector store objects""" has_more: bool + """Whether there are more vector stores available beyond this page""" object: str + """Object type identifier, always "list" """ first_id: Optional[str] = None + """(Optional) ID of the first vector store in the list for pagination""" last_id: Optional[str] = None + """(Optional) ID of the last vector store in the list for pagination""" diff --git a/src/llama_stack_client/types/model.py b/src/llama_stack_client/types/model.py index dea24d53..5651667d 100644 --- a/src/llama_stack_client/types/model.py +++ b/src/llama_stack_client/types/model.py @@ -12,13 +12,19 @@ class Model(BaseModel): identifier: str + """Unique identifier for this resource in llama stack""" metadata: Dict[str, Union[bool, float, str, List[object], object, None]] + """Any additional metadata for this model""" api_model_type: Literal["llm", "embedding"] = FieldInfo(alias="model_type") + """The type of model (LLM or embedding model)""" provider_id: str + """ID of the provider that owns this resource""" type: Literal["model"] + """The resource type, always 'model' for model resources""" provider_resource_id: Optional[str] = None + """Unique identifier for this resource in the provider""" diff --git a/src/llama_stack_client/types/moderation_create_params.py b/src/llama_stack_client/types/moderation_create_params.py new file mode 100644 index 00000000..61f7bc1b --- /dev/null +++ b/src/llama_stack_client/types/moderation_create_params.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union +from typing_extensions import Required, TypedDict + +__all__ = ["ModerationCreateParams"] + + +class ModerationCreateParams(TypedDict, total=False): + input: Required[Union[str, List[str]]] + """Input (or inputs) to classify. + + Can be a single string, an array of strings, or an array of multi-modal input + objects similar to other models. 
+ """ + + model: Required[str] + """The content moderation model you would like to use.""" diff --git a/src/llama_stack_client/types/post_training/job_artifacts_response.py b/src/llama_stack_client/types/post_training/job_artifacts_response.py index 0fb98c6c..42784aee 100644 --- a/src/llama_stack_client/types/post_training/job_artifacts_response.py +++ b/src/llama_stack_client/types/post_training/job_artifacts_response.py @@ -1,13 +1,50 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List +from typing import List, Optional +from datetime import datetime from ..._models import BaseModel -__all__ = ["JobArtifactsResponse"] +__all__ = ["JobArtifactsResponse", "Checkpoint", "CheckpointTrainingMetrics"] + + +class CheckpointTrainingMetrics(BaseModel): + epoch: int + """Training epoch number""" + + perplexity: float + """Perplexity metric indicating model confidence""" + + train_loss: float + """Loss value on the training dataset""" + + validation_loss: float + """Loss value on the validation dataset""" + + +class Checkpoint(BaseModel): + created_at: datetime + """Timestamp when the checkpoint was created""" + + epoch: int + """Training epoch when the checkpoint was saved""" + + identifier: str + """Unique identifier for the checkpoint""" + + path: str + """File system path where the checkpoint is stored""" + + post_training_job_id: str + """Identifier of the training job that created this checkpoint""" + + training_metrics: Optional[CheckpointTrainingMetrics] = None + """(Optional) Training metrics associated with this checkpoint""" class JobArtifactsResponse(BaseModel): - checkpoints: List[object] + checkpoints: List[Checkpoint] + """List of model checkpoints created during training""" job_uuid: str + """Unique identifier for the training job""" diff --git a/src/llama_stack_client/types/post_training/job_status_response.py b/src/llama_stack_client/types/post_training/job_status_response.py index 5ba60a6a..94379579 100644 --- a/src/llama_stack_client/types/post_training/job_status_response.py +++ b/src/llama_stack_client/types/post_training/job_status_response.py @@ -6,20 +6,61 @@ from ..._models import BaseModel -__all__ = ["JobStatusResponse"] +__all__ = ["JobStatusResponse", "Checkpoint", "CheckpointTrainingMetrics"] + + +class CheckpointTrainingMetrics(BaseModel): + epoch: int + """Training epoch number""" + + perplexity: float + """Perplexity metric indicating model confidence""" + + train_loss: float + """Loss value on the training dataset""" + + validation_loss: float + """Loss value on the validation dataset""" + + +class Checkpoint(BaseModel): + created_at: datetime + """Timestamp when the checkpoint was created""" + + epoch: int + """Training epoch when the checkpoint was saved""" + + identifier: str + """Unique identifier for the checkpoint""" + + path: str + """File system path where the checkpoint is stored""" + + post_training_job_id: str + """Identifier of the training job that created this checkpoint""" + + training_metrics: Optional[CheckpointTrainingMetrics] = None + """(Optional) Training metrics associated with this checkpoint""" class JobStatusResponse(BaseModel): - checkpoints: List[object] + checkpoints: List[Checkpoint] + """List of model checkpoints created during training""" job_uuid: str + """Unique identifier for the training job""" status: Literal["completed", "in_progress", "failed", "scheduled", "cancelled"] + """Current status of the training job""" completed_at: Optional[datetime] = None + 
"""(Optional) Timestamp when the job finished, if completed""" resources_allocated: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Information about computational resources allocated to the job""" scheduled_at: Optional[datetime] = None + """(Optional) Timestamp when the job was scheduled""" started_at: Optional[datetime] = None + """(Optional) Timestamp when the job execution began""" diff --git a/src/llama_stack_client/types/post_training_preference_optimize_params.py b/src/llama_stack_client/types/post_training_preference_optimize_params.py index f7d998eb..2dcd294d 100644 --- a/src/llama_stack_client/types/post_training_preference_optimize_params.py +++ b/src/llama_stack_client/types/post_training_preference_optimize_params.py @@ -36,64 +36,88 @@ class PostTrainingPreferenceOptimizeParams(TypedDict, total=False): class AlgorithmConfig(TypedDict, total=False): - epsilon: Required[float] + beta: Required[float] + """Temperature parameter for the DPO loss""" - gamma: Required[float] - - reward_clip: Required[float] - - reward_scale: Required[float] + loss_type: Required[Literal["sigmoid", "hinge", "ipo", "kto_pair"]] + """The type of loss function to use for DPO""" class TrainingConfigDataConfig(TypedDict, total=False): batch_size: Required[int] + """Number of samples per training batch""" data_format: Required[Literal["instruct", "dialog"]] + """Format of the dataset (instruct or dialog)""" dataset_id: Required[str] + """Unique identifier for the training dataset""" shuffle: Required[bool] + """Whether to shuffle the dataset during training""" packed: bool + """ + (Optional) Whether to pack multiple samples into a single sequence for + efficiency + """ train_on_input: bool + """(Optional) Whether to compute loss on input tokens as well as output tokens""" validation_dataset_id: str + """(Optional) Unique identifier for the validation dataset""" class TrainingConfigEfficiencyConfig(TypedDict, total=False): enable_activation_checkpointing: bool + """(Optional) Whether to use activation checkpointing to reduce memory usage""" enable_activation_offloading: bool + """(Optional) Whether to offload activations to CPU to save GPU memory""" fsdp_cpu_offload: bool + """(Optional) Whether to offload FSDP parameters to CPU""" memory_efficient_fsdp_wrap: bool + """(Optional) Whether to use memory-efficient FSDP wrapping""" class TrainingConfigOptimizerConfig(TypedDict, total=False): lr: Required[float] + """Learning rate for the optimizer""" num_warmup_steps: Required[int] + """Number of steps for learning rate warmup""" optimizer_type: Required[Literal["adam", "adamw", "sgd"]] + """Type of optimizer to use (adam, adamw, or sgd)""" weight_decay: Required[float] + """Weight decay coefficient for regularization""" class TrainingConfig(TypedDict, total=False): gradient_accumulation_steps: Required[int] + """Number of steps to accumulate gradients before updating""" max_steps_per_epoch: Required[int] + """Maximum number of steps to run per epoch""" n_epochs: Required[int] + """Number of training epochs to run""" data_config: TrainingConfigDataConfig + """(Optional) Configuration for data loading and formatting""" dtype: str + """(Optional) Data type for model parameters (bf16, fp16, fp32)""" efficiency_config: TrainingConfigEfficiencyConfig + """(Optional) Configuration for memory and compute optimizations""" max_validation_steps: int + """(Optional) Maximum number of validation steps per epoch""" optimizer_config: TrainingConfigOptimizerConfig + 
"""(Optional) Configuration for the optimization algorithm""" diff --git a/src/llama_stack_client/types/post_training_supervised_fine_tune_params.py b/src/llama_stack_client/types/post_training_supervised_fine_tune_params.py index 596ec18b..c23796f0 100644 --- a/src/llama_stack_client/types/post_training_supervised_fine_tune_params.py +++ b/src/llama_stack_client/types/post_training_supervised_fine_tune_params.py @@ -41,53 +41,79 @@ class PostTrainingSupervisedFineTuneParams(TypedDict, total=False): class TrainingConfigDataConfig(TypedDict, total=False): batch_size: Required[int] + """Number of samples per training batch""" data_format: Required[Literal["instruct", "dialog"]] + """Format of the dataset (instruct or dialog)""" dataset_id: Required[str] + """Unique identifier for the training dataset""" shuffle: Required[bool] + """Whether to shuffle the dataset during training""" packed: bool + """ + (Optional) Whether to pack multiple samples into a single sequence for + efficiency + """ train_on_input: bool + """(Optional) Whether to compute loss on input tokens as well as output tokens""" validation_dataset_id: str + """(Optional) Unique identifier for the validation dataset""" class TrainingConfigEfficiencyConfig(TypedDict, total=False): enable_activation_checkpointing: bool + """(Optional) Whether to use activation checkpointing to reduce memory usage""" enable_activation_offloading: bool + """(Optional) Whether to offload activations to CPU to save GPU memory""" fsdp_cpu_offload: bool + """(Optional) Whether to offload FSDP parameters to CPU""" memory_efficient_fsdp_wrap: bool + """(Optional) Whether to use memory-efficient FSDP wrapping""" class TrainingConfigOptimizerConfig(TypedDict, total=False): lr: Required[float] + """Learning rate for the optimizer""" num_warmup_steps: Required[int] + """Number of steps for learning rate warmup""" optimizer_type: Required[Literal["adam", "adamw", "sgd"]] + """Type of optimizer to use (adam, adamw, or sgd)""" weight_decay: Required[float] + """Weight decay coefficient for regularization""" class TrainingConfig(TypedDict, total=False): gradient_accumulation_steps: Required[int] + """Number of steps to accumulate gradients before updating""" max_steps_per_epoch: Required[int] + """Maximum number of steps to run per epoch""" n_epochs: Required[int] + """Number of training epochs to run""" data_config: TrainingConfigDataConfig + """(Optional) Configuration for data loading and formatting""" dtype: str + """(Optional) Data type for model parameters (bf16, fp16, fp32)""" efficiency_config: TrainingConfigEfficiencyConfig + """(Optional) Configuration for memory and compute optimizations""" max_validation_steps: int + """(Optional) Maximum number of validation steps per epoch""" optimizer_config: TrainingConfigOptimizerConfig + """(Optional) Configuration for the optimization algorithm""" diff --git a/src/llama_stack_client/types/provider_info.py b/src/llama_stack_client/types/provider_info.py index c9c748cc..6b8a1ec6 100644 --- a/src/llama_stack_client/types/provider_info.py +++ b/src/llama_stack_client/types/provider_info.py @@ -9,11 +9,16 @@ class ProviderInfo(BaseModel): api: str + """The API name this provider implements""" config: Dict[str, Union[bool, float, str, List[object], object, None]] + """Configuration parameters for the provider""" health: Dict[str, Union[bool, float, str, List[object], object, None]] + """Current health status of the provider""" provider_id: str + """Unique identifier for the provider""" provider_type: str + """The 
type of provider implementation""" diff --git a/src/llama_stack_client/types/query_chunks_response.py b/src/llama_stack_client/types/query_chunks_response.py index 97c1927c..6a06b3f2 100644 --- a/src/llama_stack_client/types/query_chunks_response.py +++ b/src/llama_stack_client/types/query_chunks_response.py @@ -76,5 +76,7 @@ class Chunk(BaseModel): class QueryChunksResponse(BaseModel): chunks: List[Chunk] + """List of content chunks returned from the query""" scores: List[float] + """Relevance scores corresponding to each returned chunk""" diff --git a/src/llama_stack_client/types/query_condition_param.py b/src/llama_stack_client/types/query_condition_param.py index 37c48093..59def1b4 100644 --- a/src/llama_stack_client/types/query_condition_param.py +++ b/src/llama_stack_client/types/query_condition_param.py @@ -10,7 +10,10 @@ class QueryConditionParam(TypedDict, total=False): key: Required[str] + """The attribute key to filter on""" op: Required[Literal["eq", "ne", "gt", "lt"]] + """The comparison operator to apply""" value: Required[Union[bool, float, str, Iterable[object], object, None]] + """The value to compare against""" diff --git a/src/llama_stack_client/types/query_spans_response.py b/src/llama_stack_client/types/query_spans_response.py index 488a4331..a20c9b92 100644 --- a/src/llama_stack_client/types/query_spans_response.py +++ b/src/llama_stack_client/types/query_spans_response.py @@ -8,3 +8,4 @@ class QuerySpansResponse(BaseModel): data: TelemetryQuerySpansResponse + """List of spans matching the query criteria""" diff --git a/src/llama_stack_client/types/response_create_params.py b/src/llama_stack_client/types/response_create_params.py index 471d8b21..ac434963 100644 --- a/src/llama_stack_client/types/response_create_params.py +++ b/src/llama_stack_client/types/response_create_params.py @@ -10,6 +10,7 @@ "InputUnionMember1", "InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall", "InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall", + "InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult", "InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall", "InputUnionMember1OpenAIResponseInputFunctionToolCallOutput", "InputUnionMember1OpenAIResponseMessage", @@ -46,6 +47,9 @@ class ResponseCreateParamsBase(TypedDict, total=False): model: Required[str] """The underlying LLM used for completions.""" + include: List[str] + """(Optional) Additional fields to include in the response.""" + instructions: str max_infer_iters: int @@ -62,42 +66,74 @@ class ResponseCreateParamsBase(TypedDict, total=False): temperature: float text: Text + """Text response configuration for OpenAI responses.""" tools: Iterable[Tool] class InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall(TypedDict, total=False): id: Required[str] + """Unique identifier for this tool call""" status: Required[str] + """Current status of the web search operation""" type: Required[Literal["web_search_call"]] + """Tool call type identifier, always "web_search_call" """ + + +class InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult(TypedDict, total=False): + attributes: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] + """(Optional) Key-value attributes associated with the file""" + + file_id: Required[str] + """Unique identifier of the file containing the result""" + + filename: Required[str] + """Name of the file containing the result""" + + score: Required[float] + """Relevance score for this search result (between 0 and 1)""" + + 
text: Required[str] + """Text content of the search result""" class InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall(TypedDict, total=False): id: Required[str] + """Unique identifier for this tool call""" queries: Required[List[str]] + """List of search queries executed""" status: Required[str] + """Current status of the file search operation""" type: Required[Literal["file_search_call"]] + """Tool call type identifier, always "file_search_call" """ - results: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] + results: Iterable[InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult] + """(Optional) Search results returned by the file search operation""" class InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall(TypedDict, total=False): arguments: Required[str] + """JSON string containing the function arguments""" call_id: Required[str] + """Unique identifier for the function call""" name: Required[str] + """Name of the function being called""" type: Required[Literal["function_call"]] + """Tool call type identifier, always "function_call" """ id: str + """(Optional) Additional identifier for the tool call""" status: str + """(Optional) Current status of the function call execution""" class InputUnionMember1OpenAIResponseInputFunctionToolCallOutput(TypedDict, total=False): @@ -116,18 +152,23 @@ class InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInp TypedDict, total=False ): text: Required[str] + """The text content of the input message""" type: Required[Literal["input_text"]] + """Content type identifier, always "input_text" """ class InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage( TypedDict, total=False ): detail: Required[Literal["low", "high", "auto"]] + """Level of detail for image processing, can be "low", "high", or "auto" """ type: Required[Literal["input_image"]] + """Content type identifier, always "input_image" """ image_url: str + """(Optional) URL of the image content""" InputUnionMember1OpenAIResponseMessageContentUnionMember1: TypeAlias = Union[ @@ -140,26 +181,35 @@ class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIR TypedDict, total=False ): file_id: Required[str] + """Unique identifier of the referenced file""" filename: Required[str] + """Name of the referenced file""" index: Required[int] + """Position index of the citation within the content""" type: Required[Literal["file_citation"]] + """Annotation type identifier, always "file_citation" """ class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation( TypedDict, total=False ): end_index: Required[int] + """End position of the citation span in the content""" start_index: Required[int] + """Start position of the citation span in the content""" title: Required[str] + """Title of the referenced web resource""" type: Required[Literal["url_citation"]] + """Annotation type identifier, always "url_citation" """ url: Required[str] + """URL of the referenced web resource""" class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation( @@ -256,49 +306,65 @@ class TextFormat(TypedDict, total=False): class Text(TypedDict, total=False): format: TextFormat - """Configuration for Responses API text format.""" + """(Optional) Text format configuration specifying output format requirements""" class ToolOpenAIResponseInputToolWebSearch(TypedDict, total=False): type: 
Required[Literal["web_search", "web_search_preview", "web_search_preview_2025_03_11"]] + """Web search tool type variant to use""" search_context_size: str + """(Optional) Size of search context, must be "low", "medium", or "high" """ class ToolOpenAIResponseInputToolFileSearchRankingOptions(TypedDict, total=False): ranker: str + """(Optional) Name of the ranking algorithm to use""" score_threshold: float + """(Optional) Minimum relevance score threshold for results""" class ToolOpenAIResponseInputToolFileSearch(TypedDict, total=False): type: Required[Literal["file_search"]] + """Tool type identifier, always "file_search" """ vector_store_ids: Required[List[str]] + """List of vector store identifiers to search within""" filters: Dict[str, Union[bool, float, str, Iterable[object], object, None]] + """(Optional) Additional filters to apply to the search""" max_num_results: int + """(Optional) Maximum number of search results to return (1-50)""" ranking_options: ToolOpenAIResponseInputToolFileSearchRankingOptions + """(Optional) Options for ranking and scoring search results""" class ToolOpenAIResponseInputToolFunction(TypedDict, total=False): name: Required[str] + """Name of the function that can be called""" type: Required[Literal["function"]] + """Tool type identifier, always "function" """ description: str + """(Optional) Description of what the function does""" parameters: Dict[str, Union[bool, float, str, Iterable[object], object, None]] + """(Optional) JSON schema defining the function's parameters""" strict: bool + """(Optional) Whether to enforce strict parameter validation""" class ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter(TypedDict, total=False): always: List[str] + """(Optional) List of tool names that always require approval""" never: List[str] + """(Optional) List of tool names that never require approval""" ToolOpenAIResponseInputToolMcpRequireApproval: TypeAlias = Union[ @@ -308,6 +374,7 @@ class ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter(TypedDict, tot class ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter(TypedDict, total=False): tool_names: List[str] + """(Optional) List of specific tool names that are allowed""" ToolOpenAIResponseInputToolMcpAllowedTools: TypeAlias = Union[ @@ -317,16 +384,22 @@ class ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter(TypedDict, to class ToolOpenAIResponseInputToolMcp(TypedDict, total=False): require_approval: Required[ToolOpenAIResponseInputToolMcpRequireApproval] + """Approval requirement for tool calls ("always", "never", or filter)""" server_label: Required[str] + """Label to identify this MCP server""" server_url: Required[str] + """URL endpoint of the MCP server""" type: Required[Literal["mcp"]] + """Tool type identifier, always "mcp" """ allowed_tools: ToolOpenAIResponseInputToolMcpAllowedTools + """(Optional) Restriction on which tools can be used from this server""" headers: Dict[str, Union[bool, float, str, Iterable[object], object, None]] + """(Optional) HTTP headers to include when connecting to the server""" Tool: TypeAlias = Union[ diff --git a/src/llama_stack_client/types/response_list_response.py b/src/llama_stack_client/types/response_list_response.py index d46213ef..ae50d44a 100644 --- a/src/llama_stack_client/types/response_list_response.py +++ b/src/llama_stack_client/types/response_list_response.py @@ -10,80 +10,112 @@ __all__ = [ "ResponseListResponse", - "Data", - "DataInput", - "DataInputOpenAIResponseOutputMessageWebSearchToolCall", - 
"DataInputOpenAIResponseOutputMessageFileSearchToolCall", - "DataInputOpenAIResponseOutputMessageFunctionToolCall", - "DataInputOpenAIResponseInputFunctionToolCallOutput", - "DataInputOpenAIResponseMessage", - "DataInputOpenAIResponseMessageContentUnionMember1", - "DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText", - "DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage", - "DataInputOpenAIResponseMessageContentUnionMember2", - "DataInputOpenAIResponseMessageContentUnionMember2Annotation", - "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation", - "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation", - "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation", - "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath", - "DataOutput", - "DataOutputOpenAIResponseMessage", - "DataOutputOpenAIResponseMessageContentUnionMember1", - "DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText", - "DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage", - "DataOutputOpenAIResponseMessageContentUnionMember2", - "DataOutputOpenAIResponseMessageContentUnionMember2Annotation", - "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation", - "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation", - "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation", - "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath", - "DataOutputOpenAIResponseOutputMessageWebSearchToolCall", - "DataOutputOpenAIResponseOutputMessageFileSearchToolCall", - "DataOutputOpenAIResponseOutputMessageFunctionToolCall", - "DataOutputOpenAIResponseOutputMessageMcpCall", - "DataOutputOpenAIResponseOutputMessageMcpListTools", - "DataOutputOpenAIResponseOutputMessageMcpListToolsTool", - "DataText", - "DataTextFormat", - "DataError", + "Input", + "InputOpenAIResponseOutputMessageWebSearchToolCall", + "InputOpenAIResponseOutputMessageFileSearchToolCall", + "InputOpenAIResponseOutputMessageFileSearchToolCallResult", + "InputOpenAIResponseOutputMessageFunctionToolCall", + "InputOpenAIResponseInputFunctionToolCallOutput", + "InputOpenAIResponseMessage", + "InputOpenAIResponseMessageContentUnionMember1", + "InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText", + "InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage", + "InputOpenAIResponseMessageContentUnionMember2", + "InputOpenAIResponseMessageContentUnionMember2Annotation", + "InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation", + "InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation", + "InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation", + "InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath", + "Output", + "OutputOpenAIResponseMessage", + "OutputOpenAIResponseMessageContentUnionMember1", + "OutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText", + "OutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage", + 
"OutputOpenAIResponseMessageContentUnionMember2", + "OutputOpenAIResponseMessageContentUnionMember2Annotation", + "OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation", + "OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation", + "OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation", + "OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath", + "OutputOpenAIResponseOutputMessageWebSearchToolCall", + "OutputOpenAIResponseOutputMessageFileSearchToolCall", + "OutputOpenAIResponseOutputMessageFileSearchToolCallResult", + "OutputOpenAIResponseOutputMessageFunctionToolCall", + "OutputOpenAIResponseOutputMessageMcpCall", + "OutputOpenAIResponseOutputMessageMcpListTools", + "OutputOpenAIResponseOutputMessageMcpListToolsTool", + "Text", + "TextFormat", + "Error", ] -class DataInputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): +class InputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" status: str + """Current status of the web search operation""" type: Literal["web_search_call"] + """Tool call type identifier, always "web_search_call" """ -class DataInputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): +class InputOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel): + attributes: Dict[str, Union[bool, float, str, List[object], object, None]] + """(Optional) Key-value attributes associated with the file""" + + file_id: str + """Unique identifier of the file containing the result""" + + filename: str + """Name of the file containing the result""" + + score: float + """Relevance score for this search result (between 0 and 1)""" + + text: str + """Text content of the search result""" + + +class InputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" queries: List[str] + """List of search queries executed""" status: str + """Current status of the file search operation""" type: Literal["file_search_call"] + """Tool call type identifier, always "file_search_call" """ - results: Optional[List[Dict[str, Union[bool, float, str, List[object], object, None]]]] = None + results: Optional[List[InputOpenAIResponseOutputMessageFileSearchToolCallResult]] = None + """(Optional) Search results returned by the file search operation""" -class DataInputOpenAIResponseOutputMessageFunctionToolCall(BaseModel): +class InputOpenAIResponseOutputMessageFunctionToolCall(BaseModel): arguments: str + """JSON string containing the function arguments""" call_id: str + """Unique identifier for the function call""" name: str + """Name of the function being called""" type: Literal["function_call"] + """Tool call type identifier, always "function_call" """ id: Optional[str] = None + """(Optional) Additional identifier for the tool call""" status: Optional[str] = None + """(Optional) Current status of the function call execution""" -class DataInputOpenAIResponseInputFunctionToolCallOutput(BaseModel): +class InputOpenAIResponseInputFunctionToolCallOutput(BaseModel): call_id: str output: str @@ -95,54 +127,66 @@ class DataInputOpenAIResponseInputFunctionToolCallOutput(BaseModel): status: Optional[str] = None -class DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel): +class InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel): text: str + """The 
text content of the input message""" type: Literal["input_text"] + """Content type identifier, always "input_text" """ -class DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel): +class InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel): detail: Literal["low", "high", "auto"] + """Level of detail for image processing, can be "low", "high", or "auto" """ type: Literal["input_image"] + """Content type identifier, always "input_image" """ image_url: Optional[str] = None + """(Optional) URL of the image content""" -DataInputOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[ +InputOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[ Union[ - DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText, - DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage, + InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText, + InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage, ], PropertyInfo(discriminator="type"), ] -class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel): +class InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel): file_id: str + """Unique identifier of the referenced file""" filename: str + """Name of the referenced file""" index: int + """Position index of the citation within the content""" type: Literal["file_citation"] + """Annotation type identifier, always "file_citation" """ -class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel): +class InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel): end_index: int + """End position of the citation span in the content""" start_index: int + """Start position of the citation span in the content""" title: str + """Title of the referenced web resource""" type: Literal["url_citation"] + """Annotation type identifier, always "url_citation" """ url: str + """URL of the referenced web resource""" -class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation( - BaseModel -): +class InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(BaseModel): container_id: str end_index: int @@ -156,7 +200,7 @@ class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseA type: Literal["container_file_citation"] -class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel): +class InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel): file_id: str index: int @@ -164,30 +208,28 @@ class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseA type: Literal["file_path"] -DataInputOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[ +InputOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[ Union[ - DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation, - DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation, - DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation, - 
DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath, + InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation, + InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation, + InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation, + InputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath, ], PropertyInfo(discriminator="type"), ] -class DataInputOpenAIResponseMessageContentUnionMember2(BaseModel): - annotations: List[DataInputOpenAIResponseMessageContentUnionMember2Annotation] +class InputOpenAIResponseMessageContentUnionMember2(BaseModel): + annotations: List[InputOpenAIResponseMessageContentUnionMember2Annotation] text: str type: Literal["output_text"] -class DataInputOpenAIResponseMessage(BaseModel): +class InputOpenAIResponseMessage(BaseModel): content: Union[ - str, - List[DataInputOpenAIResponseMessageContentUnionMember1], - List[DataInputOpenAIResponseMessageContentUnionMember2], + str, List[InputOpenAIResponseMessageContentUnionMember1], List[InputOpenAIResponseMessageContentUnionMember2] ] role: Literal["system", "developer", "user", "assistant"] @@ -199,63 +241,75 @@ class DataInputOpenAIResponseMessage(BaseModel): status: Optional[str] = None -DataInput: TypeAlias = Union[ - DataInputOpenAIResponseOutputMessageWebSearchToolCall, - DataInputOpenAIResponseOutputMessageFileSearchToolCall, - DataInputOpenAIResponseOutputMessageFunctionToolCall, - DataInputOpenAIResponseInputFunctionToolCallOutput, - DataInputOpenAIResponseMessage, +Input: TypeAlias = Union[ + InputOpenAIResponseOutputMessageWebSearchToolCall, + InputOpenAIResponseOutputMessageFileSearchToolCall, + InputOpenAIResponseOutputMessageFunctionToolCall, + InputOpenAIResponseInputFunctionToolCallOutput, + InputOpenAIResponseMessage, ] -class DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel): +class OutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel): text: str + """The text content of the input message""" type: Literal["input_text"] + """Content type identifier, always "input_text" """ -class DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel): +class OutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel): detail: Literal["low", "high", "auto"] + """Level of detail for image processing, can be "low", "high", or "auto" """ type: Literal["input_image"] + """Content type identifier, always "input_image" """ image_url: Optional[str] = None + """(Optional) URL of the image content""" -DataOutputOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[ +OutputOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[ Union[ - DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText, - DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage, + OutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText, + OutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage, ], PropertyInfo(discriminator="type"), ] -class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel): +class OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel): file_id: str + 
"""Unique identifier of the referenced file""" filename: str + """Name of the referenced file""" index: int + """Position index of the citation within the content""" type: Literal["file_citation"] + """Annotation type identifier, always "file_citation" """ -class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel): +class OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel): end_index: int + """End position of the citation span in the content""" start_index: int + """Start position of the citation span in the content""" title: str + """Title of the referenced web resource""" type: Literal["url_citation"] + """Annotation type identifier, always "url_citation" """ url: str + """URL of the referenced web resource""" -class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation( - BaseModel -): +class OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(BaseModel): container_id: str end_index: int @@ -269,7 +323,7 @@ class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponse type: Literal["container_file_citation"] -class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel): +class OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel): file_id: str index: int @@ -277,30 +331,28 @@ class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponse type: Literal["file_path"] -DataOutputOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[ +OutputOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[ Union[ - DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation, - DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation, - DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation, - DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath, + OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation, + OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation, + OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation, + OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath, ], PropertyInfo(discriminator="type"), ] -class DataOutputOpenAIResponseMessageContentUnionMember2(BaseModel): - annotations: List[DataOutputOpenAIResponseMessageContentUnionMember2Annotation] +class OutputOpenAIResponseMessageContentUnionMember2(BaseModel): + annotations: List[OutputOpenAIResponseMessageContentUnionMember2Annotation] text: str type: Literal["output_text"] -class DataOutputOpenAIResponseMessage(BaseModel): +class OutputOpenAIResponseMessage(BaseModel): content: Union[ - str, - List[DataOutputOpenAIResponseMessageContentUnionMember1], - List[DataOutputOpenAIResponseMessageContentUnionMember2], + str, List[OutputOpenAIResponseMessageContentUnionMember1], List[OutputOpenAIResponseMessageContentUnionMember2] ] role: Literal["system", "developer", "user", "assistant"] @@ -312,88 +364,133 @@ class DataOutputOpenAIResponseMessage(BaseModel): status: Optional[str] = None -class DataOutputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): 
+class OutputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" status: str + """Current status of the web search operation""" type: Literal["web_search_call"] + """Tool call type identifier, always "web_search_call" """ + + +class OutputOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel): + attributes: Dict[str, Union[bool, float, str, List[object], object, None]] + """(Optional) Key-value attributes associated with the file""" + + file_id: str + """Unique identifier of the file containing the result""" + filename: str + """Name of the file containing the result""" + + score: float + """Relevance score for this search result (between 0 and 1)""" + + text: str + """Text content of the search result""" -class DataOutputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): + +class OutputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" queries: List[str] + """List of search queries executed""" status: str + """Current status of the file search operation""" type: Literal["file_search_call"] + """Tool call type identifier, always "file_search_call" """ - results: Optional[List[Dict[str, Union[bool, float, str, List[object], object, None]]]] = None + results: Optional[List[OutputOpenAIResponseOutputMessageFileSearchToolCallResult]] = None + """(Optional) Search results returned by the file search operation""" -class DataOutputOpenAIResponseOutputMessageFunctionToolCall(BaseModel): +class OutputOpenAIResponseOutputMessageFunctionToolCall(BaseModel): arguments: str + """JSON string containing the function arguments""" call_id: str + """Unique identifier for the function call""" name: str + """Name of the function being called""" type: Literal["function_call"] + """Tool call type identifier, always "function_call" """ id: Optional[str] = None + """(Optional) Additional identifier for the tool call""" status: Optional[str] = None + """(Optional) Current status of the function call execution""" -class DataOutputOpenAIResponseOutputMessageMcpCall(BaseModel): +class OutputOpenAIResponseOutputMessageMcpCall(BaseModel): id: str + """Unique identifier for this MCP call""" arguments: str + """JSON string containing the MCP call arguments""" name: str + """Name of the MCP method being called""" server_label: str + """Label identifying the MCP server handling the call""" type: Literal["mcp_call"] + """Tool call type identifier, always "mcp_call" """ error: Optional[str] = None + """(Optional) Error message if the MCP call failed""" output: Optional[str] = None + """(Optional) Output result from the successful MCP call""" -class DataOutputOpenAIResponseOutputMessageMcpListToolsTool(BaseModel): +class OutputOpenAIResponseOutputMessageMcpListToolsTool(BaseModel): input_schema: Dict[str, Union[bool, float, str, List[object], object, None]] + """JSON schema defining the tool's input parameters""" name: str + """Name of the tool""" description: Optional[str] = None + """(Optional) Description of what the tool does""" -class DataOutputOpenAIResponseOutputMessageMcpListTools(BaseModel): +class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel): id: str + """Unique identifier for this MCP list tools operation""" server_label: str + """Label identifying the MCP server providing the tools""" - tools: List[DataOutputOpenAIResponseOutputMessageMcpListToolsTool] + tools: List[OutputOpenAIResponseOutputMessageMcpListToolsTool] + """List of available tools provided by the MCP 
server""" type: Literal["mcp_list_tools"] + """Tool call type identifier, always "mcp_list_tools" """ -DataOutput: TypeAlias = Annotated[ +Output: TypeAlias = Annotated[ Union[ - DataOutputOpenAIResponseMessage, - DataOutputOpenAIResponseOutputMessageWebSearchToolCall, - DataOutputOpenAIResponseOutputMessageFileSearchToolCall, - DataOutputOpenAIResponseOutputMessageFunctionToolCall, - DataOutputOpenAIResponseOutputMessageMcpCall, - DataOutputOpenAIResponseOutputMessageMcpListTools, + OutputOpenAIResponseMessage, + OutputOpenAIResponseOutputMessageWebSearchToolCall, + OutputOpenAIResponseOutputMessageFileSearchToolCall, + OutputOpenAIResponseOutputMessageFunctionToolCall, + OutputOpenAIResponseOutputMessageMcpCall, + OutputOpenAIResponseOutputMessageMcpListTools, ], PropertyInfo(discriminator="type"), ] -class DataTextFormat(BaseModel): +class TextFormat(BaseModel): type: Literal["text", "json_schema", "json_object"] """Must be "text", "json_schema", or "json_object" to identify the format type""" @@ -418,56 +515,61 @@ class DataTextFormat(BaseModel): """ -class DataText(BaseModel): - format: Optional[DataTextFormat] = None - """Configuration for Responses API text format.""" +class Text(BaseModel): + format: Optional[TextFormat] = None + """(Optional) Text format configuration specifying output format requirements""" -class DataError(BaseModel): +class Error(BaseModel): code: str + """Error code identifying the type of failure""" message: str + """Human-readable error message describing the failure""" -class Data(BaseModel): +class ResponseListResponse(BaseModel): id: str + """Unique identifier for this response""" created_at: int + """Unix timestamp when the response was created""" - input: List[DataInput] + input: List[Input] + """List of input items that led to this response""" model: str + """Model identifier used for generation""" object: Literal["response"] + """Object type identifier, always "response" """ - output: List[DataOutput] + output: List[Output] + """List of generated output items (messages, tool calls, etc.)""" parallel_tool_calls: bool + """Whether tool calls can be executed in parallel""" status: str + """Current status of the response generation""" - text: DataText + text: Text + """Text formatting configuration for the response""" - error: Optional[DataError] = None + error: Optional[Error] = None + """(Optional) Error details if the response generation failed""" previous_response_id: Optional[str] = None + """(Optional) ID of the previous response in a conversation""" temperature: Optional[float] = None + """(Optional) Sampling temperature used for generation""" top_p: Optional[float] = None + """(Optional) Nucleus sampling parameter used for generation""" truncation: Optional[str] = None + """(Optional) Truncation strategy applied to the response""" user: Optional[str] = None - - -class ResponseListResponse(BaseModel): - data: List[Data] - - first_id: str - - has_more: bool - - last_id: str - - object: Literal["list"] + """(Optional) User identifier associated with the request""" diff --git a/src/llama_stack_client/types/response_object.py b/src/llama_stack_client/types/response_object.py index e4b313d3..c0f348a9 100644 --- a/src/llama_stack_client/types/response_object.py +++ b/src/llama_stack_client/types/response_object.py @@ -23,6 +23,7 @@ "OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath", "OutputOpenAIResponseOutputMessageWebSearchToolCall", "OutputOpenAIResponseOutputMessageFileSearchToolCall", + 
"OutputOpenAIResponseOutputMessageFileSearchToolCallResult", "OutputOpenAIResponseOutputMessageFunctionToolCall", "OutputOpenAIResponseOutputMessageMcpCall", "OutputOpenAIResponseOutputMessageMcpListTools", @@ -35,16 +36,21 @@ class OutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel): text: str + """The text content of the input message""" type: Literal["input_text"] + """Content type identifier, always "input_text" """ class OutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel): detail: Literal["low", "high", "auto"] + """Level of detail for image processing, can be "low", "high", or "auto" """ type: Literal["input_image"] + """Content type identifier, always "input_image" """ image_url: Optional[str] = None + """(Optional) URL of the image content""" OutputOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[ @@ -58,24 +64,33 @@ class OutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageCo class OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel): file_id: str + """Unique identifier of the referenced file""" filename: str + """Name of the referenced file""" index: int + """Position index of the citation within the content""" type: Literal["file_citation"] + """Annotation type identifier, always "file_citation" """ class OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel): end_index: int + """End position of the citation span in the content""" start_index: int + """Start position of the citation span in the content""" title: str + """Title of the referenced web resource""" type: Literal["url_citation"] + """Annotation type identifier, always "url_citation" """ url: str + """URL of the referenced web resource""" class OutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(BaseModel): @@ -135,70 +150,115 @@ class OutputOpenAIResponseMessage(BaseModel): class OutputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" status: str + """Current status of the web search operation""" type: Literal["web_search_call"] + """Tool call type identifier, always "web_search_call" """ + + +class OutputOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel): + attributes: Dict[str, Union[bool, float, str, List[object], object, None]] + """(Optional) Key-value attributes associated with the file""" + + file_id: str + """Unique identifier of the file containing the result""" + + filename: str + """Name of the file containing the result""" + + score: float + """Relevance score for this search result (between 0 and 1)""" + + text: str + """Text content of the search result""" class OutputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" queries: List[str] + """List of search queries executed""" status: str + """Current status of the file search operation""" type: Literal["file_search_call"] + """Tool call type identifier, always "file_search_call" """ - results: Optional[List[Dict[str, Union[bool, float, str, List[object], object, None]]]] = None + results: Optional[List[OutputOpenAIResponseOutputMessageFileSearchToolCallResult]] = None + """(Optional) Search results returned by the file search operation""" class OutputOpenAIResponseOutputMessageFunctionToolCall(BaseModel): arguments: str + """JSON string containing the function 
arguments""" call_id: str + """Unique identifier for the function call""" name: str + """Name of the function being called""" type: Literal["function_call"] + """Tool call type identifier, always "function_call" """ id: Optional[str] = None + """(Optional) Additional identifier for the tool call""" status: Optional[str] = None + """(Optional) Current status of the function call execution""" class OutputOpenAIResponseOutputMessageMcpCall(BaseModel): id: str + """Unique identifier for this MCP call""" arguments: str + """JSON string containing the MCP call arguments""" name: str + """Name of the MCP method being called""" server_label: str + """Label identifying the MCP server handling the call""" type: Literal["mcp_call"] + """Tool call type identifier, always "mcp_call" """ error: Optional[str] = None + """(Optional) Error message if the MCP call failed""" output: Optional[str] = None + """(Optional) Output result from the successful MCP call""" class OutputOpenAIResponseOutputMessageMcpListToolsTool(BaseModel): input_schema: Dict[str, Union[bool, float, str, List[object], object, None]] + """JSON schema defining the tool's input parameters""" name: str + """Name of the tool""" description: Optional[str] = None + """(Optional) Description of what the tool does""" class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel): id: str + """Unique identifier for this MCP list tools operation""" server_label: str + """Label identifying the MCP server providing the tools""" tools: List[OutputOpenAIResponseOutputMessageMcpListToolsTool] + """List of available tools provided by the MCP server""" type: Literal["mcp_list_tools"] + """Tool call type identifier, always "mcp_list_tools" """ Output: TypeAlias = Annotated[ @@ -241,13 +301,15 @@ class TextFormat(BaseModel): class Text(BaseModel): format: Optional[TextFormat] = None - """Configuration for Responses API text format.""" + """(Optional) Text format configuration specifying output format requirements""" class Error(BaseModel): code: str + """Error code identifying the type of failure""" message: str + """Human-readable error message describing the failure""" class ResponseObject(BaseModel): @@ -262,29 +324,43 @@ def output_text(self) -> str: return "".join(texts) id: str + """Unique identifier for this response""" created_at: int + """Unix timestamp when the response was created""" model: str + """Model identifier used for generation""" object: Literal["response"] + """Object type identifier, always "response" """ output: List[Output] + """List of generated output items (messages, tool calls, etc.)""" parallel_tool_calls: bool + """Whether tool calls can be executed in parallel""" status: str + """Current status of the response generation""" text: Text + """Text formatting configuration for the response""" error: Optional[Error] = None + """(Optional) Error details if the response generation failed""" previous_response_id: Optional[str] = None + """(Optional) ID of the previous response in a conversation""" temperature: Optional[float] = None + """(Optional) Sampling temperature used for generation""" top_p: Optional[float] = None + """(Optional) Nucleus sampling parameter used for generation""" truncation: Optional[str] = None + """(Optional) Truncation strategy applied to the response""" user: Optional[str] = None + """(Optional) User identifier associated with the request""" diff --git a/src/llama_stack_client/types/response_object_stream.py b/src/llama_stack_client/types/response_object_stream.py index 311ad6bd..09b7830c 100644 --- 
a/src/llama_stack_client/types/response_object_stream.py +++ b/src/llama_stack_client/types/response_object_stream.py @@ -24,6 +24,7 @@ "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath", "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageWebSearchToolCall", "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFileSearchToolCall", + "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFileSearchToolCallResult", "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFunctionToolCall", "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpCall", "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListTools", @@ -42,6 +43,7 @@ "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath", "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageWebSearchToolCall", "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFileSearchToolCall", + "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFileSearchToolCallResult", "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFunctionToolCall", "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpCall", "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListTools", @@ -67,26 +69,33 @@ class OpenAIResponseObjectStreamResponseCreated(BaseModel): response: ResponseObject + """The newly created response object""" type: Literal["response.created"] + """Event type identifier, always "response.created" """ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText( BaseModel ): text: str + """The text content of the input message""" type: Literal["input_text"] + """Content type identifier, always "input_text" """ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage( BaseModel ): detail: Literal["low", "high", "auto"] + """Level of detail for image processing, can be "low", "high", or "auto" """ type: Literal["input_image"] + """Content type identifier, always "input_image" """ image_url: Optional[str] = None + """(Optional) URL of the image content""" OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[ @@ -102,26 +111,35 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage BaseModel ): file_id: str + """Unique identifier of the referenced file""" filename: str + """Name of the referenced file""" index: int + """Position index of the citation within the content""" type: Literal["file_citation"] + """Annotation type identifier, always "file_citation" """ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation( BaseModel ): end_index: int + """End position of the citation span in the content""" start_index: int + """Start position of the citation span in the content""" title: str + """Title of the referenced web resource""" type: Literal["url_citation"] + """Annotation type identifier, always "url_citation" """ url: str + 
"""URL of the referenced web resource""" class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation( @@ -189,70 +207,119 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" status: str + """Current status of the web search operation""" type: Literal["web_search_call"] + """Tool call type identifier, always "web_search_call" """ + + +class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFileSearchToolCallResult( + BaseModel +): + attributes: Dict[str, Union[bool, float, str, List[object], object, None]] + """(Optional) Key-value attributes associated with the file""" + + file_id: str + """Unique identifier of the file containing the result""" + + filename: str + """Name of the file containing the result""" + + score: float + """Relevance score for this search result (between 0 and 1)""" + + text: str + """Text content of the search result""" class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" queries: List[str] + """List of search queries executed""" status: str + """Current status of the file search operation""" type: Literal["file_search_call"] + """Tool call type identifier, always "file_search_call" """ - results: Optional[List[Dict[str, Union[bool, float, str, List[object], object, None]]]] = None + results: Optional[ + List[OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFileSearchToolCallResult] + ] = None + """(Optional) Search results returned by the file search operation""" class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFunctionToolCall(BaseModel): arguments: str + """JSON string containing the function arguments""" call_id: str + """Unique identifier for the function call""" name: str + """Name of the function being called""" type: Literal["function_call"] + """Tool call type identifier, always "function_call" """ id: Optional[str] = None + """(Optional) Additional identifier for the tool call""" status: Optional[str] = None + """(Optional) Current status of the function call execution""" class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpCall(BaseModel): id: str + """Unique identifier for this MCP call""" arguments: str + """JSON string containing the MCP call arguments""" name: str + """Name of the MCP method being called""" server_label: str + """Label identifying the MCP server handling the call""" type: Literal["mcp_call"] + """Tool call type identifier, always "mcp_call" """ error: Optional[str] = None + """(Optional) Error message if the MCP call failed""" output: Optional[str] = None + """(Optional) Output result from the successful MCP call""" class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListToolsTool(BaseModel): input_schema: Dict[str, Union[bool, float, str, List[object], object, None]] + """JSON schema defining the tool's input parameters""" name: str + """Name of the tool""" description: Optional[str] = None + """(Optional) Description of what the tool does""" class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListTools(BaseModel): id: str + 
"""Unique identifier for this MCP list tools operation""" server_label: str + """Label identifying the MCP server providing the tools""" tools: List[OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListToolsTool] + """List of available tools provided by the MCP server""" type: Literal["mcp_list_tools"] + """Tool call type identifier, always "mcp_list_tools" """ OpenAIResponseObjectStreamResponseOutputItemAddedItem: TypeAlias = Annotated[ @@ -270,37 +337,42 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputM class OpenAIResponseObjectStreamResponseOutputItemAdded(BaseModel): item: OpenAIResponseObjectStreamResponseOutputItemAddedItem - """Corresponds to the various Message types in the Responses API. - - They are all under one type because the Responses API gives them all the same - "type" value, and there is no way to tell them apart in certain scenarios. - """ + """The output item that was added (message, tool call, etc.)""" output_index: int + """Index position of this item in the output list""" response_id: str + """Unique identifier of the response containing this output""" sequence_number: int + """Sequential number for ordering streaming events""" type: Literal["response.output_item.added"] + """Event type identifier, always "response.output_item.added" """ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText( BaseModel ): text: str + """The text content of the input message""" type: Literal["input_text"] + """Content type identifier, always "input_text" """ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage( BaseModel ): detail: Literal["low", "high", "auto"] + """Level of detail for image processing, can be "low", "high", or "auto" """ type: Literal["input_image"] + """Content type identifier, always "input_image" """ image_url: Optional[str] = None + """(Optional) URL of the image content""" OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[ @@ -316,26 +388,35 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageC BaseModel ): file_id: str + """Unique identifier of the referenced file""" filename: str + """Name of the referenced file""" index: int + """Position index of the citation within the content""" type: Literal["file_citation"] + """Annotation type identifier, always "file_citation" """ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation( BaseModel ): end_index: int + """End position of the citation span in the content""" start_index: int + """Start position of the citation span in the content""" title: str + """Title of the referenced web resource""" type: Literal["url_citation"] + """Annotation type identifier, always "url_citation" """ url: str + """URL of the referenced web resource""" class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation( @@ -403,70 +484,119 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessage( class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" status: str + """Current status of the web search 
operation""" type: Literal["web_search_call"] + """Tool call type identifier, always "web_search_call" """ + + +class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFileSearchToolCallResult( + BaseModel +): + attributes: Dict[str, Union[bool, float, str, List[object], object, None]] + """(Optional) Key-value attributes associated with the file""" + + file_id: str + """Unique identifier of the file containing the result""" + + filename: str + """Name of the file containing the result""" + + score: float + """Relevance score for this search result (between 0 and 1)""" + + text: str + """Text content of the search result""" class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" queries: List[str] + """List of search queries executed""" status: str + """Current status of the file search operation""" type: Literal["file_search_call"] + """Tool call type identifier, always "file_search_call" """ - results: Optional[List[Dict[str, Union[bool, float, str, List[object], object, None]]]] = None + results: Optional[ + List[OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFileSearchToolCallResult] + ] = None + """(Optional) Search results returned by the file search operation""" class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFunctionToolCall(BaseModel): arguments: str + """JSON string containing the function arguments""" call_id: str + """Unique identifier for the function call""" name: str + """Name of the function being called""" type: Literal["function_call"] + """Tool call type identifier, always "function_call" """ id: Optional[str] = None + """(Optional) Additional identifier for the tool call""" status: Optional[str] = None + """(Optional) Current status of the function call execution""" class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpCall(BaseModel): id: str + """Unique identifier for this MCP call""" arguments: str + """JSON string containing the MCP call arguments""" name: str + """Name of the MCP method being called""" server_label: str + """Label identifying the MCP server handling the call""" type: Literal["mcp_call"] + """Tool call type identifier, always "mcp_call" """ error: Optional[str] = None + """(Optional) Error message if the MCP call failed""" output: Optional[str] = None + """(Optional) Output result from the successful MCP call""" class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListToolsTool(BaseModel): input_schema: Dict[str, Union[bool, float, str, List[object], object, None]] + """JSON schema defining the tool's input parameters""" name: str + """Name of the tool""" description: Optional[str] = None + """(Optional) Description of what the tool does""" class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListTools(BaseModel): id: str + """Unique identifier for this MCP list tools operation""" server_label: str + """Label identifying the MCP server providing the tools""" tools: List[OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListToolsTool] + """List of available tools provided by the MCP server""" type: Literal["mcp_list_tools"] + """Tool call type identifier, always "mcp_list_tools" """ OpenAIResponseObjectStreamResponseOutputItemDoneItem: TypeAlias = Annotated[ @@ -484,81 +614,107 @@ class 
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMe class OpenAIResponseObjectStreamResponseOutputItemDone(BaseModel): item: OpenAIResponseObjectStreamResponseOutputItemDoneItem - """Corresponds to the various Message types in the Responses API. - - They are all under one type because the Responses API gives them all the same - "type" value, and there is no way to tell them apart in certain scenarios. - """ + """The completed output item (message, tool call, etc.)""" output_index: int + """Index position of this item in the output list""" response_id: str + """Unique identifier of the response containing this output""" sequence_number: int + """Sequential number for ordering streaming events""" type: Literal["response.output_item.done"] + """Event type identifier, always "response.output_item.done" """ class OpenAIResponseObjectStreamResponseOutputTextDelta(BaseModel): content_index: int + """Index position within the text content""" delta: str + """Incremental text content being added""" item_id: str + """Unique identifier of the output item being updated""" output_index: int + """Index position of the item in the output list""" sequence_number: int + """Sequential number for ordering streaming events""" type: Literal["response.output_text.delta"] + """Event type identifier, always "response.output_text.delta" """ class OpenAIResponseObjectStreamResponseOutputTextDone(BaseModel): content_index: int + """Index position within the text content""" item_id: str + """Unique identifier of the completed output item""" output_index: int + """Index position of the item in the output list""" sequence_number: int + """Sequential number for ordering streaming events""" text: str + """Final complete text content of the output item""" type: Literal["response.output_text.done"] + """Event type identifier, always "response.output_text.done" """ class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta(BaseModel): delta: str + """Incremental function call arguments being added""" item_id: str + """Unique identifier of the function call being updated""" output_index: int + """Index position of the item in the output list""" sequence_number: int + """Sequential number for ordering streaming events""" type: Literal["response.function_call_arguments.delta"] + """Event type identifier, always "response.function_call_arguments.delta" """ class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone(BaseModel): arguments: str + """Final complete arguments JSON string for the function call""" item_id: str + """Unique identifier of the completed function call""" output_index: int + """Index position of the item in the output list""" sequence_number: int + """Sequential number for ordering streaming events""" type: Literal["response.function_call_arguments.done"] + """Event type identifier, always "response.function_call_arguments.done" """ class OpenAIResponseObjectStreamResponseWebSearchCallInProgress(BaseModel): item_id: str + """Unique identifier of the web search call""" output_index: int + """Index position of the item in the output list""" sequence_number: int + """Sequential number for ordering streaming events""" type: Literal["response.web_search_call.in_progress"] + """Event type identifier, always "response.web_search_call.in_progress" """ class OpenAIResponseObjectStreamResponseWebSearchCallSearching(BaseModel): @@ -573,12 +729,16 @@ class OpenAIResponseObjectStreamResponseWebSearchCallSearching(BaseModel): class 
OpenAIResponseObjectStreamResponseWebSearchCallCompleted(BaseModel): item_id: str + """Unique identifier of the completed web search call""" output_index: int + """Index position of the item in the output list""" sequence_number: int + """Sequential number for ordering streaming events""" type: Literal["response.web_search_call.completed"] + """Event type identifier, always "response.web_search_call.completed" """ class OpenAIResponseObjectStreamResponseMcpListToolsInProgress(BaseModel): @@ -625,30 +785,40 @@ class OpenAIResponseObjectStreamResponseMcpCallArgumentsDone(BaseModel): class OpenAIResponseObjectStreamResponseMcpCallInProgress(BaseModel): item_id: str + """Unique identifier of the MCP call""" output_index: int + """Index position of the item in the output list""" sequence_number: int + """Sequential number for ordering streaming events""" type: Literal["response.mcp_call.in_progress"] + """Event type identifier, always "response.mcp_call.in_progress" """ class OpenAIResponseObjectStreamResponseMcpCallFailed(BaseModel): sequence_number: int + """Sequential number for ordering streaming events""" type: Literal["response.mcp_call.failed"] + """Event type identifier, always "response.mcp_call.failed" """ class OpenAIResponseObjectStreamResponseMcpCallCompleted(BaseModel): sequence_number: int + """Sequential number for ordering streaming events""" type: Literal["response.mcp_call.completed"] + """Event type identifier, always "response.mcp_call.completed" """ class OpenAIResponseObjectStreamResponseCompleted(BaseModel): response: ResponseObject + """The completed response object""" type: Literal["response.completed"] + """Event type identifier, always "response.completed" """ ResponseObjectStream: TypeAlias = Annotated[ diff --git a/src/llama_stack_client/types/responses/input_item_list_response.py b/src/llama_stack_client/types/responses/input_item_list_response.py index aadcd9f2..714ff703 100644 --- a/src/llama_stack_client/types/responses/input_item_list_response.py +++ b/src/llama_stack_client/types/responses/input_item_list_response.py @@ -11,6 +11,7 @@ "Data", "DataOpenAIResponseOutputMessageWebSearchToolCall", "DataOpenAIResponseOutputMessageFileSearchToolCall", + "DataOpenAIResponseOutputMessageFileSearchToolCallResult", "DataOpenAIResponseOutputMessageFunctionToolCall", "DataOpenAIResponseInputFunctionToolCallOutput", "DataOpenAIResponseMessage", @@ -28,36 +29,67 @@ class DataOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" status: str + """Current status of the web search operation""" type: Literal["web_search_call"] + """Tool call type identifier, always "web_search_call" """ + + +class DataOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel): + attributes: Dict[str, Union[bool, float, str, List[object], object, None]] + """(Optional) Key-value attributes associated with the file""" + + file_id: str + """Unique identifier of the file containing the result""" + + filename: str + """Name of the file containing the result""" + + score: float + """Relevance score for this search result (between 0 and 1)""" + + text: str + """Text content of the search result""" class DataOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): id: str + """Unique identifier for this tool call""" queries: List[str] + """List of search queries executed""" status: str + """Current status of the file search operation""" type: Literal["file_search_call"] + """Tool call type identifier, always "file_search_call" """ - results: 
Optional[List[Dict[str, Union[bool, float, str, List[object], object, None]]]] = None + results: Optional[List[DataOpenAIResponseOutputMessageFileSearchToolCallResult]] = None + """(Optional) Search results returned by the file search operation""" class DataOpenAIResponseOutputMessageFunctionToolCall(BaseModel): arguments: str + """JSON string containing the function arguments""" call_id: str + """Unique identifier for the function call""" name: str + """Name of the function being called""" type: Literal["function_call"] + """Tool call type identifier, always "function_call" """ id: Optional[str] = None + """(Optional) Additional identifier for the tool call""" status: Optional[str] = None + """(Optional) Current status of the function call execution""" class DataOpenAIResponseInputFunctionToolCallOutput(BaseModel): @@ -74,16 +106,21 @@ class DataOpenAIResponseInputFunctionToolCallOutput(BaseModel): class DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel): text: str + """The text content of the input message""" type: Literal["input_text"] + """Content type identifier, always "input_text" """ class DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel): detail: Literal["low", "high", "auto"] + """Level of detail for image processing, can be "low", "high", or "auto" """ type: Literal["input_image"] + """Content type identifier, always "input_image" """ image_url: Optional[str] = None + """(Optional) URL of the image content""" DataOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[ @@ -97,24 +134,33 @@ class DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageCont class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel): file_id: str + """Unique identifier of the referenced file""" filename: str + """Name of the referenced file""" index: int + """Position index of the citation within the content""" type: Literal["file_citation"] + """Annotation type identifier, always "file_citation" """ class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel): end_index: int + """End position of the citation span in the content""" start_index: int + """Start position of the citation span in the content""" title: str + """Title of the referenced web resource""" type: Literal["url_citation"] + """Annotation type identifier, always "url_citation" """ url: str + """URL of the referenced web resource""" class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(BaseModel): @@ -183,5 +229,7 @@ class DataOpenAIResponseMessage(BaseModel): class InputItemListResponse(BaseModel): data: List[Data] + """List of input items""" object: Literal["list"] + """Object type identifier, always "list" """ diff --git a/src/llama_stack_client/types/route_info.py b/src/llama_stack_client/types/route_info.py index 3d8880f1..671361b9 100644 --- a/src/llama_stack_client/types/route_info.py +++ b/src/llama_stack_client/types/route_info.py @@ -9,7 +9,10 @@ class RouteInfo(BaseModel): method: str + """HTTP method for the route""" provider_types: List[str] + """List of provider types that implement this route""" route: str + """The API endpoint path""" diff --git a/src/llama_stack_client/types/run_shield_response.py b/src/llama_stack_client/types/run_shield_response.py index 1dbdf5a0..ba7bac0b 100644 --- a/src/llama_stack_client/types/run_shield_response.py +++ 
b/src/llama_stack_client/types/run_shield_response.py @@ -10,3 +10,4 @@ class RunShieldResponse(BaseModel): violation: Optional[SafetyViolation] = None + """(Optional) Safety violation detected by the shield, if any""" diff --git a/src/llama_stack_client/types/scoring_fn.py b/src/llama_stack_client/types/scoring_fn.py index 3569cb44..8c558f2a 100644 --- a/src/llama_stack_client/types/scoring_fn.py +++ b/src/llama_stack_client/types/scoring_fn.py @@ -20,9 +20,11 @@ class ScoringFn(BaseModel): return_type: ReturnType type: Literal["scoring_function"] + """The resource type, always scoring_function""" description: Optional[str] = None params: Optional[ScoringFnParams] = None + """Parameters for LLM-as-judge scoring function configuration.""" provider_resource_id: Optional[str] = None diff --git a/src/llama_stack_client/types/scoring_fn_params.py b/src/llama_stack_client/types/scoring_fn_params.py index a46b46f5..937fd886 100644 --- a/src/llama_stack_client/types/scoring_fn_params.py +++ b/src/llama_stack_client/types/scoring_fn_params.py @@ -11,28 +11,38 @@ class LlmAsJudgeScoringFnParams(BaseModel): aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]] + """Aggregation functions to apply to the scores of each row""" judge_model: str + """Identifier of the LLM model to use as a judge for scoring""" judge_score_regexes: List[str] + """Regexes to extract the answer from generated response""" type: Literal["llm_as_judge"] + """The type of scoring function parameters, always llm_as_judge""" prompt_template: Optional[str] = None + """(Optional) Custom prompt template for the judge model""" class RegexParserScoringFnParams(BaseModel): aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]] + """Aggregation functions to apply to the scores of each row""" parsing_regexes: List[str] + """Regex to extract the answer from generated response""" type: Literal["regex_parser"] + """The type of scoring function parameters, always regex_parser""" class BasicScoringFnParams(BaseModel): aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]] + """Aggregation functions to apply to the scores of each row""" type: Literal["basic"] + """The type of scoring function parameters, always basic""" ScoringFnParams: TypeAlias = Annotated[ diff --git a/src/llama_stack_client/types/scoring_fn_params_param.py b/src/llama_stack_client/types/scoring_fn_params_param.py index b404bc89..9753ddeb 100644 --- a/src/llama_stack_client/types/scoring_fn_params_param.py +++ b/src/llama_stack_client/types/scoring_fn_params_param.py @@ -12,32 +12,42 @@ class LlmAsJudgeScoringFnParams(TypedDict, total=False): aggregation_functions: Required[ List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]] ] + """Aggregation functions to apply to the scores of each row""" judge_model: Required[str] + """Identifier of the LLM model to use as a judge for scoring""" judge_score_regexes: Required[List[str]] + """Regexes to extract the answer from generated response""" type: Required[Literal["llm_as_judge"]] + """The type of scoring function parameters, always llm_as_judge""" prompt_template: str + """(Optional) Custom prompt template for the judge model""" class RegexParserScoringFnParams(TypedDict, total=False): aggregation_functions: Required[ List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]] ] + """Aggregation 
functions to apply to the scores of each row""" parsing_regexes: Required[List[str]] + """Regex to extract the answer from generated response""" type: Required[Literal["regex_parser"]] + """The type of scoring function parameters, always regex_parser""" class BasicScoringFnParams(TypedDict, total=False): aggregation_functions: Required[ List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]] ] + """Aggregation functions to apply to the scores of each row""" type: Required[Literal["basic"]] + """The type of scoring function parameters, always basic""" ScoringFnParamsParam: TypeAlias = Union[LlmAsJudgeScoringFnParams, RegexParserScoringFnParams, BasicScoringFnParams] diff --git a/src/llama_stack_client/types/scoring_score_batch_response.py b/src/llama_stack_client/types/scoring_score_batch_response.py index da124b1e..7f5f7e24 100644 --- a/src/llama_stack_client/types/scoring_score_batch_response.py +++ b/src/llama_stack_client/types/scoring_score_batch_response.py @@ -10,5 +10,7 @@ class ScoringScoreBatchResponse(BaseModel): results: Dict[str, ScoringResult] + """A map of scoring function name to ScoringResult""" dataset_id: Optional[str] = None + """(Optional) The identifier of the dataset that was scored""" diff --git a/src/llama_stack_client/types/shared/batch_completion.py b/src/llama_stack_client/types/shared/batch_completion.py index 547884d1..43a0a735 100644 --- a/src/llama_stack_client/types/shared/batch_completion.py +++ b/src/llama_stack_client/types/shared/batch_completion.py @@ -10,3 +10,4 @@ class BatchCompletion(BaseModel): batch: List[CompletionResponse] + """List of completion responses, one for each input in the batch""" diff --git a/src/llama_stack_client/types/shared/chat_completion_response.py b/src/llama_stack_client/types/shared/chat_completion_response.py index 20dd1ecc..3ff6e0bc 100644 --- a/src/llama_stack_client/types/shared/chat_completion_response.py +++ b/src/llama_stack_client/types/shared/chat_completion_response.py @@ -11,10 +11,13 @@ class Metric(BaseModel): metric: str + """The name of the metric""" value: float + """The numeric value of the metric""" unit: Optional[str] = None + """(Optional) The unit of measurement for the metric value""" class ChatCompletionResponse(BaseModel): @@ -25,3 +28,4 @@ class ChatCompletionResponse(BaseModel): """Optional log probabilities for generated tokens""" metrics: Optional[List[Metric]] = None + """(Optional) List of metrics associated with the API response""" diff --git a/src/llama_stack_client/types/shared/content_delta.py b/src/llama_stack_client/types/shared/content_delta.py index ae036ad8..e5936990 100644 --- a/src/llama_stack_client/types/shared/content_delta.py +++ b/src/llama_stack_client/types/shared/content_delta.py @@ -12,22 +12,29 @@ class TextDelta(BaseModel): text: str + """The incremental text content""" type: Literal["text"] + """Discriminator type of the delta. Always "text" """ class ImageDelta(BaseModel): image: str + """The incremental image data as bytes""" type: Literal["image"] + """Discriminator type of the delta. Always "image" """ class ToolCallDelta(BaseModel): parse_status: Literal["started", "in_progress", "failed", "succeeded"] + """Current parsing status of the tool call""" tool_call: ToolCallOrString + """Either an in-progress tool call string or the final parsed tool call""" type: Literal["tool_call"] + """Discriminator type of the delta. 
Always "tool_call" """ ContentDelta: TypeAlias = Annotated[Union[TextDelta, ImageDelta, ToolCallDelta], PropertyInfo(discriminator="type")] diff --git a/src/llama_stack_client/types/shared/document.py b/src/llama_stack_client/types/shared/document.py index 67704232..492e6abd 100644 --- a/src/llama_stack_client/types/shared/document.py +++ b/src/llama_stack_client/types/shared/document.py @@ -19,6 +19,7 @@ class ContentImageContentItemImageURL(BaseModel): uri: str + """The URL string pointing to the resource""" class ContentImageContentItemImage(BaseModel): @@ -50,6 +51,7 @@ class ContentTextContentItem(BaseModel): class ContentURL(BaseModel): uri: str + """The URL string pointing to the resource""" Content: TypeAlias = Union[ diff --git a/src/llama_stack_client/types/shared/interleaved_content.py b/src/llama_stack_client/types/shared/interleaved_content.py index dc496150..852e487e 100644 --- a/src/llama_stack_client/types/shared/interleaved_content.py +++ b/src/llama_stack_client/types/shared/interleaved_content.py @@ -17,6 +17,7 @@ class ImageContentItemImageURL(BaseModel): uri: str + """The URL string pointing to the resource""" class ImageContentItemImage(BaseModel): diff --git a/src/llama_stack_client/types/shared/interleaved_content_item.py b/src/llama_stack_client/types/shared/interleaved_content_item.py index 8a3238b8..cb034712 100644 --- a/src/llama_stack_client/types/shared/interleaved_content_item.py +++ b/src/llama_stack_client/types/shared/interleaved_content_item.py @@ -17,6 +17,7 @@ class ImageContentItemImageURL(BaseModel): uri: str + """The URL string pointing to the resource""" class ImageContentItemImage(BaseModel): diff --git a/src/llama_stack_client/types/shared/param_type.py b/src/llama_stack_client/types/shared/param_type.py index 2fed6df2..199b0fd7 100644 --- a/src/llama_stack_client/types/shared/param_type.py +++ b/src/llama_stack_client/types/shared/param_type.py @@ -23,42 +23,52 @@ class StringType(BaseModel): type: Literal["string"] + """Discriminator type. Always "string" """ class NumberType(BaseModel): type: Literal["number"] + """Discriminator type. Always "number" """ class BooleanType(BaseModel): type: Literal["boolean"] + """Discriminator type. Always "boolean" """ class ArrayType(BaseModel): type: Literal["array"] + """Discriminator type. Always "array" """ class ObjectType(BaseModel): type: Literal["object"] + """Discriminator type. Always "object" """ class JsonType(BaseModel): type: Literal["json"] + """Discriminator type. Always "json" """ class UnionType(BaseModel): type: Literal["union"] + """Discriminator type. Always "union" """ class ChatCompletionInputType(BaseModel): type: Literal["chat_completion_input"] + """Discriminator type. Always "chat_completion_input" """ class CompletionInputType(BaseModel): type: Literal["completion_input"] + """Discriminator type. Always "completion_input" """ class AgentTurnInputType(BaseModel): type: Literal["agent_turn_input"] + """Discriminator type. Always "agent_turn_input" """ ParamType: TypeAlias = Annotated[ diff --git a/src/llama_stack_client/types/shared/query_config.py b/src/llama_stack_client/types/shared/query_config.py index 3628efbf..389514c7 100644 --- a/src/llama_stack_client/types/shared/query_config.py +++ b/src/llama_stack_client/types/shared/query_config.py @@ -14,8 +14,7 @@ class RankerRrfRanker(BaseModel): impact_factor: float """The impact factor for RRF scoring. - Higher values give more weight to higher-ranked results. Must be greater than 0. 
- Default of 60 is from the original RRF paper (Cormack et al., 2009). + Higher values give more weight to higher-ranked results. Must be greater than 0 """ type: Literal["rrf"] @@ -55,7 +54,7 @@ class QueryConfig(BaseModel): query_generator_config: QueryGeneratorConfig """Configuration for the query generator.""" - mode: Optional[str] = None + mode: Optional[Literal["vector", "keyword", "hybrid"]] = None """Search mode for retrieval—either "vector", "keyword", or "hybrid". Default "vector". diff --git a/src/llama_stack_client/types/shared/query_generator_config.py b/src/llama_stack_client/types/shared/query_generator_config.py index 559fca7d..624fc190 100644 --- a/src/llama_stack_client/types/shared/query_generator_config.py +++ b/src/llama_stack_client/types/shared/query_generator_config.py @@ -11,16 +11,21 @@ class DefaultRagQueryGeneratorConfig(BaseModel): separator: str + """String separator used to join query terms""" type: Literal["default"] + """Type of query generator, always 'default'""" class LlmragQueryGeneratorConfig(BaseModel): model: str + """Name of the language model to use for query generation""" template: str + """Template string for formatting the query generation prompt""" type: Literal["llm"] + """Type of query generator, always 'llm'""" QueryGeneratorConfig: TypeAlias = Annotated[ diff --git a/src/llama_stack_client/types/shared/query_result.py b/src/llama_stack_client/types/shared/query_result.py index c0a1d44c..c623c6d6 100644 --- a/src/llama_stack_client/types/shared/query_result.py +++ b/src/llama_stack_client/types/shared/query_result.py @@ -10,6 +10,7 @@ class QueryResult(BaseModel): metadata: Dict[str, Union[bool, float, str, List[object], object, None]] + """Additional metadata about the query result""" content: Optional[InterleavedContent] = None - """A image content item""" + """(Optional) The retrieved content from the query""" diff --git a/src/llama_stack_client/types/shared/safety_violation.py b/src/llama_stack_client/types/shared/safety_violation.py index e3c94312..bea7ca93 100644 --- a/src/llama_stack_client/types/shared/safety_violation.py +++ b/src/llama_stack_client/types/shared/safety_violation.py @@ -10,7 +10,13 @@ class SafetyViolation(BaseModel): metadata: Dict[str, Union[bool, float, str, List[object], object, None]] + """ + Additional metadata including specific violation codes for debugging and + telemetry + """ violation_level: Literal["info", "warn", "error"] + """Severity level of the violation""" user_message: Optional[str] = None + """(Optional) Message to convey to the user about the violation""" diff --git a/src/llama_stack_client/types/shared/sampling_params.py b/src/llama_stack_client/types/shared/sampling_params.py index 7ce2211e..6823aee7 100644 --- a/src/llama_stack_client/types/shared/sampling_params.py +++ b/src/llama_stack_client/types/shared/sampling_params.py @@ -17,20 +17,26 @@ class StrategyGreedySamplingStrategy(BaseModel): type: Literal["greedy"] + """Must be "greedy" to identify this sampling strategy""" class StrategyTopPSamplingStrategy(BaseModel): type: Literal["top_p"] + """Must be "top_p" to identify this sampling strategy""" temperature: Optional[float] = None + """Controls randomness in sampling. Higher values increase randomness""" top_p: Optional[float] = None + """Cumulative probability threshold for nucleus sampling. Defaults to 0.95""" class StrategyTopKSamplingStrategy(BaseModel): top_k: int + """Number of top tokens to consider for sampling. 
Must be at least 1""" type: Literal["top_k"] + """Must be "top_k" to identify this sampling strategy""" Strategy: TypeAlias = Annotated[ diff --git a/src/llama_stack_client/types/shared_params/document.py b/src/llama_stack_client/types/shared_params/document.py index 78564cfa..db9cd51d 100644 --- a/src/llama_stack_client/types/shared_params/document.py +++ b/src/llama_stack_client/types/shared_params/document.py @@ -20,6 +20,7 @@ class ContentImageContentItemImageURL(TypedDict, total=False): uri: Required[str] + """The URL string pointing to the resource""" class ContentImageContentItemImage(TypedDict, total=False): @@ -51,6 +52,7 @@ class ContentTextContentItem(TypedDict, total=False): class ContentURL(TypedDict, total=False): uri: Required[str] + """The URL string pointing to the resource""" Content: TypeAlias = Union[ diff --git a/src/llama_stack_client/types/shared_params/interleaved_content.py b/src/llama_stack_client/types/shared_params/interleaved_content.py index 5d045a20..5ea3953a 100644 --- a/src/llama_stack_client/types/shared_params/interleaved_content.py +++ b/src/llama_stack_client/types/shared_params/interleaved_content.py @@ -18,6 +18,7 @@ class ImageContentItemImageURL(TypedDict, total=False): uri: Required[str] + """The URL string pointing to the resource""" class ImageContentItemImage(TypedDict, total=False): diff --git a/src/llama_stack_client/types/shared_params/interleaved_content_item.py b/src/llama_stack_client/types/shared_params/interleaved_content_item.py index b5c0bcc1..ed3daa32 100644 --- a/src/llama_stack_client/types/shared_params/interleaved_content_item.py +++ b/src/llama_stack_client/types/shared_params/interleaved_content_item.py @@ -16,6 +16,7 @@ class ImageContentItemImageURL(TypedDict, total=False): uri: Required[str] + """The URL string pointing to the resource""" class ImageContentItemImage(TypedDict, total=False): diff --git a/src/llama_stack_client/types/shared_params/query_config.py b/src/llama_stack_client/types/shared_params/query_config.py index dd482ecf..d008c48c 100644 --- a/src/llama_stack_client/types/shared_params/query_config.py +++ b/src/llama_stack_client/types/shared_params/query_config.py @@ -14,8 +14,7 @@ class RankerRrfRanker(TypedDict, total=False): impact_factor: Required[float] """The impact factor for RRF scoring. - Higher values give more weight to higher-ranked results. Must be greater than 0. - Default of 60 is from the original RRF paper (Cormack et al., 2009). + Higher values give more weight to higher-ranked results. Must be greater than 0 """ type: Required[Literal["rrf"]] @@ -55,7 +54,7 @@ class QueryConfig(TypedDict, total=False): query_generator_config: Required[QueryGeneratorConfig] """Configuration for the query generator.""" - mode: str + mode: Literal["vector", "keyword", "hybrid"] """Search mode for retrieval—either "vector", "keyword", or "hybrid". Default "vector". 
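
The query_config hunks above narrow `mode` from a free-form string to the literal values "vector", "keyword", and "hybrid". A minimal usage sketch follows (illustrative only, not part of the patch); it assumes the `tool_runtime.rag_tool.query` resource method that these params modules back, and the base URL and vector DB ID are placeholders.

    from llama_stack_client import LlamaStackClient

    # Illustrative only: base URL assumed for a locally running Llama Stack server.
    client = LlamaStackClient(base_url="http://localhost:8321")

    # `mode` is now restricted to "vector", "keyword", or "hybrid" by the typed params.
    result = client.tool_runtime.rag_tool.query(
        content="What is retrieval-augmented generation?",
        vector_db_ids=["my-docs"],
        query_config={
            "query_generator_config": {"type": "default", "separator": " "},
            "mode": "hybrid",
        },
    )
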
diff --git a/src/llama_stack_client/types/shared_params/query_generator_config.py b/src/llama_stack_client/types/shared_params/query_generator_config.py index db135e80..8c589bf9 100644 --- a/src/llama_stack_client/types/shared_params/query_generator_config.py +++ b/src/llama_stack_client/types/shared_params/query_generator_config.py @@ -10,16 +10,21 @@ class DefaultRagQueryGeneratorConfig(TypedDict, total=False): separator: Required[str] + """String separator used to join query terms""" type: Required[Literal["default"]] + """Type of query generator, always 'default'""" class LlmragQueryGeneratorConfig(TypedDict, total=False): model: Required[str] + """Name of the language model to use for query generation""" template: Required[str] + """Template string for formatting the query generation prompt""" type: Required[Literal["llm"]] + """Type of query generator, always 'llm'""" QueryGeneratorConfig: TypeAlias = Union[DefaultRagQueryGeneratorConfig, LlmragQueryGeneratorConfig] diff --git a/src/llama_stack_client/types/shared_params/sampling_params.py b/src/llama_stack_client/types/shared_params/sampling_params.py index 158db1c5..55f05e8b 100644 --- a/src/llama_stack_client/types/shared_params/sampling_params.py +++ b/src/llama_stack_client/types/shared_params/sampling_params.py @@ -16,20 +16,26 @@ class StrategyGreedySamplingStrategy(TypedDict, total=False): type: Required[Literal["greedy"]] + """Must be "greedy" to identify this sampling strategy""" class StrategyTopPSamplingStrategy(TypedDict, total=False): type: Required[Literal["top_p"]] + """Must be "top_p" to identify this sampling strategy""" temperature: float + """Controls randomness in sampling. Higher values increase randomness""" top_p: float + """Cumulative probability threshold for nucleus sampling. Defaults to 0.95""" class StrategyTopKSamplingStrategy(TypedDict, total=False): top_k: Required[int] + """Number of top tokens to consider for sampling. 
Must be at least 1""" type: Required[Literal["top_k"]] + """Must be "top_k" to identify this sampling strategy""" Strategy: TypeAlias = Union[StrategyGreedySamplingStrategy, StrategyTopPSamplingStrategy, StrategyTopKSamplingStrategy] diff --git a/src/llama_stack_client/types/shield.py b/src/llama_stack_client/types/shield.py index ff5f01f1..dd48dfae 100644 --- a/src/llama_stack_client/types/shield.py +++ b/src/llama_stack_client/types/shield.py @@ -14,7 +14,9 @@ class Shield(BaseModel): provider_id: str type: Literal["shield"] + """The resource type, always shield""" params: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Configuration parameters for the shield""" provider_resource_id: Optional[str] = None diff --git a/src/llama_stack_client/types/span_with_status.py b/src/llama_stack_client/types/span_with_status.py index f93f4ff5..04d124bd 100644 --- a/src/llama_stack_client/types/span_with_status.py +++ b/src/llama_stack_client/types/span_with_status.py @@ -11,17 +11,25 @@ class SpanWithStatus(BaseModel): name: str + """Human-readable name describing the operation this span represents""" span_id: str + """Unique identifier for the span""" start_time: datetime + """Timestamp when the operation began""" trace_id: str + """Unique identifier for the trace this span belongs to""" attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Key-value pairs containing additional metadata about the span""" end_time: Optional[datetime] = None + """(Optional) Timestamp when the operation finished, if completed""" parent_span_id: Optional[str] = None + """(Optional) Unique identifier for the parent span, if this is a child span""" status: Optional[Literal["ok", "error"]] = None + """(Optional) The current status of the span""" diff --git a/src/llama_stack_client/types/synthetic_data_generation_generate_params.py b/src/llama_stack_client/types/synthetic_data_generation_generate_params.py index abf51059..5f55a97c 100644 --- a/src/llama_stack_client/types/synthetic_data_generation_generate_params.py +++ b/src/llama_stack_client/types/synthetic_data_generation_generate_params.py @@ -12,8 +12,14 @@ class SyntheticDataGenerationGenerateParams(TypedDict, total=False): dialogs: Required[Iterable[Message]] + """List of conversation messages to use as input for synthetic data generation""" filtering_function: Required[Literal["none", "random", "top_k", "top_p", "top_k_top_p", "sigmoid"]] - """The type of filtering function.""" + """Type of filtering to apply to generated synthetic data samples""" model: str + """(Optional) The identifier of the model to use. 
+ + The model must be registered with Llama Stack and available via the /models + endpoint + """ diff --git a/src/llama_stack_client/types/synthetic_data_generation_response.py b/src/llama_stack_client/types/synthetic_data_generation_response.py index a2ee11e6..cfb20f08 100644 --- a/src/llama_stack_client/types/synthetic_data_generation_response.py +++ b/src/llama_stack_client/types/synthetic_data_generation_response.py @@ -9,5 +9,10 @@ class SyntheticDataGenerationResponse(BaseModel): synthetic_data: List[Dict[str, Union[bool, float, str, List[object], object, None]]] + """List of generated synthetic data samples that passed the filtering criteria""" statistics: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """ + (Optional) Statistical information about the generation process and filtering + results + """ diff --git a/src/llama_stack_client/types/telemetry_get_span_response.py b/src/llama_stack_client/types/telemetry_get_span_response.py index 9e50ed0d..6826d4d0 100644 --- a/src/llama_stack_client/types/telemetry_get_span_response.py +++ b/src/llama_stack_client/types/telemetry_get_span_response.py @@ -10,15 +10,22 @@ class TelemetryGetSpanResponse(BaseModel): name: str + """Human-readable name describing the operation this span represents""" span_id: str + """Unique identifier for the span""" start_time: datetime + """Timestamp when the operation began""" trace_id: str + """Unique identifier for the trace this span belongs to""" attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Key-value pairs containing additional metadata about the span""" end_time: Optional[datetime] = None + """(Optional) Timestamp when the operation finished, if completed""" parent_span_id: Optional[str] = None + """(Optional) Unique identifier for the parent span, if this is a child span""" diff --git a/src/llama_stack_client/types/telemetry_query_spans_response.py b/src/llama_stack_client/types/telemetry_query_spans_response.py index c630efeb..49eaeb38 100644 --- a/src/llama_stack_client/types/telemetry_query_spans_response.py +++ b/src/llama_stack_client/types/telemetry_query_spans_response.py @@ -11,18 +11,25 @@ class TelemetryQuerySpansResponseItem(BaseModel): name: str + """Human-readable name describing the operation this span represents""" span_id: str + """Unique identifier for the span""" start_time: datetime + """Timestamp when the operation began""" trace_id: str + """Unique identifier for the trace this span belongs to""" attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Key-value pairs containing additional metadata about the span""" end_time: Optional[datetime] = None + """(Optional) Timestamp when the operation finished, if completed""" parent_span_id: Optional[str] = None + """(Optional) Unique identifier for the parent span, if this is a child span""" TelemetryQuerySpansResponse: TypeAlias = List[TelemetryQuerySpansResponseItem] diff --git a/src/llama_stack_client/types/tool.py b/src/llama_stack_client/types/tool.py index 6beb8764..c6994268 100644 --- a/src/llama_stack_client/types/tool.py +++ b/src/llama_stack_client/types/tool.py @@ -10,29 +10,39 @@ class Parameter(BaseModel): description: str + """Human-readable description of what the parameter does""" name: str + """Name of the parameter""" parameter_type: str + """Type of the parameter (e.g., string, integer)""" required: bool + """Whether this parameter is required for tool invocation""" 
default: Union[bool, float, str, List[object], object, None] = None + """(Optional) Default value for the parameter if not provided""" class Tool(BaseModel): description: str + """Human-readable description of what the tool does""" identifier: str parameters: List[Parameter] + """List of parameters this tool accepts""" provider_id: str toolgroup_id: str + """ID of the tool group this tool belongs to""" type: Literal["tool"] + """Type of resource, always 'tool'""" metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Additional metadata about the tool""" provider_resource_id: Optional[str] = None diff --git a/src/llama_stack_client/types/tool_def.py b/src/llama_stack_client/types/tool_def.py index d96c5c5d..c82a9b8a 100644 --- a/src/llama_stack_client/types/tool_def.py +++ b/src/llama_stack_client/types/tool_def.py @@ -9,21 +9,30 @@ class Parameter(BaseModel): description: str + """Human-readable description of what the parameter does""" name: str + """Name of the parameter""" parameter_type: str + """Type of the parameter (e.g., string, integer)""" required: bool + """Whether this parameter is required for tool invocation""" default: Union[bool, float, str, List[object], object, None] = None + """(Optional) Default value for the parameter if not provided""" class ToolDef(BaseModel): name: str + """Name of the tool""" description: Optional[str] = None + """(Optional) Human-readable description of what the tool does""" metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Additional metadata about the tool""" parameters: Optional[List[Parameter]] = None + """(Optional) List of parameters this tool accepts""" diff --git a/src/llama_stack_client/types/tool_def_param.py b/src/llama_stack_client/types/tool_def_param.py index 42d27fbd..93ad8285 100644 --- a/src/llama_stack_client/types/tool_def_param.py +++ b/src/llama_stack_client/types/tool_def_param.py @@ -10,21 +10,30 @@ class Parameter(TypedDict, total=False): description: Required[str] + """Human-readable description of what the parameter does""" name: Required[str] + """Name of the parameter""" parameter_type: Required[str] + """Type of the parameter (e.g., string, integer)""" required: Required[bool] + """Whether this parameter is required for tool invocation""" default: Union[bool, float, str, Iterable[object], object, None] + """(Optional) Default value for the parameter if not provided""" class ToolDefParam(TypedDict, total=False): name: Required[str] + """Name of the tool""" description: str + """(Optional) Human-readable description of what the tool does""" metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] + """(Optional) Additional metadata about the tool""" parameters: Iterable[Parameter] + """(Optional) List of parameters this tool accepts""" diff --git a/src/llama_stack_client/types/tool_group.py b/src/llama_stack_client/types/tool_group.py index 3389395a..52fca005 100644 --- a/src/llama_stack_client/types/tool_group.py +++ b/src/llama_stack_client/types/tool_group.py @@ -10,6 +10,7 @@ class McpEndpoint(BaseModel): uri: str + """The URL string pointing to the resource""" class ToolGroup(BaseModel): @@ -18,9 +19,12 @@ class ToolGroup(BaseModel): provider_id: str type: Literal["tool_group"] + """Type of resource, always 'tool_group'""" args: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Additional arguments for the tool group""" mcp_endpoint: 
Optional[McpEndpoint] = None + """(Optional) Model Context Protocol endpoint for remote tools""" provider_resource_id: Optional[str] = None diff --git a/src/llama_stack_client/types/tool_invocation_result.py b/src/llama_stack_client/types/tool_invocation_result.py index 01f7db28..4262a85b 100644 --- a/src/llama_stack_client/types/tool_invocation_result.py +++ b/src/llama_stack_client/types/tool_invocation_result.py @@ -10,10 +10,13 @@ class ToolInvocationResult(BaseModel): content: Optional[InterleavedContent] = None - """A image content item""" + """(Optional) The output content from the tool execution""" error_code: Optional[int] = None + """(Optional) Numeric error code if the tool execution failed""" error_message: Optional[str] = None + """(Optional) Error message if the tool execution failed""" metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Additional metadata about the tool execution""" diff --git a/src/llama_stack_client/types/tool_response.py b/src/llama_stack_client/types/tool_response.py index f984f30a..7750494e 100644 --- a/src/llama_stack_client/types/tool_response.py +++ b/src/llama_stack_client/types/tool_response.py @@ -11,10 +11,13 @@ class ToolResponse(BaseModel): call_id: str + """Unique identifier for the tool call this response is for""" content: InterleavedContent - """A image content item""" + """The response content from the tool""" tool_name: Union[Literal["brave_search", "wolfram_alpha", "photogen", "code_interpreter"], str] + """Name of the tool that was invoked""" metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + """(Optional) Additional metadata about the tool response""" diff --git a/src/llama_stack_client/types/tool_response_param.py b/src/llama_stack_client/types/tool_response_param.py index 8ac14862..386658f9 100644 --- a/src/llama_stack_client/types/tool_response_param.py +++ b/src/llama_stack_client/types/tool_response_param.py @@ -12,10 +12,13 @@ class ToolResponseParam(TypedDict, total=False): call_id: Required[str] + """Unique identifier for the tool call this response is for""" content: Required[InterleavedContent] - """A image content item""" + """The response content from the tool""" tool_name: Required[Union[Literal["brave_search", "wolfram_alpha", "photogen", "code_interpreter"], str]] + """Name of the tool that was invoked""" metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] + """(Optional) Additional metadata about the tool response""" diff --git a/src/llama_stack_client/types/tool_runtime/rag_tool_insert_params.py b/src/llama_stack_client/types/tool_runtime/rag_tool_insert_params.py index bc52c481..614a969c 100644 --- a/src/llama_stack_client/types/tool_runtime/rag_tool_insert_params.py +++ b/src/llama_stack_client/types/tool_runtime/rag_tool_insert_params.py @@ -12,7 +12,10 @@ class RagToolInsertParams(TypedDict, total=False): chunk_size_in_tokens: Required[int] + """(Optional) Size in tokens for document chunking during indexing""" documents: Required[Iterable[Document]] + """List of documents to index in the RAG system""" vector_db_id: Required[str] + """ID of the vector database to store the document embeddings""" diff --git a/src/llama_stack_client/types/tool_runtime/rag_tool_query_params.py b/src/llama_stack_client/types/tool_runtime/rag_tool_query_params.py index 4599c693..a28faf2b 100644 --- a/src/llama_stack_client/types/tool_runtime/rag_tool_query_params.py +++ 
b/src/llama_stack_client/types/tool_runtime/rag_tool_query_params.py @@ -13,9 +13,10 @@ class RagToolQueryParams(TypedDict, total=False): content: Required[InterleavedContent] - """A image content item""" + """The query content to search for in the indexed documents""" vector_db_ids: Required[List[str]] + """List of vector database IDs to search within""" query_config: QueryConfig - """Configuration for the RAG query generation.""" + """(Optional) Configuration parameters for the query operation""" diff --git a/src/llama_stack_client/types/tool_runtime_list_tools_params.py b/src/llama_stack_client/types/tool_runtime_list_tools_params.py index 539e176d..d4933940 100644 --- a/src/llama_stack_client/types/tool_runtime_list_tools_params.py +++ b/src/llama_stack_client/types/tool_runtime_list_tools_params.py @@ -17,3 +17,4 @@ class ToolRuntimeListToolsParams(TypedDict, total=False): class McpEndpoint(TypedDict, total=False): uri: Required[str] + """The URL string pointing to the resource""" diff --git a/src/llama_stack_client/types/toolgroup_register_params.py b/src/llama_stack_client/types/toolgroup_register_params.py index a50c14c4..2aa79960 100644 --- a/src/llama_stack_client/types/toolgroup_register_params.py +++ b/src/llama_stack_client/types/toolgroup_register_params.py @@ -24,3 +24,4 @@ class ToolgroupRegisterParams(TypedDict, total=False): class McpEndpoint(TypedDict, total=False): uri: Required[str] + """The URL string pointing to the resource""" diff --git a/src/llama_stack_client/types/trace.py b/src/llama_stack_client/types/trace.py index 3683551c..0657d616 100644 --- a/src/llama_stack_client/types/trace.py +++ b/src/llama_stack_client/types/trace.py @@ -10,9 +10,13 @@ class Trace(BaseModel): root_span_id: str + """Unique identifier for the root span that started this trace""" start_time: datetime + """Timestamp when the trace began""" trace_id: str + """Unique identifier for the trace""" end_time: Optional[datetime] = None + """(Optional) Timestamp when the trace finished, if completed""" diff --git a/src/llama_stack_client/types/vector_db_list_response.py b/src/llama_stack_client/types/vector_db_list_response.py index 39161431..18034f52 100644 --- a/src/llama_stack_client/types/vector_db_list_response.py +++ b/src/llama_stack_client/types/vector_db_list_response.py @@ -10,16 +10,21 @@ class VectorDBListResponseItem(BaseModel): embedding_dimension: int + """Dimension of the embedding vectors""" embedding_model: str + """Name of the embedding model to use for vector generation""" identifier: str provider_id: str type: Literal["vector_db"] + """Type of resource, always 'vector_db' for vector databases""" provider_resource_id: Optional[str] = None + vector_db_name: Optional[str] = None + VectorDBListResponse: TypeAlias = List[VectorDBListResponseItem] diff --git a/src/llama_stack_client/types/vector_db_register_params.py b/src/llama_stack_client/types/vector_db_register_params.py index 734659a6..f7e3e7c7 100644 --- a/src/llama_stack_client/types/vector_db_register_params.py +++ b/src/llama_stack_client/types/vector_db_register_params.py @@ -22,3 +22,6 @@ class VectorDBRegisterParams(TypedDict, total=False): provider_vector_db_id: str """The identifier of the vector database in the provider.""" + + vector_db_name: str + """The name of the vector database.""" diff --git a/src/llama_stack_client/types/vector_db_register_response.py b/src/llama_stack_client/types/vector_db_register_response.py index 9c7a3166..cf48dd5a 100644 --- 
a/src/llama_stack_client/types/vector_db_register_response.py +++ b/src/llama_stack_client/types/vector_db_register_response.py @@ -10,13 +10,18 @@ class VectorDBRegisterResponse(BaseModel): embedding_dimension: int + """Dimension of the embedding vectors""" embedding_model: str + """Name of the embedding model to use for vector generation""" identifier: str provider_id: str type: Literal["vector_db"] + """Type of resource, always 'vector_db' for vector databases""" provider_resource_id: Optional[str] = None + + vector_db_name: Optional[str] = None diff --git a/src/llama_stack_client/types/vector_db_retrieve_response.py b/src/llama_stack_client/types/vector_db_retrieve_response.py index fb3597a5..aa349d1c 100644 --- a/src/llama_stack_client/types/vector_db_retrieve_response.py +++ b/src/llama_stack_client/types/vector_db_retrieve_response.py @@ -10,13 +10,18 @@ class VectorDBRetrieveResponse(BaseModel): embedding_dimension: int + """Dimension of the embedding vectors""" embedding_model: str + """Name of the embedding model to use for vector generation""" identifier: str provider_id: str type: Literal["vector_db"] + """Type of resource, always 'vector_db' for vector databases""" provider_resource_id: Optional[str] = None + + vector_db_name: Optional[str] = None diff --git a/src/llama_stack_client/types/vector_store.py b/src/llama_stack_client/types/vector_store.py index 5dc4ad3a..cfcebd81 100644 --- a/src/llama_stack_client/types/vector_store.py +++ b/src/llama_stack_client/types/vector_store.py @@ -10,35 +10,51 @@ class FileCounts(BaseModel): cancelled: int + """Number of files that had their processing cancelled""" completed: int + """Number of files that have been successfully processed""" failed: int + """Number of files that failed to process""" in_progress: int + """Number of files currently being processed""" total: int + """Total number of files in the vector store""" class VectorStore(BaseModel): id: str + """Unique identifier for the vector store""" created_at: int + """Timestamp when the vector store was created""" file_counts: FileCounts + """File processing status counts for the vector store""" metadata: Dict[str, Union[bool, float, str, List[object], object, None]] + """Set of key-value pairs that can be attached to the vector store""" object: str + """Object type identifier, always "vector_store" """ status: str + """Current status of the vector store""" usage_bytes: int + """Storage space used by the vector store in bytes""" expires_after: Optional[Dict[str, Union[bool, float, str, List[builtins.object], builtins.object, None]]] = None + """(Optional) Expiration policy for the vector store""" expires_at: Optional[int] = None + """(Optional) Timestamp when the vector store will expire""" last_active_at: Optional[int] = None + """(Optional) Timestamp of last activity on the vector store""" name: Optional[str] = None + """(Optional) Name of the vector store""" diff --git a/src/llama_stack_client/types/vector_store_create_params.py b/src/llama_stack_client/types/vector_store_create_params.py index 18748f48..dc6ea47e 100644 --- a/src/llama_stack_client/types/vector_store_create_params.py +++ b/src/llama_stack_client/types/vector_store_create_params.py @@ -3,15 +3,12 @@ from __future__ import annotations from typing import Dict, List, Union, Iterable -from typing_extensions import Required, TypedDict +from typing_extensions import TypedDict __all__ = ["VectorStoreCreateParams"] class VectorStoreCreateParams(TypedDict, total=False): - name: Required[str] - """A name for the vector 
store.""" - chunking_strategy: Dict[str, Union[bool, float, str, Iterable[object], object, None]] """The chunking strategy used to chunk the file(s). @@ -36,8 +33,8 @@ class VectorStoreCreateParams(TypedDict, total=False): metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] """Set of 16 key-value pairs that can be attached to an object.""" + name: str + """A name for the vector store.""" + provider_id: str """The ID of the provider to use for this vector store.""" - - provider_vector_db_id: str - """The provider-specific vector database ID.""" diff --git a/src/llama_stack_client/types/vector_store_delete_response.py b/src/llama_stack_client/types/vector_store_delete_response.py index 945ada10..29637547 100644 --- a/src/llama_stack_client/types/vector_store_delete_response.py +++ b/src/llama_stack_client/types/vector_store_delete_response.py @@ -7,7 +7,10 @@ class VectorStoreDeleteResponse(BaseModel): id: str + """Unique identifier of the deleted vector store""" deleted: bool + """Whether the deletion operation was successful""" object: str + """Object type identifier for the deletion response""" diff --git a/src/llama_stack_client/types/vector_store_search_params.py b/src/llama_stack_client/types/vector_store_search_params.py index fdb02ff7..5a429b79 100644 --- a/src/llama_stack_client/types/vector_store_search_params.py +++ b/src/llama_stack_client/types/vector_store_search_params.py @@ -30,5 +30,7 @@ class VectorStoreSearchParams(TypedDict, total=False): class RankingOptions(TypedDict, total=False): ranker: str + """(Optional) Name of the ranking algorithm to use""" score_threshold: float + """(Optional) Minimum relevance score threshold for results""" diff --git a/src/llama_stack_client/types/vector_store_search_response.py b/src/llama_stack_client/types/vector_store_search_response.py index 7b596e03..2f9a1076 100644 --- a/src/llama_stack_client/types/vector_store_search_response.py +++ b/src/llama_stack_client/types/vector_store_search_response.py @@ -10,29 +10,41 @@ class DataContent(BaseModel): text: str + """The actual text content""" type: Literal["text"] + """Content type, currently only "text" is supported""" class Data(BaseModel): content: List[DataContent] + """List of content items matching the search query""" file_id: str + """Unique identifier of the file containing the result""" filename: str + """Name of the file containing the result""" score: float + """Relevance score for this search result""" attributes: Optional[Dict[str, Union[str, float, bool]]] = None + """(Optional) Key-value attributes associated with the file""" class VectorStoreSearchResponse(BaseModel): data: List[Data] + """List of search result objects""" has_more: bool + """Whether there are more results available beyond this page""" object: str + """Object type identifier for the search results page""" search_query: str + """The original search query that was executed""" next_page: Optional[str] = None + """(Optional) Token for retrieving the next page of results""" diff --git a/src/llama_stack_client/types/vector_stores/__init__.py b/src/llama_stack_client/types/vector_stores/__init__.py index 550270e2..68bcf684 100644 --- a/src/llama_stack_client/types/vector_stores/__init__.py +++ b/src/llama_stack_client/types/vector_stores/__init__.py @@ -2,5 +2,9 @@ from __future__ import annotations +from .file_list_params import FileListParams as FileListParams from .vector_store_file import VectorStoreFile as VectorStoreFile from .file_create_params import FileCreateParams as 
FileCreateParams +from .file_update_params import FileUpdateParams as FileUpdateParams +from .file_delete_response import FileDeleteResponse as FileDeleteResponse +from .file_content_response import FileContentResponse as FileContentResponse diff --git a/src/llama_stack_client/types/vector_stores/file_content_response.py b/src/llama_stack_client/types/vector_stores/file_content_response.py new file mode 100644 index 00000000..035a34a8 --- /dev/null +++ b/src/llama_stack_client/types/vector_stores/file_content_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FileContentResponse", "Content"] + + +class Content(BaseModel): + text: str + """The actual text content""" + + type: Literal["text"] + """Content type, currently only "text" is supported""" + + +class FileContentResponse(BaseModel): + attributes: Dict[str, Union[bool, float, str, List[object], object, None]] + """Key-value attributes associated with the file""" + + content: List[Content] + """List of content items from the file""" + + file_id: str + """Unique identifier for the file""" + + filename: str + """Name of the file""" diff --git a/src/llama_stack_client/types/vector_stores/file_create_params.py b/src/llama_stack_client/types/vector_stores/file_create_params.py index 66fbf624..a75716b3 100644 --- a/src/llama_stack_client/types/vector_stores/file_create_params.py +++ b/src/llama_stack_client/types/vector_stores/file_create_params.py @@ -27,18 +27,23 @@ class FileCreateParams(TypedDict, total=False): class ChunkingStrategyVectorStoreChunkingStrategyAuto(TypedDict, total=False): type: Required[Literal["auto"]] + """Strategy type, always "auto" for automatic chunking""" class ChunkingStrategyVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): chunk_overlap_tokens: Required[int] + """Number of tokens to overlap between adjacent chunks""" max_chunk_size_tokens: Required[int] + """Maximum number of tokens per chunk, must be between 100 and 4096""" class ChunkingStrategyVectorStoreChunkingStrategyStatic(TypedDict, total=False): static: Required[ChunkingStrategyVectorStoreChunkingStrategyStaticStatic] + """Configuration parameters for the static chunking strategy""" type: Required[Literal["static"]] + """Strategy type, always "static" for static chunking""" ChunkingStrategy: TypeAlias = Union[ diff --git a/src/llama_stack_client/types/vector_stores/file_delete_response.py b/src/llama_stack_client/types/vector_stores/file_delete_response.py new file mode 100644 index 00000000..f24e1910 --- /dev/null +++ b/src/llama_stack_client/types/vector_stores/file_delete_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ..._models import BaseModel + +__all__ = ["FileDeleteResponse"] + + +class FileDeleteResponse(BaseModel): + id: str + """Unique identifier of the deleted file""" + + deleted: bool + """Whether the deletion operation was successful""" + + object: str + """Object type identifier for the deletion response""" diff --git a/src/llama_stack_client/types/vector_stores/file_list_params.py b/src/llama_stack_client/types/vector_stores/file_list_params.py new file mode 100644 index 00000000..7174242d --- /dev/null +++ b/src/llama_stack_client/types/vector_stores/file_list_params.py @@ -0,0 +1,38 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["FileListParams"] + + +class FileListParams(TypedDict, total=False): + after: str + """(Optional) A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. + """ + + before: str + """(Optional) A cursor for use in pagination. + + `before` is an object ID that defines your place in the list. + """ + + filter: Literal["completed", "in_progress", "cancelled", "failed"] + """ + (Optional) Filter by file status to only return files with the specified status. + """ + + limit: int + """(Optional) A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: str + """(Optional) Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. + """ diff --git a/src/llama_stack_client/types/vector_stores/file_update_params.py b/src/llama_stack_client/types/vector_stores/file_update_params.py new file mode 100644 index 00000000..fddfc8c6 --- /dev/null +++ b/src/llama_stack_client/types/vector_stores/file_update_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Iterable +from typing_extensions import Required, TypedDict + +__all__ = ["FileUpdateParams"] + + +class FileUpdateParams(TypedDict, total=False): + vector_store_id: Required[str] + + attributes: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] + """The updated key-value attributes to store with the file.""" diff --git a/src/llama_stack_client/types/vector_stores/vector_store_file.py b/src/llama_stack_client/types/vector_stores/vector_store_file.py index 45ce03f8..243a00df 100644 --- a/src/llama_stack_client/types/vector_stores/vector_store_file.py +++ b/src/llama_stack_client/types/vector_stores/vector_store_file.py @@ -18,18 +18,23 @@ class ChunkingStrategyVectorStoreChunkingStrategyAuto(BaseModel): type: Literal["auto"] + """Strategy type, always "auto" for automatic chunking""" class ChunkingStrategyVectorStoreChunkingStrategyStaticStatic(BaseModel): chunk_overlap_tokens: int + """Number of tokens to overlap between adjacent chunks""" max_chunk_size_tokens: int + """Maximum number of tokens per chunk, must be between 100 and 4096""" class ChunkingStrategyVectorStoreChunkingStrategyStatic(BaseModel): static: ChunkingStrategyVectorStoreChunkingStrategyStaticStatic + """Configuration parameters for the static chunking strategy""" type: Literal["static"] + """Strategy type, always "static" for static chunking""" ChunkingStrategy: TypeAlias = Annotated[ @@ -40,25 +45,36 @@ class ChunkingStrategyVectorStoreChunkingStrategyStatic(BaseModel): class LastError(BaseModel): code: Literal["server_error", "rate_limit_exceeded"] + """Error code indicating the type of failure""" message: str + """Human-readable error message describing the failure""" class VectorStoreFile(BaseModel): id: str + """Unique identifier for the file""" attributes: Dict[str, Union[bool, float, str, List[object], object, None]] + """Key-value attributes associated with the file""" chunking_strategy: ChunkingStrategy + """Strategy used for splitting the file into chunks""" created_at: int + """Timestamp when the file was added to the vector store""" object: str + """Object type identifier, always "vector_store.file" """ status: 
Literal["completed", "in_progress", "cancelled", "failed"] + """Current processing status of the file""" usage_bytes: int + """Storage space used by this file in bytes""" vector_store_id: str + """ID of the vector store containing this file""" last_error: Optional[LastError] = None + """(Optional) Error information if file processing failed""" diff --git a/src/llama_stack_client/types/version_info.py b/src/llama_stack_client/types/version_info.py index 5fc5bbb4..001d05cb 100644 --- a/src/llama_stack_client/types/version_info.py +++ b/src/llama_stack_client/types/version_info.py @@ -7,3 +7,4 @@ class VersionInfo(BaseModel): version: str + """Version number of the service""" diff --git a/tests/api_resources/agents/test_session.py b/tests/api_resources/agents/test_session.py index 2c80df58..b49ab492 100644 --- a/tests/api_resources/agents/test_session.py +++ b/tests/api_resources/agents/test_session.py @@ -11,6 +11,7 @@ from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient from llama_stack_client.types.agents import ( Session, + SessionListResponse, SessionCreateResponse, ) @@ -119,6 +120,53 @@ def test_path_params_retrieve(self, client: LlamaStackClient) -> None: agent_id="agent_id", ) + @parametrize + def test_method_list(self, client: LlamaStackClient) -> None: + session = client.agents.session.list( + agent_id="agent_id", + ) + assert_matches_type(SessionListResponse, session, path=["response"]) + + @parametrize + def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: + session = client.agents.session.list( + agent_id="agent_id", + limit=0, + start_index=0, + ) + assert_matches_type(SessionListResponse, session, path=["response"]) + + @parametrize + def test_raw_response_list(self, client: LlamaStackClient) -> None: + response = client.agents.session.with_raw_response.list( + agent_id="agent_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + session = response.parse() + assert_matches_type(SessionListResponse, session, path=["response"]) + + @parametrize + def test_streaming_response_list(self, client: LlamaStackClient) -> None: + with client.agents.session.with_streaming_response.list( + agent_id="agent_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + session = response.parse() + assert_matches_type(SessionListResponse, session, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_list(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.session.with_raw_response.list( + agent_id="", + ) + @parametrize def test_method_delete(self, client: LlamaStackClient) -> None: session = client.agents.session.delete( @@ -272,6 +320,53 @@ async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) - agent_id="agent_id", ) + @parametrize + async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: + session = await async_client.agents.session.list( + agent_id="agent_id", + ) + assert_matches_type(SessionListResponse, session, path=["response"]) + + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: + session = await async_client.agents.session.list( + agent_id="agent_id", + limit=0, + start_index=0, + ) + assert_matches_type(SessionListResponse, session, 
path=["response"]) + + @parametrize + async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.agents.session.with_raw_response.list( + agent_id="agent_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + session = await response.parse() + assert_matches_type(SessionListResponse, session, path=["response"]) + + @parametrize + async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.agents.session.with_streaming_response.list( + agent_id="agent_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + session = await response.parse() + assert_matches_type(SessionListResponse, session, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_list(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.session.with_raw_response.list( + agent_id="", + ) + @parametrize async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None: session = await async_client.agents.session.delete( diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 496ea061..7a1e9d41 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -9,6 +9,7 @@ from tests.utils import assert_matches_type from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient +from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage from llama_stack_client.types.chat import ( CompletionListResponse, CompletionCreateResponse, @@ -232,7 +233,7 @@ def test_path_params_retrieve(self, client: LlamaStackClient) -> None: @parametrize def test_method_list(self, client: LlamaStackClient) -> None: completion = client.chat.completions.list() - assert_matches_type(CompletionListResponse, completion, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[CompletionListResponse], completion, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: @@ -242,7 +243,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: model="model", order="asc", ) - assert_matches_type(CompletionListResponse, completion, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[CompletionListResponse], completion, path=["response"]) @parametrize def test_raw_response_list(self, client: LlamaStackClient) -> None: @@ -251,7 +252,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = response.parse() - assert_matches_type(CompletionListResponse, completion, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[CompletionListResponse], completion, path=["response"]) @parametrize def test_streaming_response_list(self, client: LlamaStackClient) -> None: @@ -260,7 +261,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = response.parse() - assert_matches_type(CompletionListResponse, completion, path=["response"]) + 
assert_matches_type(SyncOpenAICursorPage[CompletionListResponse], completion, path=["response"]) assert cast(Any, response.is_closed) is True @@ -481,7 +482,7 @@ async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) - @parametrize async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: completion = await async_client.chat.completions.list() - assert_matches_type(CompletionListResponse, completion, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[CompletionListResponse], completion, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: @@ -491,7 +492,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl model="model", order="asc", ) - assert_matches_type(CompletionListResponse, completion, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[CompletionListResponse], completion, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: @@ -500,7 +501,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = await response.parse() - assert_matches_type(CompletionListResponse, completion, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[CompletionListResponse], completion, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: @@ -509,6 +510,6 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = await response.parse() - assert_matches_type(CompletionListResponse, completion, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[CompletionListResponse], completion, path=["response"]) assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index c4aa5349..18b34012 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -9,7 +9,11 @@ from tests.utils import assert_matches_type from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import AgentCreateResponse +from llama_stack_client.types import ( + AgentListResponse, + AgentCreateResponse, + AgentRetrieveResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -106,6 +110,77 @@ def test_streaming_response_create(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_method_retrieve(self, client: LlamaStackClient) -> None: + agent = client.agents.retrieve( + "agent_id", + ) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @parametrize + def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: + response = client.agents.with_raw_response.retrieve( + "agent_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @parametrize + def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: + with client.agents.with_streaming_response.retrieve( + "agent_id", + ) as response: + assert not response.is_closed + 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_retrieve(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.with_raw_response.retrieve( + "", + ) + + @parametrize + def test_method_list(self, client: LlamaStackClient) -> None: + agent = client.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @parametrize + def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: + agent = client.agents.list( + limit=0, + start_index=0, + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @parametrize + def test_raw_response_list(self, client: LlamaStackClient) -> None: + response = client.agents.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @parametrize + def test_streaming_response_list(self, client: LlamaStackClient) -> None: + with client.agents.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + @parametrize def test_method_delete(self, client: LlamaStackClient) -> None: agent = client.agents.delete( @@ -239,6 +314,77 @@ async def test_streaming_response_create(self, async_client: AsyncLlamaStackClie assert cast(Any, response.is_closed) is True + @parametrize + async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + agent = await async_client.agents.retrieve( + "agent_id", + ) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.agents.with_raw_response.retrieve( + "agent_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.agents.with_streaming_response.retrieve( + "agent_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.with_raw_response.retrieve( + "", + ) + + @parametrize + async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: + agent = await async_client.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @parametrize + async def test_method_list_with_all_params(self, 
async_client: AsyncLlamaStackClient) -> None: + agent = await async_client.agents.list( + limit=0, + start_index=0, + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @parametrize + async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.agents.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @parametrize + async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.agents.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + @parametrize async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None: agent = await async_client.agents.delete( diff --git a/tests/api_resources/test_datasets.py b/tests/api_resources/test_datasets.py index 9cd17f45..eee1de8c 100644 --- a/tests/api_resources/test_datasets.py +++ b/tests/api_resources/test_datasets.py @@ -85,6 +85,48 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_method_appendrows(self, client: LlamaStackClient) -> None: + dataset = client.datasets.appendrows( + dataset_id="dataset_id", + rows=[{"foo": True}], + ) + assert dataset is None + + @parametrize + def test_raw_response_appendrows(self, client: LlamaStackClient) -> None: + response = client.datasets.with_raw_response.appendrows( + dataset_id="dataset_id", + rows=[{"foo": True}], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + dataset = response.parse() + assert dataset is None + + @parametrize + def test_streaming_response_appendrows(self, client: LlamaStackClient) -> None: + with client.datasets.with_streaming_response.appendrows( + dataset_id="dataset_id", + rows=[{"foo": True}], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + dataset = response.parse() + assert dataset is None + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_appendrows(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"): + client.datasets.with_raw_response.appendrows( + dataset_id="", + rows=[{"foo": True}], + ) + @parametrize def test_method_iterrows(self, client: LlamaStackClient) -> None: dataset = client.datasets.iterrows( @@ -295,6 +337,48 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient assert cast(Any, response.is_closed) is True + @parametrize + async def test_method_appendrows(self, async_client: AsyncLlamaStackClient) -> None: + dataset = await async_client.datasets.appendrows( + dataset_id="dataset_id", + rows=[{"foo": True}], + ) + assert dataset is None + + @parametrize + async def test_raw_response_appendrows(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.datasets.with_raw_response.appendrows( + dataset_id="dataset_id", + rows=[{"foo": True}], + ) + + assert 
response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + dataset = await response.parse() + assert dataset is None + + @parametrize + async def test_streaming_response_appendrows(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.datasets.with_streaming_response.appendrows( + dataset_id="dataset_id", + rows=[{"foo": True}], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + dataset = await response.parse() + assert dataset is None + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_appendrows(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"): + await async_client.datasets.with_raw_response.appendrows( + dataset_id="", + rows=[{"foo": True}], + ) + @parametrize async def test_method_iterrows(self, async_client: AsyncLlamaStackClient) -> None: dataset = await async_client.datasets.iterrows( diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py index 7fc5e107..d9b29ffc 100644 --- a/tests/api_resources/test_files.py +++ b/tests/api_resources/test_files.py @@ -9,7 +9,8 @@ from tests.utils import assert_matches_type from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import File, ListFilesResponse, DeleteFileResponse +from llama_stack_client.types import File, DeleteFileResponse +from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -92,7 +93,7 @@ def test_path_params_retrieve(self, client: LlamaStackClient) -> None: @parametrize def test_method_list(self, client: LlamaStackClient) -> None: file = client.files.list() - assert_matches_type(ListFilesResponse, file, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[File], file, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: @@ -102,7 +103,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: order="asc", purpose="assistants", ) - assert_matches_type(ListFilesResponse, file, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[File], file, path=["response"]) @parametrize def test_raw_response_list(self, client: LlamaStackClient) -> None: @@ -111,7 +112,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() - assert_matches_type(ListFilesResponse, file, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[File], file, path=["response"]) @parametrize def test_streaming_response_list(self, client: LlamaStackClient) -> None: @@ -120,7 +121,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() - assert_matches_type(ListFilesResponse, file, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[File], file, path=["response"]) assert cast(Any, response.is_closed) is True @@ -281,7 +282,7 @@ async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) - @parametrize async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: file = await 
async_client.files.list() - assert_matches_type(ListFilesResponse, file, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[File], file, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: @@ -291,7 +292,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl order="asc", purpose="assistants", ) - assert_matches_type(ListFilesResponse, file, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[File], file, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: @@ -300,7 +301,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() - assert_matches_type(ListFilesResponse, file, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[File], file, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: @@ -309,7 +310,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() - assert_matches_type(ListFilesResponse, file, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[File], file, path=["response"]) assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_inference.py b/tests/api_resources/test_inference.py index 21967c9a..d5ef46d1 100644 --- a/tests/api_resources/test_inference.py +++ b/tests/api_resources/test_inference.py @@ -16,6 +16,8 @@ ) from llama_stack_client.types.shared import BatchCompletion, ChatCompletionResponse +# pyright: reportDeprecated=false + base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -178,75 +180,80 @@ def test_streaming_response_batch_completion(self, client: LlamaStackClient) -> @parametrize def test_method_chat_completion_overload_1(self, client: LlamaStackClient) -> None: - inference = client.inference.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + inference = client.inference.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + ) + assert_matches_type(ChatCompletionResponse, inference, path=["response"]) @parametrize def test_method_chat_completion_with_all_params_overload_1(self, client: LlamaStackClient) -> None: - inference = client.inference.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - "context": "string", - } - ], - model_id="model_id", - logprobs={"top_k": 0}, - response_format={ - "json_schema": {"foo": True}, - "type": "json_schema", - }, - sampling_params={ - "strategy": {"type": "greedy"}, - "max_tokens": 0, - "repetition_penalty": 0, - "stop": ["string"], - }, - stream=False, - tool_choice="auto", - tool_config={ - "system_message_behavior": "append", - "tool_choice": "auto", - "tool_prompt_format": "json", - }, - tool_prompt_format="json", - tools=[ - { - "tool_name": "brave_search", - "description": "description", - "parameters": { - "foo": { - "param_type": "param_type", - "default": True, - "description": "description", - "required": True, - } - }, - } - ], - ) + with pytest.warns(DeprecationWarning): + inference = 
client.inference.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + "context": "string", + } + ], + model_id="model_id", + logprobs={"top_k": 0}, + response_format={ + "json_schema": {"foo": True}, + "type": "json_schema", + }, + sampling_params={ + "strategy": {"type": "greedy"}, + "max_tokens": 0, + "repetition_penalty": 0, + "stop": ["string"], + }, + stream=False, + tool_choice="auto", + tool_config={ + "system_message_behavior": "append", + "tool_choice": "auto", + "tool_prompt_format": "json", + }, + tool_prompt_format="json", + tools=[ + { + "tool_name": "brave_search", + "description": "description", + "parameters": { + "foo": { + "param_type": "param_type", + "default": True, + "description": "description", + "required": True, + } + }, + } + ], + ) + assert_matches_type(ChatCompletionResponse, inference, path=["response"]) @parametrize def test_raw_response_chat_completion_overload_1(self, client: LlamaStackClient) -> None: - response = client.inference.with_raw_response.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + response = client.inference.with_raw_response.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -255,96 +262,102 @@ def test_raw_response_chat_completion_overload_1(self, client: LlamaStackClient) @parametrize def test_streaming_response_chat_completion_overload_1(self, client: LlamaStackClient) -> None: - with client.inference.with_streaming_response.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" + with pytest.warns(DeprecationWarning): + with client.inference.with_streaming_response.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" - inference = response.parse() - assert_matches_type(ChatCompletionResponse, inference, path=["response"]) + inference = response.parse() + assert_matches_type(ChatCompletionResponse, inference, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_chat_completion_overload_2(self, client: LlamaStackClient) -> None: - inference_stream = client.inference.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - stream=True, - ) + with pytest.warns(DeprecationWarning): + inference_stream = client.inference.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + stream=True, + ) + inference_stream.response.close() @parametrize def test_method_chat_completion_with_all_params_overload_2(self, client: LlamaStackClient) -> None: - inference_stream = client.inference.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - "context": "string", - } - ], - model_id="model_id", - stream=True, - logprobs={"top_k": 0}, - response_format={ - "json_schema": {"foo": True}, - "type": "json_schema", - }, - sampling_params={ - "strategy": {"type": "greedy"}, - "max_tokens": 0, - "repetition_penalty": 0, - "stop": ["string"], - }, - 
tool_choice="auto", - tool_config={ - "system_message_behavior": "append", - "tool_choice": "auto", - "tool_prompt_format": "json", - }, - tool_prompt_format="json", - tools=[ - { - "tool_name": "brave_search", - "description": "description", - "parameters": { - "foo": { - "param_type": "param_type", - "default": True, - "description": "description", - "required": True, - } - }, - } - ], - ) + with pytest.warns(DeprecationWarning): + inference_stream = client.inference.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + "context": "string", + } + ], + model_id="model_id", + stream=True, + logprobs={"top_k": 0}, + response_format={ + "json_schema": {"foo": True}, + "type": "json_schema", + }, + sampling_params={ + "strategy": {"type": "greedy"}, + "max_tokens": 0, + "repetition_penalty": 0, + "stop": ["string"], + }, + tool_choice="auto", + tool_config={ + "system_message_behavior": "append", + "tool_choice": "auto", + "tool_prompt_format": "json", + }, + tool_prompt_format="json", + tools=[ + { + "tool_name": "brave_search", + "description": "description", + "parameters": { + "foo": { + "param_type": "param_type", + "default": True, + "description": "description", + "required": True, + } + }, + } + ], + ) + inference_stream.response.close() @parametrize def test_raw_response_chat_completion_overload_2(self, client: LlamaStackClient) -> None: - response = client.inference.with_raw_response.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - stream=True, - ) + with pytest.warns(DeprecationWarning): + response = client.inference.with_raw_response.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + stream=True, + ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = response.parse() @@ -352,58 +365,64 @@ def test_raw_response_chat_completion_overload_2(self, client: LlamaStackClient) @parametrize def test_streaming_response_chat_completion_overload_2(self, client: LlamaStackClient) -> None: - with client.inference.with_streaming_response.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - stream=True, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" + with pytest.warns(DeprecationWarning): + with client.inference.with_streaming_response.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" - stream = response.parse() - stream.close() + stream = response.parse() + stream.close() assert cast(Any, response.is_closed) is True @parametrize def test_method_completion_overload_1(self, client: LlamaStackClient) -> None: - inference = client.inference.completion( - content="string", - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + inference = client.inference.completion( + content="string", + model_id="model_id", + ) + assert_matches_type(CompletionResponse, inference, path=["response"]) @parametrize def test_method_completion_with_all_params_overload_1(self, client: LlamaStackClient) -> None: - inference = client.inference.completion( - content="string", - model_id="model_id", - logprobs={"top_k": 0}, - response_format={ - "json_schema": {"foo": True}, - "type": "json_schema", - }, - 
sampling_params={ - "strategy": {"type": "greedy"}, - "max_tokens": 0, - "repetition_penalty": 0, - "stop": ["string"], - }, - stream=False, - ) + with pytest.warns(DeprecationWarning): + inference = client.inference.completion( + content="string", + model_id="model_id", + logprobs={"top_k": 0}, + response_format={ + "json_schema": {"foo": True}, + "type": "json_schema", + }, + sampling_params={ + "strategy": {"type": "greedy"}, + "max_tokens": 0, + "repetition_penalty": 0, + "stop": ["string"], + }, + stream=False, + ) + assert_matches_type(CompletionResponse, inference, path=["response"]) @parametrize def test_raw_response_completion_overload_1(self, client: LlamaStackClient) -> None: - response = client.inference.with_raw_response.completion( - content="string", - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + response = client.inference.with_raw_response.completion( + content="string", + model_id="model_id", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -412,54 +431,60 @@ def test_raw_response_completion_overload_1(self, client: LlamaStackClient) -> N @parametrize def test_streaming_response_completion_overload_1(self, client: LlamaStackClient) -> None: - with client.inference.with_streaming_response.completion( - content="string", - model_id="model_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" + with pytest.warns(DeprecationWarning): + with client.inference.with_streaming_response.completion( + content="string", + model_id="model_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" - inference = response.parse() - assert_matches_type(CompletionResponse, inference, path=["response"]) + inference = response.parse() + assert_matches_type(CompletionResponse, inference, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_completion_overload_2(self, client: LlamaStackClient) -> None: - inference_stream = client.inference.completion( - content="string", - model_id="model_id", - stream=True, - ) + with pytest.warns(DeprecationWarning): + inference_stream = client.inference.completion( + content="string", + model_id="model_id", + stream=True, + ) + inference_stream.response.close() @parametrize def test_method_completion_with_all_params_overload_2(self, client: LlamaStackClient) -> None: - inference_stream = client.inference.completion( - content="string", - model_id="model_id", - stream=True, - logprobs={"top_k": 0}, - response_format={ - "json_schema": {"foo": True}, - "type": "json_schema", - }, - sampling_params={ - "strategy": {"type": "greedy"}, - "max_tokens": 0, - "repetition_penalty": 0, - "stop": ["string"], - }, - ) + with pytest.warns(DeprecationWarning): + inference_stream = client.inference.completion( + content="string", + model_id="model_id", + stream=True, + logprobs={"top_k": 0}, + response_format={ + "json_schema": {"foo": True}, + "type": "json_schema", + }, + sampling_params={ + "strategy": {"type": "greedy"}, + "max_tokens": 0, + "repetition_penalty": 0, + "stop": ["string"], + }, + ) + inference_stream.response.close() @parametrize def test_raw_response_completion_overload_2(self, client: LlamaStackClient) -> None: - response = client.inference.with_raw_response.completion( - content="string", - model_id="model_id", - stream=True, - ) + with pytest.warns(DeprecationWarning): + response = 
client.inference.with_raw_response.completion( + content="string", + model_id="model_id", + stream=True, + ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = response.parse() @@ -467,44 +492,50 @@ def test_raw_response_completion_overload_2(self, client: LlamaStackClient) -> N @parametrize def test_streaming_response_completion_overload_2(self, client: LlamaStackClient) -> None: - with client.inference.with_streaming_response.completion( - content="string", - model_id="model_id", - stream=True, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - stream = response.parse() - stream.close() + with pytest.warns(DeprecationWarning): + with client.inference.with_streaming_response.completion( + content="string", + model_id="model_id", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = response.parse() + stream.close() assert cast(Any, response.is_closed) is True @parametrize def test_method_embeddings(self, client: LlamaStackClient) -> None: - inference = client.inference.embeddings( - contents=["string"], - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + inference = client.inference.embeddings( + contents=["string"], + model_id="model_id", + ) + assert_matches_type(EmbeddingsResponse, inference, path=["response"]) @parametrize def test_method_embeddings_with_all_params(self, client: LlamaStackClient) -> None: - inference = client.inference.embeddings( - contents=["string"], - model_id="model_id", - output_dimension=0, - task_type="query", - text_truncation="none", - ) + with pytest.warns(DeprecationWarning): + inference = client.inference.embeddings( + contents=["string"], + model_id="model_id", + output_dimension=0, + task_type="query", + text_truncation="none", + ) + assert_matches_type(EmbeddingsResponse, inference, path=["response"]) @parametrize def test_raw_response_embeddings(self, client: LlamaStackClient) -> None: - response = client.inference.with_raw_response.embeddings( - contents=["string"], - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + response = client.inference.with_raw_response.embeddings( + contents=["string"], + model_id="model_id", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -513,15 +544,16 @@ def test_raw_response_embeddings(self, client: LlamaStackClient) -> None: @parametrize def test_streaming_response_embeddings(self, client: LlamaStackClient) -> None: - with client.inference.with_streaming_response.embeddings( - contents=["string"], - model_id="model_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" + with pytest.warns(DeprecationWarning): + with client.inference.with_streaming_response.embeddings( + contents=["string"], + model_id="model_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" - inference = response.parse() - assert_matches_type(EmbeddingsResponse, inference, path=["response"]) + inference = response.parse() + assert_matches_type(EmbeddingsResponse, inference, path=["response"]) assert cast(Any, response.is_closed) is True @@ -687,75 +719,80 @@ async def test_streaming_response_batch_completion(self, async_client: AsyncLlam @parametrize async def 
test_method_chat_completion_overload_1(self, async_client: AsyncLlamaStackClient) -> None: - inference = await async_client.inference.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + inference = await async_client.inference.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + ) + assert_matches_type(ChatCompletionResponse, inference, path=["response"]) @parametrize async def test_method_chat_completion_with_all_params_overload_1(self, async_client: AsyncLlamaStackClient) -> None: - inference = await async_client.inference.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - "context": "string", - } - ], - model_id="model_id", - logprobs={"top_k": 0}, - response_format={ - "json_schema": {"foo": True}, - "type": "json_schema", - }, - sampling_params={ - "strategy": {"type": "greedy"}, - "max_tokens": 0, - "repetition_penalty": 0, - "stop": ["string"], - }, - stream=False, - tool_choice="auto", - tool_config={ - "system_message_behavior": "append", - "tool_choice": "auto", - "tool_prompt_format": "json", - }, - tool_prompt_format="json", - tools=[ - { - "tool_name": "brave_search", - "description": "description", - "parameters": { - "foo": { - "param_type": "param_type", - "default": True, - "description": "description", - "required": True, - } - }, - } - ], - ) + with pytest.warns(DeprecationWarning): + inference = await async_client.inference.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + "context": "string", + } + ], + model_id="model_id", + logprobs={"top_k": 0}, + response_format={ + "json_schema": {"foo": True}, + "type": "json_schema", + }, + sampling_params={ + "strategy": {"type": "greedy"}, + "max_tokens": 0, + "repetition_penalty": 0, + "stop": ["string"], + }, + stream=False, + tool_choice="auto", + tool_config={ + "system_message_behavior": "append", + "tool_choice": "auto", + "tool_prompt_format": "json", + }, + tool_prompt_format="json", + tools=[ + { + "tool_name": "brave_search", + "description": "description", + "parameters": { + "foo": { + "param_type": "param_type", + "default": True, + "description": "description", + "required": True, + } + }, + } + ], + ) + assert_matches_type(ChatCompletionResponse, inference, path=["response"]) @parametrize async def test_raw_response_chat_completion_overload_1(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.inference.with_raw_response.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + response = await async_client.inference.with_raw_response.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -764,96 +801,102 @@ async def test_raw_response_chat_completion_overload_1(self, async_client: Async @parametrize async def test_streaming_response_chat_completion_overload_1(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.inference.with_streaming_response.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" + with 
pytest.warns(DeprecationWarning): + async with async_client.inference.with_streaming_response.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" - inference = await response.parse() - assert_matches_type(ChatCompletionResponse, inference, path=["response"]) + inference = await response.parse() + assert_matches_type(ChatCompletionResponse, inference, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_chat_completion_overload_2(self, async_client: AsyncLlamaStackClient) -> None: - inference_stream = await async_client.inference.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - stream=True, - ) + with pytest.warns(DeprecationWarning): + inference_stream = await async_client.inference.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + stream=True, + ) + await inference_stream.response.aclose() @parametrize async def test_method_chat_completion_with_all_params_overload_2(self, async_client: AsyncLlamaStackClient) -> None: - inference_stream = await async_client.inference.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - "context": "string", - } - ], - model_id="model_id", - stream=True, - logprobs={"top_k": 0}, - response_format={ - "json_schema": {"foo": True}, - "type": "json_schema", - }, - sampling_params={ - "strategy": {"type": "greedy"}, - "max_tokens": 0, - "repetition_penalty": 0, - "stop": ["string"], - }, - tool_choice="auto", - tool_config={ - "system_message_behavior": "append", - "tool_choice": "auto", - "tool_prompt_format": "json", - }, - tool_prompt_format="json", - tools=[ - { - "tool_name": "brave_search", - "description": "description", - "parameters": { - "foo": { - "param_type": "param_type", - "default": True, - "description": "description", - "required": True, - } - }, - } - ], - ) + with pytest.warns(DeprecationWarning): + inference_stream = await async_client.inference.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + "context": "string", + } + ], + model_id="model_id", + stream=True, + logprobs={"top_k": 0}, + response_format={ + "json_schema": {"foo": True}, + "type": "json_schema", + }, + sampling_params={ + "strategy": {"type": "greedy"}, + "max_tokens": 0, + "repetition_penalty": 0, + "stop": ["string"], + }, + tool_choice="auto", + tool_config={ + "system_message_behavior": "append", + "tool_choice": "auto", + "tool_prompt_format": "json", + }, + tool_prompt_format="json", + tools=[ + { + "tool_name": "brave_search", + "description": "description", + "parameters": { + "foo": { + "param_type": "param_type", + "default": True, + "description": "description", + "required": True, + } + }, + } + ], + ) + await inference_stream.response.aclose() @parametrize async def test_raw_response_chat_completion_overload_2(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.inference.with_raw_response.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - stream=True, - ) + with pytest.warns(DeprecationWarning): + response = await async_client.inference.with_raw_response.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + stream=True, + ) assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" stream = await response.parse() @@ -861,58 +904,64 @@ async def test_raw_response_chat_completion_overload_2(self, async_client: Async @parametrize async def test_streaming_response_chat_completion_overload_2(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.inference.with_streaming_response.chat_completion( - messages=[ - { - "content": "string", - "role": "user", - } - ], - model_id="model_id", - stream=True, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" + with pytest.warns(DeprecationWarning): + async with async_client.inference.with_streaming_response.chat_completion( + messages=[ + { + "content": "string", + "role": "user", + } + ], + model_id="model_id", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" - stream = await response.parse() - await stream.close() + stream = await response.parse() + await stream.close() assert cast(Any, response.is_closed) is True @parametrize async def test_method_completion_overload_1(self, async_client: AsyncLlamaStackClient) -> None: - inference = await async_client.inference.completion( - content="string", - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + inference = await async_client.inference.completion( + content="string", + model_id="model_id", + ) + assert_matches_type(CompletionResponse, inference, path=["response"]) @parametrize async def test_method_completion_with_all_params_overload_1(self, async_client: AsyncLlamaStackClient) -> None: - inference = await async_client.inference.completion( - content="string", - model_id="model_id", - logprobs={"top_k": 0}, - response_format={ - "json_schema": {"foo": True}, - "type": "json_schema", - }, - sampling_params={ - "strategy": {"type": "greedy"}, - "max_tokens": 0, - "repetition_penalty": 0, - "stop": ["string"], - }, - stream=False, - ) + with pytest.warns(DeprecationWarning): + inference = await async_client.inference.completion( + content="string", + model_id="model_id", + logprobs={"top_k": 0}, + response_format={ + "json_schema": {"foo": True}, + "type": "json_schema", + }, + sampling_params={ + "strategy": {"type": "greedy"}, + "max_tokens": 0, + "repetition_penalty": 0, + "stop": ["string"], + }, + stream=False, + ) + assert_matches_type(CompletionResponse, inference, path=["response"]) @parametrize async def test_raw_response_completion_overload_1(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.inference.with_raw_response.completion( - content="string", - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + response = await async_client.inference.with_raw_response.completion( + content="string", + model_id="model_id", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -921,54 +970,60 @@ async def test_raw_response_completion_overload_1(self, async_client: AsyncLlama @parametrize async def test_streaming_response_completion_overload_1(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.inference.with_streaming_response.completion( - content="string", - model_id="model_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" + with pytest.warns(DeprecationWarning): + async with 
async_client.inference.with_streaming_response.completion( + content="string", + model_id="model_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" - inference = await response.parse() - assert_matches_type(CompletionResponse, inference, path=["response"]) + inference = await response.parse() + assert_matches_type(CompletionResponse, inference, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_completion_overload_2(self, async_client: AsyncLlamaStackClient) -> None: - inference_stream = await async_client.inference.completion( - content="string", - model_id="model_id", - stream=True, - ) + with pytest.warns(DeprecationWarning): + inference_stream = await async_client.inference.completion( + content="string", + model_id="model_id", + stream=True, + ) + await inference_stream.response.aclose() @parametrize async def test_method_completion_with_all_params_overload_2(self, async_client: AsyncLlamaStackClient) -> None: - inference_stream = await async_client.inference.completion( - content="string", - model_id="model_id", - stream=True, - logprobs={"top_k": 0}, - response_format={ - "json_schema": {"foo": True}, - "type": "json_schema", - }, - sampling_params={ - "strategy": {"type": "greedy"}, - "max_tokens": 0, - "repetition_penalty": 0, - "stop": ["string"], - }, - ) + with pytest.warns(DeprecationWarning): + inference_stream = await async_client.inference.completion( + content="string", + model_id="model_id", + stream=True, + logprobs={"top_k": 0}, + response_format={ + "json_schema": {"foo": True}, + "type": "json_schema", + }, + sampling_params={ + "strategy": {"type": "greedy"}, + "max_tokens": 0, + "repetition_penalty": 0, + "stop": ["string"], + }, + ) + await inference_stream.response.aclose() @parametrize async def test_raw_response_completion_overload_2(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.inference.with_raw_response.completion( - content="string", - model_id="model_id", - stream=True, - ) + with pytest.warns(DeprecationWarning): + response = await async_client.inference.with_raw_response.completion( + content="string", + model_id="model_id", + stream=True, + ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = await response.parse() @@ -976,44 +1031,50 @@ async def test_raw_response_completion_overload_2(self, async_client: AsyncLlama @parametrize async def test_streaming_response_completion_overload_2(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.inference.with_streaming_response.completion( - content="string", - model_id="model_id", - stream=True, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - stream = await response.parse() - await stream.close() + with pytest.warns(DeprecationWarning): + async with async_client.inference.with_streaming_response.completion( + content="string", + model_id="model_id", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = await response.parse() + await stream.close() assert cast(Any, response.is_closed) is True @parametrize async def test_method_embeddings(self, async_client: AsyncLlamaStackClient) -> None: - inference = await async_client.inference.embeddings( - contents=["string"], - model_id="model_id", - ) + with 
pytest.warns(DeprecationWarning): + inference = await async_client.inference.embeddings( + contents=["string"], + model_id="model_id", + ) + assert_matches_type(EmbeddingsResponse, inference, path=["response"]) @parametrize async def test_method_embeddings_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: - inference = await async_client.inference.embeddings( - contents=["string"], - model_id="model_id", - output_dimension=0, - task_type="query", - text_truncation="none", - ) + with pytest.warns(DeprecationWarning): + inference = await async_client.inference.embeddings( + contents=["string"], + model_id="model_id", + output_dimension=0, + task_type="query", + text_truncation="none", + ) + assert_matches_type(EmbeddingsResponse, inference, path=["response"]) @parametrize async def test_raw_response_embeddings(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.inference.with_raw_response.embeddings( - contents=["string"], - model_id="model_id", - ) + with pytest.warns(DeprecationWarning): + response = await async_client.inference.with_raw_response.embeddings( + contents=["string"], + model_id="model_id", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1022,14 +1083,15 @@ async def test_raw_response_embeddings(self, async_client: AsyncLlamaStackClient @parametrize async def test_streaming_response_embeddings(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.inference.with_streaming_response.embeddings( - contents=["string"], - model_id="model_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - inference = await response.parse() - assert_matches_type(EmbeddingsResponse, inference, path=["response"]) + with pytest.warns(DeprecationWarning): + async with async_client.inference.with_streaming_response.embeddings( + contents=["string"], + model_id="model_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + inference = await response.parse() + assert_matches_type(EmbeddingsResponse, inference, path=["response"]) assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_moderations.py b/tests/api_resources/test_moderations.py new file mode 100644 index 00000000..cbc77db1 --- /dev/null +++ b/tests/api_resources/test_moderations.py @@ -0,0 +1,92 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient +from llama_stack_client.types import CreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestModerations: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_create(self, client: LlamaStackClient) -> None: + moderation = client.moderations.create( + input="string", + model="model", + ) + assert_matches_type(CreateResponse, moderation, path=["response"]) + + @parametrize + def test_raw_response_create(self, client: LlamaStackClient) -> None: + response = client.moderations.with_raw_response.create( + input="string", + model="model", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + moderation = response.parse() + assert_matches_type(CreateResponse, moderation, path=["response"]) + + @parametrize + def test_streaming_response_create(self, client: LlamaStackClient) -> None: + with client.moderations.with_streaming_response.create( + input="string", + model="model", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + moderation = response.parse() + assert_matches_type(CreateResponse, moderation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncModerations: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @parametrize + async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None: + moderation = await async_client.moderations.create( + input="string", + model="model", + ) + assert_matches_type(CreateResponse, moderation, path=["response"]) + + @parametrize + async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.moderations.with_raw_response.create( + input="string", + model="model", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + moderation = await response.parse() + assert_matches_type(CreateResponse, moderation, path=["response"]) + + @parametrize + async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.moderations.with_streaming_response.create( + input="string", + model="model", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + moderation = await response.parse() + assert_matches_type(CreateResponse, moderation, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_post_training.py b/tests/api_resources/test_post_training.py index 5e7430fb..899a53ca 100644 --- a/tests/api_resources/test_post_training.py +++ b/tests/api_resources/test_post_training.py @@ -23,10 +23,8 @@ class TestPostTraining: def test_method_preference_optimize(self, client: LlamaStackClient) -> None: post_training = client.post_training.preference_optimize( algorithm_config={ - "epsilon": 0, - "gamma": 0, - "reward_clip": 0, - "reward_scale": 0, + "beta": 0, + "loss_type": "sigmoid", }, finetuned_model="finetuned_model", 
hyperparam_search_config={"foo": True}, @@ -44,10 +42,8 @@ def test_method_preference_optimize(self, client: LlamaStackClient) -> None: def test_method_preference_optimize_with_all_params(self, client: LlamaStackClient) -> None: post_training = client.post_training.preference_optimize( algorithm_config={ - "epsilon": 0, - "gamma": 0, - "reward_clip": 0, - "reward_scale": 0, + "beta": 0, + "loss_type": "sigmoid", }, finetuned_model="finetuned_model", hyperparam_search_config={"foo": True}, @@ -88,10 +84,8 @@ def test_method_preference_optimize_with_all_params(self, client: LlamaStackClie def test_raw_response_preference_optimize(self, client: LlamaStackClient) -> None: response = client.post_training.with_raw_response.preference_optimize( algorithm_config={ - "epsilon": 0, - "gamma": 0, - "reward_clip": 0, - "reward_scale": 0, + "beta": 0, + "loss_type": "sigmoid", }, finetuned_model="finetuned_model", hyperparam_search_config={"foo": True}, @@ -113,10 +107,8 @@ def test_raw_response_preference_optimize(self, client: LlamaStackClient) -> Non def test_streaming_response_preference_optimize(self, client: LlamaStackClient) -> None: with client.post_training.with_streaming_response.preference_optimize( algorithm_config={ - "epsilon": 0, - "gamma": 0, - "reward_clip": 0, - "reward_scale": 0, + "beta": 0, + "loss_type": "sigmoid", }, finetuned_model="finetuned_model", hyperparam_search_config={"foo": True}, @@ -247,10 +239,8 @@ class TestAsyncPostTraining: async def test_method_preference_optimize(self, async_client: AsyncLlamaStackClient) -> None: post_training = await async_client.post_training.preference_optimize( algorithm_config={ - "epsilon": 0, - "gamma": 0, - "reward_clip": 0, - "reward_scale": 0, + "beta": 0, + "loss_type": "sigmoid", }, finetuned_model="finetuned_model", hyperparam_search_config={"foo": True}, @@ -268,10 +258,8 @@ async def test_method_preference_optimize(self, async_client: AsyncLlamaStackCli async def test_method_preference_optimize_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: post_training = await async_client.post_training.preference_optimize( algorithm_config={ - "epsilon": 0, - "gamma": 0, - "reward_clip": 0, - "reward_scale": 0, + "beta": 0, + "loss_type": "sigmoid", }, finetuned_model="finetuned_model", hyperparam_search_config={"foo": True}, @@ -312,10 +300,8 @@ async def test_method_preference_optimize_with_all_params(self, async_client: As async def test_raw_response_preference_optimize(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.post_training.with_raw_response.preference_optimize( algorithm_config={ - "epsilon": 0, - "gamma": 0, - "reward_clip": 0, - "reward_scale": 0, + "beta": 0, + "loss_type": "sigmoid", }, finetuned_model="finetuned_model", hyperparam_search_config={"foo": True}, @@ -337,10 +323,8 @@ async def test_raw_response_preference_optimize(self, async_client: AsyncLlamaSt async def test_streaming_response_preference_optimize(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.post_training.with_streaming_response.preference_optimize( algorithm_config={ - "epsilon": 0, - "gamma": 0, - "reward_clip": 0, - "reward_scale": 0, + "beta": 0, + "loss_type": "sigmoid", }, finetuned_model="finetuned_model", hyperparam_search_config={"foo": True}, diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index a3fa9fd1..44366d61 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -10,6 +10,7 @@ 
from tests.utils import assert_matches_type from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient from llama_stack_client.types import ResponseObject, ResponseListResponse +from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -30,6 +31,7 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient response = client.responses.create( input="string", model="model", + include=["string"], instructions="instructions", max_infer_iters=0, previous_response_id="previous_response_id", @@ -95,6 +97,7 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient input="string", model="model", stream=True, + include=["string"], instructions="instructions", max_infer_iters=0, previous_response_id="previous_response_id", @@ -186,7 +189,7 @@ def test_path_params_retrieve(self, client: LlamaStackClient) -> None: @parametrize def test_method_list(self, client: LlamaStackClient) -> None: response = client.responses.list() - assert_matches_type(ResponseListResponse, response, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[ResponseListResponse], response, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: @@ -196,7 +199,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: model="model", order="asc", ) - assert_matches_type(ResponseListResponse, response, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[ResponseListResponse], response, path=["response"]) @parametrize def test_raw_response_list(self, client: LlamaStackClient) -> None: @@ -205,7 +208,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None: assert http_response.is_closed is True assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" response = http_response.parse() - assert_matches_type(ResponseListResponse, response, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[ResponseListResponse], response, path=["response"]) @parametrize def test_streaming_response_list(self, client: LlamaStackClient) -> None: @@ -214,7 +217,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" response = http_response.parse() - assert_matches_type(ResponseListResponse, response, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[ResponseListResponse], response, path=["response"]) assert cast(Any, http_response.is_closed) is True @@ -237,6 +240,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn response = await async_client.responses.create( input="string", model="model", + include=["string"], instructions="instructions", max_infer_iters=0, previous_response_id="previous_response_id", @@ -302,6 +306,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn input="string", model="model", stream=True, + include=["string"], instructions="instructions", max_infer_iters=0, previous_response_id="previous_response_id", @@ -393,7 +398,7 @@ async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) - @parametrize async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.responses.list() - assert_matches_type(ResponseListResponse, response, path=["response"]) + 
assert_matches_type(AsyncOpenAICursorPage[ResponseListResponse], response, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: @@ -403,7 +408,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl model="model", order="asc", ) - assert_matches_type(ResponseListResponse, response, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[ResponseListResponse], response, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: @@ -412,7 +417,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N assert http_response.is_closed is True assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" response = await http_response.parse() - assert_matches_type(ResponseListResponse, response, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[ResponseListResponse], response, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: @@ -421,6 +426,6 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" response = await http_response.parse() - assert_matches_type(ResponseListResponse, response, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[ResponseListResponse], response, path=["response"]) assert cast(Any, http_response.is_closed) is True diff --git a/tests/api_resources/test_vector_dbs.py b/tests/api_resources/test_vector_dbs.py index 68d6be89..1c4b855a 100644 --- a/tests/api_resources/test_vector_dbs.py +++ b/tests/api_resources/test_vector_dbs.py @@ -100,6 +100,7 @@ def test_method_register_with_all_params(self, client: LlamaStackClient) -> None embedding_dimension=0, provider_id="provider_id", provider_vector_db_id="provider_vector_db_id", + vector_db_name="vector_db_name", ) assert_matches_type(VectorDBRegisterResponse, vector_db, path=["response"]) @@ -252,6 +253,7 @@ async def test_method_register_with_all_params(self, async_client: AsyncLlamaSta embedding_dimension=0, provider_id="provider_id", provider_vector_db_id="provider_vector_db_id", + vector_db_name="vector_db_name", ) assert_matches_type(VectorDBRegisterResponse, vector_db, path=["response"]) diff --git a/tests/api_resources/test_vector_stores.py b/tests/api_resources/test_vector_stores.py index 84324ca4..3918624b 100644 --- a/tests/api_resources/test_vector_stores.py +++ b/tests/api_resources/test_vector_stores.py @@ -11,10 +11,10 @@ from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient from llama_stack_client.types import ( VectorStore, - ListVectorStoresResponse, VectorStoreDeleteResponse, VectorStoreSearchResponse, ) +from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -24,31 +24,26 @@ class TestVectorStores: @parametrize def test_method_create(self, client: LlamaStackClient) -> None: - vector_store = client.vector_stores.create( - name="name", - ) + vector_store = client.vector_stores.create() assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: LlamaStackClient) -> None: vector_store = client.vector_stores.create( - name="name", chunking_strategy={"foo": True}, embedding_dimension=0, 
embedding_model="embedding_model", expires_after={"foo": True}, file_ids=["string"], metadata={"foo": True}, + name="name", provider_id="provider_id", - provider_vector_db_id="provider_vector_db_id", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize def test_raw_response_create(self, client: LlamaStackClient) -> None: - response = client.vector_stores.with_raw_response.create( - name="name", - ) + response = client.vector_stores.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -57,9 +52,7 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None: @parametrize def test_streaming_response_create(self, client: LlamaStackClient) -> None: - with client.vector_stores.with_streaming_response.create( - name="name", - ) as response: + with client.vector_stores.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -157,7 +150,7 @@ def test_path_params_update(self, client: LlamaStackClient) -> None: @parametrize def test_method_list(self, client: LlamaStackClient) -> None: vector_store = client.vector_stores.list() - assert_matches_type(ListVectorStoresResponse, vector_store, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[VectorStore], vector_store, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: @@ -167,7 +160,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: limit=0, order="order", ) - assert_matches_type(ListVectorStoresResponse, vector_store, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[VectorStore], vector_store, path=["response"]) @parametrize def test_raw_response_list(self, client: LlamaStackClient) -> None: @@ -176,7 +169,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" vector_store = response.parse() - assert_matches_type(ListVectorStoresResponse, vector_store, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[VectorStore], vector_store, path=["response"]) @parametrize def test_streaming_response_list(self, client: LlamaStackClient) -> None: @@ -185,7 +178,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: assert response.http_request.headers.get("X-Stainless-Lang") == "python" vector_store = response.parse() - assert_matches_type(ListVectorStoresResponse, vector_store, path=["response"]) + assert_matches_type(SyncOpenAICursorPage[VectorStore], vector_store, path=["response"]) assert cast(Any, response.is_closed) is True @@ -293,31 +286,26 @@ class TestAsyncVectorStores: @parametrize async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None: - vector_store = await async_client.vector_stores.create( - name="name", - ) + vector_store = await async_client.vector_stores.create() assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: vector_store = await async_client.vector_stores.create( - name="name", chunking_strategy={"foo": True}, embedding_dimension=0, embedding_model="embedding_model", expires_after={"foo": True}, file_ids=["string"], metadata={"foo": True}, + name="name", provider_id="provider_id", - 
provider_vector_db_id="provider_vector_db_id", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None: - response = await async_client.vector_stores.with_raw_response.create( - name="name", - ) + response = await async_client.vector_stores.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -326,9 +314,7 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> @parametrize async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None: - async with async_client.vector_stores.with_streaming_response.create( - name="name", - ) as response: + async with async_client.vector_stores.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -426,7 +412,7 @@ async def test_path_params_update(self, async_client: AsyncLlamaStackClient) -> @parametrize async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: vector_store = await async_client.vector_stores.list() - assert_matches_type(ListVectorStoresResponse, vector_store, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[VectorStore], vector_store, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: @@ -436,7 +422,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl limit=0, order="order", ) - assert_matches_type(ListVectorStoresResponse, vector_store, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[VectorStore], vector_store, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: @@ -445,7 +431,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" vector_store = await response.parse() - assert_matches_type(ListVectorStoresResponse, vector_store, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[VectorStore], vector_store, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: @@ -454,7 +440,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient assert response.http_request.headers.get("X-Stainless-Lang") == "python" vector_store = await response.parse() - assert_matches_type(ListVectorStoresResponse, vector_store, path=["response"]) + assert_matches_type(AsyncOpenAICursorPage[VectorStore], vector_store, path=["response"]) assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/tool_runtime/test_rag_tool.py b/tests/api_resources/tool_runtime/test_rag_tool.py index 17a64d8e..dab816ab 100644 --- a/tests/api_resources/tool_runtime/test_rag_tool.py +++ b/tests/api_resources/tool_runtime/test_rag_tool.py @@ -93,7 +93,7 @@ def test_method_query_with_all_params(self, client: LlamaStackClient) -> None: "separator": "separator", "type": "default", }, - "mode": "mode", + "mode": "vector", "ranker": { "impact_factor": 0, "type": "rrf", @@ -210,7 +210,7 @@ async def test_method_query_with_all_params(self, async_client: AsyncLlamaStackC "separator": "separator", "type": "default", }, - "mode": "mode", + "mode": "vector", "ranker": { 
"impact_factor": 0, "type": "rrf", diff --git a/tests/api_resources/vector_stores/test_files.py b/tests/api_resources/vector_stores/test_files.py index f9728a36..cf38bc2b 100644 --- a/tests/api_resources/vector_stores/test_files.py +++ b/tests/api_resources/vector_stores/test_files.py @@ -9,7 +9,12 @@ from tests.utils import assert_matches_type from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types.vector_stores import VectorStoreFile +from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage +from llama_stack_client.types.vector_stores import ( + VectorStoreFile, + FileDeleteResponse, + FileContentResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -69,6 +74,253 @@ def test_path_params_create(self, client: LlamaStackClient) -> None: file_id="file_id", ) + @parametrize + def test_method_retrieve(self, client: LlamaStackClient) -> None: + file = client.vector_stores.files.retrieve( + file_id="file_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: + response = client.vector_stores.files.with_raw_response.retrieve( + file_id="file_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: + with client.vector_stores.files.with_streaming_response.retrieve( + file_id="file_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_retrieve(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.retrieve( + file_id="file_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.vector_stores.files.with_raw_response.retrieve( + file_id="", + vector_store_id="vector_store_id", + ) + + @parametrize + def test_method_update(self, client: LlamaStackClient) -> None: + file = client.vector_stores.files.update( + file_id="file_id", + vector_store_id="vector_store_id", + attributes={"foo": True}, + ) + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + def test_raw_response_update(self, client: LlamaStackClient) -> None: + response = client.vector_stores.files.with_raw_response.update( + file_id="file_id", + vector_store_id="vector_store_id", + attributes={"foo": True}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + def test_streaming_response_update(self, client: LlamaStackClient) -> None: + with client.vector_stores.files.with_streaming_response.update( + file_id="file_id", + vector_store_id="vector_store_id", + attributes={"foo": True}, + ) as response: 
+ assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_update(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.update( + file_id="file_id", + vector_store_id="", + attributes={"foo": True}, + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.vector_stores.files.with_raw_response.update( + file_id="", + vector_store_id="vector_store_id", + attributes={"foo": True}, + ) + + @parametrize + def test_method_list(self, client: LlamaStackClient) -> None: + file = client.vector_stores.files.list( + vector_store_id="vector_store_id", + ) + assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file, path=["response"]) + + @parametrize + def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: + file = client.vector_stores.files.list( + vector_store_id="vector_store_id", + after="after", + before="before", + filter="completed", + limit=0, + order="order", + ) + assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file, path=["response"]) + + @parametrize + def test_raw_response_list(self, client: LlamaStackClient) -> None: + response = client.vector_stores.files.with_raw_response.list( + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file, path=["response"]) + + @parametrize + def test_streaming_response_list(self, client: LlamaStackClient) -> None: + with client.vector_stores.files.with_streaming_response.list( + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_list(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.list( + vector_store_id="", + ) + + @parametrize + def test_method_delete(self, client: LlamaStackClient) -> None: + file = client.vector_stores.files.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @parametrize + def test_raw_response_delete(self, client: LlamaStackClient) -> None: + response = client.vector_stores.files.with_raw_response.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @parametrize + def test_streaming_response_delete(self, client: LlamaStackClient) -> None: + with client.vector_stores.files.with_streaming_response.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_delete(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.delete( + file_id="file_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.vector_stores.files.with_raw_response.delete( + file_id="", + vector_store_id="vector_store_id", + ) + + @parametrize + def test_method_content(self, client: LlamaStackClient) -> None: + file = client.vector_stores.files.content( + file_id="file_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(FileContentResponse, file, path=["response"]) + + @parametrize + def test_raw_response_content(self, client: LlamaStackClient) -> None: + response = client.vector_stores.files.with_raw_response.content( + file_id="file_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileContentResponse, file, path=["response"]) + + @parametrize + def test_streaming_response_content(self, client: LlamaStackClient) -> None: + with client.vector_stores.files.with_streaming_response.content( + file_id="file_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileContentResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_content(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.content( + file_id="file_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.vector_stores.files.with_raw_response.content( + file_id="", + vector_store_id="vector_store_id", + ) + class TestAsyncFiles: parametrize = pytest.mark.parametrize( @@ -126,3 +378,250 @@ async def test_path_params_create(self, async_client: AsyncLlamaStackClient) -> vector_store_id="", file_id="file_id", ) + + @parametrize + async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + file = await async_client.vector_stores.files.retrieve( + file_id="file_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.vector_stores.files.with_raw_response.retrieve( + file_id="file_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + async with 
async_client.vector_stores.files.with_streaming_response.retrieve( + file_id="file_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.retrieve( + file_id="file_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.retrieve( + file_id="", + vector_store_id="vector_store_id", + ) + + @parametrize + async def test_method_update(self, async_client: AsyncLlamaStackClient) -> None: + file = await async_client.vector_stores.files.update( + file_id="file_id", + vector_store_id="vector_store_id", + attributes={"foo": True}, + ) + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + async def test_raw_response_update(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.vector_stores.files.with_raw_response.update( + file_id="file_id", + vector_store_id="vector_store_id", + attributes={"foo": True}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + @parametrize + async def test_streaming_response_update(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.vector_stores.files.with_streaming_response.update( + file_id="file_id", + vector_store_id="vector_store_id", + attributes={"foo": True}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(VectorStoreFile, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_update(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.update( + file_id="file_id", + vector_store_id="", + attributes={"foo": True}, + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.update( + file_id="", + vector_store_id="vector_store_id", + attributes={"foo": True}, + ) + + @parametrize + async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: + file = await async_client.vector_stores.files.list( + vector_store_id="vector_store_id", + ) + assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file, path=["response"]) + + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: + file = await async_client.vector_stores.files.list( + vector_store_id="vector_store_id", + after="after", + before="before", + filter="completed", + limit=0, + order="order", + ) + assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file, 
path=["response"]) + + @parametrize + async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.vector_stores.files.with_raw_response.list( + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file, path=["response"]) + + @parametrize + async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.vector_stores.files.with_streaming_response.list( + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_list(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.list( + vector_store_id="", + ) + + @parametrize + async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None: + file = await async_client.vector_stores.files.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @parametrize + async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.vector_stores.files.with_raw_response.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.vector_stores.files.with_streaming_response.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.delete( + file_id="file_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.delete( + file_id="", + vector_store_id="vector_store_id", + ) + + @parametrize + async def test_method_content(self, async_client: AsyncLlamaStackClient) -> None: + file = await async_client.vector_stores.files.content( + file_id="file_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(FileContentResponse, file, path=["response"]) + + @parametrize + async def test_raw_response_content(self, async_client: AsyncLlamaStackClient) -> None: + response = await 
async_client.vector_stores.files.with_raw_response.content( + file_id="file_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileContentResponse, file, path=["response"]) + + @parametrize + async def test_streaming_response_content(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.vector_stores.files.with_streaming_response.content( + file_id="file_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileContentResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_content(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.content( + file_id="file_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.content( + file_id="", + vector_store_id="vector_store_id", + ) diff --git a/tests/test_client.py b/tests/test_client.py index 6a1a8f85..14889fae 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -431,7 +431,7 @@ def test_request_extra_query(self) -> None: def test_multipart_repeating_array(self, client: LlamaStackClient) -> None: request = client._build_request( FinalRequestOptions.construct( - method="get", + method="post", url="/foo", headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, json_data={"array": ["foo", "bar"]}, @@ -523,7 +523,7 @@ def test_base_url_setter(self) -> None: assert client.base_url == "https://example.com/from_setter/" def test_base_url_env(self) -> None: - with update_env(LLAMA_STACK_BASE_URL="http://localhost:5000/from/env"): + with update_env(LLAMA_STACK_CLIENT_BASE_URL="http://localhost:5000/from/env"): client = LlamaStackClient(_strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @@ -1245,7 +1245,7 @@ def test_request_extra_query(self) -> None: def test_multipart_repeating_array(self, async_client: AsyncLlamaStackClient) -> None: request = async_client._build_request( FinalRequestOptions.construct( - method="get", + method="post", url="/foo", headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, json_data={"array": ["foo", "bar"]}, @@ -1337,7 +1337,7 @@ def test_base_url_setter(self) -> None: assert client.base_url == "https://example.com/from_setter/" def test_base_url_env(self) -> None: - with update_env(LLAMA_STACK_BASE_URL="http://localhost:5000/from/env"): + with update_env(LLAMA_STACK_CLIENT_BASE_URL="http://localhost:5000/from/env"): client = AsyncLlamaStackClient(_strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" diff --git a/tests/test_models.py b/tests/test_models.py index a27dfa46..c5135234 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,5 +1,5 @@ import json -from typing import Any, Dict, List, Union, Optional, cast +from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast from datetime import datetime, timezone 
from typing_extensions import Literal, Annotated, TypeAliasType @@ -889,3 +889,75 @@ class ModelB(BaseModel): ) assert isinstance(m, ModelB) + + +def test_nested_discriminated_union() -> None: + class InnerType1(BaseModel): + type: Literal["type_1"] + + class InnerModel(BaseModel): + inner_value: str + + class InnerType2(BaseModel): + type: Literal["type_2"] + some_inner_model: InnerModel + + class Type1(BaseModel): + base_type: Literal["base_type_1"] + value: Annotated[ + Union[ + InnerType1, + InnerType2, + ], + PropertyInfo(discriminator="type"), + ] + + class Type2(BaseModel): + base_type: Literal["base_type_2"] + + T = Annotated[ + Union[ + Type1, + Type2, + ], + PropertyInfo(discriminator="base_type"), + ] + + model = construct_type( + type_=T, + value={ + "base_type": "base_type_1", + "value": { + "type": "type_2", + }, + }, + ) + assert isinstance(model, Type1) + assert isinstance(model.value, InnerType2) + + +@pytest.mark.skipif(not PYDANTIC_V2, reason="this is only supported in pydantic v2 for now") +def test_extra_properties() -> None: + class Item(BaseModel): + prop: int + + class Model(BaseModel): + __pydantic_extra__: Dict[str, Item] = Field(init=False) # pyright: ignore[reportIncompatibleVariableOverride] + + other: str + + if TYPE_CHECKING: + + def __getattr__(self, attr: str) -> Item: ... + + model = construct_type( + type_=Model, + value={ + "a": {"prop": 1}, + "other": "foo", + }, + ) + assert isinstance(model, Model) + assert model.a.prop == 1 + assert isinstance(model.a, Item) + assert model.other == "foo" diff --git a/uv.lock b/uv.lock index 7e95839b..5af8b7c9 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.12" [[package]] @@ -13,16 +13,16 @@ wheels = [ [[package]] name = "anyio" -version = "4.8.0" +version = "4.9.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "idna" }, { name = "sniffio" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126, upload-time = "2025-01-05T13:13:11.095Z" } +sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949, upload-time = "2025-03-17T00:02:54.77Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041, upload-time = "2025-01-05T13:13:07.985Z" }, + { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916, upload-time = "2025-03-17T00:02:52.713Z" }, ] [[package]] @@ -51,11 +51,11 @@ wheels = [ [[package]] name = "certifi" -version = "2025.1.31" +version = "2025.6.15" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = 
"sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577, upload-time = "2025-01-31T02:16:47.166Z" } +sdist = { url = "https://files.pythonhosted.org/packages/73/f7/f14b46d4bcd21092d7d3ccef689615220d8a08fb25e564b65d20738e672e/certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b", size = 158753, upload-time = "2025-06-15T02:45:51.329Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393, upload-time = "2025-01-31T02:16:45.015Z" }, + { url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" }, ] [[package]] @@ -104,14 +104,14 @@ wheels = [ [[package]] name = "click" -version = "8.1.8" +version = "8.2.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, ] [[package]] @@ -170,24 +170,24 @@ sdist = { url = "https://files.pythonhosted.org/packages/6b/b6/82c7e601d6d3c3278 [[package]] name = "h11" -version = "0.14.0" +version = "0.16.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418, upload-time = "2022-09-25T15:40:01.519Z" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259, upload-time = "2022-09-25T15:39:59.68Z" }, + { url = 
"https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, ] [[package]] name = "httpcore" -version = "1.0.7" +version = "1.0.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, { name = "h11" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196, upload-time = "2024-11-15T12:30:47.531Z" } +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551, upload-time = "2024-11-15T12:30:45.782Z" }, + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, ] [[package]] @@ -234,7 +234,7 @@ wheels = [ [[package]] name = "llama-stack-client" -version = "0.2.13" +version = "0.2.17" source = { editable = "." } dependencies = [ { name = "anyio" }, @@ -320,28 +320,28 @@ wheels = [ [[package]] name = "mypy" -version = "1.16.0" +version = "1.16.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, { name = "pathspec" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d4/38/13c2f1abae94d5ea0354e146b95a1be9b2137a0d506728e0da037c4276f6/mypy-1.16.0.tar.gz", hash = "sha256:84b94283f817e2aa6350a14b4a8fb2a35a53c286f97c9d30f53b63620e7af8ab", size = 3323139, upload-time = "2025-05-29T13:46:12.532Z" } +sdist = { url = "https://files.pythonhosted.org/packages/81/69/92c7fa98112e4d9eb075a239caa4ef4649ad7d441545ccffbd5e34607cbb/mypy-1.16.1.tar.gz", hash = "sha256:6bd00a0a2094841c5e47e7374bb42b83d64c527a502e3334e1173a0c24437bab", size = 3324747, upload-time = "2025-06-16T16:51:35.145Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/70/cf/158e5055e60ca2be23aec54a3010f89dcffd788732634b344fc9cb1e85a0/mypy-1.16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c5436d11e89a3ad16ce8afe752f0f373ae9620841c50883dc96f8b8805620b13", size = 11062927, upload-time = "2025-05-29T13:35:52.328Z" }, - { url = "https://files.pythonhosted.org/packages/94/34/cfff7a56be1609f5d10ef386342ce3494158e4d506516890142007e6472c/mypy-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f2622af30bf01d8fc36466231bdd203d120d7a599a6d88fb22bdcb9dbff84090", size = 10083082, upload-time = "2025-05-29T13:35:33.378Z" }, - { url = "https://files.pythonhosted.org/packages/b3/7f/7242062ec6288c33d8ad89574df87c3903d394870e5e6ba1699317a65075/mypy-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:d045d33c284e10a038f5e29faca055b90eee87da3fc63b8889085744ebabb5a1", size = 11828306, upload-time = "2025-05-29T13:21:02.164Z" }, - { url = "https://files.pythonhosted.org/packages/6f/5f/b392f7b4f659f5b619ce5994c5c43caab3d80df2296ae54fa888b3d17f5a/mypy-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b4968f14f44c62e2ec4a038c8797a87315be8df7740dc3ee8d3bfe1c6bf5dba8", size = 12702764, upload-time = "2025-05-29T13:20:42.826Z" }, - { url = "https://files.pythonhosted.org/packages/9b/c0/7646ef3a00fa39ac9bc0938626d9ff29d19d733011be929cfea59d82d136/mypy-1.16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eb14a4a871bb8efb1e4a50360d4e3c8d6c601e7a31028a2c79f9bb659b63d730", size = 12896233, upload-time = "2025-05-29T13:18:37.446Z" }, - { url = "https://files.pythonhosted.org/packages/6d/38/52f4b808b3fef7f0ef840ee8ff6ce5b5d77381e65425758d515cdd4f5bb5/mypy-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:bd4e1ebe126152a7bbaa4daedd781c90c8f9643c79b9748caa270ad542f12bec", size = 9565547, upload-time = "2025-05-29T13:20:02.836Z" }, - { url = "https://files.pythonhosted.org/packages/97/9c/ca03bdbefbaa03b264b9318a98950a9c683e06472226b55472f96ebbc53d/mypy-1.16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a9e056237c89f1587a3be1a3a70a06a698d25e2479b9a2f57325ddaaffc3567b", size = 11059753, upload-time = "2025-05-29T13:18:18.167Z" }, - { url = "https://files.pythonhosted.org/packages/36/92/79a969b8302cfe316027c88f7dc6fee70129490a370b3f6eb11d777749d0/mypy-1.16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b07e107affb9ee6ce1f342c07f51552d126c32cd62955f59a7db94a51ad12c0", size = 10073338, upload-time = "2025-05-29T13:19:48.079Z" }, - { url = "https://files.pythonhosted.org/packages/14/9b/a943f09319167da0552d5cd722104096a9c99270719b1afeea60d11610aa/mypy-1.16.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c6fb60cbd85dc65d4d63d37cb5c86f4e3a301ec605f606ae3a9173e5cf34997b", size = 11827764, upload-time = "2025-05-29T13:46:04.47Z" }, - { url = "https://files.pythonhosted.org/packages/ec/64/ff75e71c65a0cb6ee737287c7913ea155845a556c64144c65b811afdb9c7/mypy-1.16.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a7e32297a437cc915599e0578fa6bc68ae6a8dc059c9e009c628e1c47f91495d", size = 12701356, upload-time = "2025-05-29T13:35:13.553Z" }, - { url = "https://files.pythonhosted.org/packages/0a/ad/0e93c18987a1182c350f7a5fab70550852f9fabe30ecb63bfbe51b602074/mypy-1.16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:afe420c9380ccec31e744e8baff0d406c846683681025db3531b32db56962d52", size = 12900745, upload-time = "2025-05-29T13:17:24.409Z" }, - { url = "https://files.pythonhosted.org/packages/28/5d/036c278d7a013e97e33f08c047fe5583ab4f1fc47c9a49f985f1cdd2a2d7/mypy-1.16.0-cp313-cp313-win_amd64.whl", hash = "sha256:55f9076c6ce55dd3f8cd0c6fff26a008ca8e5131b89d5ba6d86bd3f47e736eeb", size = 9572200, upload-time = "2025-05-29T13:33:44.92Z" }, - { url = "https://files.pythonhosted.org/packages/99/a3/6ed10530dec8e0fdc890d81361260c9ef1f5e5c217ad8c9b21ecb2b8366b/mypy-1.16.0-py3-none-any.whl", hash = "sha256:29e1499864a3888bca5c1542f2d7232c6e586295183320caa95758fc84034031", size = 2265773, upload-time = "2025-05-29T13:35:18.762Z" }, + { url = "https://files.pythonhosted.org/packages/b4/d6/39482e5fcc724c15bf6280ff5806548c7185e0c090712a3736ed4d07e8b7/mypy-1.16.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:af4792433f09575d9eeca5c63d7d90ca4aeceda9d8355e136f80f8967639183d", size = 11066493, upload-time = "2025-06-16T16:47:01.683Z" }, + { url = "https://files.pythonhosted.org/packages/e6/e5/26c347890efc6b757f4d5bb83f4a0cf5958b8cf49c938ac99b8b72b420a6/mypy-1.16.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66df38405fd8466ce3517eda1f6640611a0b8e70895e2a9462d1d4323c5eb4b9", size = 10081687, upload-time = "2025-06-16T16:48:19.367Z" }, + { url = "https://files.pythonhosted.org/packages/44/c7/b5cb264c97b86914487d6a24bd8688c0172e37ec0f43e93b9691cae9468b/mypy-1.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44e7acddb3c48bd2713994d098729494117803616e116032af192871aed80b79", size = 11839723, upload-time = "2025-06-16T16:49:20.912Z" }, + { url = "https://files.pythonhosted.org/packages/15/f8/491997a9b8a554204f834ed4816bda813aefda31cf873bb099deee3c9a99/mypy-1.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ab5eca37b50188163fa7c1b73c685ac66c4e9bdee4a85c9adac0e91d8895e15", size = 12722980, upload-time = "2025-06-16T16:37:40.929Z" }, + { url = "https://files.pythonhosted.org/packages/df/f0/2bd41e174b5fd93bc9de9a28e4fb673113633b8a7f3a607fa4a73595e468/mypy-1.16.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb6229b2c9086247e21a83c309754b9058b438704ad2f6807f0d8227f6ebdd", size = 12903328, upload-time = "2025-06-16T16:34:35.099Z" }, + { url = "https://files.pythonhosted.org/packages/61/81/5572108a7bec2c46b8aff7e9b524f371fe6ab5efb534d38d6b37b5490da8/mypy-1.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:1f0435cf920e287ff68af3d10a118a73f212deb2ce087619eb4e648116d1fe9b", size = 9562321, upload-time = "2025-06-16T16:48:58.823Z" }, + { url = "https://files.pythonhosted.org/packages/28/e3/96964af4a75a949e67df4b95318fe2b7427ac8189bbc3ef28f92a1c5bc56/mypy-1.16.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ddc91eb318c8751c69ddb200a5937f1232ee8efb4e64e9f4bc475a33719de438", size = 11063480, upload-time = "2025-06-16T16:47:56.205Z" }, + { url = "https://files.pythonhosted.org/packages/f5/4d/cd1a42b8e5be278fab7010fb289d9307a63e07153f0ae1510a3d7b703193/mypy-1.16.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:87ff2c13d58bdc4bbe7dc0dedfe622c0f04e2cb2a492269f3b418df2de05c536", size = 10090538, upload-time = "2025-06-16T16:46:43.92Z" }, + { url = "https://files.pythonhosted.org/packages/c9/4f/c3c6b4b66374b5f68bab07c8cabd63a049ff69796b844bc759a0ca99bb2a/mypy-1.16.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a7cfb0fe29fe5a9841b7c8ee6dffb52382c45acdf68f032145b75620acfbd6f", size = 11836839, upload-time = "2025-06-16T16:36:28.039Z" }, + { url = "https://files.pythonhosted.org/packages/b4/7e/81ca3b074021ad9775e5cb97ebe0089c0f13684b066a750b7dc208438403/mypy-1.16.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:051e1677689c9d9578b9c7f4d206d763f9bbd95723cd1416fad50db49d52f359", size = 12715634, upload-time = "2025-06-16T16:50:34.441Z" }, + { url = "https://files.pythonhosted.org/packages/e9/95/bdd40c8be346fa4c70edb4081d727a54d0a05382d84966869738cfa8a497/mypy-1.16.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d5d2309511cc56c021b4b4e462907c2b12f669b2dbeb68300110ec27723971be", size = 12895584, upload-time = "2025-06-16T16:34:54.857Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/fd/d486a0827a1c597b3b48b1bdef47228a6e9ee8102ab8c28f944cb83b65dc/mypy-1.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:4f58ac32771341e38a853c5d0ec0dfe27e18e27da9cdb8bbc882d2249c71a3ee", size = 9573886, upload-time = "2025-06-16T16:36:43.589Z" }, + { url = "https://files.pythonhosted.org/packages/cf/d3/53e684e78e07c1a2bf7105715e5edd09ce951fc3f47cf9ed095ec1b7a037/mypy-1.16.1-py3-none-any.whl", hash = "sha256:5fc2ac4027d0ef28d6ba69a0343737a23c4d1b83672bf38d1fe237bdc0643b37", size = 2265923, upload-time = "2025-06-16T16:48:02.366Z" }, ] [[package]] @@ -364,40 +364,43 @@ wheels = [ [[package]] name = "numpy" -version = "2.2.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fb/90/8956572f5c4ae52201fdec7ba2044b2c882832dcec7d5d0922c9e9acf2de/numpy-2.2.3.tar.gz", hash = "sha256:dbdc15f0c81611925f382dfa97b3bd0bc2c1ce19d4fe50482cb0ddc12ba30020", size = 20262700, upload-time = "2025-02-13T17:17:41.558Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/ec/43628dcf98466e087812142eec6d1c1a6c6bdfdad30a0aa07b872dc01f6f/numpy-2.2.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12c045f43b1d2915eca6b880a7f4a256f59d62df4f044788c8ba67709412128d", size = 20929458, upload-time = "2025-02-13T16:48:32.527Z" }, - { url = "https://files.pythonhosted.org/packages/9b/c0/2f4225073e99a5c12350954949ed19b5d4a738f541d33e6f7439e33e98e4/numpy-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:87eed225fd415bbae787f93a457af7f5990b92a334e346f72070bf569b9c9c95", size = 14115299, upload-time = "2025-02-13T16:48:54.659Z" }, - { url = "https://files.pythonhosted.org/packages/ca/fa/d2c5575d9c734a7376cc1592fae50257ec95d061b27ee3dbdb0b3b551eb2/numpy-2.2.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:712a64103d97c404e87d4d7c47fb0c7ff9acccc625ca2002848e0d53288b90ea", size = 5145723, upload-time = "2025-02-13T16:49:04.561Z" }, - { url = "https://files.pythonhosted.org/packages/eb/dc/023dad5b268a7895e58e791f28dc1c60eb7b6c06fcbc2af8538ad069d5f3/numpy-2.2.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a5ae282abe60a2db0fd407072aff4599c279bcd6e9a2475500fc35b00a57c532", size = 6678797, upload-time = "2025-02-13T16:49:15.217Z" }, - { url = "https://files.pythonhosted.org/packages/3f/19/bcd641ccf19ac25abb6fb1dcd7744840c11f9d62519d7057b6ab2096eb60/numpy-2.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5266de33d4c3420973cf9ae3b98b54a2a6d53a559310e3236c4b2b06b9c07d4e", size = 14067362, upload-time = "2025-02-13T16:49:36.17Z" }, - { url = "https://files.pythonhosted.org/packages/39/04/78d2e7402fb479d893953fb78fa7045f7deb635ec095b6b4f0260223091a/numpy-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b787adbf04b0db1967798dba8da1af07e387908ed1553a0d6e74c084d1ceafe", size = 16116679, upload-time = "2025-02-13T16:50:00.079Z" }, - { url = "https://files.pythonhosted.org/packages/d0/a1/e90f7aa66512be3150cb9d27f3d9995db330ad1b2046474a13b7040dfd92/numpy-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:34c1b7e83f94f3b564b35f480f5652a47007dd91f7c839f404d03279cc8dd021", size = 15264272, upload-time = "2025-02-13T16:50:23.121Z" }, - { url = "https://files.pythonhosted.org/packages/dc/b6/50bd027cca494de4fa1fc7bf1662983d0ba5f256fa0ece2c376b5eb9b3f0/numpy-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4d8335b5f1b6e2bce120d55fb17064b0262ff29b459e8493d1785c18ae2553b8", size = 17880549, upload-time = "2025-02-13T16:50:50.778Z" 
}, - { url = "https://files.pythonhosted.org/packages/96/30/f7bf4acb5f8db10a96f73896bdeed7a63373137b131ca18bd3dab889db3b/numpy-2.2.3-cp312-cp312-win32.whl", hash = "sha256:4d9828d25fb246bedd31e04c9e75714a4087211ac348cb39c8c5f99dbb6683fe", size = 6293394, upload-time = "2025-02-13T16:51:02.031Z" }, - { url = "https://files.pythonhosted.org/packages/42/6e/55580a538116d16ae7c9aa17d4edd56e83f42126cb1dfe7a684da7925d2c/numpy-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:83807d445817326b4bcdaaaf8e8e9f1753da04341eceec705c001ff342002e5d", size = 12626357, upload-time = "2025-02-13T16:51:21.821Z" }, - { url = "https://files.pythonhosted.org/packages/0e/8b/88b98ed534d6a03ba8cddb316950fe80842885709b58501233c29dfa24a9/numpy-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bfdb06b395385ea9b91bf55c1adf1b297c9fdb531552845ff1d3ea6e40d5aba", size = 20916001, upload-time = "2025-02-13T16:51:52.612Z" }, - { url = "https://files.pythonhosted.org/packages/d9/b4/def6ec32c725cc5fbd8bdf8af80f616acf075fe752d8a23e895da8c67b70/numpy-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:23c9f4edbf4c065fddb10a4f6e8b6a244342d95966a48820c614891e5059bb50", size = 14130721, upload-time = "2025-02-13T16:52:31.998Z" }, - { url = "https://files.pythonhosted.org/packages/20/60/70af0acc86495b25b672d403e12cb25448d79a2b9658f4fc45e845c397a8/numpy-2.2.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:a0c03b6be48aaf92525cccf393265e02773be8fd9551a2f9adbe7db1fa2b60f1", size = 5130999, upload-time = "2025-02-13T16:52:41.545Z" }, - { url = "https://files.pythonhosted.org/packages/2e/69/d96c006fb73c9a47bcb3611417cf178049aae159afae47c48bd66df9c536/numpy-2.2.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:2376e317111daa0a6739e50f7ee2a6353f768489102308b0d98fcf4a04f7f3b5", size = 6665299, upload-time = "2025-02-13T16:52:54.96Z" }, - { url = "https://files.pythonhosted.org/packages/5a/3f/d8a877b6e48103733ac224ffa26b30887dc9944ff95dffdfa6c4ce3d7df3/numpy-2.2.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8fb62fe3d206d72fe1cfe31c4a1106ad2b136fcc1606093aeab314f02930fdf2", size = 14064096, upload-time = "2025-02-13T16:53:29.678Z" }, - { url = "https://files.pythonhosted.org/packages/e4/43/619c2c7a0665aafc80efca465ddb1f260287266bdbdce517396f2f145d49/numpy-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52659ad2534427dffcc36aac76bebdd02b67e3b7a619ac67543bc9bfe6b7cdb1", size = 16114758, upload-time = "2025-02-13T16:54:03.466Z" }, - { url = "https://files.pythonhosted.org/packages/d9/79/ee4fe4f60967ccd3897aa71ae14cdee9e3c097e3256975cc9575d393cb42/numpy-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1b416af7d0ed3271cad0f0a0d0bee0911ed7eba23e66f8424d9f3dfcdcae1304", size = 15259880, upload-time = "2025-02-13T16:54:26.744Z" }, - { url = "https://files.pythonhosted.org/packages/fb/c8/8b55cf05db6d85b7a7d414b3d1bd5a740706df00bfa0824a08bf041e52ee/numpy-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1402da8e0f435991983d0a9708b779f95a8c98c6b18a171b9f1be09005e64d9d", size = 17876721, upload-time = "2025-02-13T16:54:53.751Z" }, - { url = "https://files.pythonhosted.org/packages/21/d6/b4c2f0564b7dcc413117b0ffbb818d837e4b29996b9234e38b2025ed24e7/numpy-2.2.3-cp313-cp313-win32.whl", hash = "sha256:136553f123ee2951bfcfbc264acd34a2fc2f29d7cdf610ce7daf672b6fbaa693", size = 6290195, upload-time = "2025-02-13T16:58:31.683Z" }, - { url = 
"https://files.pythonhosted.org/packages/97/e7/7d55a86719d0de7a6a597949f3febefb1009435b79ba510ff32f05a8c1d7/numpy-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5b732c8beef1d7bc2d9e476dbba20aaff6167bf205ad9aa8d30913859e82884b", size = 12619013, upload-time = "2025-02-13T16:58:50.693Z" }, - { url = "https://files.pythonhosted.org/packages/a6/1f/0b863d5528b9048fd486a56e0b97c18bf705e88736c8cea7239012119a54/numpy-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:435e7a933b9fda8126130b046975a968cc2d833b505475e588339e09f7672890", size = 20944621, upload-time = "2025-02-13T16:55:27.593Z" }, - { url = "https://files.pythonhosted.org/packages/aa/99/b478c384f7a0a2e0736177aafc97dc9152fc036a3fdb13f5a3ab225f1494/numpy-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7678556eeb0152cbd1522b684dcd215250885993dd00adb93679ec3c0e6e091c", size = 14142502, upload-time = "2025-02-13T16:55:52.039Z" }, - { url = "https://files.pythonhosted.org/packages/fb/61/2d9a694a0f9cd0a839501d362de2a18de75e3004576a3008e56bdd60fcdb/numpy-2.2.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:2e8da03bd561504d9b20e7a12340870dfc206c64ea59b4cfee9fceb95070ee94", size = 5176293, upload-time = "2025-02-13T16:56:01.372Z" }, - { url = "https://files.pythonhosted.org/packages/33/35/51e94011b23e753fa33f891f601e5c1c9a3d515448659b06df9d40c0aa6e/numpy-2.2.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:c9aa4496fd0e17e3843399f533d62857cef5900facf93e735ef65aa4bbc90ef0", size = 6691874, upload-time = "2025-02-13T16:56:12.842Z" }, - { url = "https://files.pythonhosted.org/packages/ff/cf/06e37619aad98a9d03bd8d65b8e3041c3a639be0f5f6b0a0e2da544538d4/numpy-2.2.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4ca91d61a4bf61b0f2228f24bbfa6a9facd5f8af03759fe2a655c50ae2c6610", size = 14036826, upload-time = "2025-02-13T16:56:33.453Z" }, - { url = "https://files.pythonhosted.org/packages/0c/93/5d7d19955abd4d6099ef4a8ee006f9ce258166c38af259f9e5558a172e3e/numpy-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:deaa09cd492e24fd9b15296844c0ad1b3c976da7907e1c1ed3a0ad21dded6f76", size = 16096567, upload-time = "2025-02-13T16:56:58.035Z" }, - { url = "https://files.pythonhosted.org/packages/af/53/d1c599acf7732d81f46a93621dab6aa8daad914b502a7a115b3f17288ab2/numpy-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:246535e2f7496b7ac85deffe932896a3577be7af8fb7eebe7146444680297e9a", size = 15242514, upload-time = "2025-02-13T16:57:22.124Z" }, - { url = "https://files.pythonhosted.org/packages/53/43/c0f5411c7b3ea90adf341d05ace762dad8cb9819ef26093e27b15dd121ac/numpy-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:daf43a3d1ea699402c5a850e5313680ac355b4adc9770cd5cfc2940e7861f1bf", size = 17872920, upload-time = "2025-02-13T16:57:49.308Z" }, - { url = "https://files.pythonhosted.org/packages/5b/57/6dbdd45ab277aff62021cafa1e15f9644a52f5b5fc840bc7591b4079fb58/numpy-2.2.3-cp313-cp313t-win32.whl", hash = "sha256:cf802eef1f0134afb81fef94020351be4fe1d6681aadf9c5e862af6602af64ef", size = 6346584, upload-time = "2025-02-13T16:58:02.02Z" }, - { url = "https://files.pythonhosted.org/packages/97/9b/484f7d04b537d0a1202a5ba81c6f53f1846ae6c63c2127f8df869ed31342/numpy-2.2.3-cp313-cp313t-win_amd64.whl", hash = "sha256:aee2512827ceb6d7f517c8b85aa5d3923afe8fc7a57d028cffcd522f1c6fd082", size = 12706784, upload-time = "2025-02-13T16:58:21.038Z" }, +version = "2.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/2e/19/d7c972dfe90a353dbd3efbbe1d14a5951de80c99c9dc1b93cd998d51dc0f/numpy-2.3.1.tar.gz", hash = "sha256:1ec9ae20a4226da374362cca3c62cd753faf2f951440b0e3b98e93c235441d2b", size = 20390372, upload-time = "2025-06-21T12:28:33.469Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/56/71ad5022e2f63cfe0ca93559403d0edef14aea70a841d640bd13cdba578e/numpy-2.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2959d8f268f3d8ee402b04a9ec4bb7604555aeacf78b360dc4ec27f1d508177d", size = 20896664, upload-time = "2025-06-21T12:15:30.845Z" }, + { url = "https://files.pythonhosted.org/packages/25/65/2db52ba049813670f7f987cc5db6dac9be7cd95e923cc6832b3d32d87cef/numpy-2.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:762e0c0c6b56bdedfef9a8e1d4538556438288c4276901ea008ae44091954e29", size = 14131078, upload-time = "2025-06-21T12:15:52.23Z" }, + { url = "https://files.pythonhosted.org/packages/57/dd/28fa3c17b0e751047ac928c1e1b6990238faad76e9b147e585b573d9d1bd/numpy-2.3.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:867ef172a0976aaa1f1d1b63cf2090de8b636a7674607d514505fb7276ab08fc", size = 5112554, upload-time = "2025-06-21T12:16:01.434Z" }, + { url = "https://files.pythonhosted.org/packages/c9/fc/84ea0cba8e760c4644b708b6819d91784c290288c27aca916115e3311d17/numpy-2.3.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:4e602e1b8682c2b833af89ba641ad4176053aaa50f5cacda1a27004352dde943", size = 6646560, upload-time = "2025-06-21T12:16:11.895Z" }, + { url = "https://files.pythonhosted.org/packages/61/b2/512b0c2ddec985ad1e496b0bd853eeb572315c0f07cd6997473ced8f15e2/numpy-2.3.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:8e333040d069eba1652fb08962ec5b76af7f2c7bce1df7e1418c8055cf776f25", size = 14260638, upload-time = "2025-06-21T12:16:32.611Z" }, + { url = "https://files.pythonhosted.org/packages/6e/45/c51cb248e679a6c6ab14b7a8e3ead3f4a3fe7425fc7a6f98b3f147bec532/numpy-2.3.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:e7cbf5a5eafd8d230a3ce356d892512185230e4781a361229bd902ff403bc660", size = 16632729, upload-time = "2025-06-21T12:16:57.439Z" }, + { url = "https://files.pythonhosted.org/packages/e4/ff/feb4be2e5c09a3da161b412019caf47183099cbea1132fd98061808c2df2/numpy-2.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f1b8f26d1086835f442286c1d9b64bb3974b0b1e41bb105358fd07d20872952", size = 15565330, upload-time = "2025-06-21T12:17:20.638Z" }, + { url = "https://files.pythonhosted.org/packages/bc/6d/ceafe87587101e9ab0d370e4f6e5f3f3a85b9a697f2318738e5e7e176ce3/numpy-2.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ee8340cb48c9b7a5899d1149eece41ca535513a9698098edbade2a8e7a84da77", size = 18361734, upload-time = "2025-06-21T12:17:47.938Z" }, + { url = "https://files.pythonhosted.org/packages/2b/19/0fb49a3ea088be691f040c9bf1817e4669a339d6e98579f91859b902c636/numpy-2.3.1-cp312-cp312-win32.whl", hash = "sha256:e772dda20a6002ef7061713dc1e2585bc1b534e7909b2030b5a46dae8ff077ab", size = 6320411, upload-time = "2025-06-21T12:17:58.475Z" }, + { url = "https://files.pythonhosted.org/packages/b1/3e/e28f4c1dd9e042eb57a3eb652f200225e311b608632bc727ae378623d4f8/numpy-2.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:cfecc7822543abdea6de08758091da655ea2210b8ffa1faf116b940693d3df76", size = 12734973, upload-time = "2025-06-21T12:18:17.601Z" }, + { url = "https://files.pythonhosted.org/packages/04/a8/8a5e9079dc722acf53522b8f8842e79541ea81835e9b5483388701421073/numpy-2.3.1-cp312-cp312-win_arm64.whl", hash = 
"sha256:7be91b2239af2658653c5bb6f1b8bccafaf08226a258caf78ce44710a0160d30", size = 10191491, upload-time = "2025-06-21T12:18:33.585Z" }, + { url = "https://files.pythonhosted.org/packages/d4/bd/35ad97006d8abff8631293f8ea6adf07b0108ce6fec68da3c3fcca1197f2/numpy-2.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:25a1992b0a3fdcdaec9f552ef10d8103186f5397ab45e2d25f8ac51b1a6b97e8", size = 20889381, upload-time = "2025-06-21T12:19:04.103Z" }, + { url = "https://files.pythonhosted.org/packages/f1/4f/df5923874d8095b6062495b39729178eef4a922119cee32a12ee1bd4664c/numpy-2.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7dea630156d39b02a63c18f508f85010230409db5b2927ba59c8ba4ab3e8272e", size = 14152726, upload-time = "2025-06-21T12:19:25.599Z" }, + { url = "https://files.pythonhosted.org/packages/8c/0f/a1f269b125806212a876f7efb049b06c6f8772cf0121139f97774cd95626/numpy-2.3.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:bada6058dd886061f10ea15f230ccf7dfff40572e99fef440a4a857c8728c9c0", size = 5105145, upload-time = "2025-06-21T12:19:34.782Z" }, + { url = "https://files.pythonhosted.org/packages/6d/63/a7f7fd5f375b0361682f6ffbf686787e82b7bbd561268e4f30afad2bb3c0/numpy-2.3.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:a894f3816eb17b29e4783e5873f92faf55b710c2519e5c351767c51f79d8526d", size = 6639409, upload-time = "2025-06-21T12:19:45.228Z" }, + { url = "https://files.pythonhosted.org/packages/bf/0d/1854a4121af895aab383f4aa233748f1df4671ef331d898e32426756a8a6/numpy-2.3.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:18703df6c4a4fee55fd3d6e5a253d01c5d33a295409b03fda0c86b3ca2ff41a1", size = 14257630, upload-time = "2025-06-21T12:20:06.544Z" }, + { url = "https://files.pythonhosted.org/packages/50/30/af1b277b443f2fb08acf1c55ce9d68ee540043f158630d62cef012750f9f/numpy-2.3.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:5902660491bd7a48b2ec16c23ccb9124b8abfd9583c5fdfa123fe6b421e03de1", size = 16627546, upload-time = "2025-06-21T12:20:31.002Z" }, + { url = "https://files.pythonhosted.org/packages/6e/ec/3b68220c277e463095342d254c61be8144c31208db18d3fd8ef02712bcd6/numpy-2.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:36890eb9e9d2081137bd78d29050ba63b8dab95dff7912eadf1185e80074b2a0", size = 15562538, upload-time = "2025-06-21T12:20:54.322Z" }, + { url = "https://files.pythonhosted.org/packages/77/2b/4014f2bcc4404484021c74d4c5ee8eb3de7e3f7ac75f06672f8dcf85140a/numpy-2.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a780033466159c2270531e2b8ac063704592a0bc62ec4a1b991c7c40705eb0e8", size = 18360327, upload-time = "2025-06-21T12:21:21.053Z" }, + { url = "https://files.pythonhosted.org/packages/40/8d/2ddd6c9b30fcf920837b8672f6c65590c7d92e43084c25fc65edc22e93ca/numpy-2.3.1-cp313-cp313-win32.whl", hash = "sha256:39bff12c076812595c3a306f22bfe49919c5513aa1e0e70fac756a0be7c2a2b8", size = 6312330, upload-time = "2025-06-21T12:25:07.447Z" }, + { url = "https://files.pythonhosted.org/packages/dd/c8/beaba449925988d415efccb45bf977ff8327a02f655090627318f6398c7b/numpy-2.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:8d5ee6eec45f08ce507a6570e06f2f879b374a552087a4179ea7838edbcbfa42", size = 12731565, upload-time = "2025-06-21T12:25:26.444Z" }, + { url = "https://files.pythonhosted.org/packages/0b/c3/5c0c575d7ec78c1126998071f58facfc124006635da75b090805e642c62e/numpy-2.3.1-cp313-cp313-win_arm64.whl", hash = "sha256:0c4d9e0a8368db90f93bd192bfa771ace63137c3488d198ee21dfb8e7771916e", size = 10190262, upload-time = "2025-06-21T12:25:42.196Z" }, + { url = 
"https://files.pythonhosted.org/packages/ea/19/a029cd335cf72f79d2644dcfc22d90f09caa86265cbbde3b5702ccef6890/numpy-2.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:b0b5397374f32ec0649dd98c652a1798192042e715df918c20672c62fb52d4b8", size = 20987593, upload-time = "2025-06-21T12:21:51.664Z" }, + { url = "https://files.pythonhosted.org/packages/25/91/8ea8894406209107d9ce19b66314194675d31761fe2cb3c84fe2eeae2f37/numpy-2.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c5bdf2015ccfcee8253fb8be695516ac4457c743473a43290fd36eba6a1777eb", size = 14300523, upload-time = "2025-06-21T12:22:13.583Z" }, + { url = "https://files.pythonhosted.org/packages/a6/7f/06187b0066eefc9e7ce77d5f2ddb4e314a55220ad62dd0bfc9f2c44bac14/numpy-2.3.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d70f20df7f08b90a2062c1f07737dd340adccf2068d0f1b9b3d56e2038979fee", size = 5227993, upload-time = "2025-06-21T12:22:22.53Z" }, + { url = "https://files.pythonhosted.org/packages/e8/ec/a926c293c605fa75e9cfb09f1e4840098ed46d2edaa6e2152ee35dc01ed3/numpy-2.3.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:2fb86b7e58f9ac50e1e9dd1290154107e47d1eef23a0ae9145ded06ea606f992", size = 6736652, upload-time = "2025-06-21T12:22:33.629Z" }, + { url = "https://files.pythonhosted.org/packages/e3/62/d68e52fb6fde5586650d4c0ce0b05ff3a48ad4df4ffd1b8866479d1d671d/numpy-2.3.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:23ab05b2d241f76cb883ce8b9a93a680752fbfcbd51c50eff0b88b979e471d8c", size = 14331561, upload-time = "2025-06-21T12:22:55.056Z" }, + { url = "https://files.pythonhosted.org/packages/fc/ec/b74d3f2430960044bdad6900d9f5edc2dc0fb8bf5a0be0f65287bf2cbe27/numpy-2.3.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:ce2ce9e5de4703a673e705183f64fd5da5bf36e7beddcb63a25ee2286e71ca48", size = 16693349, upload-time = "2025-06-21T12:23:20.53Z" }, + { url = "https://files.pythonhosted.org/packages/0d/15/def96774b9d7eb198ddadfcbd20281b20ebb510580419197e225f5c55c3e/numpy-2.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c4913079974eeb5c16ccfd2b1f09354b8fed7e0d6f2cab933104a09a6419b1ee", size = 15642053, upload-time = "2025-06-21T12:23:43.697Z" }, + { url = "https://files.pythonhosted.org/packages/2b/57/c3203974762a759540c6ae71d0ea2341c1fa41d84e4971a8e76d7141678a/numpy-2.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:010ce9b4f00d5c036053ca684c77441f2f2c934fd23bee058b4d6f196efd8280", size = 18434184, upload-time = "2025-06-21T12:24:10.708Z" }, + { url = "https://files.pythonhosted.org/packages/22/8a/ccdf201457ed8ac6245187850aff4ca56a79edbea4829f4e9f14d46fa9a5/numpy-2.3.1-cp313-cp313t-win32.whl", hash = "sha256:6269b9edfe32912584ec496d91b00b6d34282ca1d07eb10e82dfc780907d6c2e", size = 6440678, upload-time = "2025-06-21T12:24:21.596Z" }, + { url = "https://files.pythonhosted.org/packages/f1/7e/7f431d8bd8eb7e03d79294aed238b1b0b174b3148570d03a8a8a8f6a0da9/numpy-2.3.1-cp313-cp313t-win_amd64.whl", hash = "sha256:2a809637460e88a113e186e87f228d74ae2852a2e0c44de275263376f17b5bdc", size = 12870697, upload-time = "2025-06-21T12:24:40.644Z" }, + { url = "https://files.pythonhosted.org/packages/d4/ca/af82bf0fad4c3e573c6930ed743b5308492ff19917c7caaf2f9b6f9e2e98/numpy-2.3.1-cp313-cp313t-win_arm64.whl", hash = "sha256:eccb9a159db9aed60800187bc47a6d3451553f0e1b08b068d8b277ddfbb9b244", size = 10260376, upload-time = "2025-06-21T12:24:56.884Z" }, ] [[package]] @@ -411,7 +414,7 @@ wheels = [ [[package]] name = "pandas" -version = "2.2.3" +version = "2.3.0" source = { registry = "https://pypi.org/simple" } 
dependencies = [ { name = "numpy" }, @@ -419,28 +422,28 @@ dependencies = [ { name = "pytz" }, { name = "tzdata" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9c/d6/9f8431bacc2e19dca897724cd097b1bb224a6ad5433784a44b587c7c13af/pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667", size = 4399213, upload-time = "2024-09-20T13:10:04.827Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/17/a3/fb2734118db0af37ea7433f57f722c0a56687e14b14690edff0cdb4b7e58/pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9", size = 12529893, upload-time = "2024-09-20T13:09:09.655Z" }, - { url = "https://files.pythonhosted.org/packages/e1/0c/ad295fd74bfac85358fd579e271cded3ac969de81f62dd0142c426b9da91/pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4", size = 11363475, upload-time = "2024-09-20T13:09:14.718Z" }, - { url = "https://files.pythonhosted.org/packages/c6/2a/4bba3f03f7d07207481fed47f5b35f556c7441acddc368ec43d6643c5777/pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3", size = 15188645, upload-time = "2024-09-20T19:02:03.88Z" }, - { url = "https://files.pythonhosted.org/packages/38/f8/d8fddee9ed0d0c0f4a2132c1dfcf0e3e53265055da8df952a53e7eaf178c/pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319", size = 12739445, upload-time = "2024-09-20T13:09:17.621Z" }, - { url = "https://files.pythonhosted.org/packages/20/e8/45a05d9c39d2cea61ab175dbe6a2de1d05b679e8de2011da4ee190d7e748/pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8", size = 16359235, upload-time = "2024-09-20T19:02:07.094Z" }, - { url = "https://files.pythonhosted.org/packages/1d/99/617d07a6a5e429ff90c90da64d428516605a1ec7d7bea494235e1c3882de/pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a", size = 14056756, upload-time = "2024-09-20T13:09:20.474Z" }, - { url = "https://files.pythonhosted.org/packages/29/d4/1244ab8edf173a10fd601f7e13b9566c1b525c4f365d6bee918e68381889/pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13", size = 11504248, upload-time = "2024-09-20T13:09:23.137Z" }, - { url = "https://files.pythonhosted.org/packages/64/22/3b8f4e0ed70644e85cfdcd57454686b9057c6c38d2f74fe4b8bc2527214a/pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", size = 12477643, upload-time = "2024-09-20T13:09:25.522Z" }, - { url = "https://files.pythonhosted.org/packages/e4/93/b3f5d1838500e22c8d793625da672f3eec046b1a99257666c94446969282/pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", size = 11281573, upload-time = "2024-09-20T13:09:28.012Z" }, - { url = "https://files.pythonhosted.org/packages/f5/94/6c79b07f0e5aab1dcfa35a75f4817f5c4f677931d4234afcd75f0e6a66ca/pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", size = 15196085, 
upload-time = "2024-09-20T19:02:10.451Z" }, - { url = "https://files.pythonhosted.org/packages/e8/31/aa8da88ca0eadbabd0a639788a6da13bb2ff6edbbb9f29aa786450a30a91/pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", size = 12711809, upload-time = "2024-09-20T13:09:30.814Z" }, - { url = "https://files.pythonhosted.org/packages/ee/7c/c6dbdb0cb2a4344cacfb8de1c5808ca885b2e4dcfde8008266608f9372af/pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", size = 16356316, upload-time = "2024-09-20T19:02:13.825Z" }, - { url = "https://files.pythonhosted.org/packages/57/b7/8b757e7d92023b832869fa8881a992696a0bfe2e26f72c9ae9f255988d42/pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", size = 14022055, upload-time = "2024-09-20T13:09:33.462Z" }, - { url = "https://files.pythonhosted.org/packages/3b/bc/4b18e2b8c002572c5a441a64826252ce5da2aa738855747247a971988043/pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", size = 11481175, upload-time = "2024-09-20T13:09:35.871Z" }, - { url = "https://files.pythonhosted.org/packages/76/a3/a5d88146815e972d40d19247b2c162e88213ef51c7c25993942c39dbf41d/pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", size = 12615650, upload-time = "2024-09-20T13:09:38.685Z" }, - { url = "https://files.pythonhosted.org/packages/9c/8c/f0fd18f6140ddafc0c24122c8a964e48294acc579d47def376fef12bcb4a/pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", size = 11290177, upload-time = "2024-09-20T13:09:41.141Z" }, - { url = "https://files.pythonhosted.org/packages/ed/f9/e995754eab9c0f14c6777401f7eece0943840b7a9fc932221c19d1abee9f/pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", size = 14651526, upload-time = "2024-09-20T19:02:16.905Z" }, - { url = "https://files.pythonhosted.org/packages/25/b0/98d6ae2e1abac4f35230aa756005e8654649d305df9a28b16b9ae4353bff/pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", size = 11871013, upload-time = "2024-09-20T13:09:44.39Z" }, - { url = "https://files.pythonhosted.org/packages/cc/57/0f72a10f9db6a4628744c8e8f0df4e6e21de01212c7c981d31e50ffc8328/pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", size = 15711620, upload-time = "2024-09-20T19:02:20.639Z" }, - { url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436, upload-time = "2024-09-20T13:09:48.112Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/72/51/48f713c4c728d7c55ef7444ba5ea027c26998d96d1a40953b346438602fc/pandas-2.3.0.tar.gz", hash = "sha256:34600ab34ebf1131a7613a260a61dbe8b62c188ec0ea4c296da7c9a06b004133", size = 4484490, upload-time = "2025-06-05T03:27:54.133Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/94/46/24192607058dd607dbfacdd060a2370f6afb19c2ccb617406469b9aeb8e7/pandas-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2eb4728a18dcd2908c7fccf74a982e241b467d178724545a48d0caf534b38ebf", size = 11573865, upload-time = "2025-06-05T03:26:46.774Z" }, + { url = "https://files.pythonhosted.org/packages/9f/cc/ae8ea3b800757a70c9fdccc68b67dc0280a6e814efcf74e4211fd5dea1ca/pandas-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9d8c3187be7479ea5c3d30c32a5d73d62a621166675063b2edd21bc47614027", size = 10702154, upload-time = "2025-06-05T16:50:14.439Z" }, + { url = "https://files.pythonhosted.org/packages/d8/ba/a7883d7aab3d24c6540a2768f679e7414582cc389876d469b40ec749d78b/pandas-2.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ff730713d4c4f2f1c860e36c005c7cefc1c7c80c21c0688fd605aa43c9fcf09", size = 11262180, upload-time = "2025-06-05T16:50:17.453Z" }, + { url = "https://files.pythonhosted.org/packages/01/a5/931fc3ad333d9d87b10107d948d757d67ebcfc33b1988d5faccc39c6845c/pandas-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba24af48643b12ffe49b27065d3babd52702d95ab70f50e1b34f71ca703e2c0d", size = 11991493, upload-time = "2025-06-05T03:26:51.813Z" }, + { url = "https://files.pythonhosted.org/packages/d7/bf/0213986830a92d44d55153c1d69b509431a972eb73f204242988c4e66e86/pandas-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:404d681c698e3c8a40a61d0cd9412cc7364ab9a9cc6e144ae2992e11a2e77a20", size = 12470733, upload-time = "2025-06-06T00:00:18.651Z" }, + { url = "https://files.pythonhosted.org/packages/a4/0e/21eb48a3a34a7d4bac982afc2c4eb5ab09f2d988bdf29d92ba9ae8e90a79/pandas-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6021910b086b3ca756755e86ddc64e0ddafd5e58e076c72cb1585162e5ad259b", size = 13212406, upload-time = "2025-06-05T03:26:55.992Z" }, + { url = "https://files.pythonhosted.org/packages/1f/d9/74017c4eec7a28892d8d6e31ae9de3baef71f5a5286e74e6b7aad7f8c837/pandas-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:094e271a15b579650ebf4c5155c05dcd2a14fd4fdd72cf4854b2f7ad31ea30be", size = 10976199, upload-time = "2025-06-05T03:26:59.594Z" }, + { url = "https://files.pythonhosted.org/packages/d3/57/5cb75a56a4842bbd0511c3d1c79186d8315b82dac802118322b2de1194fe/pandas-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c7e2fc25f89a49a11599ec1e76821322439d90820108309bf42130d2f36c983", size = 11518913, upload-time = "2025-06-05T03:27:02.757Z" }, + { url = "https://files.pythonhosted.org/packages/05/01/0c8785610e465e4948a01a059562176e4c8088aa257e2e074db868f86d4e/pandas-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c6da97aeb6a6d233fb6b17986234cc723b396b50a3c6804776351994f2a658fd", size = 10655249, upload-time = "2025-06-05T16:50:20.17Z" }, + { url = "https://files.pythonhosted.org/packages/e8/6a/47fd7517cd8abe72a58706aab2b99e9438360d36dcdb052cf917b7bf3bdc/pandas-2.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb32dc743b52467d488e7a7c8039b821da2826a9ba4f85b89ea95274f863280f", size = 11328359, upload-time = "2025-06-05T03:27:06.431Z" }, + { url = "https://files.pythonhosted.org/packages/2a/b3/463bfe819ed60fb7e7ddffb4ae2ee04b887b3444feee6c19437b8f834837/pandas-2.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:213cd63c43263dbb522c1f8a7c9d072e25900f6975596f883f4bebd77295d4f3", size = 12024789, upload-time = "2025-06-05T03:27:09.875Z" }, + { url = 
"https://files.pythonhosted.org/packages/04/0c/e0704ccdb0ac40aeb3434d1c641c43d05f75c92e67525df39575ace35468/pandas-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1d2b33e68d0ce64e26a4acc2e72d747292084f4e8db4c847c6f5f6cbe56ed6d8", size = 12480734, upload-time = "2025-06-06T00:00:22.246Z" }, + { url = "https://files.pythonhosted.org/packages/e9/df/815d6583967001153bb27f5cf075653d69d51ad887ebbf4cfe1173a1ac58/pandas-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:430a63bae10b5086995db1b02694996336e5a8ac9a96b4200572b413dfdfccb9", size = 13223381, upload-time = "2025-06-05T03:27:15.641Z" }, + { url = "https://files.pythonhosted.org/packages/79/88/ca5973ed07b7f484c493e941dbff990861ca55291ff7ac67c815ce347395/pandas-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4930255e28ff5545e2ca404637bcc56f031893142773b3468dc021c6c32a1390", size = 10970135, upload-time = "2025-06-05T03:27:24.131Z" }, + { url = "https://files.pythonhosted.org/packages/24/fb/0994c14d1f7909ce83f0b1fb27958135513c4f3f2528bde216180aa73bfc/pandas-2.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f925f1ef673b4bd0271b1809b72b3270384f2b7d9d14a189b12b7fc02574d575", size = 12141356, upload-time = "2025-06-05T03:27:34.547Z" }, + { url = "https://files.pythonhosted.org/packages/9d/a2/9b903e5962134497ac4f8a96f862ee3081cb2506f69f8e4778ce3d9c9d82/pandas-2.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e78ad363ddb873a631e92a3c063ade1ecfb34cae71e9a2be6ad100f875ac1042", size = 11474674, upload-time = "2025-06-05T03:27:39.448Z" }, + { url = "https://files.pythonhosted.org/packages/81/3a/3806d041bce032f8de44380f866059437fb79e36d6b22c82c187e65f765b/pandas-2.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951805d146922aed8357e4cc5671b8b0b9be1027f0619cea132a9f3f65f2f09c", size = 11439876, upload-time = "2025-06-05T03:27:43.652Z" }, + { url = "https://files.pythonhosted.org/packages/15/aa/3fc3181d12b95da71f5c2537c3e3b3af6ab3a8c392ab41ebb766e0929bc6/pandas-2.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a881bc1309f3fce34696d07b00f13335c41f5f5a8770a33b09ebe23261cfc67", size = 11966182, upload-time = "2025-06-05T03:27:47.652Z" }, + { url = "https://files.pythonhosted.org/packages/37/e7/e12f2d9b0a2c4a2cc86e2aabff7ccfd24f03e597d770abfa2acd313ee46b/pandas-2.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e1991bbb96f4050b09b5f811253c4f3cf05ee89a589379aa36cd623f21a31d6f", size = 12547686, upload-time = "2025-06-06T00:00:26.142Z" }, + { url = "https://files.pythonhosted.org/packages/39/c2/646d2e93e0af70f4e5359d870a63584dacbc324b54d73e6b3267920ff117/pandas-2.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bb3be958022198531eb7ec2008cfc78c5b1eed51af8600c6c5d9160d89d8d249", size = 13231847, upload-time = "2025-06-05T03:27:51.465Z" }, ] [[package]] @@ -488,93 +491,97 @@ wheels = [ [[package]] name = "prompt-toolkit" -version = "3.0.50" +version = "3.0.51" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "wcwidth" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087, upload-time = "2025-01-20T15:55:35.072Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/6e/9d084c929dfe9e3bfe0c6a47e31f78a25c54627d64a66e884a8bf5474f1c/prompt_toolkit-3.0.51.tar.gz", hash = 
"sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed", size = 428940, upload-time = "2025-04-15T09:18:47.731Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816, upload-time = "2025-01-20T15:55:29.98Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4f/5249960887b1fbe561d9ff265496d170b55a735b76724f10ef19f9e40716/prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07", size = 387810, upload-time = "2025-04-15T09:18:44.753Z" }, ] [[package]] name = "pyaml" -version = "25.1.0" +version = "25.5.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pyyaml" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f4/06/04b9c1907c13dc81729a9c6b4f42eab47baab7a8738ed5d2683eac215ad0/pyaml-25.1.0.tar.gz", hash = "sha256:33a93ac49218f57e020b81e280d2706cea554ac5a76445ac79add760d019c709", size = 29469, upload-time = "2025-01-01T14:52:46.684Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c1/40/94f10f32ab952c5cca713d9ac9d8b2fdc37392d90eea403823eeac674c24/pyaml-25.5.0.tar.gz", hash = "sha256:5799560c7b1c9daf35a7a4535f53e2c30323f74cbd7cb4f2e715b16dd681a58a", size = 29812, upload-time = "2025-05-29T05:34:05.292Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/69/c1/ec1930bc6c01754b8baf3c99420f340b920561f0060bccbf81809db354cc/pyaml-25.1.0-py3-none-any.whl", hash = "sha256:f7b40629d2dae88035657c860f539db3525ddd0120a11e0bcb44d47d5968b3bc", size = 26074, upload-time = "2025-01-01T14:52:45.006Z" }, + { url = "https://files.pythonhosted.org/packages/aa/7d/1b5061beff826f902285827261485a058b943332eba8a5532a0164735205/pyaml-25.5.0-py3-none-any.whl", hash = "sha256:b9e0c4e58a5e8003f8f18e802db49fd0563ada587209b13e429bdcbefa87d035", size = 26422, upload-time = "2025-05-29T05:34:03.594Z" }, ] [[package]] name = "pydantic" -version = "2.10.6" +version = "2.11.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-types" }, { name = "pydantic-core" }, { name = "typing-extensions" }, + { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681, upload-time = "2025-01-24T01:42:12.693Z" } +sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696, upload-time = "2025-01-24T01:42:10.371Z" }, + { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" }, ] [[package]] name = "pydantic-core" -version 
= "2.27.2" +version = "2.33.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443, upload-time = "2024-12-18T11:31:54.917Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127, upload-time = "2024-12-18T11:28:30.346Z" }, - { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340, upload-time = "2024-12-18T11:28:32.521Z" }, - { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900, upload-time = "2024-12-18T11:28:34.507Z" }, - { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177, upload-time = "2024-12-18T11:28:36.488Z" }, - { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046, upload-time = "2024-12-18T11:28:39.409Z" }, - { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386, upload-time = "2024-12-18T11:28:41.221Z" }, - { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060, upload-time = "2024-12-18T11:28:44.709Z" }, - { url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870, upload-time = "2024-12-18T11:28:46.839Z" }, - { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822, upload-time = "2024-12-18T11:28:48.896Z" }, - { url = 
"https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364, upload-time = "2024-12-18T11:28:50.755Z" }, - { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303, upload-time = "2024-12-18T11:28:54.122Z" }, - { url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064, upload-time = "2024-12-18T11:28:56.074Z" }, - { url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046, upload-time = "2024-12-18T11:28:58.107Z" }, - { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092, upload-time = "2024-12-18T11:29:01.335Z" }, - { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709, upload-time = "2024-12-18T11:29:03.193Z" }, - { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273, upload-time = "2024-12-18T11:29:05.306Z" }, - { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027, upload-time = "2024-12-18T11:29:07.294Z" }, - { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888, upload-time = "2024-12-18T11:29:09.249Z" }, - { url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738, upload-time = "2024-12-18T11:29:11.23Z" }, - { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138, upload-time = "2024-12-18T11:29:16.396Z" 
}, - { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025, upload-time = "2024-12-18T11:29:20.25Z" }, - { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633, upload-time = "2024-12-18T11:29:23.877Z" }, - { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404, upload-time = "2024-12-18T11:29:25.872Z" }, - { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130, upload-time = "2024-12-18T11:29:29.252Z" }, - { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946, upload-time = "2024-12-18T11:29:31.338Z" }, - { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387, upload-time = "2024-12-18T11:29:33.481Z" }, - { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453, upload-time = "2024-12-18T11:29:35.533Z" }, - { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186, upload-time = "2024-12-18T11:29:37.649Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" }, + { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" }, + { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" }, + { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" }, + { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" }, + { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" }, + { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", 
size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" }, + { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" }, + { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" }, + { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" }, + { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" }, + { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" }, + { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" }, + { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" }, + { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" }, + { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" }, + { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = 
"sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" }, + { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" }, + { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" }, + { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" }, + { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" }, + { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" }, + { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, ] [[package]] name = "pygments" -version = "2.19.1" +version = "2.19.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581, upload-time = "2025-01-06T17:26:30.443Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" }, + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = 
"sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, ] [[package]] name = "pytest" -version = "8.4.0" +version = "8.4.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -583,9 +590,9 @@ dependencies = [ { name = "pluggy" }, { name = "pygments" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fb/aa/405082ce2749be5398045152251ac69c0f3578c7077efc53431303af97ce/pytest-8.4.0.tar.gz", hash = "sha256:14d920b48472ea0dbf68e45b96cd1ffda4705f33307dcc86c676c1b5104838a6", size = 1515232, upload-time = "2025-06-02T17:36:30.03Z" } +sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714, upload-time = "2025-06-18T05:48:06.109Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2f/de/afa024cbe022b1b318a3d224125aa24939e99b4ff6f22e0ba639a2eaee47/pytest-8.4.0-py3-none-any.whl", hash = "sha256:f40f825768ad76c0977cbacdf1fd37c6f7a468e460ea6a0636078f8972d4517e", size = 363797, upload-time = "2025-06-02T17:36:27.859Z" }, + { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474, upload-time = "2025-06-18T05:48:03.955Z" }, ] [[package]] @@ -614,11 +621,11 @@ wheels = [ [[package]] name = "pytz" -version = "2025.1" +version = "2025.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5f/57/df1c9157c8d5a05117e455d66fd7cf6dbc46974f832b1058ed4856785d8a/pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e", size = 319617, upload-time = "2025-01-31T01:54:48.615Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/eb/38/ac33370d784287baa1c3d538978b5e2ea064d4c1b93ffbd12826c190dd10/pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57", size = 507930, upload-time = "2025-01-31T01:54:45.634Z" }, + { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, ] [[package]] @@ -676,40 +683,40 @@ wheels = [ [[package]] name = "rich" -version = "13.9.4" +version = "14.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markdown-it-py" }, { name = "pygments" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149, upload-time = "2024-11-01T16:43:57.873Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a1/53/830aa4c3066a8ab0ae9a9955976fb770fe9c6102117c8ec4ab3ea62d89e8/rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725", size = 224078, upload-time = "2025-03-30T14:15:14.23Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424, upload-time = "2024-11-01T16:43:55.817Z" }, + { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229, upload-time = "2025-03-30T14:15:12.283Z" }, ] [[package]] name = "ruff" -version = "0.11.13" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ed/da/9c6f995903b4d9474b39da91d2d626659af3ff1eeb43e9ae7c119349dba6/ruff-0.11.13.tar.gz", hash = "sha256:26fa247dc68d1d4e72c179e08889a25ac0c7ba4d78aecfc835d49cbfd60bf514", size = 4282054, upload-time = "2025-06-05T21:00:15.721Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7d/ce/a11d381192966e0b4290842cc8d4fac7dc9214ddf627c11c1afff87da29b/ruff-0.11.13-py3-none-linux_armv6l.whl", hash = "sha256:4bdfbf1240533f40042ec00c9e09a3aade6f8c10b6414cf11b519488d2635d46", size = 10292516, upload-time = "2025-06-05T20:59:32.944Z" }, - { url = "https://files.pythonhosted.org/packages/78/db/87c3b59b0d4e753e40b6a3b4a2642dfd1dcaefbff121ddc64d6c8b47ba00/ruff-0.11.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aef9c9ed1b5ca28bb15c7eac83b8670cf3b20b478195bd49c8d756ba0a36cf48", size = 11106083, upload-time = "2025-06-05T20:59:37.03Z" }, - { url = "https://files.pythonhosted.org/packages/77/79/d8cec175856ff810a19825d09ce700265f905c643c69f45d2b737e4a470a/ruff-0.11.13-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53b15a9dfdce029c842e9a5aebc3855e9ab7771395979ff85b7c1dedb53ddc2b", size = 10436024, upload-time = "2025-06-05T20:59:39.741Z" }, - { url = "https://files.pythonhosted.org/packages/8b/5b/f6d94f2980fa1ee854b41568368a2e1252681b9238ab2895e133d303538f/ruff-0.11.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab153241400789138d13f362c43f7edecc0edfffce2afa6a68434000ecd8f69a", size = 10646324, upload-time = "2025-06-05T20:59:42.185Z" }, - { url = "https://files.pythonhosted.org/packages/6c/9c/b4c2acf24ea4426016d511dfdc787f4ce1ceb835f3c5fbdbcb32b1c63bda/ruff-0.11.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c51f93029d54a910d3d24f7dd0bb909e31b6cd989a5e4ac513f4eb41629f0dc", size = 10174416, upload-time = "2025-06-05T20:59:44.319Z" }, - { url = "https://files.pythonhosted.org/packages/f3/10/e2e62f77c65ede8cd032c2ca39c41f48feabedb6e282bfd6073d81bb671d/ruff-0.11.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1808b3ed53e1a777c2ef733aca9051dc9bf7c99b26ece15cb59a0320fbdbd629", size = 11724197, upload-time = "2025-06-05T20:59:46.935Z" }, - { url = "https://files.pythonhosted.org/packages/bb/f0/466fe8469b85c561e081d798c45f8a1d21e0b4a5ef795a1d7f1a9a9ec182/ruff-0.11.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d28ce58b5ecf0f43c1b71edffabe6ed7f245d5336b17805803312ec9bc665933", size = 12511615, upload-time = "2025-06-05T20:59:49.534Z" }, - { url = 
"https://files.pythonhosted.org/packages/17/0e/cefe778b46dbd0cbcb03a839946c8f80a06f7968eb298aa4d1a4293f3448/ruff-0.11.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55e4bc3a77842da33c16d55b32c6cac1ec5fb0fbec9c8c513bdce76c4f922165", size = 12117080, upload-time = "2025-06-05T20:59:51.654Z" }, - { url = "https://files.pythonhosted.org/packages/5d/2c/caaeda564cbe103bed145ea557cb86795b18651b0f6b3ff6a10e84e5a33f/ruff-0.11.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:633bf2c6f35678c56ec73189ba6fa19ff1c5e4807a78bf60ef487b9dd272cc71", size = 11326315, upload-time = "2025-06-05T20:59:54.469Z" }, - { url = "https://files.pythonhosted.org/packages/75/f0/782e7d681d660eda8c536962920c41309e6dd4ebcea9a2714ed5127d44bd/ruff-0.11.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ffbc82d70424b275b089166310448051afdc6e914fdab90e08df66c43bb5ca9", size = 11555640, upload-time = "2025-06-05T20:59:56.986Z" }, - { url = "https://files.pythonhosted.org/packages/5d/d4/3d580c616316c7f07fb3c99dbecfe01fbaea7b6fd9a82b801e72e5de742a/ruff-0.11.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a9ddd3ec62a9a89578c85842b836e4ac832d4a2e0bfaad3b02243f930ceafcc", size = 10507364, upload-time = "2025-06-05T20:59:59.154Z" }, - { url = "https://files.pythonhosted.org/packages/5a/dc/195e6f17d7b3ea6b12dc4f3e9de575db7983db187c378d44606e5d503319/ruff-0.11.13-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d237a496e0778d719efb05058c64d28b757c77824e04ffe8796c7436e26712b7", size = 10141462, upload-time = "2025-06-05T21:00:01.481Z" }, - { url = "https://files.pythonhosted.org/packages/f4/8e/39a094af6967faa57ecdeacb91bedfb232474ff8c3d20f16a5514e6b3534/ruff-0.11.13-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26816a218ca6ef02142343fd24c70f7cd8c5aa6c203bca284407adf675984432", size = 11121028, upload-time = "2025-06-05T21:00:04.06Z" }, - { url = "https://files.pythonhosted.org/packages/5a/c0/b0b508193b0e8a1654ec683ebab18d309861f8bd64e3a2f9648b80d392cb/ruff-0.11.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:51c3f95abd9331dc5b87c47ac7f376db5616041173826dfd556cfe3d4977f492", size = 11602992, upload-time = "2025-06-05T21:00:06.249Z" }, - { url = "https://files.pythonhosted.org/packages/7c/91/263e33ab93ab09ca06ce4f8f8547a858cc198072f873ebc9be7466790bae/ruff-0.11.13-py3-none-win32.whl", hash = "sha256:96c27935418e4e8e77a26bb05962817f28b8ef3843a6c6cc49d8783b5507f250", size = 10474944, upload-time = "2025-06-05T21:00:08.459Z" }, - { url = "https://files.pythonhosted.org/packages/46/f4/7c27734ac2073aae8efb0119cae6931b6fb48017adf048fdf85c19337afc/ruff-0.11.13-py3-none-win_amd64.whl", hash = "sha256:29c3189895a8a6a657b7af4e97d330c8a3afd2c9c8f46c81e2fc5a31866517e3", size = 11548669, upload-time = "2025-06-05T21:00:11.147Z" }, - { url = "https://files.pythonhosted.org/packages/ec/bf/b273dd11673fed8a6bd46032c0ea2a04b2ac9bfa9c628756a5856ba113b0/ruff-0.11.13-py3-none-win_arm64.whl", hash = "sha256:b4385285e9179d608ff1d2fb9922062663c658605819a6876d8beef0c30b7f3b", size = 10683928, upload-time = "2025-06-05T21:00:13.758Z" }, +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/97/38/796a101608a90494440856ccfb52b1edae90de0b817e76bfade66b12d320/ruff-0.12.1.tar.gz", hash = "sha256:806bbc17f1104fd57451a98a58df35388ee3ab422e029e8f5cf30aa4af2c138c", size = 4413426, upload-time = "2025-06-26T20:34:14.784Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/06/bf/3dba52c1d12ab5e78d75bd78ad52fb85a6a1f29cc447c2423037b82bed0d/ruff-0.12.1-py3-none-linux_armv6l.whl", hash = "sha256:6013a46d865111e2edb71ad692fbb8262e6c172587a57c0669332a449384a36b", size = 10305649, upload-time = "2025-06-26T20:33:39.242Z" }, + { url = "https://files.pythonhosted.org/packages/8c/65/dab1ba90269bc8c81ce1d499a6517e28fe6f87b2119ec449257d0983cceb/ruff-0.12.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b3f75a19e03a4b0757d1412edb7f27cffb0c700365e9d6b60bc1b68d35bc89e0", size = 11120201, upload-time = "2025-06-26T20:33:42.207Z" }, + { url = "https://files.pythonhosted.org/packages/3f/3e/2d819ffda01defe857fa2dd4cba4d19109713df4034cc36f06bbf582d62a/ruff-0.12.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:9a256522893cb7e92bb1e1153283927f842dea2e48619c803243dccc8437b8be", size = 10466769, upload-time = "2025-06-26T20:33:44.102Z" }, + { url = "https://files.pythonhosted.org/packages/63/37/bde4cf84dbd7821c8de56ec4ccc2816bce8125684f7b9e22fe4ad92364de/ruff-0.12.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:069052605fe74c765a5b4272eb89880e0ff7a31e6c0dbf8767203c1fbd31c7ff", size = 10660902, upload-time = "2025-06-26T20:33:45.98Z" }, + { url = "https://files.pythonhosted.org/packages/0e/3a/390782a9ed1358c95e78ccc745eed1a9d657a537e5c4c4812fce06c8d1a0/ruff-0.12.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a684f125a4fec2d5a6501a466be3841113ba6847827be4573fddf8308b83477d", size = 10167002, upload-time = "2025-06-26T20:33:47.81Z" }, + { url = "https://files.pythonhosted.org/packages/6d/05/f2d4c965009634830e97ffe733201ec59e4addc5b1c0efa035645baa9e5f/ruff-0.12.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdecdef753bf1e95797593007569d8e1697a54fca843d78f6862f7dc279e23bd", size = 11751522, upload-time = "2025-06-26T20:33:49.857Z" }, + { url = "https://files.pythonhosted.org/packages/35/4e/4bfc519b5fcd462233f82fc20ef8b1e5ecce476c283b355af92c0935d5d9/ruff-0.12.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:70d52a058c0e7b88b602f575d23596e89bd7d8196437a4148381a3f73fcd5010", size = 12520264, upload-time = "2025-06-26T20:33:52.199Z" }, + { url = "https://files.pythonhosted.org/packages/85/b2/7756a6925da236b3a31f234b4167397c3e5f91edb861028a631546bad719/ruff-0.12.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84d0a69d1e8d716dfeab22d8d5e7c786b73f2106429a933cee51d7b09f861d4e", size = 12133882, upload-time = "2025-06-26T20:33:54.231Z" }, + { url = "https://files.pythonhosted.org/packages/dd/00/40da9c66d4a4d51291e619be6757fa65c91b92456ff4f01101593f3a1170/ruff-0.12.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cc32e863adcf9e71690248607ccdf25252eeeab5193768e6873b901fd441fed", size = 11608941, upload-time = "2025-06-26T20:33:56.202Z" }, + { url = "https://files.pythonhosted.org/packages/91/e7/f898391cc026a77fbe68dfea5940f8213622474cb848eb30215538a2dadf/ruff-0.12.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fd49a4619f90d5afc65cf42e07b6ae98bb454fd5029d03b306bd9e2273d44cc", size = 11602887, upload-time = "2025-06-26T20:33:58.47Z" }, + { url = "https://files.pythonhosted.org/packages/f6/02/0891872fc6aab8678084f4cf8826f85c5d2d24aa9114092139a38123f94b/ruff-0.12.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ed5af6aaaea20710e77698e2055b9ff9b3494891e1b24d26c07055459bb717e9", size = 10521742, upload-time = "2025-06-26T20:34:00.465Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/98/d6534322c74a7d47b0f33b036b2498ccac99d8d8c40edadb552c038cecf1/ruff-0.12.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:801d626de15e6bf988fbe7ce59b303a914ff9c616d5866f8c79eb5012720ae13", size = 10149909, upload-time = "2025-06-26T20:34:02.603Z" }, + { url = "https://files.pythonhosted.org/packages/34/5c/9b7ba8c19a31e2b6bd5e31aa1e65b533208a30512f118805371dbbbdf6a9/ruff-0.12.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:2be9d32a147f98a1972c1e4df9a6956d612ca5f5578536814372113d09a27a6c", size = 11136005, upload-time = "2025-06-26T20:34:04.723Z" }, + { url = "https://files.pythonhosted.org/packages/dc/34/9bbefa4d0ff2c000e4e533f591499f6b834346025e11da97f4ded21cb23e/ruff-0.12.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:49b7ce354eed2a322fbaea80168c902de9504e6e174fd501e9447cad0232f9e6", size = 11648579, upload-time = "2025-06-26T20:34:06.766Z" }, + { url = "https://files.pythonhosted.org/packages/6f/1c/20cdb593783f8f411839ce749ec9ae9e4298c2b2079b40295c3e6e2089e1/ruff-0.12.1-py3-none-win32.whl", hash = "sha256:d973fa626d4c8267848755bd0414211a456e99e125dcab147f24daa9e991a245", size = 10519495, upload-time = "2025-06-26T20:34:08.718Z" }, + { url = "https://files.pythonhosted.org/packages/cf/56/7158bd8d3cf16394928f47c637d39a7d532268cd45220bdb6cd622985760/ruff-0.12.1-py3-none-win_amd64.whl", hash = "sha256:9e1123b1c033f77bd2590e4c1fe7e8ea72ef990a85d2484351d408224d603013", size = 11547485, upload-time = "2025-06-26T20:34:11.008Z" }, + { url = "https://files.pythonhosted.org/packages/91/d0/6902c0d017259439d6fd2fd9393cea1cfe30169940118b007d5e0ea7e954/ruff-0.12.1-py3-none-win_arm64.whl", hash = "sha256:78ad09a022c64c13cc6077707f036bab0fac8cd7088772dcd1e5be21c5002efc", size = 10691209, upload-time = "2025-06-26T20:34:12.928Z" }, ] [[package]] @@ -732,11 +739,11 @@ wheels = [ [[package]] name = "termcolor" -version = "2.5.0" +version = "3.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/37/72/88311445fd44c455c7d553e61f95412cf89054308a1aa2434ab835075fc5/termcolor-2.5.0.tar.gz", hash = "sha256:998d8d27da6d48442e8e1f016119076b690d962507531df4890fcd2db2ef8a6f", size = 13057, upload-time = "2024-10-06T19:50:04.115Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/6c/3d75c196ac07ac8749600b60b03f4f6094d54e132c4d94ebac6ee0e0add0/termcolor-3.1.0.tar.gz", hash = "sha256:6a6dd7fbee581909eeec6a756cff1d7f7c376063b14e4a298dc4980309e55970", size = 14324, upload-time = "2025-04-30T11:37:53.791Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/be/df630c387a0a054815d60be6a97eb4e8f17385d5d6fe660e1c02750062b4/termcolor-2.5.0-py3-none-any.whl", hash = "sha256:37b17b5fc1e604945c2642c872a3764b5d547a48009871aea3edd3afa180afb8", size = 7755, upload-time = "2024-10-06T19:50:02.097Z" }, + { url = "https://files.pythonhosted.org/packages/4f/bd/de8d508070629b6d84a30d01d57e4a65c69aa7f5abe7560b8fad3b50ea59/termcolor-3.1.0-py3-none-any.whl", hash = "sha256:591dd26b5c2ce03b9e43f391264626557873ce1d379019786f99b0c2bee140aa", size = 7684, upload-time = "2025-04-30T11:37:52.382Z" }, ] [[package]] @@ -753,29 +760,41 @@ wheels = [ [[package]] name = "typing-extensions" -version = "4.12.2" +version = "4.14.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = 
"sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321, upload-time = "2024-06-07T18:52:15.995Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/bc/51647cd02527e87d05cb083ccc402f93e441606ff1f01739a62c8ad09ba5/typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4", size = 107423, upload-time = "2025-06-02T14:52:11.399Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" }, + { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839, upload-time = "2025-06-02T14:52:10.026Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" }, ] [[package]] name = "tzdata" -version = "2025.1" +version = "2025.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/43/0f/fa4723f22942480be4ca9527bbde8d43f6c3f2fe8412f00e7f5f6746bc8b/tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694", size = 194950, upload-time = "2025-01-21T19:49:38.686Z" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0f/dd/84f10e23edd882c6f968c21c2434fe67bd4a528967067515feca9e611e5e/tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639", size = 346762, upload-time = "2025-01-21T19:49:37.187Z" }, + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, ] [[package]] name = "urllib3" -version = "2.4.0" +version = "2.5.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672, upload-time = "2025-04-10T15:23:39.232Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680, upload-time = "2025-04-10T15:23:37.377Z" }, + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, ] [[package]]