From 059672eb504dbcee5da39484009fbd8f4492ff4d Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Tue, 28 Jan 2025 12:43:35 +0000 Subject: [PATCH 01/29] chore: add devcontainer config --- .devcontainer/Dockerfile | 6 ++++++ .devcontainer/devcontainer.json | 35 +++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/devcontainer.json diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 00000000..b4539433 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,6 @@ +FROM mcr.microsoft.com/devcontainers/python:3.9 + +ENV PYTHONUNBUFFERED 1 + +# Install pre-commit +RUN pip install pre-commit \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000..f51d67e1 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,35 @@ +{ + "name": "Together Python Development", + "build": { + "dockerfile": "Dockerfile" + }, + "features": { + "ghcr.io/devcontainers/features/git:1": {}, + "ghcr.io/devcontainers-contrib/features/poetry:2": {} + }, + "customizations": { + "vscode": { + "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance", + "ms-python.isort", + "charliermarsh.ruff", + "ms-python.mypy-type-checker", + "eamodio.gitlens" + ], + "settings": { + "[python]": { + "editor.defaultFormatter": "charliermarsh.ruff" + }, + "editor.formatOnSave": true, + "editor.codeActionsOnSave": { + "source.fixAll": "explicit", + "source.organizeImports": "explicit" + }, + "ruff.lineLength": 100 + } + } + }, + "postCreateCommand": "poetry install", + "remoteUser": "vscode" +} \ No newline at end of file From b3caa48dded2ff78f79ca86057eb58ad4ba52f2b Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Tue, 28 Jan 2025 12:43:47 +0000 Subject: [PATCH 02/29] add openapi generation --- Makefile | 5 +- poetry.lock | 91 ++++++++++++++++++++++++++++++- pyproject.toml | 9 +++ 
scripts/generate_api_client.py | 74 +++++++++++++++++++++++++ src/together/cli/api/inference.py | 24 ++++++++ src/together/cli/cli.py | 10 ++-- 6 files changed, 203 insertions(+), 10 deletions(-) create mode 100755 scripts/generate_api_client.py create mode 100644 src/together/cli/api/inference.py diff --git a/Makefile b/Makefile index 4d63680a..f5398a86 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: all format lint test tests test_watch integration_tests docker_tests help extended_tests +.PHONY: all format lint test tests test_watch integration_tests docker_tests help extended_tests generate-client # Default target executed when no arguments are given to make. all: help @@ -49,3 +49,6 @@ help: @echo 'test_watch - run unit tests in watch mode' @echo 'extended_tests - run extended tests' @echo 'integration_tests - run integration tests' + +generate-client: + python scripts/generate_api_client.py diff --git a/poetry.lock b/poetry.lock index 39d73100..5c050c76 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -6,6 +6,7 @@ version = "2.4.0" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" +groups = ["main", "examples"] files = [ {file = "aiohappyeyeballs-2.4.0-py3-none-any.whl", hash = "sha256:7ce92076e249169a13c2f49320d1967425eaf1f407522d707d59cac7628d62bd"}, {file = "aiohappyeyeballs-2.4.0.tar.gz", hash = "sha256:55a1714f084e63d49639800f95716da97a1f173d46a16dfcfda0016abb93b6b2"}, @@ -17,6 +18,7 @@ version = "3.10.11" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.8" +groups = ["main", "examples"] files = [ {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5077b1a5f40ffa3ba1f40d537d3bec4383988ee51fbba6b74aa8fb1bc466599e"}, {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d6a14a4d93b5b3c2891fca94fa9d41b2322a68194422bef0dd5ec1e57d7d298"}, @@ -129,6 +131,7 @@ version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.7" +groups = ["main", "examples"] files = [ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, @@ -143,6 +146,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -157,6 +161,8 @@ version = "4.0.3" description = "Timeout context manager for asyncio programs" optional = false python-versions = ">=3.7" +groups = ["main", "examples"] +markers = 
"python_version < \"3.11\"" files = [ {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, @@ -168,6 +174,7 @@ version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" +groups = ["main", "examples"] files = [ {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, @@ -187,6 +194,7 @@ version = "24.8.0" description = "The uncompromising code formatter." optional = false python-versions = ">=3.8" +groups = ["quality"] files = [ {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"}, {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"}, @@ -233,6 +241,7 @@ version = "5.5.0" description = "Extensible memoizing collections and decorators" optional = false python-versions = ">=3.7" +groups = ["tests"] files = [ {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, @@ -244,6 +253,7 @@ version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" +groups = ["main", "examples"] files = [ {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, @@ -255,6 +265,7 @@ version = "3.4.0" description = "Validate configuration and produce human readable error messages." optional = false python-versions = ">=3.8" +groups = ["quality"] files = [ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, @@ -266,6 +277,7 @@ version = "5.2.0" description = "Universal encoding detector for Python 3" optional = false python-versions = ">=3.7" +groups = ["tests"] files = [ {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, @@ -277,6 +289,7 @@ version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7.0" +groups = ["main", "examples"] files = [ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, @@ -376,6 +389,7 @@ version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" +groups = ["main", "quality"] files = [ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, @@ -390,10 +404,12 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "examples", "quality", "tests"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {main = "platform_system == \"Windows\"", examples = "platform_system == \"Windows\"", quality = "platform_system == \"Windows\""} [[package]] name = "datasets" @@ -401,6 +417,7 @@ version = "3.1.0" description = "HuggingFace community-driven open-source library of datasets" optional = false python-versions = ">=3.8.0" +groups = ["examples"] files = [ {file = "datasets-3.1.0-py3-none-any.whl", hash = "sha256:dc8808a6d17838fe05e13b39aa7ac3ea0fd0806ed7004eaf4d4eb2c2a356bc61"}, {file = "datasets-3.1.0.tar.gz", hash = "sha256:c92cac049e0f9f85b0dd63739c68e564c657b1624bc2b66b1e13489062832e27"}, @@ -443,6 +460,7 @@ version = "0.3.8" description = "serialize all of Python" optional = false 
python-versions = ">=3.8" +groups = ["examples"] files = [ {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, @@ -458,6 +476,7 @@ version = "0.3.8" description = "Distribution utilities" optional = false python-versions = "*" +groups = ["quality", "tests"] files = [ {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, @@ -469,6 +488,7 @@ version = "0.6.2" description = "Pythonic argument parser, that will make you smile" optional = false python-versions = "*" +groups = ["tests"] files = [ {file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"}, ] @@ -479,6 +499,7 @@ version = "0.2.2" description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "eval_type_backport-0.2.2-py3-none-any.whl", hash = "sha256:cb6ad7c393517f476f96d456d0412ea80f0a8cf96f6892834cd9340149111b0a"}, {file = "eval_type_backport-0.2.2.tar.gz", hash = "sha256:f0576b4cf01ebb5bd358d02314d31846af5e07678387486e2c798af0e7d849c1"}, @@ -493,6 +514,8 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["tests"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -507,6 +530,7 @@ version = "3.16.1" description = "A platform independent file lock." optional = false python-versions = ">=3.8" +groups = ["main", "examples", "quality", "tests"] files = [ {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, @@ -523,6 +547,7 @@ version = "1.4.1" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" +groups = ["main", "examples"] files = [ {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, @@ -609,6 +634,7 @@ version = "2024.6.1" description = "File-system specification" optional = false python-versions = ">=3.8" +groups = ["examples"] files = [ {file = "fsspec-2024.6.1-py3-none-any.whl", hash = 
"sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"}, {file = "fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49"}, @@ -651,6 +677,7 @@ version = "0.25.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" +groups = ["examples"] files = [ {file = "huggingface_hub-0.25.1-py3-none-any.whl", hash = "sha256:a5158ded931b3188f54ea9028097312cb0acd50bffaaa2612014c3c526b44972"}, {file = "huggingface_hub-0.25.1.tar.gz", hash = "sha256:9ff7cb327343211fbd06e2b149b8f362fd1e389454f3f14c6db75a4999ee20ff"}, @@ -685,6 +712,7 @@ version = "2.6.1" description = "File identification library for Python" optional = false python-versions = ">=3.8" +groups = ["quality"] files = [ {file = "identify-2.6.1-py2.py3-none-any.whl", hash = "sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0"}, {file = "identify-2.6.1.tar.gz", hash = "sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98"}, @@ -699,6 +727,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main", "examples"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -713,6 +742,7 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["tests"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -724,6 +754,7 @@ version = "3.0.0" description = "Python port of 
markdown-it. Markdown parsing, done right!" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, @@ -748,6 +779,7 @@ version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, @@ -759,6 +791,7 @@ version = "6.1.0" description = "multidict implementation" optional = false python-versions = ">=3.8" +groups = ["main", "examples"] files = [ {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, @@ -863,6 +896,7 @@ version = "0.70.16" description = "better multiprocessing and multithreading in Python" optional = false python-versions = ">=3.8" +groups = ["examples"] files = [ {file = "multiprocess-0.70.16-pp310-pypy310_pp73-macosx_10_13_x86_64.whl", hash = "sha256:476887be10e2f59ff183c006af746cb6f1fd0eadcfd4ef49e605cbe2659920ee"}, {file = "multiprocess-0.70.16-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d951bed82c8f73929ac82c61f01a7b5ce8f3e5ef40f5b52553b4f547ce2b08ec"}, @@ -887,6 +921,7 @@ version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" +groups = ["dev", "quality"] files = [ {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, @@ -946,6 +981,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" +groups = ["dev", "quality"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -957,6 +993,7 @@ version = "1.9.1" description = "Node.js virtual environment builder" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["quality"] files = [ {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, @@ -968,6 +1005,8 @@ version = "1.24.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.8" +groups = ["main", "examples", "quality"] +markers = "python_version < \"3.12\"" files = [ {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, @@ -1005,6 +1044,8 @@ version = "2.1.1" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.10" +groups = ["main", "examples", "quality"] +markers = "python_version >= \"3.12\"" files = [ {file = "numpy-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:c8a0e34993b510fc19b9a2ce7f31cb8e94ecf6e924a40c0c9dd4f62d0aac47d9"}, {file = "numpy-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7dd86dfaf7c900c0bbdcb8b16e2f6ddf1eb1fe39c6c8cca6e94844ed3152a8fd"}, @@ -1067,6 +1108,7 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["examples", "quality", "tests"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -1078,6 +1120,7 @@ version = "2.0.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.8" +groups = ["examples"] files = [ {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, @@ -1145,6 +1188,7 @@ version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." 
optional = false python-versions = ">=3.8" +groups = ["quality"] files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, @@ -1156,6 +1200,7 @@ version = "10.4.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, @@ -1253,6 +1298,7 @@ version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" +groups = ["quality", "tests"] files = [ {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, @@ -1269,6 +1315,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["tests"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -1284,6 +1331,7 @@ version = "3.5.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." 
optional = false python-versions = ">=3.8" +groups = ["quality"] files = [ {file = "pre_commit-3.5.0-py2.py3-none-any.whl", hash = "sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660"}, {file = "pre_commit-3.5.0.tar.gz", hash = "sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32"}, @@ -1302,6 +1350,7 @@ version = "17.0.0" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.8" +groups = ["main", "examples", "quality"] files = [ {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, @@ -1353,6 +1402,7 @@ version = "17.16" description = "Type annotations for pyarrow" optional = false python-versions = "<4,>=3.8" +groups = ["quality"] files = [ {file = "pyarrow_stubs-17.16-py3-none-any.whl", hash = "sha256:76eebeb2433fe93754c74a252e976b4e76a182f8f054748872aa3a98d347621d"}, {file = "pyarrow_stubs-17.16.tar.gz", hash = "sha256:ab74652db59855a09d5de7009f57d079f4fb318650ff767ec91841f3d2fa550c"}, @@ -1367,6 +1417,7 @@ version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, @@ -1387,6 +1438,7 @@ version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, {file = 
"pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, @@ -1499,6 +1551,7 @@ version = "2.18.0" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, @@ -1513,6 +1566,7 @@ version = "1.8.0" description = "API to interact with the python pyproject.toml based projects" optional = false python-versions = ">=3.8" +groups = ["tests"] files = [ {file = "pyproject_api-1.8.0-py3-none-any.whl", hash = "sha256:3d7d347a047afe796fd5d1885b1e391ba29be7169bd2f102fcd378f04273d228"}, {file = "pyproject_api-1.8.0.tar.gz", hash = "sha256:77b8049f2feb5d33eefcc21b57f1e279636277a8ac8ad6b5871037b243778496"}, @@ -1532,6 +1586,7 @@ version = "8.3.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" +groups = ["tests"] files = [ {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, @@ -1554,6 +1609,7 @@ version = "4.2.0" description = "Local continuous test runner with pytest and watchdog." 
optional = false python-versions = "*" +groups = ["tests"] files = [ {file = "pytest-watch-4.2.0.tar.gz", hash = "sha256:06136f03d5b361718b8d0d234042f7b2f203910d8568f63df2f866b547b3d4b9"}, ] @@ -1570,6 +1626,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["examples"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -1584,6 +1641,7 @@ version = "2024.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" +groups = ["examples"] files = [ {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, @@ -1595,6 +1653,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" +groups = ["examples", "quality"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -1657,6 +1716,7 @@ version = "2024.9.11" description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.8" +groups = ["examples"] files = [ {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"}, {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"}, @@ -1760,6 +1820,7 @@ version = "2.32.3" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" +groups = ["main", "examples"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -1781,6 +1842,7 @@ version = "13.9.4" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" +groups = ["main"] files = [ {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, @@ -1800,6 +1862,7 @@ version = "0.9.3" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" +groups = ["quality"] files = [ {file = "ruff-0.9.3-py3-none-linux_armv6l.whl", hash = "sha256:7f39b879064c7d9670197d91124a75d118d00b0990586549949aae80cdc16624"}, {file = "ruff-0.9.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:a187171e7c09efa4b4cc30ee5d0d55a8d6c5311b3e1b74ac5cb96cc89bafc43c"}, @@ -1827,6 +1890,7 @@ version = "0.4.5" description = "" optional = false python-versions = ">=3.7" +groups = ["examples"] files = [ {file = "safetensors-0.4.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a63eaccd22243c67e4f2b1c3e258b257effc4acd78f3b9d397edc8cf8f1298a7"}, {file = "safetensors-0.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:23fc9b4ec7b602915cbb4ec1a7c1ad96d2743c322f20ab709e2c35d1b66dad27"}, @@ -1959,6 +2023,7 @@ version = "1.5.4" description = "Tool to Detect Surrounding Shell" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, @@ -1970,6 +2035,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["examples"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -1981,6 +2047,7 @@ version = "0.9.0" description = "Pretty-print tabular data" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, {file = "tabulate-0.9.0.tar.gz", hash = 
"sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, @@ -1995,6 +2062,7 @@ version = "0.20.0" description = "" optional = false python-versions = ">=3.7" +groups = ["examples"] files = [ {file = "tokenizers-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6cff5c5e37c41bc5faa519d6f3df0679e4b37da54ea1f42121719c5e2b4905c0"}, {file = "tokenizers-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:62a56bf75c27443432456f4ca5ca055befa95e25be8a28141cc495cac8ae4d6d"}, @@ -2112,6 +2180,8 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" +groups = ["dev", "quality", "tests"] +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -2153,6 +2223,7 @@ version = "4.24.1" description = "tox is a generic virtualenv management and test command line tool" optional = false python-versions = ">=3.8" +groups = ["tests"] files = [ {file = "tox-4.24.1-py3-none-any.whl", hash = "sha256:57ba7df7d199002c6df8c2db9e6484f3de6ca8f42013c083ea2d4d1e5c6bdc75"}, {file = "tox-4.24.1.tar.gz", hash = "sha256:083a720adbc6166fff0b7d1df9d154f9d00bfccb9403b8abf6bc0ee435d6a62e"}, @@ -2180,6 +2251,7 @@ version = "4.67.1" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" +groups = ["main", "examples"] files = [ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, @@ -2201,6 +2273,7 @@ version = "4.46.3" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.8.0" 
+groups = ["examples"] files = [ {file = "transformers-4.46.3-py3-none-any.whl", hash = "sha256:a12ef6f52841fd190a3e5602145b542d03507222f2c64ebb7ee92e8788093aef"}, {file = "transformers-4.46.3.tar.gz", hash = "sha256:8ee4b3ae943fe33e82afff8e837f4b052058b07ca9be3cb5b729ed31295f72cc"}, @@ -2270,6 +2343,7 @@ version = "0.15.1" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "typer-0.15.1-py3-none-any.whl", hash = "sha256:7994fb7b8155b64d3402518560648446072864beefd44aa2dc36972a5972e847"}, {file = "typer-0.15.1.tar.gz", hash = "sha256:a0588c0a7fa68a1978a069818657778f86abe6ff5ea6abf472f940a08bfe4f0a"}, @@ -2287,6 +2361,7 @@ version = "2.32.0.20241016" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" +groups = ["quality"] files = [ {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, @@ -2301,6 +2376,7 @@ version = "0.9.0.20241207" description = "Typing stubs for tabulate" optional = false python-versions = ">=3.8" +groups = ["quality"] files = [ {file = "types_tabulate-0.9.0.20241207-py3-none-any.whl", hash = "sha256:b8dad1343c2a8ba5861c5441370c3e35908edd234ff036d4298708a1d4cf8a85"}, {file = "types_tabulate-0.9.0.20241207.tar.gz", hash = "sha256:ac1ac174750c0a385dfd248edc6279fa328aaf4ea317915ab879a2ec47833230"}, @@ -2312,6 +2388,7 @@ version = "4.67.0.20241221" description = "Typing stubs for tqdm" optional = false python-versions = ">=3.8" +groups = ["quality"] files = [ {file = "types_tqdm-4.67.0.20241221-py3-none-any.whl", hash = "sha256:a1f1c9cda5c2d8482d2c73957a5398bfdedda10f6bc7b3b4e812d5c910486d29"}, {file = "types_tqdm-4.67.0.20241221.tar.gz", hash = 
"sha256:e56046631056922385abe89aeb18af5611f471eadd7918a0ad7f34d84cd4c8cc"}, @@ -2326,10 +2403,12 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev", "examples", "quality", "tests"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] +markers = {tests = "python_version < \"3.11\""} [[package]] name = "tzdata" @@ -2337,6 +2416,7 @@ version = "2024.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" +groups = ["examples"] files = [ {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, @@ -2348,6 +2428,7 @@ version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" +groups = ["main", "examples", "quality"] files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, @@ -2365,6 +2446,7 @@ version = "20.29.1" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.8" +groups = ["quality", "tests"] files = [ {file = "virtualenv-20.29.1-py3-none-any.whl", hash = "sha256:4e4cb403c0b0da39e13b46b1b2476e505cb0046b25f242bee80f62bf990b2779"}, {file = "virtualenv-20.29.1.tar.gz", hash = "sha256:b8b8970138d32fb606192cb97f6cd4bb644fa486be9308fb9b63f81091b5dc35"}, @@ -2385,6 +2467,7 @@ version = "4.0.2" description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" +groups = ["tests"] files = [ {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"}, {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"}, @@ -2432,6 +2515,7 @@ version = "3.5.0" description = "Python binding for xxHash" optional = false python-versions = ">=3.7" +groups = ["examples"] files = [ {file = "xxhash-3.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ece616532c499ee9afbb83078b1b952beffef121d989841f7f4b3dc5ac0fd212"}, {file = "xxhash-3.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3171f693dbc2cef6477054a665dc255d996646b4023fe56cb4db80e26f4cc520"}, @@ -2564,6 +2648,7 @@ version = "1.12.1" description = "Yet another URL library" optional = false python-versions = ">=3.8" +groups = ["main", "examples"] files = [ {file = "yarl-1.12.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:64c5b0f2b937fe40d0967516eee5504b23cb247b8b7ffeba7213a467d9646fdc"}, {file = 
"yarl-1.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2e430ac432f969ef21770645743611c1618362309e3ad7cab45acd1ad1a540ff"}, @@ -2664,6 +2749,6 @@ idna = ">=2.0" multidict = ">=4.0" [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = "^3.8" -content-hash = "5aca1f99fafe7ed29e42a142cf78111224c13646318b962f8ce29630a02e074c" +content-hash = "d169851a7d91fdd84e2586eada0e49faa2e7dd9d35a24caf1b26142640f79251" diff --git a/pyproject.toml b/pyproject.toml index 76b3383b..f7f0622d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,6 +75,14 @@ datasets = ">=2.18,<4.0" transformers = "^4.39.3" +[tool.poetry.group.dev] +optional = true + +[tool.poetry.group.dev.dependencies] +mypy = "^1.14.1" +types-requests = "^2.31.0" +openapi-python-client = "^0.19.0" + [tool.poetry.urls] "Homepage" = "https://github.com/togethercomputer/together-python" "Bug Tracker" = "https://github.com/togethercomputer/together-python/issues" @@ -84,6 +92,7 @@ together = "together.cli.cli:main" [tool.black] target-version = ['py310'] +line-length = 100 [tool.ruff.lint] # Never enforce `E501` (line length violations). 
diff --git a/scripts/generate_api_client.py b/scripts/generate_api_client.py new file mode 100755 index 00000000..751274c4 --- /dev/null +++ b/scripts/generate_api_client.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import subprocess +import sys +from pathlib import Path + +import requests + + +OPENAPI_SPEC_URL = ( + "https://raw.githubusercontent.com/togethercomputer/openapi/refs/heads/main/openapi.yaml" +) +OUTPUT_DIR = Path(__file__).parent.parent / "src" / "together" / "generated" +GENERATOR_JAR_URL = "https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/7.3.0/openapi-generator-cli-7.3.0.jar" +GENERATOR_JAR = Path(__file__).parent / "openapi-generator-cli.jar" + + +def download_file(url: str, target: Path) -> None: + """Download a file if it doesn't exist.""" + if target.exists(): + return + + print(f"Downloading {url} to {target}") + response = requests.get(url, stream=True) + response.raise_for_status() + + target.parent.mkdir(parents=True, exist_ok=True) + with open(target, "wb") as f: + for chunk in response.iter_content(chunk_size=8192): + f.write(chunk) + + +def main() -> None: + # Download OpenAPI spec + spec_file = Path(__file__).parent / "openapi.yaml" + download_file(OPENAPI_SPEC_URL, spec_file) + + # Download generator if needed + download_file(GENERATOR_JAR_URL, GENERATOR_JAR) + + # Ensure output directory exists + OUTPUT_DIR.mkdir(parents=True, exist_ok=True) + + # Generate client code + cmd = [ + "java", + "-jar", + str(GENERATOR_JAR), + "generate", + "-i", + str(spec_file), + "-g", + "python", + "-o", + str(OUTPUT_DIR), + "--additional-properties=packageName=together.generated", + "--git-repo-id=together-python", + "--git-user-id=togethercomputer", + ] + + print("Generating client code...") + result = subprocess.run(cmd, capture_output=True, text=True) + + if result.returncode != 0: + print("Error generating client code:", file=sys.stderr) + print(result.stderr, file=sys.stderr) + sys.exit(1) 
+ + print("Successfully generated client code") + + +if __name__ == "__main__": + main() diff --git a/src/together/cli/api/inference.py b/src/together/cli/api/inference.py new file mode 100644 index 00000000..de52f1cd --- /dev/null +++ b/src/together/cli/api/inference.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +import click + + +@click.group() +def inference(): + """Manage inference endpoints and configurations.""" + pass + + +@inference.group() +def dedicated(): + """Manage dedicated inference endpoints.""" + pass + + +@dedicated.command() +@click.pass_obj +def create(client): + """Create a new dedicated inference endpoint.""" + click.echo("Creating new dedicated inference endpoint...") + # TODO: Implement the actual endpoint creation logic + pass diff --git a/src/together/cli/cli.py b/src/together/cli/cli.py index 8bfee0db..ff1f24f3 100644 --- a/src/together/cli/cli.py +++ b/src/together/cli/cli.py @@ -11,6 +11,7 @@ from together.cli.api.files import files from together.cli.api.finetune import fine_tuning from together.cli.api.images import images +from together.cli.api.inference import inference from together.cli.api.models import models from together.constants import MAX_RETRIES, TIMEOUT_SECS @@ -30,12 +31,8 @@ def print_version(ctx: click.Context, params: Any, value: Any) -> None: help="API Key. Defaults to environment variable `TOGETHER_API_KEY`", default=os.getenv("TOGETHER_API_KEY"), ) -@click.option( - "--base-url", type=str, help="API Base URL. Defaults to Together AI endpoint." -) -@click.option( - "--timeout", type=int, help=f"Request timeout. Defaults to {TIMEOUT_SECS} seconds" -) +@click.option("--base-url", type=str, help="API Base URL. Defaults to Together AI endpoint.") +@click.option("--timeout", type=int, help=f"Request timeout. 
Defaults to {TIMEOUT_SECS} seconds") @click.option( "--max-retries", type=int, @@ -72,6 +69,7 @@ def main( main.add_command(files) main.add_command(fine_tuning) main.add_command(models) +main.add_command(inference) if __name__ == "__main__": main() From cab32b957c40b2fa776896f14469fe87596d0f21 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Wed, 12 Feb 2025 19:22:41 +0000 Subject: [PATCH 03/29] feat: add dedicated endpoint cli --- .devcontainer/devcontainer.json | 5 + .gitignore | 3 + poetry.lock | 37 ++- pyproject.toml | 4 +- scripts/.gitignore | 2 + scripts/generate_api_client.py | 47 ++-- src/together/cli/api/endpoints.py | 337 ++++++++++++++++++++++++++++ src/together/cli/api/inference.py | 24 -- src/together/cli/cli.py | 4 +- src/together/client.py | 1 + src/together/resources/__init__.py | 5 +- src/together/resources/endpoints.py | 307 +++++++++++++++++++++++++ src/together/resources/models.py | 6 +- src/together/types/__init__.py | 45 ++-- src/together/types/endpoints.py | 12 + 15 files changed, 758 insertions(+), 81 deletions(-) create mode 100644 scripts/.gitignore create mode 100644 src/together/cli/api/endpoints.py delete mode 100644 src/together/cli/api/inference.py create mode 100644 src/together/resources/endpoints.py create mode 100644 src/together/types/endpoints.py diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index f51d67e1..b673f795 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -5,6 +5,11 @@ }, "features": { "ghcr.io/devcontainers/features/git:1": {}, + "ghcr.io/devcontainers/features/java:1": { + "version": "17", + "installMaven": false, + "installGradle": false + }, "ghcr.io/devcontainers-contrib/features/poetry:2": {} }, "customizations": { diff --git a/.gitignore b/.gitignore index b6e47617..c94c15ff 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,9 @@ share/python-wheels/ *.egg MANIFEST +# OpenAPI Generator Ignore +src/together/generated/ + # PyInstaller # 
Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. diff --git a/poetry.lock b/poetry.lock index 5c050c76..8a45d721 100644 --- a/poetry.lock +++ b/poetry.lock @@ -125,6 +125,21 @@ yarl = ">=1.12.0,<2.0" [package.extras] speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] +[[package]] +name = "aiohttp-retry" +version = "2.9.1" +description = "Simple retry client for aiohttp" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "aiohttp_retry-2.9.1-py3-none-any.whl", hash = "sha256:66d2759d1921838256a05a3f80ad7e724936f083e35be5abb5e16eed6be6dc54"}, + {file = "aiohttp_retry-2.9.1.tar.gz", hash = "sha256:8eb75e904ed4ee5c2ec242fefe85bf04240f685391c4879d8f541d6028ff01f1"}, +] + +[package.dependencies] +aiohttp = "*" + [[package]] name = "aiosignal" version = "1.3.1" @@ -748,6 +763,18 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "makefun" +version = "1.15.6" +description = "Small library to dynamically create python functions." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "makefun-1.15.6-py2.py3-none-any.whl", hash = "sha256:e69b870f0bb60304765b1e3db576aaecf2f9b3e5105afe8cfeff8f2afe6ad067"}, + {file = "makefun-1.15.6.tar.gz", hash = "sha256:26bc63442a6182fb75efed8b51741dd2d1db2f176bec8c64e20a586256b8f149"}, +] + [[package]] name = "markdown-it-py" version = "3.0.0" @@ -1626,7 +1653,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["examples"] +groups = ["main", "examples"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -2035,7 +2062,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["examples"] +groups = ["main", "examples"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -2361,7 +2388,7 @@ version = "2.32.0.20241016" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" -groups = ["quality"] +groups = ["dev", "quality"] files = [ {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, @@ -2428,7 +2455,7 @@ version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" -groups = ["main", "examples", "quality"] +groups = ["main", "dev", "examples", "quality"] files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, @@ -2751,4 +2778,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.1" python-versions = "^3.8" -content-hash = "d169851a7d91fdd84e2586eada0e49faa2e7dd9d35a24caf1b26142640f79251" +content-hash = "82809528a39fc644e6e9cef3bf8f652b195e22929c8b840e2c7d2bf5367f0ced" diff --git a/pyproject.toml b/pyproject.toml index f7f0622d..b823440c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,12 +39,15 @@ aiohttp = "^3.9.3" filelock = "^3.13.1" eval-type-backport = ">=0.1.3,<0.3.0" click = "^8.1.7" +python-dateutil = "^2.8.2" pillow = "^10.3.0" pyarrow = ">=10.0.1" +makefun = "^1.15.2" numpy = [ { version = ">=1.23.5", python = "<3.12" }, { version = ">=1.26.0", python = ">=3.12" }, ] +aiohttp-retry = "^2.9.1" [tool.poetry.group.quality] optional = true @@ -81,7 +84,6 @@ optional = true [tool.poetry.group.dev.dependencies] mypy = "^1.14.1" types-requests = "^2.31.0" -openapi-python-client = "^0.19.0" [tool.poetry.urls] "Homepage" = "https://github.com/togethercomputer/together-python" diff --git a/scripts/.gitignore b/scripts/.gitignore new file mode 100644 index 00000000..2a7ceb17 --- /dev/null +++ b/scripts/.gitignore @@ -0,0 +1,2 @@ +openapi-generator-cli.jar +openapi.yaml diff --git a/scripts/generate_api_client.py b/scripts/generate_api_client.py index 751274c4..7687cb80 100755 --- a/scripts/generate_api_client.py +++ b/scripts/generate_api_client.py @@ -1,34 +1,29 @@ #!/usr/bin/env python3 from __future__ import annotations +import shutil import subprocess import sys from pathlib import Path -import requests - -OPENAPI_SPEC_URL = ( - 
"https://raw.githubusercontent.com/togethercomputer/openapi/refs/heads/main/openapi.yaml" -) +OPENAPI_SPEC_URL = "https://raw.githubusercontent.com/togethercomputer/openapi/main/openapi.yaml" OUTPUT_DIR = Path(__file__).parent.parent / "src" / "together" / "generated" -GENERATOR_JAR_URL = "https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/7.3.0/openapi-generator-cli-7.3.0.jar" +GENERATOR_JAR_URL = "https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/7.11.0/openapi-generator-cli-7.11.0.jar" GENERATOR_JAR = Path(__file__).parent / "openapi-generator-cli.jar" +def run_command(cmd: list[str], check: bool = True) -> subprocess.CompletedProcess: + """Run a command and optionally check its return code.""" + print(f"Running: {' '.join(cmd)}") + return subprocess.run(cmd, check=check, capture_output=True, text=True) + + def download_file(url: str, target: Path) -> None: - """Download a file if it doesn't exist.""" - if target.exists(): - return + """Download a file""" print(f"Downloading {url} to {target}") - response = requests.get(url, stream=True) - response.raise_for_status() - - target.parent.mkdir(parents=True, exist_ok=True) - with open(target, "wb") as f: - for chunk in response.iter_content(chunk_size=8192): - f.write(chunk) + run_command(["wget", "-O", str(target), url]) def main() -> None: @@ -39,6 +34,9 @@ def main() -> None: # Download generator if needed download_file(GENERATOR_JAR_URL, GENERATOR_JAR) + # Delete existing generated code + shutil.rmtree(OUTPUT_DIR, ignore_errors=True) + # Ensure output directory exists OUTPUT_DIR.mkdir(parents=True, exist_ok=True) @@ -54,19 +52,32 @@ def main() -> None: "python", "-o", str(OUTPUT_DIR), - "--additional-properties=packageName=together.generated", + "--package-name=together.generated", "--git-repo-id=together-python", "--git-user-id=togethercomputer", + "--additional-properties=packageUrl=https://github.com/togethercomputer/together-python", + 
"--additional-properties=library=asyncio", + "--additional-properties=generateSourceCodeOnly=true", ] print("Generating client code...") - result = subprocess.run(cmd, capture_output=True, text=True) + result = run_command(cmd, check=False) if result.returncode != 0: print("Error generating client code:", file=sys.stderr) print(result.stderr, file=sys.stderr) sys.exit(1) + # Move files from nested directory to target directory + nested_dir = OUTPUT_DIR / "together" / "generated" + if nested_dir.exists(): + print("Moving files from nested directory...") + # Move all contents to parent directory + for item in nested_dir.iterdir(): + shutil.move(str(item), str(OUTPUT_DIR / item.name)) + # Clean up empty directories + shutil.rmtree(OUTPUT_DIR / "together", ignore_errors=True) + print("Successfully generated client code") diff --git a/src/together/cli/api/endpoints.py b/src/together/cli/api/endpoints.py new file mode 100644 index 00000000..0f810d9b --- /dev/null +++ b/src/together/cli/api/endpoints.py @@ -0,0 +1,337 @@ +from __future__ import annotations + +import sys +from functools import wraps +from typing import Any, Callable, Dict, List, Literal, TypeVar, Union + +import click + +from together import Together +from together.error import AuthenticationError, InvalidRequestError +from together.generated.exceptions import ForbiddenException, ServiceException +from together.types import DedicatedEndpoint, ListEndpoint + + +F = TypeVar("F", bound=Callable[..., Any]) + + +def print_endpoint(endpoint: Union[DedicatedEndpoint, ListEndpoint], json: bool = False): + """Print endpoint details in a Docker-like format or JSON.""" + if json: + import json as json_lib + + output: Dict[str, Any] = { + "id": endpoint.id, + "name": endpoint.name, + "model": endpoint.model, + "type": endpoint.type, + "owner": endpoint.owner, + "state": endpoint.state, + "created_at": endpoint.created_at.isoformat(), + } + + if isinstance(endpoint, DedicatedEndpoint): + output.update( + { + 
"display_name": endpoint.display_name, + "hardware": endpoint.hardware, + "autoscaling": { + "min_replicas": endpoint.autoscaling.min_replicas, + "max_replicas": endpoint.autoscaling.max_replicas, + }, + } + ) + + click.echo(json_lib.dumps(output, indent=2)) + return + + # Print header info + click.echo(f"ID:\t\t{endpoint.id}") + click.echo(f"Name:\t\t{endpoint.name}") + + # Print type-specific fields + if isinstance(endpoint, DedicatedEndpoint): + click.echo(f"Display Name:\t{endpoint.display_name}") + click.echo(f"Hardware:\t{endpoint.hardware}") + click.echo( + f"Autoscaling:\tMin={endpoint.autoscaling.min_replicas}, " + f"Max={endpoint.autoscaling.max_replicas}" + ) + + click.echo(f"Model:\t\t{endpoint.model}") + click.echo(f"Type:\t\t{endpoint.type}") + click.echo(f"Owner:\t\t{endpoint.owner}") + click.echo(f"State:\t\t{endpoint.state}") + click.echo(f"Created:\t{endpoint.created_at}") + + +def handle_api_errors(f: F) -> F: + """Decorator to handle common API errors in CLI commands.""" + + @wraps(f) + def wrapper(*args: Any, **kwargs: Any) -> Any: + try: + return f(*args, **kwargs) + except (ForbiddenException, ServiceException) as e: + error_details = "" + if e.data is not None: + error_details = e.data.to_dict()["error"]["message"] + else: + error_details = str(e) + + if "credentials" in error_details.lower() or "authentication" in error_details.lower(): + click.echo("Error: Invalid API key or authentication failed", err=True) + else: + click.echo(f"Error: {error_details}", err=True) + sys.exit(1) + except AuthenticationError as e: + click.echo(f"Error details: {str(e)}", err=True) + click.echo("Error: Invalid API key or authentication failed", err=True) + sys.exit(1) + except InvalidRequestError as e: + click.echo(f"Error details: {str(e)}", err=True) + click.echo("Error: Invalid request", err=True) + sys.exit(1) + except Exception as e: + click.echo(f"Error: An unexpected error occurred - {str(e)}", err=True) + sys.exit(1) + + return wrapper # type: ignore 
+ + +@click.group() +@click.pass_context +def endpoints(ctx: click.Context) -> None: + """Endpoints API commands""" + pass + + +@endpoints.command() +@click.option( + "--model", + required=True, + help="The model to deploy (e.g. mistralai/Mixtral-8x7B-Instruct-v0.1)", +) +@click.option( + "--min-replicas", + type=int, + default=1, + help="Minimum number of replicas to deploy", +) +@click.option( + "--max-replicas", + type=int, + default=1, + help="Maximum number of replicas to deploy", +) +@click.option( + "--gpu", + type=click.Choice(["h100", "a100", "l40", "l40s", "rtx-6000"]), + required=True, + help="GPU type to use for inference", +) +@click.option( + "--gpu-count", + type=int, + default=1, + help="Number of GPUs to use per replica", +) +@click.option( + "--display-name", + help="A human-readable name for the endpoint", +) +@click.option( + "--no-prompt-cache", + is_flag=True, + help="Disable the prompt cache for this endpoint", +) +@click.option( + "--no-speculative-decoding", + is_flag=True, + help="Disable speculative decoding for this endpoint", +) +@click.option( + "--no-auto-start", + is_flag=True, + help="Create the endpoint in STOPPED state instead of auto-starting it", +) +@click.pass_obj +@handle_api_errors +def create( + client: Together, + model: str, + min_replicas: int, + max_replicas: int, + gpu: str, + gpu_count: int, + display_name: str | None, + no_prompt_cache: bool, + no_speculative_decoding: bool, + no_auto_start: bool, +): + """Create a new dedicated inference endpoint.""" + # Map GPU types to their full hardware ID names + gpu_map = { + "h100": "nvidia_h100_80gb_sxm", + "a100": "nvidia_a100_80gb_pcie" if gpu_count == 1 else "nvidia_a100_80gb_sxm", + "l40": "nvidia_l40", + "l40s": "nvidia_l40s", + "rtx-6000": "nvidia_rtx_6000_ada", + } + + hardware_id = f"{gpu_count}x_{gpu_map[gpu]}" + + response = client.endpoints.create( + model=model, + hardware=hardware_id, + min_replicas=min_replicas, + max_replicas=max_replicas, + 
display_name=display_name, + disable_prompt_cache=no_prompt_cache, + disable_speculative_decoding=no_speculative_decoding, + state="STOPPED" if no_auto_start else "STARTED", + ) + + # Print detailed information to stderr + click.echo("Created dedicated endpoint with:", err=True) + click.echo(f" Model: {model}", err=True) + click.echo(f" Min replicas: {min_replicas}", err=True) + click.echo(f" Max replicas: {max_replicas}", err=True) + click.echo(f" Hardware: {hardware_id}", err=True) + if display_name: + click.echo(f" Display name: {display_name}", err=True) + if no_prompt_cache: + click.echo(" Prompt cache: disabled", err=True) + if no_speculative_decoding: + click.echo(" Speculative decoding: disabled", err=True) + if no_auto_start: + click.echo(" Auto-start: disabled", err=True) + + click.echo("Endpoint created successfully, id: ", err=True) + # Print only the endpoint ID to stdout + click.echo(response.id) + + +@endpoints.command() +@click.argument("endpoint-id", required=True) +@click.option("--json", is_flag=True, help="Print output in JSON format") +@click.pass_obj +@handle_api_errors +def get(client: Together, endpoint_id: str, json: bool): + """Get a dedicated inference endpoint.""" + endpoint = client.endpoints.get(endpoint_id) + print_endpoint(endpoint, json=json) + + +@endpoints.command() +@click.argument("endpoint-id", required=True) +@click.pass_obj +@handle_api_errors +def stop(client: Together, endpoint_id: str): + """Stop a dedicated inference endpoint.""" + client.endpoints.update(endpoint_id, state="STOPPED") + click.echo("Successfully stopped endpoint", err=True) + click.echo(endpoint_id) + + +@endpoints.command() +@click.argument("endpoint-id", required=True) +@click.pass_obj +@handle_api_errors +def start(client: Together, endpoint_id: str): + """Start a dedicated inference endpoint.""" + client.endpoints.update(endpoint_id, state="STARTED") + click.echo("Successfully started endpoint", err=True) + click.echo(endpoint_id) + + 
+@endpoints.command() +@click.argument("endpoint-id", required=True) +@click.pass_obj +@handle_api_errors +def delete(client: Together, endpoint_id: str): + """Delete a dedicated inference endpoint.""" + client.endpoints.delete(endpoint_id) + click.echo("Successfully deleted endpoint", err=True) + click.echo(endpoint_id) + + +@endpoints.command() +@click.option("--json", is_flag=True, help="Print output in JSON format") +@click.option( + "--type", type=click.Choice(["dedicated", "serverless"]), help="Filter by endpoint type" +) +@click.pass_obj +@handle_api_errors +def list(client: Together, json: bool, type: Literal["dedicated", "serverless"] | None) -> None: + """List all inference endpoints (includes both dedicated and serverless endpoints).""" + endpoints: List[ListEndpoint] = client.endpoints.list(type=type) + + if not endpoints: + click.echo("No dedicated endpoints found", err=True) + return + + click.echo("Dedicated endpoints:", err=True) + for endpoint in endpoints: + print_endpoint(endpoint, json=json) + click.echo() + + +@endpoints.command() +@click.argument("endpoint-id", required=True) +@click.option( + "--display-name", + help="A new human-readable name for the endpoint", +) +@click.option( + "--min-replicas", + type=int, + help="New minimum number of replicas to maintain", +) +@click.option( + "--max-replicas", + type=int, + help="New maximum number of replicas to scale up to", +) +@click.pass_obj +@handle_api_errors +def update( + client: Together, + endpoint_id: str, + display_name: str | None, + min_replicas: int | None, + max_replicas: int | None, +): + """Update a dedicated inference endpoint's configuration.""" + if not any([display_name, min_replicas, max_replicas]): + click.echo("Error: At least one update option must be specified", err=True) + sys.exit(1) + + # If only one of min/max replicas is specified, we need both for the update + if (min_replicas is None) != (max_replicas is None): + click.echo( + "Error: Both --min-replicas and 
--max-replicas must be specified together", + err=True, + ) + sys.exit(1) + + # Build kwargs for the update + kwargs = {} + if display_name is not None: + kwargs["display_name"] = display_name + if min_replicas is not None and max_replicas is not None: + kwargs["min_replicas"] = min_replicas + kwargs["max_replicas"] = max_replicas + + _response = client.endpoints.update(endpoint_id, **kwargs) + + # Print what was updated + click.echo("Updated endpoint configuration:", err=True) + if display_name: + click.echo(f" Display name: {display_name}", err=True) + if min_replicas is not None and max_replicas is not None: + click.echo(f" Min replicas: {min_replicas}", err=True) + click.echo(f" Max replicas: {max_replicas}", err=True) + + click.echo("Successfully updated endpoint", err=True) + click.echo(endpoint_id) diff --git a/src/together/cli/api/inference.py b/src/together/cli/api/inference.py deleted file mode 100644 index de52f1cd..00000000 --- a/src/together/cli/api/inference.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import annotations - -import click - - -@click.group() -def inference(): - """Manage inference endpoints and configurations.""" - pass - - -@inference.group() -def dedicated(): - """Manage dedicated inference endpoints.""" - pass - - -@dedicated.command() -@click.pass_obj -def create(client): - """Create a new dedicated inference endpoint.""" - click.echo("Creating new dedicated inference endpoint...") - # TODO: Implement the actual endpoint creation logic - pass diff --git a/src/together/cli/cli.py b/src/together/cli/cli.py index ff1f24f3..4409f648 100644 --- a/src/together/cli/cli.py +++ b/src/together/cli/cli.py @@ -8,10 +8,10 @@ import together from together.cli.api.chat import chat, interactive from together.cli.api.completions import completions +from together.cli.api.endpoints import endpoints from together.cli.api.files import files from together.cli.api.finetune import fine_tuning from together.cli.api.images import images -from 
together.cli.api.inference import inference from together.cli.api.models import models from together.constants import MAX_RETRIES, TIMEOUT_SECS @@ -69,7 +69,7 @@ def main( main.add_command(files) main.add_command(fine_tuning) main.add_command(models) -main.add_command(inference) +main.add_command(endpoints) if __name__ == "__main__": main() diff --git a/src/together/client.py b/src/together/client.py index 6419581b..ea5359a5 100644 --- a/src/together/client.py +++ b/src/together/client.py @@ -81,6 +81,7 @@ def __init__( self.fine_tuning = resources.FineTuning(self.client) self.rerank = resources.Rerank(self.client) self.audio = resources.Audio(self.client) + self.endpoints = resources.Endpoints(self.client) class AsyncTogether: diff --git a/src/together/resources/__init__.py b/src/together/resources/__init__.py index cf4bf3b2..f07aeb00 100644 --- a/src/together/resources/__init__.py +++ b/src/together/resources/__init__.py @@ -1,12 +1,13 @@ +from together.resources.audio import AsyncAudio, Audio from together.resources.chat import AsyncChat, Chat from together.resources.completions import AsyncCompletions, Completions from together.resources.embeddings import AsyncEmbeddings, Embeddings +from together.resources.endpoints import AsyncEndpoints, Endpoints from together.resources.files import AsyncFiles, Files from together.resources.finetune import AsyncFineTuning, FineTuning from together.resources.images import AsyncImages, Images from together.resources.models import AsyncModels, Models from together.resources.rerank import AsyncRerank, Rerank -from together.resources.audio import AsyncAudio, Audio __all__ = [ @@ -28,4 +29,6 @@ "Rerank", "AsyncAudio", "Audio", + "AsyncEndpoints", + "Endpoints", ] diff --git a/src/together/resources/endpoints.py b/src/together/resources/endpoints.py new file mode 100644 index 00000000..c74c2afe --- /dev/null +++ b/src/together/resources/endpoints.py @@ -0,0 +1,307 @@ +from __future__ import annotations + +import asyncio +from 
typing import Any, Dict, List, Literal, Optional + +from together.generated.api.endpoints_api import EndpointsApi +from together.generated.api_client import ApiClient +from together.generated.configuration import Configuration +from together.generated.models.autoscaling import Autoscaling +from together.generated.models.create_endpoint_request import CreateEndpointRequest +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.list_endpoint import ListEndpoint +from together.generated.models.update_endpoint_request import UpdateEndpointRequest +from together.types import TogetherClient + + +class BaseEndpoints: + """Base class containing common endpoint functionality and documentation.""" + + def _get_api_client(self, client: TogetherClient) -> tuple[ApiClient, EndpointsApi]: + api_client = ApiClient( + configuration=Configuration( + host=client.base_url.rstrip("/") if client.base_url else "", + ), + header_name="Authorization", + header_value=f"Bearer {client.api_key}" if client.api_key else None, + ) + return api_client, EndpointsApi(api_client) + + +class Endpoints(BaseEndpoints): + """Synchronous endpoints client.""" + + def __init__(self, client: TogetherClient) -> None: + self.api_client, self._api = self._get_api_client(client) + self._loop = asyncio.new_event_loop() + asyncio.set_event_loop(self._loop) + + def __del__(self): + if hasattr(self, "api_client"): + self._loop.run_until_complete(self.api_client.close()) + self._loop.close() + + def create( + self, + *, + model: str, + hardware: str, + min_replicas: int, + max_replicas: int, + display_name: Optional[str] = None, + disable_prompt_cache: bool = False, + disable_speculative_decoding: bool = False, + state: Literal["STARTED", "STOPPED"] = "STARTED", + ) -> DedicatedEndpoint: + """ + Create a new dedicated endpoint. 
+ + Args: + model (str): The model to deploy on this endpoint + hardware (str): The hardware configuration to use for this endpoint + min_replicas (int): The minimum number of replicas to maintain + max_replicas (int): The maximum number of replicas to scale up to + display_name (str, optional): A human-readable name for the endpoint + disable_prompt_cache (bool, optional): Whether to disable the prompt cache. Defaults to False. + disable_speculative_decoding (bool, optional): Whether to disable speculative decoding. Defaults to False. + state (str, optional): The desired state of the endpoint. Defaults to "STARTED". + + Returns: + DedicatedEndpoint: Object containing endpoint information + """ + + async def _create(): + request = CreateEndpointRequest( + model=model, + hardware=hardware, + autoscaling=Autoscaling(min_replicas=min_replicas, max_replicas=max_replicas), + display_name=display_name, + disable_prompt_cache=disable_prompt_cache, + disable_speculative_decoding=disable_speculative_decoding, + state=state, + ) + return await self._api.create_endpoint(create_endpoint_request=request) + + return self._loop.run_until_complete(_create()) + + def list(self, type: Literal["dedicated", "serverless"] | None = None) -> List[ListEndpoint]: + """ + List all endpoints. + + Args: + type (str, optional): Filter endpoints by type ("dedicated" or "serverless"). Defaults to None. + + Returns: + Dict[str, Any]: Response containing list of endpoints in the data field + """ + + async def _list(): + return await self._api.list_endpoints(type=type) + + response = self._loop.run_until_complete(_list()) + return response.data + + def get(self, endpoint_id: str) -> DedicatedEndpoint: + """ + Get details of a specific endpoint. 
+ + Args: + endpoint_id (str): ID of the endpoint to retrieve + + Returns: + DedicatedEndpoint: Object containing endpoint information + """ + + async def _get(): + return await self._api.get_endpoint(endpoint_id=endpoint_id) + + return self._loop.run_until_complete(_get()) + + def delete(self, endpoint_id: str) -> None: + """ + Delete a specific endpoint. + + Args: + endpoint_id (str): ID of the endpoint to delete + """ + + async def _delete(): + return await self._api.delete_endpoint(endpoint_id=endpoint_id) + + return self._loop.run_until_complete(_delete()) + + def update( + self, + endpoint_id: str, + *, + min_replicas: Optional[int] = None, + max_replicas: Optional[int] = None, + state: Optional[Literal["STARTED", "STOPPED"]] = None, + display_name: Optional[str] = None, + ) -> DedicatedEndpoint: + """ + Update an endpoint's configuration. + + Args: + endpoint_id (str): ID of the endpoint to update + min_replicas (int, optional): The minimum number of replicas to maintain + max_replicas (int, optional): The maximum number of replicas to scale up to + state (str, optional): The desired state of the endpoint ("STARTED" or "STOPPED") + display_name (str, optional): A human-readable name for the endpoint + + Returns: + DedicatedEndpoint: Object containing endpoint information + """ + + async def _update(): + kwargs: Dict[str, Any] = {} + if min_replicas is not None or max_replicas is not None: + current_min = min_replicas + current_max = max_replicas + if current_min is None or current_max is None: + # Get current values if only one is specified + current = await self._api.get_endpoint(endpoint_id=endpoint_id) + current_min = current_min or current.autoscaling.min_replicas + current_max = current_max or current.autoscaling.max_replicas + kwargs["autoscaling"] = Autoscaling( + min_replicas=current_min, + max_replicas=current_max, + ) + if state is not None: + kwargs["state"] = state + if display_name is not None: + kwargs["display_name"] = display_name + + request 
= UpdateEndpointRequest(**kwargs) + return await self._api.update_endpoint( + endpoint_id=endpoint_id, update_endpoint_request=request + ) + + return self._loop.run_until_complete(_update()) + + +class AsyncEndpoints(BaseEndpoints): + """Asynchronous endpoints client.""" + + def __init__(self, client: TogetherClient) -> None: + self.api_client, self._api = self._get_api_client(client) + + async def create( + self, + *, + model: str, + hardware: str, + min_replicas: int, + max_replicas: int, + display_name: Optional[str] = None, + disable_prompt_cache: bool = False, + disable_speculative_decoding: bool = False, + state: Literal["STARTED", "STOPPED"] = "STARTED", + ) -> DedicatedEndpoint: + """ + Create a new dedicated endpoint. + + Args: + model (str): The model to deploy on this endpoint + hardware (str): The hardware configuration to use for this endpoint + min_replicas (int): The minimum number of replicas to maintain + max_replicas (int): The maximum number of replicas to scale up to + display_name (str, optional): A human-readable name for the endpoint + disable_prompt_cache (bool, optional): Whether to disable the prompt cache. Defaults to False. + disable_speculative_decoding (bool, optional): Whether to disable speculative decoding. Defaults to False. + state (str, optional): The desired state of the endpoint. Defaults to "STARTED". + + Returns: + DedicatedEndpoint: Object containing endpoint information + """ + request = CreateEndpointRequest( + model=model, + hardware=hardware, + autoscaling=Autoscaling(min_replicas=min_replicas, max_replicas=max_replicas), + display_name=display_name, + disable_prompt_cache=disable_prompt_cache, + disable_speculative_decoding=disable_speculative_decoding, + state=state, + ) + return await self._api.create_endpoint(create_endpoint_request=request) + + async def list( + self, type: Literal["dedicated", "serverless"] | None = None + ) -> List[ListEndpoint]: + """ + List all endpoints. 
+ + Args: + type (str, optional): Filter endpoints by type ("dedicated" or "serverless"). Defaults to None. + + Returns: + Dict[str, Any]: Response containing list of endpoints in the data field + """ + response = await self._api.list_endpoints(type=type) + return response.data + + async def get(self, endpoint_id: str) -> DedicatedEndpoint: + """ + Get details of a specific endpoint. + + Args: + endpoint_id (str): ID of the endpoint to retrieve + + Returns: + DedicatedEndpoint: Object containing endpoint information + """ + return await self._api.get_endpoint(endpoint_id=endpoint_id) + + async def delete(self, endpoint_id: str) -> None: + """ + Delete a specific endpoint. + + Args: + endpoint_id (str): ID of the endpoint to delete + """ + return await self._api.delete_endpoint(endpoint_id=endpoint_id) + + async def update( + self, + endpoint_id: str, + *, + min_replicas: Optional[int] = None, + max_replicas: Optional[int] = None, + state: Optional[Literal["STARTED", "STOPPED"]] = None, + display_name: Optional[str] = None, + ) -> DedicatedEndpoint: + """ + Update an endpoint's configuration. 
+ + Args: + endpoint_id (str): ID of the endpoint to update + min_replicas (int, optional): The minimum number of replicas to maintain + max_replicas (int, optional): The maximum number of replicas to scale up to + state (str, optional): The desired state of the endpoint ("STARTED" or "STOPPED") + display_name (str, optional): A human-readable name for the endpoint + + Returns: + DedicatedEndpoint: Object containing endpoint information + """ + kwargs: Dict[str, Any] = {} + if min_replicas is not None or max_replicas is not None: + current_min = min_replicas + current_max = max_replicas + if current_min is None or current_max is None: + # Get current values if only one is specified + current = await self._api.get_endpoint(endpoint_id=endpoint_id) + current_min = current_min or current.autoscaling.min_replicas + current_max = current_max or current.autoscaling.max_replicas + kwargs["autoscaling"] = Autoscaling( + min_replicas=current_min, + max_replicas=current_max, + ) + if state is not None: + kwargs["state"] = state + if display_name is not None: + kwargs["display_name"] = display_name + + request = UpdateEndpointRequest(**kwargs) + return await self._api.update_endpoint( + endpoint_id=endpoint_id, update_endpoint_request=request + ) diff --git a/src/together/resources/models.py b/src/together/resources/models.py index 9a85e9bb..6b4f955b 100644 --- a/src/together/resources/models.py +++ b/src/together/resources/models.py @@ -4,11 +4,7 @@ from together.abstract import api_requestor from together.together_response import TogetherResponse -from together.types import ( - ModelObject, - TogetherClient, - TogetherRequest, -) +from together.types import ModelObject, TogetherClient, TogetherRequest class Models: diff --git a/src/together/types/__init__.py b/src/together/types/__init__.py index 5768d8de..e94c6778 100644 --- a/src/together/types/__init__.py +++ b/src/together/types/__init__.py @@ -1,16 +1,22 @@ from together.types.abstract import TogetherClient +from 
together.types.audio_speech import ( + AudioLanguage, + AudioResponseEncoding, + AudioResponseFormat, + AudioSpeechRequest, + AudioSpeechStreamChunk, + AudioSpeechStreamEvent, + AudioSpeechStreamResponse, +) from together.types.chat_completions import ( ChatCompletionChunk, ChatCompletionRequest, ChatCompletionResponse, ) from together.types.common import TogetherRequest -from together.types.completions import ( - CompletionChunk, - CompletionRequest, - CompletionResponse, -) +from together.types.completions import CompletionChunk, CompletionRequest, CompletionResponse from together.types.embeddings import EmbeddingRequest, EmbeddingResponse +from together.types.endpoints import Autoscaling, DedicatedEndpoint, ListEndpoint from together.types.files import ( FileDeleteResponse, FileList, @@ -22,35 +28,21 @@ ) from together.types.finetune import ( FinetuneDownloadResult, + FinetuneLinearLRSchedulerArgs, FinetuneList, FinetuneListEvents, + FinetuneLRScheduler, FinetuneRequest, FinetuneResponse, + FinetuneTrainingLimits, FullTrainingType, LoRATrainingType, TrainingType, - FinetuneTrainingLimits, - FinetuneLRScheduler, - FinetuneLinearLRSchedulerArgs, -) -from together.types.images import ( - ImageRequest, - ImageResponse, ) +from together.types.images import ImageRequest, ImageResponse from together.types.models import ModelObject -from together.types.rerank import ( - RerankRequest, - RerankResponse, -) -from together.types.audio_speech import ( - AudioSpeechRequest, - AudioResponseFormat, - AudioLanguage, - AudioResponseEncoding, - AudioSpeechStreamChunk, - AudioSpeechStreamEvent, - AudioSpeechStreamResponse, -) +from together.types.rerank import RerankRequest, RerankResponse + __all__ = [ "TogetherClient", @@ -93,4 +85,7 @@ "AudioSpeechStreamChunk", "AudioSpeechStreamEvent", "AudioSpeechStreamResponse", + "DedicatedEndpoint", + "ListEndpoint", + "Autoscaling", ] diff --git a/src/together/types/endpoints.py b/src/together/types/endpoints.py new file mode 100644 index 
00000000..42299bb4 --- /dev/null +++ b/src/together/types/endpoints.py @@ -0,0 +1,12 @@ +from __future__ import annotations + +from together.generated.models.autoscaling import Autoscaling +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.list_endpoint import ListEndpoint + + +__all__ = [ + "DedicatedEndpoint", + "ListEndpoint", + "Autoscaling", +] From c983f2e2fb1edb94f64c81093f08224401d84f97 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Wed, 12 Feb 2025 19:51:57 +0000 Subject: [PATCH 04/29] run lints --- .devcontainer/Dockerfile | 2 +- .devcontainer/devcontainer.json | 3 +- Makefile | 4 + examples/tokenize_data.py | 17 +- mypy.ini | 7 + scripts/.gitignore | 2 +- scripts/generate_api_client.py | 5 +- scripts/openapi.yaml | 2848 ++++++++++++++++++++++++ src/together/__init__.py | 4 +- src/together/abstract/api_requestor.py | 33 +- src/together/cli/api/chat.py | 24 +- src/together/cli/api/completions.py | 8 +- src/together/cli/api/endpoints.py | 22 +- src/together/cli/api/files.py | 12 +- src/together/cli/api/finetune.py | 36 +- src/together/cli/api/utils.py | 4 +- src/together/error.py | 52 +- src/together/filemanager.py | 11 +- src/together/legacy/embeddings.py | 4 +- src/together/legacy/files.py | 12 +- src/together/legacy/finetune.py | 12 +- src/together/legacy/images.py | 4 +- src/together/resources/endpoints.py | 21 +- src/together/resources/files.py | 8 +- src/together/resources/finetune.py | 16 +- src/together/utils/api_helpers.py | 4 +- src/together/utils/files.py | 12 +- src/together/version.py | 4 +- tests/unit/test_async_client.py | 8 +- tests/unit/test_client.py | 8 +- tests/unit/test_files_checks.py | 19 +- 31 files changed, 2966 insertions(+), 260 deletions(-) create mode 100644 scripts/openapi.yaml diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index b4539433..94036c6e 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,4 +3,4 @@ FROM 
mcr.microsoft.com/devcontainers/python:3.9 ENV PYTHONUNBUFFERED 1 # Install pre-commit -RUN pip install pre-commit \ No newline at end of file +RUN pip install pre-commit diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index b673f795..63ff3d7c 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -5,6 +5,7 @@ }, "features": { "ghcr.io/devcontainers/features/git:1": {}, + "ghcr.io/devcontainers/features/node:1": {}, "ghcr.io/devcontainers/features/java:1": { "version": "17", "installMaven": false, @@ -37,4 +38,4 @@ }, "postCreateCommand": "poetry install", "remoteUser": "vscode" -} \ No newline at end of file +} diff --git a/Makefile b/Makefile index f5398a86..c3d3462c 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,10 @@ install: format: poetry run pre-commit run --all-files +# OpenAPI Client Generation + +generate-client: + python scripts/generate_api_client.py # Documentation diff --git a/examples/tokenize_data.py b/examples/tokenize_data.py index 327f9cd1..c1ba67df 100644 --- a/examples/tokenize_data.py +++ b/examples/tokenize_data.py @@ -25,9 +25,7 @@ def tokenize_variable_length( tokenizer: PreTrainedTokenizerBase, add_special_tokens: bool = True, ) -> BatchEncoding: - tokenized = tokenizer( - data["text"], add_special_tokens=add_special_tokens, truncation=False - ) + tokenized = tokenizer(data["text"], add_special_tokens=add_special_tokens, truncation=False) return tokenized @@ -102,10 +100,7 @@ def pack_sequences( output = {"input_ids": packed_sequences} if add_labels: output["labels"] = [ - [ - LOSS_IGNORE_INDEX if token_id == pad_token_id else token_id - for token_id in example - ] + [LOSS_IGNORE_INDEX if token_id == pad_token_id else token_id for token_id in example] for example in output["input_ids"] ] @@ -201,18 +196,14 @@ def process_data(args: argparse.Namespace) -> None: if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Pretokenize examples for finetuning via 
Together" - ) + parser = argparse.ArgumentParser(description="Pretokenize examples for finetuning via Together") parser.add_argument( "--dataset", type=str, default="clam004/antihallucination_dataset", help="Dataset name on the Hugging Face Hub", ) - parser.add_argument( - "--max-seq-length", type=int, default=8192, help="Maximum sequence length" - ) + parser.add_argument("--max-seq-length", type=int, default=8192, help="Maximum sequence length") parser.add_argument( "--add-labels", action="store_true", diff --git a/mypy.ini b/mypy.ini index ff370642..88b2d7ba 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,2 +1,9 @@ [mypy] plugins = pydantic.mypy +disallow_untyped_defs = true + +[mypy-together.generated.*] +ignore_errors = true + +[mypy.tests.*] +ignore_errors = true diff --git a/scripts/.gitignore b/scripts/.gitignore index 2a7ceb17..4bf363be 100644 --- a/scripts/.gitignore +++ b/scripts/.gitignore @@ -1,2 +1,2 @@ openapi-generator-cli.jar -openapi.yaml + diff --git a/scripts/generate_api_client.py b/scripts/generate_api_client.py index 7687cb80..74f52457 100755 --- a/scripts/generate_api_client.py +++ b/scripts/generate_api_client.py @@ -13,7 +13,7 @@ GENERATOR_JAR = Path(__file__).parent / "openapi-generator-cli.jar" -def run_command(cmd: list[str], check: bool = True) -> subprocess.CompletedProcess: +def run_command(cmd: list[str], check: bool = True) -> subprocess.CompletedProcess[str]: """Run a command and optionally check its return code.""" print(f"Running: {' '.join(cmd)}") return subprocess.run(cmd, check=check, capture_output=True, text=True) @@ -31,6 +31,9 @@ def main() -> None: spec_file = Path(__file__).parent / "openapi.yaml" download_file(OPENAPI_SPEC_URL, spec_file) + # Run formatter on the spec for better merge conflict handling + run_command(["npx", "-y", "prettier", "--write", str(spec_file)]) + # Download generator if needed download_file(GENERATOR_JAR_URL, GENERATOR_JAR) diff --git a/scripts/openapi.yaml b/scripts/openapi.yaml new file mode 100644 
index 00000000..c951154c --- /dev/null +++ b/scripts/openapi.yaml @@ -0,0 +1,2848 @@ +openapi: 3.1.0 +info: + title: Together APIs + description: The Together REST API. Please see https://docs.together.ai for more details. + version: "2.0.0" + termsOfService: https://www.together.ai/terms-of-service + contact: + name: Together Support + url: https://www.together.ai/contact + license: + name: MIT + url: https://github.com/togethercomputer/openapi/blob/main/LICENSE +servers: + - url: https://api.together.xyz/v1 +security: + - bearerAuth: [] +paths: + /chat/completions: + post: + tags: ["Chat"] + summary: Create chat completion + description: Query a chat model. + operationId: chat-completions + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/ChatCompletionRequest" + responses: + "200": + description: "200" + content: + application/json: + schema: + $ref: "#/components/schemas/ChatCompletionResponse" + text/event-stream: + schema: + $ref: "#/components/schemas/ChatCompletionStream" + "400": + description: "BadRequest" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "401": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "404": + description: "NotFound" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "429": + description: "RateLimit" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "503": + description: "Overloaded" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "504": + description: "Timeout" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + deprecated: false + /completions: + post: + tags: ["Completion"] + summary: Create completion + description: Query a language, code, or image model. 
+ operationId: completions + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CompletionRequest" + responses: + "200": + description: "200" + content: + application/json: + schema: + $ref: "#/components/schemas/CompletionResponse" + text/event-stream: + schema: + $ref: "#/components/schemas/CompletionStream" + "400": + description: "BadRequest" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "401": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "404": + description: "NotFound" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "429": + description: "RateLimit" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "503": + description: "Overloaded" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "504": + description: "Timeout" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + deprecated: false + /embeddings: + post: + tags: ["Embeddings"] + summary: Create embedding + description: Query an embedding model for a given string of text. 
+ operationId: embeddings + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/EmbeddingsRequest" + responses: + "200": + description: "200" + content: + application/json: + schema: + $ref: "#/components/schemas/EmbeddingsResponse" + "400": + description: "BadRequest" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "401": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "404": + description: "NotFound" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "429": + description: "RateLimit" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "503": + description: "Overloaded" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "504": + description: "Timeout" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + deprecated: false + /models: + get: + tags: ["Models"] + summary: List all models + description: Lists all of Together's open-source models + operationId: models + responses: + "200": + description: "200" + content: + application/json: + schema: + $ref: "#/components/schemas/ModelInfoList" + "400": + description: "BadRequest" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "401": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "404": + description: "NotFound" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "429": + description: "RateLimit" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "504": + description: "Timeout" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + deprecated: false + /images/generations: + post: + tags: ["Images"] + summary: Create image + description: Use an image model to generate 
an image for a given prompt. + requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - prompt + - model + properties: + prompt: + type: string + description: A description of the desired images. Maximum length varies by model. + example: cat floating in space, cinematic + model: + type: string + description: > + The model to use for image generation.
+
+ [See all of Together AI's image models](https://docs.together.ai/docs/serverless-models#image-models) + example: black-forest-labs/FLUX.1-schnell + anyOf: + - type: string + enum: + - black-forest-labs/FLUX.1-schnell-Free + - black-forest-labs/FLUX.1-schnell + - black-forest-labs/FLUX.1.1-pro + - type: string + steps: + type: integer + default: 20 + description: Number of generation steps. + image_url: + type: string + description: URL of an image to use for image models that support it. + seed: + type: integer + description: Seed used for generation. Can be used to reproduce image generations. + n: + type: integer + default: 1 + description: Number of image results to generate. + height: + type: integer + default: 1024 + description: Height of the image to generate in number of pixels. + width: + type: integer + default: 1024 + description: Width of the image to generate in number of pixels. + negative_prompt: + type: string + description: The prompt or prompts not to guide the image generation. + response_format: + type: string + description: Format of the image response. Can be either a base64 string or a URL. + enum: + - base64 + - url + guidance: + type: number + description: Adjusts the alignment of the generated image with the input prompt. Higher values (e.g., 8-10) make the output more faithful to the prompt, while lower values (e.g., 1-5) encourage more creative freedom. + default: 3.5 + output_format: + type: string + description: The format of the image response. Can be either be `jpeg` or `png`. Defaults to `jpeg`. + default: jpeg + enum: + - jpeg + - png + image_loras: + description: An array of objects that define LoRAs (Low-Rank Adaptations) to influence the generated image. + type: array + items: + type: object + required: [path, scale] + properties: + path: + type: string + description: The URL of the LoRA to apply (e.g. https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA). 
+ scale: + type: number + description: The strength of the LoRA's influence. Most LoRA's recommend a value of 1. + responses: + "200": + description: Image generated successfully + content: + application/json: + schema: + $ref: "#/components/schemas/ImageResponse" + /files: + get: + tags: ["Files"] + summary: List all files + description: List the metadata for all uploaded data files. + responses: + "200": + description: List of files + content: + application/json: + schema: + $ref: "#/components/schemas/FileList" + /files/{id}: + get: + tags: ["Files"] + summary: List file + description: List the metadata for a single uploaded data file. + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + "200": + description: File retrieved successfully + content: + application/json: + schema: + $ref: "#/components/schemas/FileResponse" + delete: + tags: ["Files"] + summary: Delete a file + description: Delete a previously uploaded data file. + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + "200": + description: File deleted successfully + content: + application/json: + schema: + $ref: "#/components/schemas/FileDeleteResponse" + /files/{id}/content: + get: + tags: ["Files"] + summary: Get file contents + description: Get the contents of a single uploaded data file. + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + "200": + description: File content retrieved successfully + content: + application/json: + schema: + $ref: "#/components/schemas/FileObject" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + /fine-tunes: + post: + tags: ["Fine-tuning"] + summary: Create job + description: Use a model to create a fine-tuning job. 
+ requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - training_file + - model + properties: + training_file: + type: string + description: File-ID of a training file uploaded to the Together API + validation_file: + type: string + description: File-ID of a validation file uploaded to the Together API + model: + type: string + description: Name of the base model to run fine-tune job on + n_epochs: + type: integer + default: 1 + description: Number of epochs for fine-tuning + n_checkpoints: + type: integer + default: 1 + description: Number of checkpoints to save during fine-tuning + n_evals: + type: integer + default: 0 + description: Number of evaluations to be run on a given validation set during training + batch_size: + type: integer + default: 32 + description: Batch size for fine-tuning + learning_rate: + type: number + format: float + default: 0.00001 + description: Learning rate multiplier to use for training + lr_scheduler: + type: object + default: none + $ref: "#/components/schemas/LRScheduler" + warmup_ratio: + type: number + format: float + default: 0.0 + description: The percent of steps at the start of training to linearly increase the learning rate. + max_grad_norm: + type: number + format: float + default: 1.0 + description: Max gradient norm to be used for gradient clipping. Set to 0 to disable. + weight_decay: + type: number + format: float + default: 0.0 + description: Weight decay + suffix: + type: string + description: Suffix that will be added to your fine-tuned model name + wandb_api_key: + type: string + description: API key for Weights & Biases integration + wandb_base_url: + type: string + description: The base URL of a dedicated Weights & Biases instance. + wandb_project_name: + type: string + description: The Weights & Biases project for your run. If not specified, will use `together` as the project name. + wandb_name: + type: string + description: The Weights & Biases name for your run. 
+ train_on_inputs: + oneOf: + - type: boolean + - type: string + enum: + - auto + type: boolean + default: auto + description: Whether to mask the user messages in conversational data or prompts in instruction data. + training_type: + type: object + oneOf: + - $ref: "#/components/schemas/FullTrainingType" + - $ref: "#/components/schemas/LoRATrainingType" + responses: + "200": + description: Fine-tuning job initiated successfully + content: + application/json: + schema: + $ref: "#/components/schemas/FinetuneResponse" + get: + tags: ["Fine-tuning"] + summary: List all jobs + description: List the metadata for all fine-tuning jobs. + responses: + "200": + description: List of fine-tune jobs + content: + application/json: + schema: + $ref: "#/components/schemas/FinetuneList" + /fine-tunes/{id}: + get: + tags: ["Fine-tuning"] + summary: List job + description: List the metadata for a single fine-tuning job. + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + "200": + description: Fine-tune job details retrieved successfully + content: + application/json: + schema: + $ref: "#/components/schemas/FinetuneResponse" + /fine-tunes/{id}/events: + get: + tags: ["Fine-tuning"] + summary: List job events + description: List the events for a single fine-tuning job. + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + "200": + description: List of fine-tune events + content: + application/json: + schema: + $ref: "#/components/schemas/FinetuneListEvents" + /finetune/download: + get: + tags: ["Fine-tuning"] + summary: Download model + description: Download a compressed fine-tuned model or checkpoint to local disk. + parameters: + - in: query + name: ft_id + schema: + type: string + required: true + description: Fine-tune ID to download. A string that starts with `ft-`. 
+ - in: query + name: checkpoint_step + schema: + type: integer + required: false + description: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. + - in: query + name: checkpoint + schema: + type: string + enum: + - merged + - adapter + description: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. + - in: query + name: output + schema: + type: string + required: false + description: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. + responses: + "200": + description: Successfully downloaded the fine-tuned model or checkpoint. + content: + application/json: + schema: + $ref: "#/components/schemas/FinetuneDownloadResult" + "400": + description: Invalid request parameters. + "404": + description: Fine-tune ID not found. + /fine-tunes/{id}/cancel: + post: + tags: ["Fine-tuning"] + summary: Cancel job + description: Cancel a currently running fine-tuning job. + parameters: + - in: path + name: id + schema: + type: string + required: true + description: Fine-tune ID to cancel. A string that starts with `ft-`. + responses: + "200": + description: Successfully cancelled the fine-tuning job. + content: + application/json: + schema: + $ref: "#/components/schemas/FinetuneResponse" + "400": + description: Invalid request parameters. + "404": + description: Fine-tune ID not found. 
+ /rerank: + post: + tags: ["Rerank"] + summary: Create a rerank request + description: Query a reranker model + operationId: rerank + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/RerankRequest" + responses: + "200": + description: "200" + content: + application/json: + schema: + $ref: "#/components/schemas/RerankResponse" + "400": + description: "BadRequest" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "401": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "404": + description: "NotFound" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "429": + description: "RateLimit" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "503": + description: "Overloaded" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "504": + description: "Timeout" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + deprecated: false + /audio/speech: + post: + tags: ["Audio"] + summary: Create audio generation request + description: Generate audio from input text + operationId: audio-speech + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/AudioSpeechRequest" + responses: + "200": + description: "OK" + content: + application/octet-stream: + schema: + type: string + format: binary + audio/wav: + schema: + type: string + format: binary + audio/mpeg: + schema: + type: string + format: binary + text/event-stream: + schema: + $ref: "#/components/schemas/AudioSpeechStreamResponse" + "400": + description: "BadRequest" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "429": + description: "RateLimit" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + /endpoints: + get: + tags: ["Endpoints"] + summary: List all endpoints, can be 
filtered by type + description: Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). + operationId: listEndpoints + parameters: + - name: type + in: query + required: false + schema: + type: string + enum: + - dedicated + - serverless + description: Filter endpoints by type + example: dedicated + responses: + "200": + description: "200" + content: + application/json: + schema: + type: object + required: + - object + - data + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/ListEndpoint" + example: + object: "list" + data: + - object: "endpoint" + name: "allenai/OLMo-7B" + model: "allenai/OLMo-7B" + type: "serverless" + owner: "together" + state: "STARTED" + created_at: "2024-02-28T21:34:35.444Z" + "403": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "500": + description: "Internal error" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + post: + tags: ["Endpoints"] + summary: Create a dedicated endpoint, it will start automatically + description: Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. 
+ operationId: createEndpoint + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEndpointRequest" + responses: + "200": + description: "200" + content: + application/json: + schema: + $ref: "#/components/schemas/DedicatedEndpoint" + "403": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "500": + description: "Internal error" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + + /endpoints/{endpointId}: + get: + tags: ["Endpoints"] + summary: Get endpoint by ID + description: Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. + operationId: getEndpoint + parameters: + - name: endpointId + in: path + required: true + schema: + type: string + description: The ID of the endpoint to retrieve + example: endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7 + responses: + "200": + description: "200" + content: + application/json: + schema: + $ref: "#/components/schemas/DedicatedEndpoint" + "403": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "404": + description: "Not Found" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "500": + description: "Internal error" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + + patch: + tags: ["Endpoints"] + summary: Update endpoint, this can also be used to start or stop a dedicated endpoint + description: Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). 
+ operationId: updateEndpoint + parameters: + - name: endpointId + in: path + required: true + schema: + type: string + description: The ID of the endpoint to update + example: endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7 + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + display_name: + type: string + description: A human-readable name for the endpoint + example: My Llama3 70b endpoint + state: + type: string + description: The desired state of the endpoint + enum: + - STARTED + - STOPPED + example: STARTED + autoscaling: + $ref: "#/components/schemas/Autoscaling" + description: New autoscaling configuration for the endpoint + responses: + "200": + description: "200" + content: + application/json: + schema: + $ref: "#/components/schemas/DedicatedEndpoint" + "403": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "404": + description: "Not Found" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "500": + description: "Internal error" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + + delete: + tags: ["Endpoints"] + summary: Delete endpoint + description: Permanently deletes an endpoint. This action cannot be undone. 
+ operationId: deleteEndpoint + parameters: + - name: endpointId + in: path + required: true + schema: + type: string + description: The ID of the endpoint to delete + example: endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7 + responses: + "204": + description: "No Content - Endpoint successfully deleted" + "403": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "404": + description: "Not Found" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "500": + description: "Internal error" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + + /hardware: + get: + tags: ["Hardware"] + summary: List available hardware configurations + description: Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + operationId: listHardware + parameters: + - name: model + in: query + required: false + schema: + type: string + description: Filter hardware configurations by model compatibility + example: meta-llama/Llama-3-70b-chat-hf + responses: + "200": + description: "List of available hardware configurations" + content: + application/json: + schema: + oneOf: + - type: object + description: Response when no model filter is provided + required: + - object + - data + properties: + object: + type: string + enum: + - list + data: + type: array + items: + allOf: + - $ref: "#/components/schemas/HardwareWithStatus" + - type: object + properties: + availability: + not: {} + - type: object + description: Response when model filter is provided + required: + - object + - data + properties: + object: + type: string + enum: + - list + data: + type: array + items: + allOf: + - $ref: "#/components/schemas/HardwareWithStatus" + - type: object + required: + - availability + example: + object: "list" + data: + 
- object: "hardware" + name: "2x_nvidia_a100_80gb_sxm" + pricing: + input: 0 + output: 0 + cents_per_minute: 5.42 + specs: + gpu_type: "a100-80gb" + gpu_link: "sxm" + gpu_memory: 80 + gpu_count: 2 + updated_at: "2024-01-01T00:00:00Z" + "403": + description: "Unauthorized" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + "500": + description: "Internal error" + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorData" + +components: + securitySchemes: + bearerAuth: + type: http + scheme: bearer + x-bearer-format: bearer + x-default: default + + schemas: + RerankRequest: + type: object + properties: + model: + type: string + description: > + The model to be used for the rerank request.
+
+            [See all of Together AI's rerank models](https://docs.together.ai/docs/serverless-models#rerank-models)
+          example: Salesforce/Llama-Rank-V1
+          anyOf:
+            - type: string
+              enum:
+                - Salesforce/Llama-Rank-V1
+            - type: string
+
+        query:
+          type: string
+          description: The search query to be used for ranking.
+          example: What animals can I find near Peru?
+        documents:
+          description: List of documents, which can be either strings or objects.
+          oneOf:
+            - type: array
+              items:
+                type: object
+                additionalProperties: true
+            - type: array
+              items:
+                type: string
+                example: Our solar system orbits the Milky Way galaxy at about 515,000 mph
+          example:
+            - {
+                "title": "Llama",
+                "text": "The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era.",
+              }
+            - {
+                "title": "Panda",
+                "text": "The giant panda (Ailuropoda melanoleuca), also known as the panda bear or simply panda, is a bear species endemic to China.",
+              }
+            - {
+                "title": "Guanaco",
+                "text": "The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations.",
+              }
+            - {
+                "title": "Wild Bactrian camel",
+                "text": "The wild Bactrian camel (Camelus ferus) is an endangered species of camel endemic to Northwest China and southwestern Mongolia.",
+              }
+        top_n:
+          type: integer
+          description: The number of top results to return.
+          example: 2
+        return_documents:
+          type: boolean
+          description: Whether to return supplied documents with the response.
+          example: true
+        rank_fields:
+          type: array
+          items:
+            type: string
+          description: List of keys in the JSON Object document to rank by. Defaults to use all supplied keys for ranking.
+ example: ["title", "text"] + required: + - model + - query + - documents + additionalProperties: false + + RerankResponse: + type: object + required: + - object + - model + - results + properties: + object: + type: string + description: Object type + enum: + - rerank + example: rerank + id: + type: string + description: Request ID + example: 9dfa1a09-5ebc-4a40-970f-586cb8f4ae47 + model: + type: string + description: The model to be used for the rerank request. + example: salesforce/turboranker-0.8-3778-6328 + results: + type: array + items: + type: object + required: [index, relevance_score, document] + properties: + index: + type: integer + relevance_score: + type: number + document: + type: object + properties: + text: + type: string + nullable: true + example: + - { + "index": 0, + "relevance_score": 0.29980177813003117, + "document": + { + "text": '{"title":"Llama","text":"The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."}', + }, + } + - { + "index": 2, + "relevance_score": 0.2752447527354349, + "document": + { + "text": '{"title":"Guanaco","text":"The guanaco is a camelid native to South America, closely related to the llama. 
Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations."}', + }, + } + usage: + $ref: "#/components/schemas/UsageData" + example: + { + "prompt_tokens": 1837, + "completion_tokens": 0, + "total_tokens": 1837, + } + + ErrorData: + type: object + required: + - error + properties: + error: + type: object + properties: + message: + type: string + nullable: false + type: + type: string + nullable: false + param: + type: string + nullable: true + default: null + code: + type: string + nullable: true + default: null + required: + - type + - message + + FinishReason: + type: string + enum: + - stop + - eos + - length + - tool_calls + - function_call + + LogprobsPart: + type: object + properties: + token_ids: + type: array + items: + type: number + description: List of token IDs corresponding to the logprobs + tokens: + type: array + items: + type: string + description: List of token strings + token_logprobs: + type: array + items: + type: number + description: List of token log probabilities + + PromptPart: + type: array + items: + type: object + properties: + text: + type: string + example: [INST] What is the capital of France? [/INST] + logprobs: + $ref: "#/components/schemas/LogprobsPart" + + UsageData: + type: object + properties: + prompt_tokens: + type: integer + completion_tokens: + type: integer + total_tokens: + type: integer + required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + + CompletionChoicesData: + type: array + items: + type: object + properties: + text: + type: string + example: The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture. 
+ seed: + type: integer + finish_reason: + $ref: "#/components/schemas/FinishReason" + logprobs: + type: object + $ref: "#/components/schemas/LogprobsPart" + + CompletionRequest: + type: object + required: + - model + - prompt + properties: + prompt: + type: string + description: A string providing context for the model to complete. + example: [INST] What is the capital of France? [/INST] + model: + type: string + description: > + The name of the model to query.
+
+ [See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) + example: mistralai/Mixtral-8x7B-Instruct-v0.1 + anyOf: + - type: string + enum: + - meta-llama/Llama-2-70b-hf + - mistralai/Mistral-7B-v0.1 + - mistralai/Mixtral-8x7B-v0.1 + - Meta-Llama/Llama-Guard-7b + - type: string + max_tokens: + type: integer + description: The maximum number of tokens to generate. + stop: + type: array + description: A list of string sequences that will truncate (stop) inference text output. For example, "
" will stop generation as soon as the model generates the given token. + items: + type: string + temperature: + type: number + description: A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output. + format: float + top_p: + type: number + description: A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text. + format: float + top_k: + type: integer + description: An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. + format: int32 + repetition_penalty: + type: number + description: A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. + format: float + stream: + type: boolean + description: "If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results." + logprobs: + type: integer + minimum: 0 + maximum: 1 + description: Determines the number of most likely tokens to return at each token position log probabilities to return. + echo: + type: boolean + description: If true, the response will contain the prompt. 
Can be used with `logprobs` to return prompt logprobs. + n: + type: integer + description: The number of completions to generate for each prompt. + minimum: 1 + maximum: 128 + safety_model: + type: string + description: The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). + example: "safety_model_name" + anyOf: + - type: string + enum: + - Meta-Llama/Llama-Guard-7b + - type: string + min_p: + type: number + description: A number between 0 and 1 that can be used as an alternative to top-p and top-k. + format: float + presence_penalty: + type: number + description: A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics. + format: float + frequency_penalty: + type: number + description: A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned. + format: float + logit_bias: + type: object + additionalProperties: + type: number + + format: float + description: Adjusts the likelihood of specific tokens appearing in the generated output. + example: {"1024": -10.5, "105": 21.4} + seed: + type: integer + description: Seed value for reproducibility. 
+ example: 42 + CompletionResponse: + type: object + properties: + id: + type: string + choices: + $ref: "#/components/schemas/CompletionChoicesData" + prompt: + $ref: "#/components/schemas/PromptPart" + usage: + $ref: "#/components/schemas/UsageData" + created: + type: integer + model: + type: string + object: + type: string + enum: + - text_completion + required: + - id + - choices + - usage + - created + - model + - object + + CompletionStream: + oneOf: + - $ref: "#/components/schemas/CompletionEvent" + - $ref: "#/components/schemas/StreamSentinel" + + CompletionEvent: + type: object + required: [data] + properties: + data: + $ref: "#/components/schemas/CompletionChunk" + + CompletionChunk: + type: object + required: [id, token, choices, usage, finish_reason] + properties: + id: + type: string + token: + $ref: "#/components/schemas/CompletionToken" + choices: + title: CompletionChoices + type: array + items: + $ref: "#/components/schemas/CompletionChoice" + usage: + allOf: + - $ref: "#/components/schemas/UsageData" + - nullable: true + seed: + type: integer + finish_reason: + allOf: + - $ref: "#/components/schemas/FinishReason" + - nullable: true + + CompletionChoice: + type: object + required: [index] + properties: + text: + type: string + + CompletionToken: + type: object + required: [id, text, logprob, special] + properties: + id: + type: integer + text: + type: string + logprob: + type: number + special: + type: boolean + + ChatCompletionChoicesData: + type: array + items: + type: object + properties: + text: + type: string + index: + type: integer + seed: + type: integer + finish_reason: + $ref: "#/components/schemas/FinishReason" + message: + $ref: "#/components/schemas/ChatCompletionMessage" + logprobs: + allOf: + - nullable: true + - $ref: "#/components/schemas/LogprobsPart" + ChatCompletionMessage: + type: object + required: [role, content] + properties: + content: + type: string + nullable: true + role: + type: string + enum: [assistant] + tool_calls: 
+ type: array + items: + $ref: "#/components/schemas/ToolChoice" + function_call: + type: object + deprecated: true + required: [arguments, name] + properties: + arguments: + type: string + name: + type: string + ChatCompletionTool: + type: object + required: [type, function] + properties: + type: + type: string + enum: ["function"] + function: + type: object + required: [name] + properties: + description: + type: string + name: + type: string + parameters: + type: object + additionalProperties: true + + ChatCompletionRequest: + type: object + required: + - model + - messages + properties: + messages: + type: array + description: A list of messages comprising the conversation so far. + items: + type: object + properties: + role: + type: string + description: "The role of the messages author. Choice between: system, user, or assistant." + enum: + - system + - user + - assistant + - tool + content: + description: The content of the message, which can either be a simple string or a structured format. + type: string + oneOf: + - type: string + description: A plain text message. + - type: array + description: A structured message with mixed content types. + items: + type: object + oneOf: + - type: object + properties: + type: + type: string + enum: + - text + text: + type: string + required: + - type + - text + - type: object + properties: + type: + type: string + enum: + - image_url + image_url: + type: object + properties: + url: + type: string + description: The URL of the image as a plain string. + required: + - url + required: + - type + - image_url + required: + - role + - content + model: + description: > + The name of the model to query.
+
+ [See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) + example: meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo + anyOf: + - type: string + enum: + - Qwen/Qwen2.5-72B-Instruct-Turbo + - Qwen/Qwen2.5-7B-Instruct-Turbo + - meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo + - meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo + - meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo + - type: string + max_tokens: + type: integer + description: The maximum number of tokens to generate. + stop: + type: array + description: A list of string sequences that will truncate (stop) inference text output. For example, "
" will stop generation as soon as the model generates the given token. + items: + type: string + temperature: + type: number + description: A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output. + format: float + top_p: + type: number + description: A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text. + format: float + top_k: + type: integer + description: An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. + format: int32 + context_length_exceeded_behavior: + type: string + enum: [ "truncate", "error" ] + default: "error" + description: Defined the behavior of the API when max_tokens exceed the maximum context length of the model. When set to 'error', API will return 400 with appropriate error message. When set to 'truncate', override the max_tokens with maximum context length of the model. + repetition_penalty: + type: number + description: A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. + stream: + type: boolean + description: "If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. 
If false, return a single JSON object containing the results." + logprobs: + type: integer + minimum: 0 + maximum: 1 + description: Determines the number of most likely tokens to return at each token position log probabilities to return. + echo: + type: boolean + description: If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs. + n: + type: integer + description: The number of completions to generate for each prompt. + minimum: 1 + maximum: 128 + min_p: + type: number + description: A number between 0 and 1 that can be used as an alternative to top_p and top-k. + format: float + presence_penalty: + type: number + description: A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics. + format: float + frequency_penalty: + type: number + description: A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned. + format: float + logit_bias: + type: object + additionalProperties: + type: number + format: float + description: Adjusts the likelihood of specific tokens appearing in the generated output. + example: {"1024": -10.5, "105": 21.4} + seed: + type: integer + description: Seed value for reproducibility. + example: 42 + function_call: + oneOf: + - type: string + enum: [none, auto] + - type: object + required: [name] + properties: + name: + type: string + response_format: + type: object + description: An object specifying the format that the model must output. + properties: + type: + type: string + description: The type of the response format. + example: json + schema: + type: object + additionalProperties: + type: string + description: The schema of the response format. + tools: + type: array + description: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. 
+          items:
+            $ref: "#/components/schemas/ToolsPart"
+        tool_choice:
+          description: Controls which (if any) function is called by the model. By default uses `auto`, which lets the model pick between generating a message or calling a function.
+          oneOf:
+            - type: string
+              example: "tool_name"
+            - $ref: "#/components/schemas/ToolChoice"
+        safety_model:
+          type: string
+          description: The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models).
+          example: "safety_model_name"
+
+    ChatCompletionMessageParam:
+      oneOf:
+        - $ref: "#/components/schemas/ChatCompletionSystemMessageParam"
+        - $ref: "#/components/schemas/ChatCompletionUserMessageParam"
+        - $ref: "#/components/schemas/ChatCompletionAssistantMessageParam"
+        - $ref: "#/components/schemas/ChatCompletionToolMessageParam"
+        - $ref: "#/components/schemas/ChatCompletionFunctionMessageParam"
+
+    # Start Message Params
+
+    ChatCompletionSystemMessageParam:
+      type: object
+      required: [content, role]
+      properties:
+        content:
+          type: string
+        role:
+          type: string
+          enum: ["system"]
+        name:
+          type: string
+
+    ChatCompletionUserMessageParam:
+      type: object
+      required: [content, role]
+      properties:
+        content:
+          type: string
+          # TODO: more complex content?
+ role: + type: string + enum: ["user"] + name: + type: string + + ChatCompletionAssistantMessageParam: + type: object + required: [role] + properties: + content: + type: string + nullable: true + role: + type: string + enum: ["assistant"] + name: + type: string + tool_calls: + type: array + items: + $ref: "#/components/schemas/ToolChoice" + function_call: + type: object + deprecated: true + properties: + arguments: + type: string + name: + type: string + required: [arguments, name] + + ChatCompletionFunctionMessageParam: + type: object + deprecated: true + required: [content, role, name] + properties: + role: + type: string + enum: ["function"] + content: + type: string + name: + type: string + + ChatCompletionToolMessageParam: + type: object + properties: + role: + type: string + enum: ["tool"] + content: + type: string + tool_call_id: + type: string + required: [role, content, tool_call_id] + + # End Message Params + + ChatCompletionResponse: + type: object + properties: + id: + type: string + choices: + $ref: "#/components/schemas/ChatCompletionChoicesData" + usage: + $ref: "#/components/schemas/UsageData" + created: + type: integer + model: + type: string + object: + type: string + enum: + - chat.completion + required: [choices, id, created, model, object] + + ChatCompletionStream: + oneOf: + - $ref: "#/components/schemas/ChatCompletionEvent" + - $ref: "#/components/schemas/StreamSentinel" + + ChatCompletionEvent: + type: object + required: [data] + properties: + data: + $ref: "#/components/schemas/ChatCompletionChunk" + + ChatCompletionChunk: + type: object + required: [id, object, created, choices, model] + properties: + id: + type: string + object: + type: string + enum: + - chat.completion.chunk + created: + type: integer + system_fingerprint: + type: string + model: + type: string + example: mistralai/Mixtral-8x7B-Instruct-v0.1 + choices: + title: ChatCompletionChoices + type: array + items: + type: object + required: [index, delta, finish_reason] + 
properties: + index: + type: integer + finish_reason: + $ref: "#/components/schemas/FinishReason" + nullable: true + logprobs: + type: number + nullable: true + seed: + type: integer + nullable: true + delta: + title: ChatCompletionChoiceDelta + type: object + required: [role] + properties: + token_id: + type: integer + role: + type: string + enum: ["system", "user", "assistant", "function", "tool"] + content: + type: string + nullable: true + tool_calls: + type: array + items: + $ref: "#/components/schemas/ToolChoice" + function_call: + type: object + deprecated: true + nullable: true + properties: + arguments: + type: string + name: + type: string + required: + - arguments + - name + usage: + allOf: + - $ref: "#/components/schemas/UsageData" + - nullable: true + + AudioSpeechRequest: + type: object + required: + - model + - input + - voice + properties: + model: + description: > + The name of the model to query.
+
+ [See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#audio-models) + example: cartesia/sonic + anyOf: + - type: string + enum: + - cartesia/sonic + - type: string + input: + type: string + description: Input text to generate the audio for + voice: + description: The voice to use for generating the audio. [View all supported voices here](https://docs.together.ai/docs/text-to-speech#voices-available). + anyOf: + - type: string + enum: + - laidback woman + - polite man + - storyteller lady + - friendly sidekick + - type: string + response_format: + type: string + description: The format of audio output + default: wav + enum: + - mp3 + - wav + - raw + language: + type: string + description: Language of input text + default: en + enum: + - en + - de + - fr + - es + - hi + - it + - ja + - ko + - nl + - pl + - pt + - ru + - sv + - tr + - zh + response_encoding: + type: string + description: Audio encoding of response + default: pcm_f32le + enum: + - pcm_f32le + - pcm_s16le + - pcm_mulaw + - pcm_alaw + sample_rate: + type: number + default: 44100 + description: Sampling rate to use for the output audio + stream: + type: boolean + default: false + description: "If true, output is streamed for several characters at a time instead of waiting for the full response. The stream terminates with `data: [DONE]`. 
If false, return the encoded audio as octet stream" + + AudioSpeechStreamResponse: + oneOf: + - $ref: "#/components/schemas/AudioSpeechStreamEvent" + - $ref: "#/components/schemas/StreamSentinel" + + AudioSpeechStreamEvent: + type: object + required: [data] + properties: + data: + $ref: "#/components/schemas/AudioSpeechStreamChunk" + + AudioSpeechStreamChunk: + type: object + required: [object, model, b64] + properties: + object: + type: string + enum: + - audio.tts.chunk + model: + type: string + example: cartesia/sonic + b64: + type: string + description: base64 encoded audio stream + StreamSentinel: + type: object + required: [data] + properties: + data: + title: stream_signal + type: string + enum: + - "[DONE]" + + ChatCompletionToken: + type: object + required: [id, text, logprob, special] + properties: + id: + type: integer + text: + type: string + logprob: + type: number + special: + type: boolean + + ChatCompletionChoice: + type: object + required: [index, delta, finish_reason] + properties: + index: + type: integer + finish_reason: + $ref: "#/components/schemas/FinishReason" + logprobs: + $ref: "#/components/schemas/LogprobsPart" + delta: + title: ChatCompletionChoiceDelta + type: object + required: [role] + properties: + token_id: + type: integer + role: + type: string + enum: ["system", "user", "assistant", "function", "tool"] + content: + type: string + nullable: true + tool_calls: + type: array + items: + $ref: "#/components/schemas/ToolChoice" + function_call: + type: object + deprecated: true + nullable: true + properties: + arguments: + type: string + name: + type: string + required: + - arguments + - name + + EmbeddingsRequest: + type: object + required: + - model + - input + properties: + model: + type: string + description: > + The name of the embedding model to use.
+
+ [See all of Together AI's embedding models](https://docs.together.ai/docs/serverless-models#embedding-models) + example: togethercomputer/m2-bert-80M-8k-retrieval + anyOf: + - type: string + enum: + - WhereIsAI/UAE-Large-V1 + - BAAI/bge-large-en-v1.5 + - BAAI/bge-base-en-v1.5 + - togethercomputer/m2-bert-80M-8k-retrieval + - type: string + input: + oneOf: + - type: string + description: A string providing the text for the model to embed. + example: Our solar system orbits the Milky Way galaxy at about 515,000 mph + - type: array + items: + type: string + description: A string providing the text for the model to embed. + example: Our solar system orbits the Milky Way galaxy at about 515,000 mph + example: Our solar system orbits the Milky Way galaxy at about 515,000 mph + + EmbeddingsResponse: + type: object + required: + - object + - model + - data + properties: + object: + type: string + enum: + - list + model: + type: string + data: + type: array + items: + type: object + required: [index, object, embedding] + properties: + object: + type: string + enum: + - embedding + embedding: + type: array + items: + type: number + index: + type: integer + + ModelInfoList: + type: array + items: + $ref: "#/components/schemas/ModelInfo" + ModelInfo: + type: object + required: [id, object, created, type] + properties: + id: + type: string + example: "Austism/chronos-hermes-13b" + object: + type: string + example: "model" + created: + type: integer + example: 1692896905 + type: + enum: + - chat + - language + - code + - image + - embedding + - moderation + - rerank + example: "chat" + display_name: + type: string + example: "Chronos Hermes (13B)" + organization: + type: string + example: "Austism" + link: + type: string + license: + type: string + example: "other" + context_length: + type: integer + example: 2048 + pricing: + $ref: "#/components/schemas/Pricing" + ImageResponse: + type: object + properties: + id: + type: string + model: + type: string + object: + enum: + - 
list + example: "list" + data: + type: array + items: + type: object + properties: + index: + type: integer + b64_json: + type: string + url: + type: string + required: + - index + oneOf: + - required: + - b64_json + - required: + - url + required: + - id + - model + - object + - data + Pricing: + type: object + required: [hourly, input, output, base, finetune] + properties: + hourly: + type: number + example: 0 + input: + type: number + example: 0.3 + output: + type: number + example: 0.3 + base: + type: number + example: 0 + finetune: + type: number + example: 0 + + ToolsPart: + type: object + properties: + type: + type: string + example: "tool_type" + function: + type: object + properties: + description: + type: string + example: "A description of the function." + name: + type: string + example: "function_name" + parameters: + type: object + additionalProperties: true + description: "A map of parameter names to their values." + ToolChoice: + type: object + required: [id, type, function, index] + properties: + # TODO: is this the right place for index? 
+ index: + type: number + id: + type: string + type: + type: string + enum: ["function"] + function: + type: object + required: [name, arguments] + properties: + name: + type: string + example: "function_name" + arguments: + type: string + + FileResponse: + type: object + required: + - id + - object + - created_at + - filename + - bytes + - purpose + - FileType + - Processed + - LineCount + properties: + id: + type: string + object: + type: string + example: "file" + created_at: + type: integer + example: 1715021438 + filename: + type: string + example: "my_file.jsonl" + bytes: + type: integer + example: 2664 + purpose: + enum: + - fine-tune + example: "fine-tune" + Processed: + type: boolean + FileType: + enum: + - jsonl + - parquet + example: "jsonl" + LineCount: + type: integer + FileList: + required: + - data + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FileResponse" + FileObject: + type: object + properties: + object: + type: string + id: + type: string + filename: + type: string + size: + type: integer + FileDeleteResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + FinetuneResponse: + type: object + required: + - id + - status + properties: + id: + type: string + format: uuid + training_file: + type: string + validation_file: + type: string + model: + type: string + model_output_name: + type: string + model_output_path: + type: string + trainingfile_numlines: + type: integer + trainingfile_size: + type: integer + created_at: + type: string + updated_at: + type: string + n_epochs: + type: integer + n_checkpoints: + type: integer + n_evals: + type: integer + batch_size: + type: integer + learning_rate: + type: number + lr_scheduler: + type: object + $ref: "#/components/schemas/LRScheduler" + warmup_ratio: + type: number + max_grad_norm: + type: number + format: float + weight_decay: + type: number + format: float + eval_steps: + type: integer + train_on_inputs: + oneOf: + - type: 
boolean + - type: string + enum: + - auto + default: auto + training_type: + type: object + oneOf: + - $ref: "#/components/schemas/FullTrainingType" + - $ref: "#/components/schemas/LoRATrainingType" + status: + $ref: "#/components/schemas/FinetuneJobStatus" + job_id: + type: string + events: + type: array + items: + $ref: "#/components/schemas/FineTuneEvent" + token_count: + type: integer + param_count: + type: integer + total_price: + type: integer + epochs_completed: + type: integer + queue_depth: + type: integer + wandb_project_name: + type: string + wandb_url: + type: string + + FinetuneJobStatus: + type: string + enum: + - pending + - queued + - running + - compressing + - uploading + - cancel_requested + - cancelled + - error + - completed + + FinetuneEventLevels: + type: string + enum: + - null + - info + - warning + - error + - legacy_info + - legacy_iwarning + - legacy_ierror + FinetuneEventType: + type: string + enum: + - job_pending + - job_start + - job_stopped + - model_downloading + - model_download_complete + - training_data_downloading + - training_data_download_complete + - validation_data_downloading + - validation_data_download_complete + - wandb_init + - training_start + - checkpoint_save + - billing_limit + - epoch_complete + - training_complete + - model_compressing + - model_compression_complete + - model_uploading + - model_upload_complete + - job_complete + - job_error + - cancel_requested + - job_restarted + - refund + - warning + + FinetuneList: + type: object + required: + - data + properties: + data: + type: array + items: + $ref: "#/components/schemas/FinetuneResponse" + FinetuneListEvents: + type: object + required: + - data + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuneEvent" + FineTuneEvent: + type: object + required: + - object + - created_at + - message + - type + - param_count + - token_count + - total_steps + - wandb_url + - step + - checkpoint_path + - model_path + - training_offset + - hash 
+ properties: + object: + type: string + enum: [fine-tune-event] + created_at: + type: string + level: + anyOf: + - $ref: "#/components/schemas/FinetuneEventLevels" + message: + type: string + type: + $ref: "#/components/schemas/FinetuneEventType" + param_count: + type: integer + token_count: + type: integer + total_steps: + type: integer + wandb_url: + type: string + step: + type: integer + checkpoint_path: + type: string + model_path: + type: string + training_offset: + type: integer + hash: + type: string + + FinetuneDownloadResult: + type: object + properties: + object: + enum: + - null + - local + id: + type: string + checkpoint_step: + type: integer + filename: + type: string + size: + type: integer + + FullTrainingType: + type: object + properties: + type: + type: string + enum: ["Full"] + required: + - type + LoRATrainingType: + type: object + properties: + type: + type: string + enum: ["Lora"] + lora_r: + type: integer + lora_alpha: + type: integer + lora_dropout: + type: number + format: float + default: 0.0 + lora_trainable_modules: + type: string + default: "all-linear" + required: + - type + - lora_r + - lora_alpha + LRScheduler: + type: object + properties: + lr_scheduler_type: + type: string + lr_scheduler_args: + type: object + $ref: "#/components/schemas/LinearLRSchedulerArgs" + required: + - lr_scheduler_type + LinearLRSchedulerArgs: + type: object + properties: + min_lr_ratio: + type: number + format: float + default: 0.0 + description: The ratio of the final learning rate to the peak learning rate + + Autoscaling: + type: object + description: Configuration for automatic scaling of replicas based on demand. 
+ required: + - min_replicas + - max_replicas + properties: + min_replicas: + type: integer + format: int32 + description: The minimum number of replicas to maintain, even when there is no load + examples: + - 2 + max_replicas: + type: integer + format: int32 + description: The maximum number of replicas to scale up to under load + examples: + - 5 + + HardwareSpec: + type: object + description: Detailed specifications of a hardware configuration + required: + - gpu_type + - gpu_link + - gpu_memory + - gpu_count + properties: + gpu_type: + type: string + description: The type/model of GPU + examples: + - a100-80gb + gpu_link: + type: string + description: The GPU interconnect technology + examples: + - sxm + gpu_memory: + type: number + format: float + description: Amount of GPU memory in GB + examples: + - 80 + gpu_count: + type: integer + format: int32 + description: Number of GPUs in this configuration + examples: + - 2 + + EndpointPricing: + type: object + description: Pricing details for using an endpoint + required: + - cents_per_minute + properties: + cents_per_minute: + type: number + format: float + description: Cost per minute of endpoint uptime in cents + examples: + - 5.42 + + HardwareAvailability: + type: object + description: Indicates the current availability status of a hardware configuration + required: + - status + properties: + status: + type: string + description: The availability status of the hardware configuration + enum: + - available + - unavailable + - insufficient + + HardwareWithStatus: + type: object + description: Hardware configuration details including current availability status + required: + - object + - name + - pricing + - specs + - updated_at + properties: + object: + type: string + enum: + - hardware + name: + type: string + description: Unique identifier for the hardware configuration + examples: + - 2x_nvidia_a100_80gb_sxm + pricing: + $ref: "#/components/schemas/EndpointPricing" + specs: + $ref: 
"#/components/schemas/HardwareSpec" + availability: + $ref: "#/components/schemas/HardwareAvailability" + updated_at: + type: string + format: date-time + description: Timestamp of when the hardware status was last updated + + CreateEndpointRequest: + type: object + required: + - model + - hardware + - autoscaling + properties: + display_name: + type: string + description: A human-readable name for the endpoint + examples: + - My Llama3 70b endpoint + model: + type: string + description: The model to deploy on this endpoint + examples: + - meta-llama/Llama-3-8b-chat-hf + hardware: + type: string + description: The hardware configuration to use for this endpoint + examples: + - 1x_nvidia_a100_80gb_sxm + autoscaling: + $ref: "#/components/schemas/Autoscaling" + description: Configuration for automatic scaling of the endpoint + disable_prompt_cache: + type: boolean + description: Whether to disable the prompt cache for this endpoint + default: false + disable_speculative_decoding: + type: boolean + description: Whether to disable speculative decoding for this endpoint + default: false + state: + type: string + description: The desired state of the endpoint + enum: + - STARTED + - STOPPED + default: STARTED + example: STARTED + + DedicatedEndpoint: + type: object + description: Details about a dedicated endpoint deployment + required: + - object + - id + - name + - display_name + - model + - hardware + - type + - owner + - state + - autoscaling + - created_at + properties: + object: + type: string + enum: + - endpoint + description: The type of object + example: endpoint + id: + type: string + description: Unique identifier for the endpoint + example: endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7 + name: + type: string + description: System name for the endpoint + example: devuser/meta-llama/Llama-3-8b-chat-hf-a32b82a1 + display_name: + type: string + description: Human-readable name for the endpoint + example: My Llama3 70b endpoint + model: + type: string + 
description: The model deployed on this endpoint + example: meta-llama/Llama-3-8b-chat-hf + hardware: + type: string + description: The hardware configuration used for this endpoint + example: 1x_nvidia_a100_80gb_sxm + type: + type: string + enum: + - dedicated + description: The type of endpoint + example: dedicated + owner: + type: string + description: The owner of this endpoint + example: devuser + state: + type: string + enum: + - PENDING + - STARTING + - STARTED + - STOPPING + - STOPPED + - ERROR + description: Current state of the endpoint + example: STARTED + autoscaling: + $ref: "#/components/schemas/Autoscaling" + description: Configuration for automatic scaling of the endpoint + created_at: + type: string + format: date-time + description: Timestamp when the endpoint was created + example: 2025-02-04T10:43:55.405Z + + ListEndpoint: + type: object + description: Details about an endpoint when listed via the list endpoint + required: + - id + - object + - name + - model + - type + - owner + - state + - created_at + properties: + object: + type: string + enum: + - endpoint + description: The type of object + example: endpoint + id: + type: string + description: Unique identifier for the endpoint + example: endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7 + name: + type: string + description: System name for the endpoint + example: allenai/OLMo-7B + model: + type: string + description: The model deployed on this endpoint + example: allenai/OLMo-7B + type: + type: string + enum: + - serverless + - dedicated + description: The type of endpoint + example: serverless + owner: + type: string + description: The owner of this endpoint + example: together + state: + type: string + enum: + - PENDING + - STARTING + - STARTED + - STOPPING + - STOPPED + - ERROR + description: Current state of the endpoint + example: STARTED + created_at: + type: string + format: date-time + description: Timestamp when the endpoint was created + example: 2024-02-28T21:34:35.444Z diff --git 
a/src/together/__init__.py b/src/together/__init__.py index b4e4110d..d9e2b5d7 100644 --- a/src/together/__init__.py +++ b/src/together/__init__.py @@ -33,9 +33,7 @@ requestssession: "requests.Session" | Callable[[], "requests.Session"] | None = None -aiosession: ContextVar["ClientSession" | None] = ContextVar( - "aiohttp-session", default=None -) +aiosession: ContextVar["ClientSession" | None] = ContextVar("aiohttp-session", default=None) from together.client import AsyncClient, AsyncTogether, Client, Together diff --git a/src/together/abstract/api_requestor.py b/src/together/abstract/api_requestor.py index e4004f3e..28926a36 100644 --- a/src/together/abstract/api_requestor.py +++ b/src/together/abstract/api_requestor.py @@ -338,9 +338,7 @@ def handle_error_response( try: assert isinstance(resp.data, dict) error_resp = resp.data.get("error") - assert isinstance( - error_resp, dict - ), f"Unexpected error response {error_resp}" + assert isinstance(error_resp, dict), f"Unexpected error response {error_resp}" error_data = TogetherErrorResponse(**(error_resp)) except (KeyError, TypeError): raise error.JSONError( @@ -399,9 +397,7 @@ def handle_error_response( ) @classmethod - def _validate_headers( - cls, supplied_headers: Dict[str, str] | None - ) -> Dict[str, str]: + def _validate_headers(cls, supplied_headers: Dict[str, str] | None) -> Dict[str, str]: headers: Dict[str, str] = {} if supplied_headers is None: return headers @@ -528,9 +524,7 @@ def request_raw( request_timeout=request_timeout, ) - raise error.APIConnectionError( - "Error communicating with API: {}".format(e) - ) from e + raise error.APIConnectionError("Error communicating with API: {}".format(e)) from e # retry on 5XX error or rate-limit if result is not None: @@ -595,9 +589,7 @@ async def arequest_raw( } try: - result = await session.request( - method=options.method, url=abs_url, **request_kwargs - ) + result = await session.request(method=options.method, url=abs_url, **request_kwargs) 
utils.log_debug( "Together API response", path=abs_url, @@ -607,9 +599,7 @@ async def arequest_raw( ) # Don't read the whole stream for debug logging unless necessary. if together.log == "debug": - utils.log_debug( - "API response body", body=result.content, headers=result.headers - ) + utils.log_debug("API response body", body=result.content, headers=result.headers) return result except (aiohttp.ServerTimeoutError, asyncio.TimeoutError) as e: raise error.Timeout("Request timed out") from e @@ -623,9 +613,7 @@ def _interpret_response( content_type = result.headers.get("Content-Type", "") if stream and "text/event-stream" in content_type: return ( - self._interpret_response_line( - line, result.status_code, result.headers, stream=True - ) + self._interpret_response_line(line, result.status_code, result.headers, stream=True) for line in parse_stream(result.iter_lines()) ), True else: @@ -645,16 +633,11 @@ def _interpret_response( async def _interpret_async_response( self, result: aiohttp.ClientResponse, stream: bool - ) -> ( - tuple[AsyncGenerator[TogetherResponse, None], bool] - | tuple[TogetherResponse, bool] - ): + ) -> tuple[AsyncGenerator[TogetherResponse, None], bool] | tuple[TogetherResponse, bool]: """Returns the response(s) and a bool indicating whether it is a stream.""" if stream and "text/event-stream" in result.headers.get("Content-Type", ""): return ( - self._interpret_response_line( - line, result.status, result.headers, stream=True - ) + self._interpret_response_line(line, result.status, result.headers, stream=True) async for line in parse_stream_async(result.content) ), True else: diff --git a/src/together/cli/api/chat.py b/src/together/cli/api/chat.py index d95b760a..92a74db6 100644 --- a/src/together/cli/api/chat.py +++ b/src/together/cli/api/chat.py @@ -50,9 +50,7 @@ def __init__( self.system_message = system_message self.messages = ( - [{"role": "system", "content": self.system_message}] - if self.system_message - else [] + [{"role": "system", 
"content": self.system_message}] if self.system_message else [] ) def precmd(self, line: str) -> str: @@ -98,9 +96,7 @@ def do_say(self, arg: str) -> None: def do_reset(self, arg: str) -> None: self.messages = ( - [{"role": "system", "content": self.system_message}] - if self.system_message - else [] + [{"role": "system", "content": self.system_message}] if self.system_message else [] ) def do_exit(self, arg: str) -> bool: @@ -111,9 +107,7 @@ def do_exit(self, arg: str) -> bool: @click.pass_context @click.option("--model", type=str, required=True, help="Model name") @click.option("--max-tokens", type=int, help="Max tokens to generate") -@click.option( - "--stop", type=str, multiple=True, help="List of strings to stop generation" -) +@click.option("--stop", type=str, multiple=True, help="List of strings to stop generation") @click.option("--temperature", type=float, help="Sampling temperature") @click.option("--top-p", type=int, help="Top p sampling") @click.option("--top-k", type=float, help="Top k sampling") @@ -169,17 +163,13 @@ def interactive( ) @click.option("--model", type=str, required=True, help="Model name") @click.option("--max-tokens", type=int, help="Max tokens to generate") -@click.option( - "--stop", type=str, multiple=True, help="List of strings to stop generation" -) +@click.option("--stop", type=str, multiple=True, help="List of strings to stop generation") @click.option("--temperature", type=float, help="Sampling temperature") @click.option("--top-p", type=int, help="Top p sampling") @click.option("--top-k", type=float, help="Top k sampling") @click.option("--repetition-penalty", type=float, help="Repetition penalty") @click.option("--presence-penalty", type=float, help="Presence penalty sampling method") -@click.option( - "--frequency-penalty", type=float, help="Frequency penalty sampling method" -) +@click.option("--frequency-penalty", type=float, help="Frequency penalty sampling method") @click.option("--min-p", type=float, help="Min p 
sampling") @click.option("--no-stream", is_flag=True, help="Disable streaming") @click.option("--logprobs", type=int, help="Return logprobs. Only works with --raw.") @@ -261,9 +251,7 @@ def chat( assert isinstance(response.choices, list) if raw: - click.echo( - f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}" - ) + click.echo(f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}") return should_print_header = len(response.choices) > 1 diff --git a/src/together/cli/api/completions.py b/src/together/cli/api/completions.py index d5ef8b37..b6f7e258 100644 --- a/src/together/cli/api/completions.py +++ b/src/together/cli/api/completions.py @@ -15,9 +15,7 @@ @click.argument("prompt", type=str, required=True) @click.option("--model", type=str, required=True, help="Model name") @click.option("--max-tokens", type=int, help="Max tokens to generate") -@click.option( - "--stop", type=str, multiple=True, help="List of strings to stop generation" -) +@click.option("--stop", type=str, multiple=True, help="List of strings to stop generation") @click.option("--temperature", type=float, help="Sampling temperature") @click.option("--top-p", type=int, help="Top p sampling") @click.option("--top-k", type=float, help="Top k sampling") @@ -104,9 +102,7 @@ def completions( assert isinstance(response.choices, list) if raw: - click.echo( - f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}" - ) + click.echo(f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}") return should_print_header = len(response.choices) > 1 diff --git a/src/together/cli/api/endpoints.py b/src/together/cli/api/endpoints.py index 0f810d9b..e0f9c783 100644 --- a/src/together/cli/api/endpoints.py +++ b/src/together/cli/api/endpoints.py @@ -12,10 +12,7 @@ from together.types import DedicatedEndpoint, ListEndpoint -F = TypeVar("F", bound=Callable[..., Any]) - - -def print_endpoint(endpoint: Union[DedicatedEndpoint, ListEndpoint], json: bool = False): +def 
print_endpoint(endpoint: Union[DedicatedEndpoint, ListEndpoint], json: bool = False) -> None: """Print endpoint details in a Docker-like format or JSON.""" if json: import json as json_lib @@ -65,6 +62,9 @@ def print_endpoint(endpoint: Union[DedicatedEndpoint, ListEndpoint], json: bool click.echo(f"Created:\t{endpoint.created_at}") +F = TypeVar("F", bound=Callable[..., Any]) + + def handle_api_errors(f: F) -> F: """Decorator to handle common API errors in CLI commands.""" @@ -168,7 +168,7 @@ def create( no_prompt_cache: bool, no_speculative_decoding: bool, no_auto_start: bool, -): +) -> None: """Create a new dedicated inference endpoint.""" # Map GPU types to their full hardware ID names gpu_map = { @@ -217,7 +217,7 @@ def create( @click.option("--json", is_flag=True, help="Print output in JSON format") @click.pass_obj @handle_api_errors -def get(client: Together, endpoint_id: str, json: bool): +def get(client: Together, endpoint_id: str, json: bool) -> None: """Get a dedicated inference endpoint.""" endpoint = client.endpoints.get(endpoint_id) print_endpoint(endpoint, json=json) @@ -227,7 +227,7 @@ def get(client: Together, endpoint_id: str, json: bool): @click.argument("endpoint-id", required=True) @click.pass_obj @handle_api_errors -def stop(client: Together, endpoint_id: str): +def stop(client: Together, endpoint_id: str) -> None: """Stop a dedicated inference endpoint.""" client.endpoints.update(endpoint_id, state="STOPPED") click.echo("Successfully stopped endpoint", err=True) @@ -238,7 +238,7 @@ def stop(client: Together, endpoint_id: str): @click.argument("endpoint-id", required=True) @click.pass_obj @handle_api_errors -def start(client: Together, endpoint_id: str): +def start(client: Together, endpoint_id: str) -> None: """Start a dedicated inference endpoint.""" client.endpoints.update(endpoint_id, state="STARTED") click.echo("Successfully started endpoint", err=True) @@ -249,7 +249,7 @@ def start(client: Together, endpoint_id: str): 
@click.argument("endpoint-id", required=True) @click.pass_obj @handle_api_errors -def delete(client: Together, endpoint_id: str): +def delete(client: Together, endpoint_id: str) -> None: """Delete a dedicated inference endpoint.""" client.endpoints.delete(endpoint_id) click.echo("Successfully deleted endpoint", err=True) @@ -301,7 +301,7 @@ def update( display_name: str | None, min_replicas: int | None, max_replicas: int | None, -): +) -> None: """Update a dedicated inference endpoint's configuration.""" if not any([display_name, min_replicas, max_replicas]): click.echo("Error: At least one update option must be specified", err=True) @@ -316,7 +316,7 @@ def update( sys.exit(1) # Build kwargs for the update - kwargs = {} + kwargs: Dict[str, Any] = {} if display_name is not None: kwargs["display_name"] = display_name if min_replicas is not None and max_replicas is not None: diff --git a/src/together/cli/api/files.py b/src/together/cli/api/files.py index dab00a05..668607b7 100644 --- a/src/together/cli/api/files.py +++ b/src/together/cli/api/files.py @@ -21,9 +21,7 @@ def files(ctx: click.Context) -> None: @click.pass_context @click.argument( "file", - type=click.Path( - exists=True, file_okay=True, resolve_path=True, readable=True, dir_okay=False - ), + type=click.Path(exists=True, file_okay=True, resolve_path=True, readable=True, dir_okay=False), required=True, ) @click.option( @@ -61,9 +59,7 @@ def list(ctx: click.Context) -> None: { "File name": "\n".join(wrap(i.filename or "", width=30)), "File ID": i.id, - "Size": convert_bytes( - float(str(i.bytes)) - ), # convert to string for mypy typing + "Size": convert_bytes(float(str(i.bytes))), # convert to string for mypy typing "Created At": convert_unix_timestamp(i.created_at or 0), "Line Count": i.line_count, } @@ -117,9 +113,7 @@ def delete(ctx: click.Context, id: str) -> None: @click.pass_context @click.argument( "file", - type=click.Path( - exists=True, file_okay=True, resolve_path=True, readable=True, 
dir_okay=False - ), + type=click.Path(exists=True, file_okay=True, resolve_path=True, readable=True, dir_okay=False), required=True, ) def check(ctx: click.Context, file: pathlib.Path) -> None: diff --git a/src/together/cli/api/finetune.py b/src/together/cli/api/finetune.py index 7bc02744..9dbf44f6 100644 --- a/src/together/cli/api/finetune.py +++ b/src/together/cli/api/finetune.py @@ -51,18 +51,12 @@ def fine_tuning(ctx: click.Context) -> None: @fine_tuning.command() @click.pass_context -@click.option( - "--training-file", type=str, required=True, help="Training file ID from Files API" -) +@click.option("--training-file", type=str, required=True, help="Training file ID from Files API") @click.option("--model", type=str, required=True, help="Base model name") @click.option("--n-epochs", type=int, default=1, help="Number of epochs to train for") -@click.option( - "--validation-file", type=str, default="", help="Validation file ID from Files API" -) +@click.option("--validation-file", type=str, default="", help="Validation file ID from Files API") @click.option("--n-evals", type=int, default=0, help="Number of evaluation loops") -@click.option( - "--n-checkpoints", type=int, default=1, help="Number of checkpoints to save" -) +@click.option("--n-checkpoints", type=int, default=1, help="Number of checkpoints to save") @click.option("--batch-size", type=INT_WITH_MAX, default="max", help="Train batch size") @click.option("--learning-rate", type=float, default=1e-5, help="Learning rate") @click.option( @@ -104,9 +98,7 @@ def fine_tuning(ctx: click.Context) -> None: default="all-linear", help="Trainable modules for LoRA adapters. 
For example, 'all-linear', 'q_proj,v_proj'", ) -@click.option( - "--suffix", type=str, default=None, help="Suffix for the fine-tuned model name" -) +@click.option("--suffix", type=str, default=None, help="Suffix for the fine-tuned model name") @click.option("--wandb-api-key", type=str, default=None, help="Wandb API key") @click.option("--wandb-base-url", type=str, default=None, help="Wandb base URL") @click.option("--wandb-project-name", type=str, default=None, help="Wandb project name") @@ -182,15 +174,11 @@ def create( train_on_inputs=train_on_inputs, ) - model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits( - model=model - ) + model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits(model=model) if lora: if model_limits.lora_training is None: - raise click.BadParameter( - f"LoRA fine-tuning is not supported for the model `{model}`" - ) + raise click.BadParameter(f"LoRA fine-tuning is not supported for the model `{model}`") default_values = { "lora_r": model_limits.lora_training.max_rank, @@ -207,9 +195,7 @@ def create( training_args["lora_alpha"] = training_args["lora_r"] * 2 else: if model_limits.full_training is None: - raise click.BadParameter( - f"Full fine-tuning is not supported for the model `{model}`" - ) + raise click.BadParameter(f"Full fine-tuning is not supported for the model `{model}`") for param in ["lora_r", "lora_dropout", "lora_alpha", "lora_trainable_modules"]: param_source = ctx.get_parameter_source(param) # type: ignore[attr-defined] @@ -240,9 +226,7 @@ def create( report_string = f"Successfully submitted a fine-tuning job {response.id}" if response.created_at is not None: - created_time = datetime.strptime( - response.created_at, "%Y-%m-%dT%H:%M:%S.%f%z" - ) + created_time = datetime.strptime(response.created_at, "%Y-%m-%dT%H:%M:%S.%f%z") # created_at reports UTC time, we use .astimezone() to convert to local time formatted_time = created_time.astimezone().strftime("%m/%d/%Y, %H:%M:%S") report_string 
+= f" at {formatted_time}" @@ -299,9 +283,7 @@ def retrieve(ctx: click.Context, fine_tune_id: str) -> None: @fine_tuning.command() @click.pass_context @click.argument("fine_tune_id", type=str, required=True) -@click.option( - "--quiet", is_flag=True, help="Do not prompt for confirmation before cancelling job" -) +@click.option("--quiet", is_flag=True, help="Do not prompt for confirmation before cancelling job") def cancel(ctx: click.Context, fine_tune_id: str, quiet: bool = False) -> None: """Cancel fine-tuning job""" client: Together = ctx.obj diff --git a/src/together/cli/api/utils.py b/src/together/cli/api/utils.py index 08dfe492..d4981b61 100644 --- a/src/together/cli/api/utils.py +++ b/src/together/cli/api/utils.py @@ -39,9 +39,7 @@ def convert( return bool(value) except ValueError: self.fail( - _("{value!r} is not a valid {type}.").format( - value=value, type=self.name - ), + _("{value!r} is not a valid {type}.").format(value=value, type=self.name), param, ctx, ) diff --git a/src/together/error.py b/src/together/error.py index b5bdfd40..05718599 100644 --- a/src/together/error.py +++ b/src/together/error.py @@ -11,9 +11,7 @@ class TogetherException(Exception): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, headers: str | Dict[Any, Any] | None = None, request_id: str | None = None, http_status: int | None = None, @@ -49,9 +47,7 @@ def __repr__(self) -> str: class AuthenticationError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -60,9 +56,7 @@ def __init__( class ResponseError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | 
RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -71,9 +65,7 @@ def __init__( class JSONError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -95,9 +87,7 @@ def __init__(self, model: str | None = "model", **kwargs: Any) -> None: class RateLimitError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -106,9 +96,7 @@ def __init__( class FileTypeError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -117,9 +105,7 @@ def __init__( class AttributeError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -128,9 +114,7 @@ def __init__( class Timeout(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -139,9 +123,7 @@ def __init__( class 
APIConnectionError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -150,9 +132,7 @@ def __init__( class InvalidRequestError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -161,9 +141,7 @@ def __init__( class APIError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -172,9 +150,7 @@ def __init__( class ServiceUnavailableError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -183,9 +159,7 @@ def __init__( class DownloadError(TogetherException): def __init__( self, - message: ( - TogetherErrorResponse | Exception | str | RequestException | None - ) = None, + message: TogetherErrorResponse | Exception | str | RequestException | None = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) diff --git a/src/together/filemanager.py b/src/together/filemanager.py index ac907985..f47787ff 100644 --- a/src/together/filemanager.py +++ b/src/together/filemanager.py @@ -93,8 +93,7 @@ def _prepare_output( content_type = str(headers.get("content-type")) assert remote_name, ( - "No model name found in 
fine_tune object. " - "Please specify an `output` file name." + "No model name found in fine_tune object. " "Please specify an `output` file name." ) if step > 0: @@ -150,9 +149,7 @@ def get_file_metadata( try: response.raise_for_status() except requests.exceptions.HTTPError as e: - raise APIError( - "Error fetching file metadata", http_status=response.status_code - ) from e + raise APIError("Error fetching file metadata", http_status=response.status_code) from e headers = response.headers @@ -180,9 +177,7 @@ def download( ) # pre-fetch remote file name and file size - file_path, file_size = self.get_file_metadata( - url, output, remote_name, fetch_metadata - ) + file_path, file_size = self.get_file_metadata(url, output, remote_name, fetch_metadata) temp_file_manager = partial( tempfile.NamedTemporaryFile, mode="wb", dir=file_path.parent, delete=False diff --git a/src/together/legacy/embeddings.py b/src/together/legacy/embeddings.py index 4afef64e..0257d0f6 100644 --- a/src/together/legacy/embeddings.py +++ b/src/together/legacy/embeddings.py @@ -22,6 +22,4 @@ def create( client = together.Together(api_key=api_key) - return client.embeddings.create(input=input, **kwargs).model_dump( - exclude_none=True - ) + return client.embeddings.create(input=input, **kwargs).model_dump(exclude_none=True) diff --git a/src/together/legacy/files.py b/src/together/legacy/files.py index aa6feb55..5add9d87 100644 --- a/src/together/legacy/files.py +++ b/src/together/legacy/files.py @@ -54,9 +54,7 @@ def upload( client = together.Together(api_key=api_key) # disabling the check, because it was run previously - response = client.files.upload(file=file, check=False).model_dump( - exclude_none=True - ) + response = client.files.upload(file=file, check=False).model_dump(exclude_none=True) if check: response["report_dict"] = report_dict @@ -113,15 +111,11 @@ def retrieve_content( client = together.Together(api_key=api_key) - return client.files.retrieve_content(id=file_id, 
output=output).dict( - exclude_none=True - ) + return client.files.retrieve_content(id=file_id, output=output).dict(exclude_none=True) @classmethod @deprecated # type: ignore - def save_jsonl( - self, data: Dict[str, str], output_path: str, append: bool = False - ) -> None: + def save_jsonl(self, data: Dict[str, str], output_path: str, append: bool = False) -> None: """ Write list of objects to a JSON lines file. """ diff --git a/src/together/legacy/finetune.py b/src/together/legacy/finetune.py index fe53be0e..22ec4e28 100644 --- a/src/together/legacy/finetune.py +++ b/src/together/legacy/finetune.py @@ -18,9 +18,7 @@ def create( n_checkpoints: int | None = 1, batch_size: int | None = 32, learning_rate: float = 0.00001, - suffix: ( - str | None - ) = None, # resulting finetuned model name will include the suffix + suffix: str | None = None, # resulting finetuned model name will include the suffix estimate_price: bool = False, wandb_api_key: str | None = None, confirm_inputs: bool = False, @@ -80,9 +78,7 @@ def retrieve( client = together.Together(api_key=api_key) - return client.fine_tuning.retrieve(id=fine_tune_id).model_dump( - exclude_none=True - ) + return client.fine_tuning.retrieve(id=fine_tune_id).model_dump(exclude_none=True) @classmethod @deprecated # type: ignore @@ -116,9 +112,7 @@ def list_events( client = together.Together(api_key=api_key) - return client.fine_tuning.list_events(id=fine_tune_id).model_dump( - exclude_none=True - ) + return client.fine_tuning.list_events(id=fine_tune_id).model_dump(exclude_none=True) @classmethod @deprecated # type: ignore diff --git a/src/together/legacy/images.py b/src/together/legacy/images.py index 537023d8..1d94e3b7 100644 --- a/src/together/legacy/images.py +++ b/src/together/legacy/images.py @@ -22,6 +22,4 @@ def create( client = together.Together(api_key=api_key) - return client.images.generate(prompt=prompt, **kwargs).model_dump( - exclude_none=True - ) + return client.images.generate(prompt=prompt, 
**kwargs).model_dump(exclude_none=True) diff --git a/src/together/resources/endpoints.py b/src/together/resources/endpoints.py index c74c2afe..47ce3012 100644 --- a/src/together/resources/endpoints.py +++ b/src/together/resources/endpoints.py @@ -36,9 +36,10 @@ def __init__(self, client: TogetherClient) -> None: self._loop = asyncio.new_event_loop() asyncio.set_event_loop(self._loop) - def __del__(self): + def __del__(self) -> None: if hasattr(self, "api_client"): - self._loop.run_until_complete(self.api_client.close()) + # Using type: ignore since close() is untyped in the library + self._loop.run_until_complete(self.api_client.close()) # type: ignore self._loop.close() def create( @@ -70,7 +71,7 @@ def create( DedicatedEndpoint: Object containing endpoint information """ - async def _create(): + async def _create() -> DedicatedEndpoint: request = CreateEndpointRequest( model=model, hardware=hardware, @@ -95,11 +96,11 @@ def list(self, type: Literal["dedicated", "serverless"] | None = None) -> List[L Dict[str, Any]: Response containing list of endpoints in the data field """ - async def _list(): - return await self._api.list_endpoints(type=type) + async def _list() -> List[ListEndpoint]: + response = await self._api.list_endpoints(type=type) + return response.data - response = self._loop.run_until_complete(_list()) - return response.data + return self._loop.run_until_complete(_list()) def get(self, endpoint_id: str) -> DedicatedEndpoint: """ @@ -112,7 +113,7 @@ def get(self, endpoint_id: str) -> DedicatedEndpoint: DedicatedEndpoint: Object containing endpoint information """ - async def _get(): + async def _get() -> DedicatedEndpoint: return await self._api.get_endpoint(endpoint_id=endpoint_id) return self._loop.run_until_complete(_get()) @@ -125,7 +126,7 @@ def delete(self, endpoint_id: str) -> None: endpoint_id (str): ID of the endpoint to delete """ - async def _delete(): + async def _delete() -> None: return await 
self._api.delete_endpoint(endpoint_id=endpoint_id) return self._loop.run_until_complete(_delete()) @@ -153,7 +154,7 @@ def update( DedicatedEndpoint: Object containing endpoint information """ - async def _update(): + async def _update() -> DedicatedEndpoint: kwargs: Dict[str, Any] = {} if min_replicas is not None or max_replicas is not None: current_min = min_replicas diff --git a/src/together/resources/files.py b/src/together/resources/files.py index 14500b24..341ac1aa 100644 --- a/src/together/resources/files.py +++ b/src/together/resources/files.py @@ -83,9 +83,7 @@ def retrieve(self, id: str) -> FileResponse: return FileResponse(**response.data) - def retrieve_content( - self, id: str, *, output: Path | str | None = None - ) -> FileObject: + def retrieve_content(self, id: str, *, output: Path | str | None = None) -> FileObject: download_manager = DownloadManager(self._client) if isinstance(output, str): @@ -163,9 +161,7 @@ async def retrieve(self, id: str) -> FileResponse: return FileResponse(**response.data) - async def retrieve_content( - self, id: str, *, output: Path | str | None = None - ) -> FileObject: + async def retrieve_content(self, id: str, *, output: Path | str | None = None) -> FileObject: raise NotImplementedError() async def delete(self, id: str) -> FileDeleteResponse: diff --git a/src/together/resources/finetune.py b/src/together/resources/finetune.py index b58cdae2..b1fdd6ea 100644 --- a/src/together/resources/finetune.py +++ b/src/together/resources/finetune.py @@ -75,17 +75,13 @@ def createFinetuneRequest( ) batch_size = ( - batch_size - if batch_size != "max" - else model_limits.lora_training.max_batch_size + batch_size if batch_size != "max" else model_limits.lora_training.max_batch_size ) else: if model_limits.full_training is None: raise ValueError("Full training is not supported for the selected model.") batch_size = ( - batch_size - if batch_size != "max" - else model_limits.full_training.max_batch_size + batch_size if batch_size != 
"max" else model_limits.full_training.max_batch_size ) if warmup_ratio > 1 or warmup_ratio < 0: @@ -406,9 +402,7 @@ def download( if isinstance(ft_job.training_type, FullTrainingType): if checkpoint_type != DownloadCheckpointType.DEFAULT: - raise ValueError( - "Only DEFAULT checkpoint type is allowed for FullTrainingType" - ) + raise ValueError("Only DEFAULT checkpoint type is allowed for FullTrainingType") url += "&checkpoint=modelOutputPath" elif isinstance(ft_job.training_type, LoRATrainingType): if checkpoint_type == DownloadCheckpointType.DEFAULT: @@ -419,9 +413,7 @@ def download( elif checkpoint_type == DownloadCheckpointType.ADAPTER: url += f"&checkpoint={DownloadCheckpointType.ADAPTER.value}" else: - raise ValueError( - f"Invalid checkpoint type for LoRATrainingType: {checkpoint_type}" - ) + raise ValueError(f"Invalid checkpoint type for LoRATrainingType: {checkpoint_type}") remote_name = ft_job.output_name diff --git a/src/together/utils/api_helpers.py b/src/together/utils/api_helpers.py index 2ec9d3f9..d15eb33d 100644 --- a/src/together/utils/api_helpers.py +++ b/src/together/utils/api_helpers.py @@ -36,9 +36,7 @@ def get_headers( user_agent = "Together/v1 PythonBindings/%s" % (together.version,) - uname_without_node = " ".join( - v for k, v in platform.uname()._asdict().items() if k != "node" - ) + uname_without_node = " ".join(v for k, v in platform.uname()._asdict().items() if k != "node") ua = { "bindings_version": together.version, "httplib": "requests", diff --git a/src/together/utils/files.py b/src/together/utils/files.py index cc39fca0..88839a70 100644 --- a/src/together/utils/files.py +++ b/src/together/utils/files.py @@ -145,10 +145,7 @@ def _check_jsonl(file: Path) -> Dict[str, Any]: # Check that there are no extra columns for column in json_line: - if ( - column - not in JSONL_REQUIRED_COLUMNS_MAP[possible_format] - ): + if column not in JSONL_REQUIRED_COLUMNS_MAP[possible_format]: raise InvalidFileFormatError( message=f'Found extra column 
"{column}" in the line {idx + 1}.', line_number=idx + 1, @@ -166,9 +163,7 @@ def _check_jsonl(file: Path) -> Dict[str, Any]: ) if current_format == DatasetFormat.CONVERSATION: - message_column = JSONL_REQUIRED_COLUMNS_MAP[ - DatasetFormat.CONVERSATION - ][0] + message_column = JSONL_REQUIRED_COLUMNS_MAP[DatasetFormat.CONVERSATION][0] if not isinstance(json_line[message_column], list): raise InvalidFileFormatError( message=f"Invalid format on line {idx + 1} of the input file. " @@ -280,8 +275,7 @@ def _check_jsonl(file: Path) -> Dict[str, Any]: report_dict["load_json"] = False if idx < 0: report_dict["message"] = ( - "Unable to decode file. " - "File may be empty or in an unsupported format. " + "Unable to decode file. " "File may be empty or in an unsupported format. " ) else: report_dict["message"] = ( diff --git a/src/together/version.py b/src/together/version.py index 4eb61bf1..133c5573 100644 --- a/src/together/version.py +++ b/src/together/version.py @@ -1,6 +1,4 @@ import importlib.metadata -VERSION = importlib.metadata.version( - "together" -) # gets version number from pyproject.toml +VERSION = importlib.metadata.version("together") # gets version number from pyproject.toml diff --git a/tests/unit/test_async_client.py b/tests/unit/test_async_client.py index 0b11b39d..a65e2c24 100644 --- a/tests/unit/test_async_client.py +++ b/tests/unit/test_async_client.py @@ -59,9 +59,7 @@ def test_init_with_supplied_headers(self): supplied_headers = {"header1": "value1", "header2": "value2"} - async_together = AsyncTogether( - api_key="fake_api_key", supplied_headers=supplied_headers - ) + async_together = AsyncTogether(api_key="fake_api_key", supplied_headers=supplied_headers) assert async_together.client.supplied_headers == supplied_headers @@ -83,9 +81,7 @@ def test_chat_initialized(self, async_together_instance): assert isinstance(async_together_instance.chat._client, TogetherClient) - assert isinstance( - async_together_instance.chat.completions._client, 
TogetherClient - ) + assert isinstance(async_together_instance.chat.completions._client, TogetherClient) def test_embeddings_initialized(self, async_together_instance): """ diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index f8bdcbe6..c57cbe4d 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -60,9 +60,7 @@ def test_init_with_supplied_headers(self): supplied_headers = {"header1": "value1", "header2": "value2"} - sync_together = Together( - api_key="fake_api_key", supplied_headers=supplied_headers - ) + sync_together = Together(api_key="fake_api_key", supplied_headers=supplied_headers) assert sync_together.client.supplied_headers == supplied_headers @@ -84,9 +82,7 @@ def test_chat_initialized(self, sync_together_instance): assert isinstance(sync_together_instance.chat._client, TogetherClient) - assert isinstance( - sync_together_instance.chat.completions._client, TogetherClient - ) + assert isinstance(sync_together_instance.chat.completions._client, TogetherClient) def test_embeddings_initialized(self, sync_together_instance): """ diff --git a/tests/unit/test_files_checks.py b/tests/unit/test_files_checks.py index 37c698d2..903effdd 100644 --- a/tests/unit/test_files_checks.py +++ b/tests/unit/test_files_checks.py @@ -171,10 +171,7 @@ def test_check_jsonl_missing_required_field(tmp_path: Path): report = check_file(file) assert not report["is_check_passed"] - assert ( - "Error parsing file. Could not detect a format for the line 2" - in report["message"] - ) + assert "Error parsing file. 
Could not detect a format for the line 2" in report["message"] def test_check_jsonl_inconsistent_dataset_format(tmp_path: Path): @@ -190,10 +187,7 @@ def test_check_jsonl_inconsistent_dataset_format(tmp_path: Path): report = check_file(file) assert not report["is_check_passed"] - assert ( - "All samples in the dataset must have the same dataset format" - in report["message"] - ) + assert "All samples in the dataset must have the same dataset format" in report["message"] def test_check_jsonl_invalid_role(tmp_path: Path): @@ -275,10 +269,7 @@ def test_check_jsonl_wrong_turn_type(tmp_path: Path): report = check_file(file) assert not report["is_check_passed"] - assert ( - "Invalid format on line 1 of the input file. Expected a dictionary" - in report["message"] - ) + assert "Invalid format on line 1 of the input file. Expected a dictionary" in report["message"] def test_check_jsonl_extra_column(tmp_path: Path): @@ -300,6 +291,4 @@ def test_check_jsonl_empty_messages(tmp_path: Path): report = check_file(file) assert not report["is_check_passed"] - assert ( - "Expected a non-empty list of messages. Found empty list" in report["message"] - ) + assert "Expected a non-empty list of messages. 
Found empty list" in report["message"] From c24a510eededeffabf50dcf74682284f01f32627 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Wed, 12 Feb 2025 19:53:20 +0000 Subject: [PATCH 05/29] update makefile --- Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index c3d3462c..d32e7f6b 100644 --- a/Makefile +++ b/Makefile @@ -27,6 +27,7 @@ integration_tests: install: poetry install --with quality,tests poetry run pre-commit install + $(MAKE) generate-client format: poetry run pre-commit run --all-files @@ -53,6 +54,4 @@ help: @echo 'test_watch - run unit tests in watch mode' @echo 'extended_tests - run extended tests' @echo 'integration_tests - run integration tests' - -generate-client: - python scripts/generate_api_client.py + @echo 'generate-client - generate the OpenAPI client' From f3b7fe8f9cf3a56d15cff4409bf21a82b0bb62b9 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Wed, 12 Feb 2025 20:01:55 +0000 Subject: [PATCH 06/29] rerun formatter --- examples/tokenize_data.py | 17 +++++-- poetry.lock | 30 ++++-------- pyproject.toml | 9 ---- scripts/.gitignore | 1 - scripts/generate_api_client.py | 4 +- scripts/openapi.yaml | 66 +++++++++++++------------- src/together/__init__.py | 4 +- src/together/abstract/api_requestor.py | 33 +++++++++---- src/together/cli/api/chat.py | 24 +++++++--- src/together/cli/api/completions.py | 8 +++- src/together/cli/api/endpoints.py | 17 +++++-- src/together/cli/api/files.py | 12 +++-- src/together/cli/api/finetune.py | 36 ++++++++++---- src/together/cli/api/utils.py | 4 +- src/together/cli/cli.py | 8 +++- src/together/error.py | 52 +++++++++++++++----- src/together/filemanager.py | 11 +++-- src/together/legacy/embeddings.py | 4 +- src/together/legacy/files.py | 12 +++-- src/together/legacy/finetune.py | 12 +++-- src/together/legacy/images.py | 4 +- src/together/resources/endpoints.py | 12 +++-- src/together/resources/files.py | 8 +++- src/together/resources/finetune.py | 16 
+++++-- src/together/types/__init__.py | 6 ++- src/together/utils/api_helpers.py | 4 +- src/together/utils/files.py | 12 +++-- src/together/version.py | 4 +- tests/unit/test_async_client.py | 8 +++- tests/unit/test_client.py | 8 +++- tests/unit/test_files_checks.py | 19 ++++++-- 31 files changed, 313 insertions(+), 152 deletions(-) diff --git a/examples/tokenize_data.py b/examples/tokenize_data.py index c1ba67df..327f9cd1 100644 --- a/examples/tokenize_data.py +++ b/examples/tokenize_data.py @@ -25,7 +25,9 @@ def tokenize_variable_length( tokenizer: PreTrainedTokenizerBase, add_special_tokens: bool = True, ) -> BatchEncoding: - tokenized = tokenizer(data["text"], add_special_tokens=add_special_tokens, truncation=False) + tokenized = tokenizer( + data["text"], add_special_tokens=add_special_tokens, truncation=False + ) return tokenized @@ -100,7 +102,10 @@ def pack_sequences( output = {"input_ids": packed_sequences} if add_labels: output["labels"] = [ - [LOSS_IGNORE_INDEX if token_id == pad_token_id else token_id for token_id in example] + [ + LOSS_IGNORE_INDEX if token_id == pad_token_id else token_id + for token_id in example + ] for example in output["input_ids"] ] @@ -196,14 +201,18 @@ def process_data(args: argparse.Namespace) -> None: if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Pretokenize examples for finetuning via Together") + parser = argparse.ArgumentParser( + description="Pretokenize examples for finetuning via Together" + ) parser.add_argument( "--dataset", type=str, default="clam004/antihallucination_dataset", help="Dataset name on the Hugging Face Hub", ) - parser.add_argument("--max-seq-length", type=int, default=8192, help="Maximum sequence length") + parser.add_argument( + "--max-seq-length", type=int, default=8192, help="Maximum sequence length" + ) parser.add_argument( "--add-labels", action="store_true", diff --git a/poetry.lock b/poetry.lock index 8a45d721..80a29942 100644 --- a/poetry.lock +++ b/poetry.lock @@ 
-763,18 +763,6 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] -[[package]] -name = "makefun" -version = "1.15.6" -description = "Small library to dynamically create python functions." -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "makefun-1.15.6-py2.py3-none-any.whl", hash = "sha256:e69b870f0bb60304765b1e3db576aaecf2f9b3e5105afe8cfeff8f2afe6ad067"}, - {file = "makefun-1.15.6.tar.gz", hash = "sha256:26bc63442a6182fb75efed8b51741dd2d1db2f176bec8c64e20a586256b8f149"}, -] - [[package]] name = "markdown-it-py" version = "3.0.0" @@ -948,7 +936,7 @@ version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" -groups = ["dev", "quality"] +groups = ["quality"] files = [ {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, @@ -1008,7 +996,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.5" -groups = ["dev", "quality"] +groups = ["quality"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -1653,7 +1641,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main", "examples"] +groups = ["examples"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -2062,7 +2050,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["main", "examples"] +groups = ["examples"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -2207,7 +2195,7 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" -groups = ["dev", "quality", "tests"] +groups = ["quality", "tests"] markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, @@ -2388,7 +2376,7 @@ version = "2.32.0.20241016" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" -groups = ["dev", "quality"] +groups = ["quality"] files = [ {file = "types-requests-2.32.0.20241016.tar.gz", hash = 
"sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, @@ -2430,7 +2418,7 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" -groups = ["main", "dev", "examples", "quality", "tests"] +groups = ["main", "examples", "quality", "tests"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -2455,7 +2443,7 @@ version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" -groups = ["main", "dev", "examples", "quality"] +groups = ["main", "examples", "quality"] files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, @@ -2778,4 +2766,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.1" python-versions = "^3.8" -content-hash = "82809528a39fc644e6e9cef3bf8f652b195e22929c8b840e2c7d2bf5367f0ced" +content-hash = "a92016667f534f244614082fb2913bd0da6211150a99ec5a545604ec4f0306e6" diff --git a/pyproject.toml b/pyproject.toml index b823440c..c23edbab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,10 +39,8 @@ aiohttp = "^3.9.3" filelock = "^3.13.1" eval-type-backport = ">=0.1.3,<0.3.0" click = "^8.1.7" -python-dateutil = "^2.8.2" pillow = "^10.3.0" pyarrow = ">=10.0.1" -makefun = "^1.15.2" numpy = [ { version = ">=1.23.5", python = "<3.12" }, { version = ">=1.26.0", python = ">=3.12" }, @@ -78,12 +76,6 @@ datasets = ">=2.18,<4.0" 
transformers = "^4.39.3" -[tool.poetry.group.dev] -optional = true - -[tool.poetry.group.dev.dependencies] -mypy = "^1.14.1" -types-requests = "^2.31.0" [tool.poetry.urls] "Homepage" = "https://github.com/togethercomputer/together-python" @@ -94,7 +86,6 @@ together = "together.cli.cli:main" [tool.black] target-version = ['py310'] -line-length = 100 [tool.ruff.lint] # Never enforce `E501` (line length violations). diff --git a/scripts/.gitignore b/scripts/.gitignore index 4bf363be..c3f732c5 100644 --- a/scripts/.gitignore +++ b/scripts/.gitignore @@ -1,2 +1 @@ openapi-generator-cli.jar - diff --git a/scripts/generate_api_client.py b/scripts/generate_api_client.py index 74f52457..01d25dc7 100755 --- a/scripts/generate_api_client.py +++ b/scripts/generate_api_client.py @@ -7,7 +7,9 @@ from pathlib import Path -OPENAPI_SPEC_URL = "https://raw.githubusercontent.com/togethercomputer/openapi/main/openapi.yaml" +OPENAPI_SPEC_URL = ( + "https://raw.githubusercontent.com/togethercomputer/openapi/main/openapi.yaml" +) OUTPUT_DIR = Path(__file__).parent.parent / "src" / "together" / "generated" GENERATOR_JAR_URL = "https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/7.11.0/openapi-generator-cli-7.11.0.jar" GENERATOR_JAR = Path(__file__).parent / "openapi-generator-cli.jar" diff --git a/scripts/openapi.yaml b/scripts/openapi.yaml index c951154c..040c915d 100644 --- a/scripts/openapi.yaml +++ b/scripts/openapi.yaml @@ -933,7 +933,7 @@ paths: /hardware: get: tags: ["Hardware"] - summary: List available hardware configurations + summary: List available hardware configurations description: Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. 
operationId: listHardware parameters: @@ -1058,21 +1058,21 @@ components: example: Our solar system orbits the Milky Way galaxy at about 515,000 mph example: - { - "title": "Llama", - "text": "The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era.", - } + "title": "Llama", + "text": "The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era.", + } - { - "title": "Panda", - "text": "The giant panda (Ailuropoda melanoleuca), also known as the panda bear or simply panda, is a bear species endemic to China.", - } + "title": "Panda", + "text": "The giant panda (Ailuropoda melanoleuca), also known as the panda bear or simply panda, is a bear species endemic to China.", + } - { - "title": "Guanaco", - "text": "The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations.", - } + "title": "Guanaco", + "text": "The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations.", + } - { - "title": "Wild Bactrian camel", - "text": "The wild Bactrian camel (Camelus ferus) is an endangered species of camel endemic to Northwest China and southwestern Mongolia.", - } + "title": "Wild Bactrian camel", + "text": "The wild Bactrian camel (Camelus ferus) is an endangered species of camel endemic to Northwest China and southwestern Mongolia.", + } top_n: type: integer description: The number of top results to return. 
@@ -1132,21 +1132,21 @@ components: nullable: true example: - { - "index": 0, - "relevance_score": 0.29980177813003117, - "document": - { - "text": '{"title":"Llama","text":"The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."}', - }, - } + "index": 0, + "relevance_score": 0.29980177813003117, + "document": + { + "text": '{"title":"Llama","text":"The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."}', + }, + } - { - "index": 2, - "relevance_score": 0.2752447527354349, - "document": - { - "text": '{"title":"Guanaco","text":"The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations."}', - }, - } + "index": 2, + "relevance_score": 0.2752447527354349, + "document": + { + "text": '{"title":"Guanaco","text":"The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations."}', + }, + } usage: $ref: "#/components/schemas/UsageData" example: @@ -1345,7 +1345,7 @@ components: format: float description: Adjusts the likelihood of specific tokens appearing in the generated output. - example: {"1024": -10.5, "105": 21.4} + example: { "1024": -10.5, "105": 21.4 } seed: type: integer description: Seed value for reproducibility. @@ -1593,7 +1593,7 @@ components: format: int32 context_length_exceeded_behavior: type: string - enum: [ "truncate", "error" ] + enum: ["truncate", "error"] default: "error" description: Defined the behavior of the API when max_tokens exceed the maximum context length of the model. When set to 'error', API will return 400 with appropriate error message. 
When set to 'truncate', override the max_tokens with maximum context length of the model. repetition_penalty: @@ -1633,7 +1633,7 @@ components: type: number format: float description: Adjusts the likelihood of specific tokens appearing in the generated output. - example: {"1024": -10.5, "105": 21.4} + example: { "1024": -10.5, "105": 21.4 } seed: type: integer description: Seed value for reproducibility. @@ -2714,7 +2714,7 @@ components: - STARTED - STOPPED default: STARTED - example: STARTED + example: STARTED DedicatedEndpoint: type: object @@ -2773,7 +2773,7 @@ components: enum: - PENDING - STARTING - - STARTED + - STARTED - STOPPING - STOPPED - ERROR @@ -2835,7 +2835,7 @@ components: enum: - PENDING - STARTING - - STARTED + - STARTED - STOPPING - STOPPED - ERROR diff --git a/src/together/__init__.py b/src/together/__init__.py index d9e2b5d7..b4e4110d 100644 --- a/src/together/__init__.py +++ b/src/together/__init__.py @@ -33,7 +33,9 @@ requestssession: "requests.Session" | Callable[[], "requests.Session"] | None = None -aiosession: ContextVar["ClientSession" | None] = ContextVar("aiohttp-session", default=None) +aiosession: ContextVar["ClientSession" | None] = ContextVar( + "aiohttp-session", default=None +) from together.client import AsyncClient, AsyncTogether, Client, Together diff --git a/src/together/abstract/api_requestor.py b/src/together/abstract/api_requestor.py index 28926a36..e4004f3e 100644 --- a/src/together/abstract/api_requestor.py +++ b/src/together/abstract/api_requestor.py @@ -338,7 +338,9 @@ def handle_error_response( try: assert isinstance(resp.data, dict) error_resp = resp.data.get("error") - assert isinstance(error_resp, dict), f"Unexpected error response {error_resp}" + assert isinstance( + error_resp, dict + ), f"Unexpected error response {error_resp}" error_data = TogetherErrorResponse(**(error_resp)) except (KeyError, TypeError): raise error.JSONError( @@ -397,7 +399,9 @@ def handle_error_response( ) @classmethod - def 
_validate_headers(cls, supplied_headers: Dict[str, str] | None) -> Dict[str, str]: + def _validate_headers( + cls, supplied_headers: Dict[str, str] | None + ) -> Dict[str, str]: headers: Dict[str, str] = {} if supplied_headers is None: return headers @@ -524,7 +528,9 @@ def request_raw( request_timeout=request_timeout, ) - raise error.APIConnectionError("Error communicating with API: {}".format(e)) from e + raise error.APIConnectionError( + "Error communicating with API: {}".format(e) + ) from e # retry on 5XX error or rate-limit if result is not None: @@ -589,7 +595,9 @@ async def arequest_raw( } try: - result = await session.request(method=options.method, url=abs_url, **request_kwargs) + result = await session.request( + method=options.method, url=abs_url, **request_kwargs + ) utils.log_debug( "Together API response", path=abs_url, @@ -599,7 +607,9 @@ async def arequest_raw( ) # Don't read the whole stream for debug logging unless necessary. if together.log == "debug": - utils.log_debug("API response body", body=result.content, headers=result.headers) + utils.log_debug( + "API response body", body=result.content, headers=result.headers + ) return result except (aiohttp.ServerTimeoutError, asyncio.TimeoutError) as e: raise error.Timeout("Request timed out") from e @@ -613,7 +623,9 @@ def _interpret_response( content_type = result.headers.get("Content-Type", "") if stream and "text/event-stream" in content_type: return ( - self._interpret_response_line(line, result.status_code, result.headers, stream=True) + self._interpret_response_line( + line, result.status_code, result.headers, stream=True + ) for line in parse_stream(result.iter_lines()) ), True else: @@ -633,11 +645,16 @@ def _interpret_response( async def _interpret_async_response( self, result: aiohttp.ClientResponse, stream: bool - ) -> tuple[AsyncGenerator[TogetherResponse, None], bool] | tuple[TogetherResponse, bool]: + ) -> ( + tuple[AsyncGenerator[TogetherResponse, None], bool] + | 
tuple[TogetherResponse, bool] + ): """Returns the response(s) and a bool indicating whether it is a stream.""" if stream and "text/event-stream" in result.headers.get("Content-Type", ""): return ( - self._interpret_response_line(line, result.status, result.headers, stream=True) + self._interpret_response_line( + line, result.status, result.headers, stream=True + ) async for line in parse_stream_async(result.content) ), True else: diff --git a/src/together/cli/api/chat.py b/src/together/cli/api/chat.py index 92a74db6..d95b760a 100644 --- a/src/together/cli/api/chat.py +++ b/src/together/cli/api/chat.py @@ -50,7 +50,9 @@ def __init__( self.system_message = system_message self.messages = ( - [{"role": "system", "content": self.system_message}] if self.system_message else [] + [{"role": "system", "content": self.system_message}] + if self.system_message + else [] ) def precmd(self, line: str) -> str: @@ -96,7 +98,9 @@ def do_say(self, arg: str) -> None: def do_reset(self, arg: str) -> None: self.messages = ( - [{"role": "system", "content": self.system_message}] if self.system_message else [] + [{"role": "system", "content": self.system_message}] + if self.system_message + else [] ) def do_exit(self, arg: str) -> bool: @@ -107,7 +111,9 @@ def do_exit(self, arg: str) -> bool: @click.pass_context @click.option("--model", type=str, required=True, help="Model name") @click.option("--max-tokens", type=int, help="Max tokens to generate") -@click.option("--stop", type=str, multiple=True, help="List of strings to stop generation") +@click.option( + "--stop", type=str, multiple=True, help="List of strings to stop generation" +) @click.option("--temperature", type=float, help="Sampling temperature") @click.option("--top-p", type=int, help="Top p sampling") @click.option("--top-k", type=float, help="Top k sampling") @@ -163,13 +169,17 @@ def interactive( ) @click.option("--model", type=str, required=True, help="Model name") @click.option("--max-tokens", type=int, help="Max tokens 
to generate") -@click.option("--stop", type=str, multiple=True, help="List of strings to stop generation") +@click.option( + "--stop", type=str, multiple=True, help="List of strings to stop generation" +) @click.option("--temperature", type=float, help="Sampling temperature") @click.option("--top-p", type=int, help="Top p sampling") @click.option("--top-k", type=float, help="Top k sampling") @click.option("--repetition-penalty", type=float, help="Repetition penalty") @click.option("--presence-penalty", type=float, help="Presence penalty sampling method") -@click.option("--frequency-penalty", type=float, help="Frequency penalty sampling method") +@click.option( + "--frequency-penalty", type=float, help="Frequency penalty sampling method" +) @click.option("--min-p", type=float, help="Min p sampling") @click.option("--no-stream", is_flag=True, help="Disable streaming") @click.option("--logprobs", type=int, help="Return logprobs. Only works with --raw.") @@ -251,7 +261,9 @@ def chat( assert isinstance(response.choices, list) if raw: - click.echo(f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}") + click.echo( + f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}" + ) return should_print_header = len(response.choices) > 1 diff --git a/src/together/cli/api/completions.py b/src/together/cli/api/completions.py index b6f7e258..d5ef8b37 100644 --- a/src/together/cli/api/completions.py +++ b/src/together/cli/api/completions.py @@ -15,7 +15,9 @@ @click.argument("prompt", type=str, required=True) @click.option("--model", type=str, required=True, help="Model name") @click.option("--max-tokens", type=int, help="Max tokens to generate") -@click.option("--stop", type=str, multiple=True, help="List of strings to stop generation") +@click.option( + "--stop", type=str, multiple=True, help="List of strings to stop generation" +) @click.option("--temperature", type=float, help="Sampling temperature") @click.option("--top-p", type=int, help="Top p sampling") 
@click.option("--top-k", type=float, help="Top k sampling") @@ -102,7 +104,9 @@ def completions( assert isinstance(response.choices, list) if raw: - click.echo(f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}") + click.echo( + f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}" + ) return should_print_header = len(response.choices) > 1 diff --git a/src/together/cli/api/endpoints.py b/src/together/cli/api/endpoints.py index e0f9c783..3cf74944 100644 --- a/src/together/cli/api/endpoints.py +++ b/src/together/cli/api/endpoints.py @@ -12,7 +12,9 @@ from together.types import DedicatedEndpoint, ListEndpoint -def print_endpoint(endpoint: Union[DedicatedEndpoint, ListEndpoint], json: bool = False) -> None: +def print_endpoint( + endpoint: Union[DedicatedEndpoint, ListEndpoint], json: bool = False +) -> None: """Print endpoint details in a Docker-like format or JSON.""" if json: import json as json_lib @@ -79,7 +81,10 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: else: error_details = str(e) - if "credentials" in error_details.lower() or "authentication" in error_details.lower(): + if ( + "credentials" in error_details.lower() + or "authentication" in error_details.lower() + ): click.echo("Error: Invalid API key or authentication failed", err=True) else: click.echo(f"Error: {error_details}", err=True) @@ -259,11 +264,15 @@ def delete(client: Together, endpoint_id: str) -> None: @endpoints.command() @click.option("--json", is_flag=True, help="Print output in JSON format") @click.option( - "--type", type=click.Choice(["dedicated", "serverless"]), help="Filter by endpoint type" + "--type", + type=click.Choice(["dedicated", "serverless"]), + help="Filter by endpoint type", ) @click.pass_obj @handle_api_errors -def list(client: Together, json: bool, type: Literal["dedicated", "serverless"] | None) -> None: +def list( + client: Together, json: bool, type: Literal["dedicated", "serverless"] | None +) -> None: """List all inference endpoints 
(includes both dedicated and serverless endpoints).""" endpoints: List[ListEndpoint] = client.endpoints.list(type=type) diff --git a/src/together/cli/api/files.py b/src/together/cli/api/files.py index 668607b7..dab00a05 100644 --- a/src/together/cli/api/files.py +++ b/src/together/cli/api/files.py @@ -21,7 +21,9 @@ def files(ctx: click.Context) -> None: @click.pass_context @click.argument( "file", - type=click.Path(exists=True, file_okay=True, resolve_path=True, readable=True, dir_okay=False), + type=click.Path( + exists=True, file_okay=True, resolve_path=True, readable=True, dir_okay=False + ), required=True, ) @click.option( @@ -59,7 +61,9 @@ def list(ctx: click.Context) -> None: { "File name": "\n".join(wrap(i.filename or "", width=30)), "File ID": i.id, - "Size": convert_bytes(float(str(i.bytes))), # convert to string for mypy typing + "Size": convert_bytes( + float(str(i.bytes)) + ), # convert to string for mypy typing "Created At": convert_unix_timestamp(i.created_at or 0), "Line Count": i.line_count, } @@ -113,7 +117,9 @@ def delete(ctx: click.Context, id: str) -> None: @click.pass_context @click.argument( "file", - type=click.Path(exists=True, file_okay=True, resolve_path=True, readable=True, dir_okay=False), + type=click.Path( + exists=True, file_okay=True, resolve_path=True, readable=True, dir_okay=False + ), required=True, ) def check(ctx: click.Context, file: pathlib.Path) -> None: diff --git a/src/together/cli/api/finetune.py b/src/together/cli/api/finetune.py index 9dbf44f6..7bc02744 100644 --- a/src/together/cli/api/finetune.py +++ b/src/together/cli/api/finetune.py @@ -51,12 +51,18 @@ def fine_tuning(ctx: click.Context) -> None: @fine_tuning.command() @click.pass_context -@click.option("--training-file", type=str, required=True, help="Training file ID from Files API") +@click.option( + "--training-file", type=str, required=True, help="Training file ID from Files API" +) @click.option("--model", type=str, required=True, help="Base model name") 
@click.option("--n-epochs", type=int, default=1, help="Number of epochs to train for") -@click.option("--validation-file", type=str, default="", help="Validation file ID from Files API") +@click.option( + "--validation-file", type=str, default="", help="Validation file ID from Files API" +) @click.option("--n-evals", type=int, default=0, help="Number of evaluation loops") -@click.option("--n-checkpoints", type=int, default=1, help="Number of checkpoints to save") +@click.option( + "--n-checkpoints", type=int, default=1, help="Number of checkpoints to save" +) @click.option("--batch-size", type=INT_WITH_MAX, default="max", help="Train batch size") @click.option("--learning-rate", type=float, default=1e-5, help="Learning rate") @click.option( @@ -98,7 +104,9 @@ def fine_tuning(ctx: click.Context) -> None: default="all-linear", help="Trainable modules for LoRA adapters. For example, 'all-linear', 'q_proj,v_proj'", ) -@click.option("--suffix", type=str, default=None, help="Suffix for the fine-tuned model name") +@click.option( + "--suffix", type=str, default=None, help="Suffix for the fine-tuned model name" +) @click.option("--wandb-api-key", type=str, default=None, help="Wandb API key") @click.option("--wandb-base-url", type=str, default=None, help="Wandb base URL") @click.option("--wandb-project-name", type=str, default=None, help="Wandb project name") @@ -174,11 +182,15 @@ def create( train_on_inputs=train_on_inputs, ) - model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits(model=model) + model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits( + model=model + ) if lora: if model_limits.lora_training is None: - raise click.BadParameter(f"LoRA fine-tuning is not supported for the model `{model}`") + raise click.BadParameter( + f"LoRA fine-tuning is not supported for the model `{model}`" + ) default_values = { "lora_r": model_limits.lora_training.max_rank, @@ -195,7 +207,9 @@ def create( training_args["lora_alpha"] = 
training_args["lora_r"] * 2 else: if model_limits.full_training is None: - raise click.BadParameter(f"Full fine-tuning is not supported for the model `{model}`") + raise click.BadParameter( + f"Full fine-tuning is not supported for the model `{model}`" + ) for param in ["lora_r", "lora_dropout", "lora_alpha", "lora_trainable_modules"]: param_source = ctx.get_parameter_source(param) # type: ignore[attr-defined] @@ -226,7 +240,9 @@ def create( report_string = f"Successfully submitted a fine-tuning job {response.id}" if response.created_at is not None: - created_time = datetime.strptime(response.created_at, "%Y-%m-%dT%H:%M:%S.%f%z") + created_time = datetime.strptime( + response.created_at, "%Y-%m-%dT%H:%M:%S.%f%z" + ) # created_at reports UTC time, we use .astimezone() to convert to local time formatted_time = created_time.astimezone().strftime("%m/%d/%Y, %H:%M:%S") report_string += f" at {formatted_time}" @@ -283,7 +299,9 @@ def retrieve(ctx: click.Context, fine_tune_id: str) -> None: @fine_tuning.command() @click.pass_context @click.argument("fine_tune_id", type=str, required=True) -@click.option("--quiet", is_flag=True, help="Do not prompt for confirmation before cancelling job") +@click.option( + "--quiet", is_flag=True, help="Do not prompt for confirmation before cancelling job" +) def cancel(ctx: click.Context, fine_tune_id: str, quiet: bool = False) -> None: """Cancel fine-tuning job""" client: Together = ctx.obj diff --git a/src/together/cli/api/utils.py b/src/together/cli/api/utils.py index d4981b61..08dfe492 100644 --- a/src/together/cli/api/utils.py +++ b/src/together/cli/api/utils.py @@ -39,7 +39,9 @@ def convert( return bool(value) except ValueError: self.fail( - _("{value!r} is not a valid {type}.").format(value=value, type=self.name), + _("{value!r} is not a valid {type}.").format( + value=value, type=self.name + ), param, ctx, ) diff --git a/src/together/cli/cli.py b/src/together/cli/cli.py index 4409f648..7ae35121 100644 --- a/src/together/cli/cli.py 
+++ b/src/together/cli/cli.py @@ -31,8 +31,12 @@ def print_version(ctx: click.Context, params: Any, value: Any) -> None: help="API Key. Defaults to environment variable `TOGETHER_API_KEY`", default=os.getenv("TOGETHER_API_KEY"), ) -@click.option("--base-url", type=str, help="API Base URL. Defaults to Together AI endpoint.") -@click.option("--timeout", type=int, help=f"Request timeout. Defaults to {TIMEOUT_SECS} seconds") +@click.option( + "--base-url", type=str, help="API Base URL. Defaults to Together AI endpoint." +) +@click.option( + "--timeout", type=int, help=f"Request timeout. Defaults to {TIMEOUT_SECS} seconds" +) @click.option( "--max-retries", type=int, diff --git a/src/together/error.py b/src/together/error.py index 05718599..b5bdfd40 100644 --- a/src/together/error.py +++ b/src/together/error.py @@ -11,7 +11,9 @@ class TogetherException(Exception): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, headers: str | Dict[Any, Any] | None = None, request_id: str | None = None, http_status: int | None = None, @@ -47,7 +49,9 @@ def __repr__(self) -> str: class AuthenticationError(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -56,7 +60,9 @@ def __init__( class ResponseError(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -65,7 +71,9 @@ def __init__( class JSONError(TogetherException): def __init__( self, - message: TogetherErrorResponse | 
Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -87,7 +95,9 @@ def __init__(self, model: str | None = "model", **kwargs: Any) -> None: class RateLimitError(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -96,7 +106,9 @@ def __init__( class FileTypeError(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -105,7 +117,9 @@ def __init__( class AttributeError(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -114,7 +128,9 @@ def __init__( class Timeout(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -123,7 +139,9 @@ def __init__( class APIConnectionError(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -132,7 +150,9 @@ 
def __init__( class InvalidRequestError(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -141,7 +161,9 @@ def __init__( class APIError(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -150,7 +172,9 @@ def __init__( class ServiceUnavailableError(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) @@ -159,7 +183,9 @@ def __init__( class DownloadError(TogetherException): def __init__( self, - message: TogetherErrorResponse | Exception | str | RequestException | None = None, + message: ( + TogetherErrorResponse | Exception | str | RequestException | None + ) = None, **kwargs: Any, ) -> None: super().__init__(message=message, **kwargs) diff --git a/src/together/filemanager.py b/src/together/filemanager.py index f47787ff..ac907985 100644 --- a/src/together/filemanager.py +++ b/src/together/filemanager.py @@ -93,7 +93,8 @@ def _prepare_output( content_type = str(headers.get("content-type")) assert remote_name, ( - "No model name found in fine_tune object. " "Please specify an `output` file name." + "No model name found in fine_tune object. " + "Please specify an `output` file name." 
) if step > 0: @@ -149,7 +150,9 @@ def get_file_metadata( try: response.raise_for_status() except requests.exceptions.HTTPError as e: - raise APIError("Error fetching file metadata", http_status=response.status_code) from e + raise APIError( + "Error fetching file metadata", http_status=response.status_code + ) from e headers = response.headers @@ -177,7 +180,9 @@ def download( ) # pre-fetch remote file name and file size - file_path, file_size = self.get_file_metadata(url, output, remote_name, fetch_metadata) + file_path, file_size = self.get_file_metadata( + url, output, remote_name, fetch_metadata + ) temp_file_manager = partial( tempfile.NamedTemporaryFile, mode="wb", dir=file_path.parent, delete=False diff --git a/src/together/legacy/embeddings.py b/src/together/legacy/embeddings.py index 0257d0f6..4afef64e 100644 --- a/src/together/legacy/embeddings.py +++ b/src/together/legacy/embeddings.py @@ -22,4 +22,6 @@ def create( client = together.Together(api_key=api_key) - return client.embeddings.create(input=input, **kwargs).model_dump(exclude_none=True) + return client.embeddings.create(input=input, **kwargs).model_dump( + exclude_none=True + ) diff --git a/src/together/legacy/files.py b/src/together/legacy/files.py index 5add9d87..aa6feb55 100644 --- a/src/together/legacy/files.py +++ b/src/together/legacy/files.py @@ -54,7 +54,9 @@ def upload( client = together.Together(api_key=api_key) # disabling the check, because it was run previously - response = client.files.upload(file=file, check=False).model_dump(exclude_none=True) + response = client.files.upload(file=file, check=False).model_dump( + exclude_none=True + ) if check: response["report_dict"] = report_dict @@ -111,11 +113,15 @@ def retrieve_content( client = together.Together(api_key=api_key) - return client.files.retrieve_content(id=file_id, output=output).dict(exclude_none=True) + return client.files.retrieve_content(id=file_id, output=output).dict( + exclude_none=True + ) @classmethod @deprecated # 
type: ignore - def save_jsonl(self, data: Dict[str, str], output_path: str, append: bool = False) -> None: + def save_jsonl( + self, data: Dict[str, str], output_path: str, append: bool = False + ) -> None: """ Write list of objects to a JSON lines file. """ diff --git a/src/together/legacy/finetune.py b/src/together/legacy/finetune.py index 22ec4e28..fe53be0e 100644 --- a/src/together/legacy/finetune.py +++ b/src/together/legacy/finetune.py @@ -18,7 +18,9 @@ def create( n_checkpoints: int | None = 1, batch_size: int | None = 32, learning_rate: float = 0.00001, - suffix: str | None = None, # resulting finetuned model name will include the suffix + suffix: ( + str | None + ) = None, # resulting finetuned model name will include the suffix estimate_price: bool = False, wandb_api_key: str | None = None, confirm_inputs: bool = False, @@ -78,7 +80,9 @@ def retrieve( client = together.Together(api_key=api_key) - return client.fine_tuning.retrieve(id=fine_tune_id).model_dump(exclude_none=True) + return client.fine_tuning.retrieve(id=fine_tune_id).model_dump( + exclude_none=True + ) @classmethod @deprecated # type: ignore @@ -112,7 +116,9 @@ def list_events( client = together.Together(api_key=api_key) - return client.fine_tuning.list_events(id=fine_tune_id).model_dump(exclude_none=True) + return client.fine_tuning.list_events(id=fine_tune_id).model_dump( + exclude_none=True + ) @classmethod @deprecated # type: ignore diff --git a/src/together/legacy/images.py b/src/together/legacy/images.py index 1d94e3b7..537023d8 100644 --- a/src/together/legacy/images.py +++ b/src/together/legacy/images.py @@ -22,4 +22,6 @@ def create( client = together.Together(api_key=api_key) - return client.images.generate(prompt=prompt, **kwargs).model_dump(exclude_none=True) + return client.images.generate(prompt=prompt, **kwargs).model_dump( + exclude_none=True + ) diff --git a/src/together/resources/endpoints.py b/src/together/resources/endpoints.py index 47ce3012..06092360 100644 --- 
a/src/together/resources/endpoints.py +++ b/src/together/resources/endpoints.py @@ -75,7 +75,9 @@ async def _create() -> DedicatedEndpoint: request = CreateEndpointRequest( model=model, hardware=hardware, - autoscaling=Autoscaling(min_replicas=min_replicas, max_replicas=max_replicas), + autoscaling=Autoscaling( + min_replicas=min_replicas, max_replicas=max_replicas + ), display_name=display_name, disable_prompt_cache=disable_prompt_cache, disable_speculative_decoding=disable_speculative_decoding, @@ -85,7 +87,9 @@ async def _create() -> DedicatedEndpoint: return self._loop.run_until_complete(_create()) - def list(self, type: Literal["dedicated", "serverless"] | None = None) -> List[ListEndpoint]: + def list( + self, type: Literal["dedicated", "serverless"] | None = None + ) -> List[ListEndpoint]: """ List all endpoints. @@ -218,7 +222,9 @@ async def create( request = CreateEndpointRequest( model=model, hardware=hardware, - autoscaling=Autoscaling(min_replicas=min_replicas, max_replicas=max_replicas), + autoscaling=Autoscaling( + min_replicas=min_replicas, max_replicas=max_replicas + ), display_name=display_name, disable_prompt_cache=disable_prompt_cache, disable_speculative_decoding=disable_speculative_decoding, diff --git a/src/together/resources/files.py b/src/together/resources/files.py index 341ac1aa..14500b24 100644 --- a/src/together/resources/files.py +++ b/src/together/resources/files.py @@ -83,7 +83,9 @@ def retrieve(self, id: str) -> FileResponse: return FileResponse(**response.data) - def retrieve_content(self, id: str, *, output: Path | str | None = None) -> FileObject: + def retrieve_content( + self, id: str, *, output: Path | str | None = None + ) -> FileObject: download_manager = DownloadManager(self._client) if isinstance(output, str): @@ -161,7 +163,9 @@ async def retrieve(self, id: str) -> FileResponse: return FileResponse(**response.data) - async def retrieve_content(self, id: str, *, output: Path | str | None = None) -> FileObject: + async def 
retrieve_content( + self, id: str, *, output: Path | str | None = None + ) -> FileObject: raise NotImplementedError() async def delete(self, id: str) -> FileDeleteResponse: diff --git a/src/together/resources/finetune.py b/src/together/resources/finetune.py index b1fdd6ea..b58cdae2 100644 --- a/src/together/resources/finetune.py +++ b/src/together/resources/finetune.py @@ -75,13 +75,17 @@ def createFinetuneRequest( ) batch_size = ( - batch_size if batch_size != "max" else model_limits.lora_training.max_batch_size + batch_size + if batch_size != "max" + else model_limits.lora_training.max_batch_size ) else: if model_limits.full_training is None: raise ValueError("Full training is not supported for the selected model.") batch_size = ( - batch_size if batch_size != "max" else model_limits.full_training.max_batch_size + batch_size + if batch_size != "max" + else model_limits.full_training.max_batch_size ) if warmup_ratio > 1 or warmup_ratio < 0: @@ -402,7 +406,9 @@ def download( if isinstance(ft_job.training_type, FullTrainingType): if checkpoint_type != DownloadCheckpointType.DEFAULT: - raise ValueError("Only DEFAULT checkpoint type is allowed for FullTrainingType") + raise ValueError( + "Only DEFAULT checkpoint type is allowed for FullTrainingType" + ) url += "&checkpoint=modelOutputPath" elif isinstance(ft_job.training_type, LoRATrainingType): if checkpoint_type == DownloadCheckpointType.DEFAULT: @@ -413,7 +419,9 @@ def download( elif checkpoint_type == DownloadCheckpointType.ADAPTER: url += f"&checkpoint={DownloadCheckpointType.ADAPTER.value}" else: - raise ValueError(f"Invalid checkpoint type for LoRATrainingType: {checkpoint_type}") + raise ValueError( + f"Invalid checkpoint type for LoRATrainingType: {checkpoint_type}" + ) remote_name = ft_job.output_name diff --git a/src/together/types/__init__.py b/src/together/types/__init__.py index e94c6778..c3100cd1 100644 --- a/src/together/types/__init__.py +++ b/src/together/types/__init__.py @@ -14,7 +14,11 @@ 
ChatCompletionResponse, ) from together.types.common import TogetherRequest -from together.types.completions import CompletionChunk, CompletionRequest, CompletionResponse +from together.types.completions import ( + CompletionChunk, + CompletionRequest, + CompletionResponse, +) from together.types.embeddings import EmbeddingRequest, EmbeddingResponse from together.types.endpoints import Autoscaling, DedicatedEndpoint, ListEndpoint from together.types.files import ( diff --git a/src/together/utils/api_helpers.py b/src/together/utils/api_helpers.py index d15eb33d..2ec9d3f9 100644 --- a/src/together/utils/api_helpers.py +++ b/src/together/utils/api_helpers.py @@ -36,7 +36,9 @@ def get_headers( user_agent = "Together/v1 PythonBindings/%s" % (together.version,) - uname_without_node = " ".join(v for k, v in platform.uname()._asdict().items() if k != "node") + uname_without_node = " ".join( + v for k, v in platform.uname()._asdict().items() if k != "node" + ) ua = { "bindings_version": together.version, "httplib": "requests", diff --git a/src/together/utils/files.py b/src/together/utils/files.py index 88839a70..cc39fca0 100644 --- a/src/together/utils/files.py +++ b/src/together/utils/files.py @@ -145,7 +145,10 @@ def _check_jsonl(file: Path) -> Dict[str, Any]: # Check that there are no extra columns for column in json_line: - if column not in JSONL_REQUIRED_COLUMNS_MAP[possible_format]: + if ( + column + not in JSONL_REQUIRED_COLUMNS_MAP[possible_format] + ): raise InvalidFileFormatError( message=f'Found extra column "{column}" in the line {idx + 1}.', line_number=idx + 1, @@ -163,7 +166,9 @@ def _check_jsonl(file: Path) -> Dict[str, Any]: ) if current_format == DatasetFormat.CONVERSATION: - message_column = JSONL_REQUIRED_COLUMNS_MAP[DatasetFormat.CONVERSATION][0] + message_column = JSONL_REQUIRED_COLUMNS_MAP[ + DatasetFormat.CONVERSATION + ][0] if not isinstance(json_line[message_column], list): raise InvalidFileFormatError( message=f"Invalid format on line {idx + 1} of 
the input file. " @@ -275,7 +280,8 @@ def _check_jsonl(file: Path) -> Dict[str, Any]: report_dict["load_json"] = False if idx < 0: report_dict["message"] = ( - "Unable to decode file. " "File may be empty or in an unsupported format. " + "Unable to decode file. " + "File may be empty or in an unsupported format. " ) else: report_dict["message"] = ( diff --git a/src/together/version.py b/src/together/version.py index 133c5573..4eb61bf1 100644 --- a/src/together/version.py +++ b/src/together/version.py @@ -1,4 +1,6 @@ import importlib.metadata -VERSION = importlib.metadata.version("together") # gets version number from pyproject.toml +VERSION = importlib.metadata.version( + "together" +) # gets version number from pyproject.toml diff --git a/tests/unit/test_async_client.py b/tests/unit/test_async_client.py index a65e2c24..0b11b39d 100644 --- a/tests/unit/test_async_client.py +++ b/tests/unit/test_async_client.py @@ -59,7 +59,9 @@ def test_init_with_supplied_headers(self): supplied_headers = {"header1": "value1", "header2": "value2"} - async_together = AsyncTogether(api_key="fake_api_key", supplied_headers=supplied_headers) + async_together = AsyncTogether( + api_key="fake_api_key", supplied_headers=supplied_headers + ) assert async_together.client.supplied_headers == supplied_headers @@ -81,7 +83,9 @@ def test_chat_initialized(self, async_together_instance): assert isinstance(async_together_instance.chat._client, TogetherClient) - assert isinstance(async_together_instance.chat.completions._client, TogetherClient) + assert isinstance( + async_together_instance.chat.completions._client, TogetherClient + ) def test_embeddings_initialized(self, async_together_instance): """ diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index c57cbe4d..f8bdcbe6 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -60,7 +60,9 @@ def test_init_with_supplied_headers(self): supplied_headers = {"header1": "value1", "header2": "value2"} - sync_together 
= Together(api_key="fake_api_key", supplied_headers=supplied_headers) + sync_together = Together( + api_key="fake_api_key", supplied_headers=supplied_headers + ) assert sync_together.client.supplied_headers == supplied_headers @@ -82,7 +84,9 @@ def test_chat_initialized(self, sync_together_instance): assert isinstance(sync_together_instance.chat._client, TogetherClient) - assert isinstance(sync_together_instance.chat.completions._client, TogetherClient) + assert isinstance( + sync_together_instance.chat.completions._client, TogetherClient + ) def test_embeddings_initialized(self, sync_together_instance): """ diff --git a/tests/unit/test_files_checks.py b/tests/unit/test_files_checks.py index 903effdd..37c698d2 100644 --- a/tests/unit/test_files_checks.py +++ b/tests/unit/test_files_checks.py @@ -171,7 +171,10 @@ def test_check_jsonl_missing_required_field(tmp_path: Path): report = check_file(file) assert not report["is_check_passed"] - assert "Error parsing file. Could not detect a format for the line 2" in report["message"] + assert ( + "Error parsing file. Could not detect a format for the line 2" + in report["message"] + ) def test_check_jsonl_inconsistent_dataset_format(tmp_path: Path): @@ -187,7 +190,10 @@ def test_check_jsonl_inconsistent_dataset_format(tmp_path: Path): report = check_file(file) assert not report["is_check_passed"] - assert "All samples in the dataset must have the same dataset format" in report["message"] + assert ( + "All samples in the dataset must have the same dataset format" + in report["message"] + ) def test_check_jsonl_invalid_role(tmp_path: Path): @@ -269,7 +275,10 @@ def test_check_jsonl_wrong_turn_type(tmp_path: Path): report = check_file(file) assert not report["is_check_passed"] - assert "Invalid format on line 1 of the input file. Expected a dictionary" in report["message"] + assert ( + "Invalid format on line 1 of the input file. 
Expected a dictionary" + in report["message"] + ) def test_check_jsonl_extra_column(tmp_path: Path): @@ -291,4 +300,6 @@ def test_check_jsonl_empty_messages(tmp_path: Path): report = check_file(file) assert not report["is_check_passed"] - assert "Expected a non-empty list of messages. Found empty list" in report["message"] + assert ( + "Expected a non-empty list of messages. Found empty list" in report["message"] + ) From a3c0637869dcd4b9a2f4ed9cc57c00329f701e82 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 13:48:16 +0000 Subject: [PATCH 07/29] fix tests --- poetry.lock | 299 ++++++++++++++++++++++++------------------------- pyproject.toml | 1 + 2 files changed, 150 insertions(+), 150 deletions(-) diff --git a/poetry.lock b/poetry.lock index f1edee10..5de741d1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,98 +2,103 @@ [[package]] name = "aiohappyeyeballs" -version = "2.4.4" +version = "2.4.6" description = "Happy Eyeballs for asyncio" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8"}, - {file = "aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745"}, + {file = "aiohappyeyeballs-2.4.6-py3-none-any.whl", hash = "sha256:147ec992cf873d74f5062644332c539fcd42956dc69453fe5204195e560517e1"}, + {file = "aiohappyeyeballs-2.4.6.tar.gz", hash = "sha256:9b05052f9042985d32ecbe4b59a77ae19c006a78f1344d7fdad69d28ded3d0b0"}, ] [[package]] name = "aiohttp" -version = "3.11.11" +version = "3.11.12" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" files = [ - {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a60804bff28662cbcf340a4d61598891f12eea3a66af48ecfdc975ceec21e3c8"}, - {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:4b4fa1cb5f270fb3eab079536b764ad740bb749ce69a94d4ec30ceee1b5940d5"}, - {file = "aiohttp-3.11.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:731468f555656767cda219ab42e033355fe48c85fbe3ba83a349631541715ba2"}, - {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb23d8bb86282b342481cad4370ea0853a39e4a32a0042bb52ca6bdde132df43"}, - {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f047569d655f81cb70ea5be942ee5d4421b6219c3f05d131f64088c73bb0917f"}, - {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd7659baae9ccf94ae5fe8bfaa2c7bc2e94d24611528395ce88d009107e00c6d"}, - {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af01e42ad87ae24932138f154105e88da13ce7d202a6de93fafdafb2883a00ef"}, - {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5854be2f3e5a729800bac57a8d76af464e160f19676ab6aea74bde18ad19d438"}, - {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6526e5fb4e14f4bbf30411216780c9967c20c5a55f2f51d3abd6de68320cc2f3"}, - {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:85992ee30a31835fc482468637b3e5bd085fa8fe9392ba0bdcbdc1ef5e9e3c55"}, - {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:88a12ad8ccf325a8a5ed80e6d7c3bdc247d66175afedbe104ee2aaca72960d8e"}, - {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0a6d3fbf2232e3a08c41eca81ae4f1dff3d8f1a30bae415ebe0af2d2458b8a33"}, - {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84a585799c58b795573c7fa9b84c455adf3e1d72f19a2bf498b54a95ae0d194c"}, - {file = "aiohttp-3.11.11-cp310-cp310-win32.whl", hash = "sha256:bfde76a8f430cf5c5584553adf9926534352251d379dcb266ad2b93c54a29745"}, - {file = 
"aiohttp-3.11.11-cp310-cp310-win_amd64.whl", hash = "sha256:0fd82b8e9c383af11d2b26f27a478640b6b83d669440c0a71481f7c865a51da9"}, - {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ba74ec819177af1ef7f59063c6d35a214a8fde6f987f7661f4f0eecc468a8f76"}, - {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4af57160800b7a815f3fe0eba9b46bf28aafc195555f1824555fa2cfab6c1538"}, - {file = "aiohttp-3.11.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffa336210cf9cd8ed117011085817d00abe4c08f99968deef0013ea283547204"}, - {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81b8fe282183e4a3c7a1b72f5ade1094ed1c6345a8f153506d114af5bf8accd9"}, - {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af41686ccec6a0f2bdc66686dc0f403c41ac2089f80e2214a0f82d001052c03"}, - {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70d1f9dde0e5dd9e292a6d4d00058737052b01f3532f69c0c65818dac26dc287"}, - {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:249cc6912405917344192b9f9ea5cd5b139d49e0d2f5c7f70bdfaf6b4dbf3a2e"}, - {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0eb98d90b6690827dcc84c246811feeb4e1eea683c0eac6caed7549be9c84665"}, - {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec82bf1fda6cecce7f7b915f9196601a1bd1a3079796b76d16ae4cce6d0ef89b"}, - {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9fd46ce0845cfe28f108888b3ab17abff84ff695e01e73657eec3f96d72eef34"}, - {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:bd176afcf8f5d2aed50c3647d4925d0db0579d96f75a31e77cbaf67d8a87742d"}, - {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:ec2aa89305006fba9ffb98970db6c8221541be7bee4c1d027421d6f6df7d1ce2"}, - {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:92cde43018a2e17d48bb09c79e4d4cb0e236de5063ce897a5e40ac7cb4878773"}, - {file = "aiohttp-3.11.11-cp311-cp311-win32.whl", hash = "sha256:aba807f9569455cba566882c8938f1a549f205ee43c27b126e5450dc9f83cc62"}, - {file = "aiohttp-3.11.11-cp311-cp311-win_amd64.whl", hash = "sha256:ae545f31489548c87b0cced5755cfe5a5308d00407000e72c4fa30b19c3220ac"}, - {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e595c591a48bbc295ebf47cb91aebf9bd32f3ff76749ecf282ea7f9f6bb73886"}, - {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3ea1b59dc06396b0b424740a10a0a63974c725b1c64736ff788a3689d36c02d2"}, - {file = "aiohttp-3.11.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8811f3f098a78ffa16e0ea36dffd577eb031aea797cbdba81be039a4169e242c"}, - {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7227b87a355ce1f4bf83bfae4399b1f5bb42e0259cb9405824bd03d2f4336a"}, - {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d40f9da8cabbf295d3a9dae1295c69975b86d941bc20f0a087f0477fa0a66231"}, - {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffb3dc385f6bb1568aa974fe65da84723210e5d9707e360e9ecb51f59406cd2e"}, - {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8f5f7515f3552d899c61202d99dcb17d6e3b0de777900405611cd747cecd1b8"}, - {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3499c7ffbfd9c6a3d8d6a2b01c26639da7e43d47c7b4f788016226b1e711caa8"}, - {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8e2bf8029dbf0810c7bfbc3e594b51c4cc9101fbffb583a3923aea184724203c"}, - {file = 
"aiohttp-3.11.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b6212a60e5c482ef90f2d788835387070a88d52cf6241d3916733c9176d39eab"}, - {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d119fafe7b634dbfa25a8c597718e69a930e4847f0b88e172744be24515140da"}, - {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:6fba278063559acc730abf49845d0e9a9e1ba74f85f0ee6efd5803f08b285853"}, - {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:92fc484e34b733704ad77210c7957679c5c3877bd1e6b6d74b185e9320cc716e"}, - {file = "aiohttp-3.11.11-cp312-cp312-win32.whl", hash = "sha256:9f5b3c1ed63c8fa937a920b6c1bec78b74ee09593b3f5b979ab2ae5ef60d7600"}, - {file = "aiohttp-3.11.11-cp312-cp312-win_amd64.whl", hash = "sha256:1e69966ea6ef0c14ee53ef7a3d68b564cc408121ea56c0caa2dc918c1b2f553d"}, - {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:541d823548ab69d13d23730a06f97460f4238ad2e5ed966aaf850d7c369782d9"}, - {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:929f3ed33743a49ab127c58c3e0a827de0664bfcda566108989a14068f820194"}, - {file = "aiohttp-3.11.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0882c2820fd0132240edbb4a51eb8ceb6eef8181db9ad5291ab3332e0d71df5f"}, - {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b63de12e44935d5aca7ed7ed98a255a11e5cb47f83a9fded7a5e41c40277d104"}, - {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa54f8ef31d23c506910c21163f22b124facb573bff73930735cf9fe38bf7dff"}, - {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a344d5dc18074e3872777b62f5f7d584ae4344cd6006c17ba12103759d407af3"}, - {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7fb429ab1aafa1f48578eb315ca45bd46e9c37de11fe45c7f5f4138091e2f1"}, - {file = 
"aiohttp-3.11.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c341c7d868750e31961d6d8e60ff040fb9d3d3a46d77fd85e1ab8e76c3e9a5c4"}, - {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed9ee95614a71e87f1a70bc81603f6c6760128b140bc4030abe6abaa988f1c3d"}, - {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:de8d38f1c2810fa2a4f1d995a2e9c70bb8737b18da04ac2afbf3971f65781d87"}, - {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a9b7371665d4f00deb8f32208c7c5e652059b0fda41cf6dbcac6114a041f1cc2"}, - {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:620598717fce1b3bd14dd09947ea53e1ad510317c85dda2c9c65b622edc96b12"}, - {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bf8d9bfee991d8acc72d060d53860f356e07a50f0e0d09a8dfedea1c554dd0d5"}, - {file = "aiohttp-3.11.11-cp313-cp313-win32.whl", hash = "sha256:9d73ee3725b7a737ad86c2eac5c57a4a97793d9f442599bea5ec67ac9f4bdc3d"}, - {file = "aiohttp-3.11.11-cp313-cp313-win_amd64.whl", hash = "sha256:c7a06301c2fb096bdb0bd25fe2011531c1453b9f2c163c8031600ec73af1cc99"}, - {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3e23419d832d969f659c208557de4a123e30a10d26e1e14b73431d3c13444c2e"}, - {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:21fef42317cf02e05d3b09c028712e1d73a9606f02467fd803f7c1f39cc59add"}, - {file = "aiohttp-3.11.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1f21bb8d0235fc10c09ce1d11ffbd40fc50d3f08a89e4cf3a0c503dc2562247a"}, - {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1642eceeaa5ab6c9b6dfeaaa626ae314d808188ab23ae196a34c9d97efb68350"}, - {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2170816e34e10f2fd120f603e951630f8a112e1be3b60963a1f159f5699059a6"}, - {file = 
"aiohttp-3.11.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8be8508d110d93061197fd2d6a74f7401f73b6d12f8822bbcd6d74f2b55d71b1"}, - {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eed954b161e6b9b65f6be446ed448ed3921763cc432053ceb606f89d793927e"}, - {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6c9af134da4bc9b3bd3e6a70072509f295d10ee60c697826225b60b9959acdd"}, - {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:44167fc6a763d534a6908bdb2592269b4bf30a03239bcb1654781adf5e49caf1"}, - {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:479b8c6ebd12aedfe64563b85920525d05d394b85f166b7873c8bde6da612f9c"}, - {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:10b4ff0ad793d98605958089fabfa350e8e62bd5d40aa65cdc69d6785859f94e"}, - {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b540bd67cfb54e6f0865ceccd9979687210d7ed1a1cc8c01f8e67e2f1e883d28"}, - {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dac54e8ce2ed83b1f6b1a54005c87dfed139cf3f777fdc8afc76e7841101226"}, - {file = "aiohttp-3.11.11-cp39-cp39-win32.whl", hash = "sha256:568c1236b2fde93b7720f95a890741854c1200fba4a3471ff48b2934d2d93fd3"}, - {file = "aiohttp-3.11.11-cp39-cp39-win_amd64.whl", hash = "sha256:943a8b052e54dfd6439fd7989f67fc6a7f2138d0a2cf0a7de5f18aa4fe7eb3b1"}, - {file = "aiohttp-3.11.11.tar.gz", hash = "sha256:bb49c7f1e6ebf3821a42d81d494f538107610c3a705987f53068546b0e90303e"}, + {file = "aiohttp-3.11.12-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:aa8a8caca81c0a3e765f19c6953416c58e2f4cc1b84829af01dd1c771bb2f91f"}, + {file = "aiohttp-3.11.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:84ede78acde96ca57f6cf8ccb8a13fbaf569f6011b9a52f870c662d4dc8cd854"}, + {file = 
"aiohttp-3.11.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:584096938a001378484aa4ee54e05dc79c7b9dd933e271c744a97b3b6f644957"}, + {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:392432a2dde22b86f70dd4a0e9671a349446c93965f261dbaecfaf28813e5c42"}, + {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:88d385b8e7f3a870146bf5ea31786ef7463e99eb59e31db56e2315535d811f55"}, + {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b10a47e5390c4b30a0d58ee12581003be52eedd506862ab7f97da7a66805befb"}, + {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b5263dcede17b6b0c41ef0c3ccce847d82a7da98709e75cf7efde3e9e3b5cae"}, + {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50c5c7b8aa5443304c55c262c5693b108c35a3b61ef961f1e782dd52a2f559c7"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d1c031a7572f62f66f1257db37ddab4cb98bfaf9b9434a3b4840bf3560f5e788"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:7e44eba534381dd2687be50cbd5f2daded21575242ecfdaf86bbeecbc38dae8e"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:145a73850926018ec1681e734cedcf2716d6a8697d90da11284043b745c286d5"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:2c311e2f63e42c1bf86361d11e2c4a59f25d9e7aabdbdf53dc38b885c5435cdb"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ea756b5a7bac046d202a9a3889b9a92219f885481d78cd318db85b15cc0b7bcf"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:526c900397f3bbc2db9cb360ce9c35134c908961cdd0ac25b1ae6ffcaa2507ff"}, + {file = "aiohttp-3.11.12-cp310-cp310-win32.whl", hash = 
"sha256:b8d3bb96c147b39c02d3db086899679f31958c5d81c494ef0fc9ef5bb1359b3d"}, + {file = "aiohttp-3.11.12-cp310-cp310-win_amd64.whl", hash = "sha256:7fe3d65279bfbee8de0fb4f8c17fc4e893eed2dba21b2f680e930cc2b09075c5"}, + {file = "aiohttp-3.11.12-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:87a2e00bf17da098d90d4145375f1d985a81605267e7f9377ff94e55c5d769eb"}, + {file = "aiohttp-3.11.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b34508f1cd928ce915ed09682d11307ba4b37d0708d1f28e5774c07a7674cac9"}, + {file = "aiohttp-3.11.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:936d8a4f0f7081327014742cd51d320296b56aa6d324461a13724ab05f4b2933"}, + {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de1378f72def7dfb5dbd73d86c19eda0ea7b0a6873910cc37d57e80f10d64e1"}, + {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9d45dbb3aaec05cf01525ee1a7ac72de46a8c425cb75c003acd29f76b1ffe94"}, + {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:930ffa1925393381e1e0a9b82137fa7b34c92a019b521cf9f41263976666a0d6"}, + {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8340def6737118f5429a5df4e88f440746b791f8f1c4ce4ad8a595f42c980bd5"}, + {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4016e383f91f2814e48ed61e6bda7d24c4d7f2402c75dd28f7e1027ae44ea204"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3c0600bcc1adfaaac321422d615939ef300df81e165f6522ad096b73439c0f58"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:0450ada317a65383b7cce9576096150fdb97396dcfe559109b403c7242faffef"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:850ff6155371fd802a280f8d369d4e15d69434651b844bde566ce97ee2277420"}, + {file 
= "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8fd12d0f989c6099e7b0f30dc6e0d1e05499f3337461f0b2b0dadea6c64b89df"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:76719dd521c20a58a6c256d058547b3a9595d1d885b830013366e27011ffe804"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:97fe431f2ed646a3b56142fc81d238abcbaff08548d6912acb0b19a0cadc146b"}, + {file = "aiohttp-3.11.12-cp311-cp311-win32.whl", hash = "sha256:e10c440d142fa8b32cfdb194caf60ceeceb3e49807072e0dc3a8887ea80e8c16"}, + {file = "aiohttp-3.11.12-cp311-cp311-win_amd64.whl", hash = "sha256:246067ba0cf5560cf42e775069c5d80a8989d14a7ded21af529a4e10e3e0f0e6"}, + {file = "aiohttp-3.11.12-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e392804a38353900c3fd8b7cacbea5132888f7129f8e241915e90b85f00e3250"}, + {file = "aiohttp-3.11.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8fa1510b96c08aaad49303ab11f8803787c99222288f310a62f493faf883ede1"}, + {file = "aiohttp-3.11.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dc065a4285307607df3f3686363e7f8bdd0d8ab35f12226362a847731516e42c"}, + {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddb31f8474695cd61fc9455c644fc1606c164b93bff2490390d90464b4655df"}, + {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9dec0000d2d8621d8015c293e24589d46fa218637d820894cb7356c77eca3259"}, + {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3552fe98e90fdf5918c04769f338a87fa4f00f3b28830ea9b78b1bdc6140e0d"}, + {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dfe7f984f28a8ae94ff3a7953cd9678550dbd2a1f9bda5dd9c5ae627744c78e"}, + {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:a481a574af914b6e84624412666cbfbe531a05667ca197804ecc19c97b8ab1b0"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1987770fb4887560363b0e1a9b75aa303e447433c41284d3af2840a2f226d6e0"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:a4ac6a0f0f6402854adca4e3259a623f5c82ec3f0c049374133bcb243132baf9"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c96a43822f1f9f69cc5c3706af33239489a6294be486a0447fb71380070d4d5f"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a5e69046f83c0d3cb8f0d5bd9b8838271b1bc898e01562a04398e160953e8eb9"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:68d54234c8d76d8ef74744f9f9fc6324f1508129e23da8883771cdbb5818cbef"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c9fd9dcf9c91affe71654ef77426f5cf8489305e1c66ed4816f5a21874b094b9"}, + {file = "aiohttp-3.11.12-cp312-cp312-win32.whl", hash = "sha256:0ed49efcd0dc1611378beadbd97beb5d9ca8fe48579fc04a6ed0844072261b6a"}, + {file = "aiohttp-3.11.12-cp312-cp312-win_amd64.whl", hash = "sha256:54775858c7f2f214476773ce785a19ee81d1294a6bedc5cc17225355aab74802"}, + {file = "aiohttp-3.11.12-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:413ad794dccb19453e2b97c2375f2ca3cdf34dc50d18cc2693bd5aed7d16f4b9"}, + {file = "aiohttp-3.11.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4a93d28ed4b4b39e6f46fd240896c29b686b75e39cc6992692e3922ff6982b4c"}, + {file = "aiohttp-3.11.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d589264dbba3b16e8951b6f145d1e6b883094075283dafcab4cdd564a9e353a0"}, + {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5148ca8955affdfeb864aca158ecae11030e952b25b3ae15d4e2b5ba299bad2"}, + {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:525410e0790aab036492eeea913858989c4cb070ff373ec3bc322d700bdf47c1"}, + {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bd8695be2c80b665ae3f05cb584093a1e59c35ecb7d794d1edd96e8cc9201d7"}, + {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0203433121484b32646a5f5ea93ae86f3d9559d7243f07e8c0eab5ff8e3f70e"}, + {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40cd36749a1035c34ba8d8aaf221b91ca3d111532e5ccb5fa8c3703ab1b967ed"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a7442662afebbf7b4c6d28cb7aab9e9ce3a5df055fc4116cc7228192ad6cb484"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:8a2fb742ef378284a50766e985804bd6adb5adb5aa781100b09befdbfa757b65"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2cee3b117a8d13ab98b38d5b6bdcd040cfb4181068d05ce0c474ec9db5f3c5bb"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f6a19bcab7fbd8f8649d6595624856635159a6527861b9cdc3447af288a00c00"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e4cecdb52aaa9994fbed6b81d4568427b6002f0a91c322697a4bfcc2b2363f5a"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:30f546358dfa0953db92ba620101fefc81574f87b2346556b90b5f3ef16e55ce"}, + {file = "aiohttp-3.11.12-cp313-cp313-win32.whl", hash = "sha256:ce1bb21fc7d753b5f8a5d5a4bae99566386b15e716ebdb410154c16c91494d7f"}, + {file = "aiohttp-3.11.12-cp313-cp313-win_amd64.whl", hash = "sha256:f7914ab70d2ee8ab91c13e5402122edbc77821c66d2758abb53aabe87f013287"}, + {file = "aiohttp-3.11.12-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c3623053b85b4296cd3925eeb725e386644fd5bc67250b3bb08b0f144803e7b"}, + {file = "aiohttp-3.11.12-cp39-cp39-macosx_10_9_x86_64.whl", hash 
= "sha256:67453e603cea8e85ed566b2700efa1f6916aefbc0c9fcb2e86aaffc08ec38e78"}, + {file = "aiohttp-3.11.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6130459189e61baac5a88c10019b21e1f0c6d00ebc770e9ce269475650ff7f73"}, + {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9060addfa4ff753b09392efe41e6af06ea5dd257829199747b9f15bfad819460"}, + {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34245498eeb9ae54c687a07ad7f160053911b5745e186afe2d0c0f2898a1ab8a"}, + {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8dc0fba9a74b471c45ca1a3cb6e6913ebfae416678d90529d188886278e7f3f6"}, + {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a478aa11b328983c4444dacb947d4513cb371cd323f3845e53caeda6be5589d5"}, + {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c160a04283c8c6f55b5bf6d4cad59bb9c5b9c9cd08903841b25f1f7109ef1259"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:edb69b9589324bdc40961cdf0657815df674f1743a8d5ad9ab56a99e4833cfdd"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4ee84c2a22a809c4f868153b178fe59e71423e1f3d6a8cd416134bb231fbf6d3"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:bf4480a5438f80e0f1539e15a7eb8b5f97a26fe087e9828e2c0ec2be119a9f72"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:e6b2732ef3bafc759f653a98881b5b9cdef0716d98f013d376ee8dfd7285abf1"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f752e80606b132140883bb262a457c475d219d7163d996dc9072434ffb0784c4"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ab3247d58b393bda5b1c8f31c9edece7162fc13265334217785518dd770792b8"}, + {file = 
"aiohttp-3.11.12-cp39-cp39-win32.whl", hash = "sha256:0d5176f310a7fe6f65608213cc74f4228e4f4ce9fd10bcb2bb6da8fc66991462"}, + {file = "aiohttp-3.11.12-cp39-cp39-win_amd64.whl", hash = "sha256:74bd573dde27e58c760d9ca8615c41a57e719bff315c9adb6f2a4281a28e8798"}, + {file = "aiohttp-3.11.12.tar.gz", hash = "sha256:7603ca26d75b1b86160ce1bbe2787a0b706e592af5b2504e12caa88a217767b0"}, ] [package.dependencies] @@ -695,13 +700,13 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t [[package]] name = "identify" -version = "2.6.6" +version = "2.6.7" description = "File identification library for Python" optional = false python-versions = ">=3.9" files = [ - {file = "identify-2.6.6-py2.py3-none-any.whl", hash = "sha256:cbd1810bce79f8b671ecb20f53ee0ae8e86ae84b557de31d89709dc2a48ba881"}, - {file = "identify-2.6.6.tar.gz", hash = "sha256:7bec12768ed44ea4761efb47806f0a41f86e7c0a5fdf5950d4648c90eca7e251"}, + {file = "identify-2.6.7-py2.py3-none-any.whl", hash = "sha256:155931cb617a401807b09ecec6635d6c692d180090a1cedca8ef7d58ba5b6aa0"}, + {file = "identify-2.6.7.tar.gz", hash = "sha256:3fa266b42eba321ee0b2bb0936a6a6b9e36a1351cbb69055b3082f4193035684"}, ] [package.extras] @@ -897,49 +902,43 @@ dill = ">=0.3.8" [[package]] name = "mypy" -version = "1.14.1" +version = "1.15.0" description = "Optional static typing for Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, - {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, - {file = 
"mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, - {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, - {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, - {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, - {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, - {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, - {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, - {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, - {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, - {file = 
"mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, - {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, - {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, - {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, - {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, - {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, + {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, + {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, + {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, + {file = 
"mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, + {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, + {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, + {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, + {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, + {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, + {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, + {file = "mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, + {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, + {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, + {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, + {file = 
"mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, + {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, + {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, + {file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, ] [package.dependencies] @@ -1933,29 +1932,29 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "ruff" -version = "0.9.4" +version = "0.9.6" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.9.4-py3-none-linux_armv6l.whl", hash = "sha256:64e73d25b954f71ff100bb70f39f1ee09e880728efb4250c632ceed4e4cdf706"}, - {file = "ruff-0.9.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6ce6743ed64d9afab4fafeaea70d3631b4d4b28b592db21a5c2d1f0ef52934bf"}, - {file = "ruff-0.9.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:54499fb08408e32b57360f6f9de7157a5fec24ad79cb3f42ef2c3f3f728dfe2b"}, - {file = "ruff-0.9.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37c892540108314a6f01f105040b5106aeb829fa5fb0561d2dcaf71485021137"}, - {file = "ruff-0.9.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de9edf2ce4b9ddf43fd93e20ef635a900e25f622f87ed6e3047a664d0e8f810e"}, - {file = "ruff-0.9.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87c90c32357c74f11deb7fbb065126d91771b207bf9bfaaee01277ca59b574ec"}, - {file = "ruff-0.9.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:56acd6c694da3695a7461cc55775f3a409c3815ac467279dfa126061d84b314b"}, - {file = "ruff-0.9.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:e0c93e7d47ed951b9394cf352d6695b31498e68fd5782d6cbc282425655f687a"}, - {file = "ruff-0.9.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d4c8772670aecf037d1bf7a07c39106574d143b26cfe5ed1787d2f31e800214"}, - {file = "ruff-0.9.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfc5f1d7afeda8d5d37660eeca6d389b142d7f2b5a1ab659d9214ebd0e025231"}, - {file = "ruff-0.9.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:faa935fc00ae854d8b638c16a5f1ce881bc3f67446957dd6f2af440a5fc8526b"}, - {file = "ruff-0.9.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a6c634fc6f5a0ceae1ab3e13c58183978185d131a29c425e4eaa9f40afe1e6d6"}, - {file = "ruff-0.9.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:433dedf6ddfdec7f1ac7575ec1eb9844fa60c4c8c2f8887a070672b8d353d34c"}, - {file = "ruff-0.9.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d612dbd0f3a919a8cc1d12037168bfa536862066808960e0cc901404b77968f0"}, - {file = "ruff-0.9.4-py3-none-win32.whl", hash = "sha256:db1192ddda2200671f9ef61d9597fcef89d934f5d1705e571a93a67fb13a4402"}, - {file = "ruff-0.9.4-py3-none-win_amd64.whl", hash = "sha256:05bebf4cdbe3ef75430d26c375773978950bbf4ee3c95ccb5448940dc092408e"}, - {file = "ruff-0.9.4-py3-none-win_arm64.whl", hash = "sha256:585792f1e81509e38ac5123492f8875fbc36f3ede8185af0a26df348e5154f41"}, - {file = "ruff-0.9.4.tar.gz", hash = "sha256:6907ee3529244bb0ed066683e075f09285b38dd5b4039370df6ff06041ca19e7"}, + {file = "ruff-0.9.6-py3-none-linux_armv6l.whl", hash = "sha256:2f218f356dd2d995839f1941322ff021c72a492c470f0b26a34f844c29cdf5ba"}, + {file = "ruff-0.9.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b908ff4df65dad7b251c9968a2e4560836d8f5487c2f0cc238321ed951ea0504"}, + {file = "ruff-0.9.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b109c0ad2ececf42e75fa99dc4043ff72a357436bb171900714a9ea581ddef83"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1de4367cca3dac99bcbd15c161404e849bb0bfd543664db39232648dc00112dc"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3ee4d7c2c92ddfdaedf0bf31b2b176fa7aa8950efc454628d477394d35638b"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc1edd1775270e6aa2386119aea692039781429f0be1e0949ea5884e011aa8e"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4a091729086dffa4bd070aa5dab7e39cc6b9d62eb2bef8f3d91172d30d599666"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1bbc6808bf7b15796cef0815e1dfb796fbd383e7dbd4334709642649625e7c5"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:589d1d9f25b5754ff230dce914a174a7c951a85a4e9270613a2b74231fdac2f5"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc61dd5131742e21103fbbdcad683a8813be0e3c204472d520d9a5021ca8b217"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5e2d9126161d0357e5c8f30b0bd6168d2c3872372f14481136d13de9937f79b6"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:68660eab1a8e65babb5229a1f97b46e3120923757a68b5413d8561f8a85d4897"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c4cae6c4cc7b9b4017c71114115db0445b00a16de3bcde0946273e8392856f08"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19f505b643228b417c1111a2a536424ddde0db4ef9023b9e04a46ed8a1cb4656"}, + {file = "ruff-0.9.6-py3-none-win32.whl", hash = "sha256:194d8402bceef1b31164909540a597e0d913c0e4952015a5b40e28c146121b5d"}, + {file = "ruff-0.9.6-py3-none-win_amd64.whl", hash = "sha256:03482d5c09d90d4ee3f40d97578423698ad895c87314c4de39ed2af945633caa"}, + {file = "ruff-0.9.6-py3-none-win_arm64.whl", hash = "sha256:0e2bb706a2be7ddfea4a4af918562fdc1bcb16df255e5fa595bbd800ce322a5a"}, + 
{file = "ruff-0.9.6.tar.gz", hash = "sha256:81761592f72b620ec8fa1068a6fd00e98a5ebee342a3642efd84454f3031dca9"}, ] [[package]] @@ -2154,13 +2153,13 @@ telegram = ["requests"] [[package]] name = "transformers" -version = "4.48.2" +version = "4.48.3" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.9.0" files = [ - {file = "transformers-4.48.2-py3-none-any.whl", hash = "sha256:493bc5b0268b116eff305edf6656367fc89cf570e7a9d5891369e04751db698a"}, - {file = "transformers-4.48.2.tar.gz", hash = "sha256:dcfb73473e61f22fb3366fe2471ed2e42779ecdd49527a1bdf1937574855d516"}, + {file = "transformers-4.48.3-py3-none-any.whl", hash = "sha256:78697f990f5ef350c23b46bf86d5081ce96b49479ab180b2de7687267de8fd36"}, + {file = "transformers-4.48.3.tar.gz", hash = "sha256:a5e8f1e9a6430aa78215836be70cecd3f872d99eeda300f41ad6cc841724afdb"}, ] [package.dependencies] @@ -2318,13 +2317,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.29.1" +version = "20.29.2" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.8" files = [ - {file = "virtualenv-20.29.1-py3-none-any.whl", hash = "sha256:4e4cb403c0b0da39e13b46b1b2476e505cb0046b25f242bee80f62bf990b2779"}, - {file = "virtualenv-20.29.1.tar.gz", hash = "sha256:b8b8970138d32fb606192cb97f6cd4bb644fa486be9308fb9b63f81091b5dc35"}, + {file = "virtualenv-20.29.2-py3-none-any.whl", hash = "sha256:febddfc3d1ea571bdb1dc0f98d7b45d24def7428214d4fb73cc486c9568cce6a"}, + {file = "virtualenv-20.29.2.tar.gz", hash = "sha256:fdaabebf6d03b5ba83ae0a02cfe96f48a716f4fae556461d180825866f75b728"}, ] [package.dependencies] @@ -2609,4 +2608,4 @@ propcache = ">=0.2.0" [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "a5f551155b24abbf0f7b70f15470c9a91c5ae35235b68d750d51b0c1eb775c9d" +content-hash = "5578b1ebfec18261ec47ebf32e3ee89424ff3b630810704bfefb62905d652c4a" diff --git a/pyproject.toml 
b/pyproject.toml index 1ce89d9c..5ac75ea5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,6 +40,7 @@ filelock = "^3.13.1" eval-type-backport = ">=0.1.3,<0.3.0" click = "^8.1.7" pyarrow = ">=10.0.1" +python-dateutil = "^2.8.2" numpy = [ { version = ">=1.23.5", python = "<3.12" }, { version = ">=1.26.0", python = ">=3.12" }, From d8faecc712488163ef4e4ee72cebe53e287ba3d7 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 13:50:21 +0000 Subject: [PATCH 08/29] make install --- .github/workflows/_integration_tests.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/_integration_tests.yml b/.github/workflows/_integration_tests.yml index c035debe..34a4d5ce 100644 --- a/.github/workflows/_integration_tests.yml +++ b/.github/workflows/_integration_tests.yml @@ -44,6 +44,10 @@ jobs: shell: bash run: poetry install --with quality,tests + - name: Make install + run: | + make install + - name: Install together run: | poetry run pip install . From 94e0c8ee1d8d815b0760928bd18e3173d4def5e6 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 17:22:58 +0100 Subject: [PATCH 09/29] skip openapi spec download in ci --- .github/workflows/_integration_tests.yml | 4 ++-- Makefile | 3 +++ scripts/generate_api_client.py | 28 ++++++++++++++++++++---- 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/.github/workflows/_integration_tests.yml b/.github/workflows/_integration_tests.yml index 34a4d5ce..347b26a4 100644 --- a/.github/workflows/_integration_tests.yml +++ b/.github/workflows/_integration_tests.yml @@ -44,9 +44,9 @@ jobs: shell: bash run: poetry install --with quality,tests - - name: Make install + - name: Generate OpenAPI client run: | - make install + make generate-client-from-existing-spec - name: Install together run: | diff --git a/Makefile b/Makefile index d32e7f6b..3aa71b97 100644 --- a/Makefile +++ b/Makefile @@ -37,6 +37,9 @@ format: generate-client: python scripts/generate_api_client.py 
+generate-client-from-existing-spec: + python scripts/generate_api_client.py --skip-spec-download + # Documentation html: diff --git a/scripts/generate_api_client.py b/scripts/generate_api_client.py index 01d25dc7..44948b8e 100755 --- a/scripts/generate_api_client.py +++ b/scripts/generate_api_client.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 from __future__ import annotations +import argparse import shutil import subprocess import sys @@ -28,13 +29,32 @@ def download_file(url: str, target: Path) -> None: run_command(["wget", "-O", str(target), url]) +def parse_args() -> argparse.Namespace: + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description="Generate Together API client") + parser.add_argument( + "--skip-spec-download", + action="store_true", + help="Skip downloading the OpenAPI spec file", + ) + return parser.parse_args() + + def main() -> None: - # Download OpenAPI spec + args = parse_args() spec_file = Path(__file__).parent / "openapi.yaml" - download_file(OPENAPI_SPEC_URL, spec_file) - # Run formatter on the spec for better merge conflict handling - run_command(["npx", "-y", "prettier", "--write", str(spec_file)]) + # Download OpenAPI spec if not skipped + if not args.skip_spec_download: + download_file(OPENAPI_SPEC_URL, spec_file) + # Run formatter on the spec for better merge conflict handling + run_command(["npx", "-y", "prettier", "--write", str(spec_file)]) + elif not spec_file.exists(): + print( + "Error: OpenAPI spec file not found and download was skipped", + file=sys.stderr, + ) + sys.exit(1) # Download generator if needed download_file(GENERATOR_JAR_URL, GENERATOR_JAR) From 6a8e0e8391143404f4d84958f203ffc461727c46 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 16:30:09 +0000 Subject: [PATCH 10/29] add debug step --- .github/workflows/_integration_tests.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/_integration_tests.yml b/.github/workflows/_integration_tests.yml 
index 347b26a4..0feaf902 100644 --- a/.github/workflows/_integration_tests.yml +++ b/.github/workflows/_integration_tests.yml @@ -48,6 +48,11 @@ jobs: run: | make generate-client-from-existing-spec + - name: Debug - Check generated files + run: | + ls -la src/together/generated/ + ls -la src/together/generated/models/ + - name: Install together run: | poetry run pip install . From 32f0580102602ed571cc5b7b6426d1db49dcba52 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 16:33:09 +0000 Subject: [PATCH 11/29] add generated to output --- src/together/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/together/__init__.py b/src/together/__init__.py index b4e4110d..ae2a866a 100644 --- a/src/together/__init__.py +++ b/src/together/__init__.py @@ -9,6 +9,7 @@ constants, error, filemanager, + generated, resources, together_response, types, @@ -54,6 +55,7 @@ "Client", "AsyncClient", "resources", + "generated", "types", "abstract", "filemanager", From fd0e2cfd90a0f33fb9ff219b1429c1e280bd18aa Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 16:33:32 +0000 Subject: [PATCH 12/29] remove debug --- .github/workflows/_integration_tests.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/_integration_tests.yml b/.github/workflows/_integration_tests.yml index 0feaf902..347b26a4 100644 --- a/.github/workflows/_integration_tests.yml +++ b/.github/workflows/_integration_tests.yml @@ -48,11 +48,6 @@ jobs: run: | make generate-client-from-existing-spec - - name: Debug - Check generated files - run: | - ls -la src/together/generated/ - ls -la src/together/generated/models/ - - name: Install together run: | poetry run pip install . 
From 4828da90e6e3d49a5ed108247ae6450e38f83c6a Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 16:36:25 +0000 Subject: [PATCH 13/29] fix poetry inclusion of generated files --- pyproject.toml | 4 ++++ src/together/__init__.py | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5ac75ea5..10684053 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,6 +26,10 @@ classifiers = [ ] repository = "https://github.com/togethercomputer/together-python" homepage = "https://github.com/togethercomputer/together-python" +packages = [ + { include = "together", from = "src" }, + { include = "together/generated/**/*.py", from = "src" } +] [tool.poetry.dependencies] python = "^3.9" diff --git a/src/together/__init__.py b/src/together/__init__.py index ae2a866a..b4e4110d 100644 --- a/src/together/__init__.py +++ b/src/together/__init__.py @@ -9,7 +9,6 @@ constants, error, filemanager, - generated, resources, together_response, types, @@ -55,7 +54,6 @@ "Client", "AsyncClient", "resources", - "generated", "types", "abstract", "filemanager", From dba33c85d707c0dd6defd581a0be8c2a5fd0e9ed Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 16:37:20 +0000 Subject: [PATCH 14/29] fix order of generation --- .github/workflows/_integration_tests.yml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/_integration_tests.yml b/.github/workflows/_integration_tests.yml index 347b26a4..f20d3a68 100644 --- a/.github/workflows/_integration_tests.yml +++ b/.github/workflows/_integration_tests.yml @@ -24,7 +24,6 @@ jobs: strategy: matrix: python-version: - - "3.8" - "3.9" - "3.10" - "3.11" @@ -40,14 +39,14 @@ jobs: poetry-version: ${{ env.POETRY_VERSION }} cache-key: core - - name: Install dependencies - shell: bash - run: poetry install --with quality,tests - - name: Generate OpenAPI client run: | make generate-client-from-existing-spec + - name: Install 
dependencies + shell: bash + run: poetry install --with quality,tests + - name: Install together run: | poetry run pip install . From 5d40a932221d0c091d36d74ced292d2fd4e97e19 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 16:37:46 +0000 Subject: [PATCH 15/29] remove 3.8 --- .github/workflows/_tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/_tests.yml b/.github/workflows/_tests.yml index 851320dd..62e42844 100644 --- a/.github/workflows/_tests.yml +++ b/.github/workflows/_tests.yml @@ -24,7 +24,6 @@ jobs: strategy: matrix: python-version: - - "3.8" - "3.9" - "3.10" - "3.11" From a904a8926f48a31f8c71dd74d316bd7d6d24c8a9 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 16:44:25 +0000 Subject: [PATCH 16/29] fix normal test --- .github/workflows/_tests.yml | 4 ++++ .github/workflows/upload-to-pypi.yml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/.github/workflows/_tests.yml b/.github/workflows/_tests.yml index 62e42844..fd7fcbae 100644 --- a/.github/workflows/_tests.yml +++ b/.github/workflows/_tests.yml @@ -39,6 +39,10 @@ jobs: poetry-version: ${{ env.POETRY_VERSION }} cache-key: core + - name: Generate OpenAPI client + run: | + make generate-client-from-existing-spec + - name: Install dependencies shell: bash run: poetry install --with quality,tests diff --git a/.github/workflows/upload-to-pypi.yml b/.github/workflows/upload-to-pypi.yml index 45485ce7..066f9b9e 100644 --- a/.github/workflows/upload-to-pypi.yml +++ b/.github/workflows/upload-to-pypi.yml @@ -27,6 +27,10 @@ jobs: path: .venv key: venv-${{ matrix.python-version }}-${{ hashFiles('pyproject.toml') }}-${{ hashFiles('poetry.lock') }} + - name: Generate OpenAPI client + run: | + make generate-client-from-existing-spec + - name: Install dependencies run: | poetry config virtualenvs.in-project true From 3d59638caf585db27caac469317aa2434d2e5325 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 16:53:20 +0000 
Subject: [PATCH 17/29] try include directive --- pyproject.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 10684053..a7454b44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,7 @@ +include = [ + "src/together/generated/**" +] + [build-system] requires = [ "poetry", @@ -26,10 +30,6 @@ classifiers = [ ] repository = "https://github.com/togethercomputer/together-python" homepage = "https://github.com/togethercomputer/together-python" -packages = [ - { include = "together", from = "src" }, - { include = "together/generated/**/*.py", from = "src" } -] [tool.poetry.dependencies] python = "^3.9" From ea7054fd46a397a0465322b075b772361da9e703 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 17:01:24 +0000 Subject: [PATCH 18/29] new try --- pyproject.toml | 8 +-- scripts/generate_api_client.py | 101 ++++++++++++++++----------------- 2 files changed, 54 insertions(+), 55 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a7454b44..306fd795 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,3 @@ -include = [ - "src/together/generated/**" -] - [build-system] requires = [ "poetry", @@ -14,6 +10,10 @@ requires = [ ] build-backend = "poetry.masonry.api" +packages = [ + { include = "together", from = "src" }, +] + [tool.poetry] name = "together" version = "1.4.0" diff --git a/scripts/generate_api_client.py b/scripts/generate_api_client.py index 44948b8e..2f18888a 100755 --- a/scripts/generate_api_client.py +++ b/scripts/generate_api_client.py @@ -5,13 +5,16 @@ import shutil import subprocess import sys +import tempfile from pathlib import Path OPENAPI_SPEC_URL = ( "https://raw.githubusercontent.com/togethercomputer/openapi/main/openapi.yaml" ) -OUTPUT_DIR = Path(__file__).parent.parent / "src" / "together" / "generated" +# We no longer set OUTPUT_DIR to the src folder for generation. +# Instead, we'll copy the generated client to the target directory. 
+TARGET_DIR = Path(__file__).parent.parent / "src" / "together" / "generated" GENERATOR_JAR_URL = "https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/7.11.0/openapi-generator-cli-7.11.0.jar" GENERATOR_JAR = Path(__file__).parent / "openapi-generator-cli.jar" @@ -23,8 +26,7 @@ def run_command(cmd: list[str], check: bool = True) -> subprocess.CompletedProce def download_file(url: str, target: Path) -> None: - """Download a file""" - + """Download a file.""" print(f"Downloading {url} to {target}") run_command(["wget", "-O", str(target), url]) @@ -44,10 +46,10 @@ def main() -> None: args = parse_args() spec_file = Path(__file__).parent / "openapi.yaml" - # Download OpenAPI spec if not skipped + # Download OpenAPI spec if not skipped. if not args.skip_spec_download: download_file(OPENAPI_SPEC_URL, spec_file) - # Run formatter on the spec for better merge conflict handling + # Format the spec for better merge conflict handling. run_command(["npx", "-y", "prettier", "--write", str(spec_file)]) elif not spec_file.exists(): print( @@ -56,54 +58,51 @@ def main() -> None: ) sys.exit(1) - # Download generator if needed + # Download generator if needed. 
download_file(GENERATOR_JAR_URL, GENERATOR_JAR) - # Delete existing generated code - shutil.rmtree(OUTPUT_DIR, ignore_errors=True) - - # Ensure output directory exists - OUTPUT_DIR.mkdir(parents=True, exist_ok=True) - - # Generate client code - cmd = [ - "java", - "-jar", - str(GENERATOR_JAR), - "generate", - "-i", - str(spec_file), - "-g", - "python", - "-o", - str(OUTPUT_DIR), - "--package-name=together.generated", - "--git-repo-id=together-python", - "--git-user-id=togethercomputer", - "--additional-properties=packageUrl=https://github.com/togethercomputer/together-python", - "--additional-properties=library=asyncio", - "--additional-properties=generateSourceCodeOnly=true", - ] - - print("Generating client code...") - result = run_command(cmd, check=False) - - if result.returncode != 0: - print("Error generating client code:", file=sys.stderr) - print(result.stderr, file=sys.stderr) - sys.exit(1) - - # Move files from nested directory to target directory - nested_dir = OUTPUT_DIR / "together" / "generated" - if nested_dir.exists(): - print("Moving files from nested directory...") - # Move all contents to parent directory - for item in nested_dir.iterdir(): - shutil.move(str(item), str(OUTPUT_DIR / item.name)) - # Clean up empty directories - shutil.rmtree(OUTPUT_DIR / "together", ignore_errors=True) - - print("Successfully generated client code") + # Create a temporary directory for generation. + with tempfile.TemporaryDirectory() as tmp_dir: + tmp_path = Path(tmp_dir) + # Build the generation command. 
+ cmd = [ + "java", + "-jar", + str(GENERATOR_JAR), + "generate", + "-i", + str(spec_file), + "-g", + "python", + "-o", + str(tmp_path), + "--package-name=together.generated", + "--git-repo-id=together-python", + "--git-user-id=togethercomputer", + "--additional-properties=packageUrl=https://github.com/togethercomputer/together-python", + "--additional-properties=library=asyncio", + "--additional-properties=generateSourceCodeOnly=true", + ] + + print("Generating client code into temporary directory...") + result = run_command(cmd, check=False) + if result.returncode != 0: + print("Error generating client code:", file=sys.stderr) + print(result.stderr, file=sys.stderr) + sys.exit(1) + + # The generator will create a directory structure like: tmp_dir/together/generated + generated_dir = tmp_path / "together" / "generated" + if not generated_dir.exists(): + print("Error: Expected generated directory not found", file=sys.stderr) + sys.exit(1) + + # Remove any existing generated client code. + shutil.rmtree(TARGET_DIR, ignore_errors=True) + TARGET_DIR.parent.mkdir(parents=True, exist_ok=True) + # Copy the generated code from the temporary directory to the target directory. 
+ shutil.copytree(generated_dir, TARGET_DIR) + print("Successfully generated and copied client code to", TARGET_DIR) if __name__ == "__main__": From e9f08afe26a96c9eddefdc5304d7b1ae388f9550 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 17:04:15 +0000 Subject: [PATCH 19/29] move --- pyproject.toml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 306fd795..6301660b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,10 +10,6 @@ requires = [ ] build-backend = "poetry.masonry.api" -packages = [ - { include = "together", from = "src" }, -] - [tool.poetry] name = "together" version = "1.4.0" @@ -30,6 +26,9 @@ classifiers = [ ] repository = "https://github.com/togethercomputer/together-python" homepage = "https://github.com/togethercomputer/together-python" +packages = [ + { include = "together", from = "src" }, +] [tool.poetry.dependencies] python = "^3.9" From d8766cdf81090de6aa96411eab9e774334115be9 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 17:07:11 +0000 Subject: [PATCH 20/29] try even more --- pyproject.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 6301660b..470100e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,7 @@ +include = [ + "src/together/generated/**" +] + [build-system] requires = [ "poetry", From 555f2c470ed00fb50cc81c7475eb9ed3c103c344 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 17:09:53 +0000 Subject: [PATCH 21/29] try manifest --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) create mode 100644 MANIFEST.in diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..0c0a3427 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +recursive-include src/together/generated * From bcaed822cd49aa8d5f51fb73ef8a565baf706cec Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 17:11:57 +0000 Subject: [PATCH 22/29] i give up, 
include openapi generated files --- .github/workflows/_integration_tests.yml | 4 - .github/workflows/_tests.yml | 4 - .gitignore | 3 - MANIFEST.in | 1 - pyproject.toml | 4 - src/together/generated/__init__.py | 224 +++ src/together/generated/api/__init__.py | 14 + src/together/generated/api/audio_api.py | 302 +++ src/together/generated/api/chat_api.py | 308 ++++ src/together/generated/api/completion_api.py | 308 ++++ src/together/generated/api/embeddings_api.py | 308 ++++ src/together/generated/api/endpoints_api.py | 1354 ++++++++++++++ src/together/generated/api/files_api.py | 996 ++++++++++ src/together/generated/api/fine_tuning_api.py | 1630 +++++++++++++++++ src/together/generated/api/hardware_api.py | 298 +++ src/together/generated/api/images_api.py | 291 +++ src/together/generated/api/models_api.py | 279 +++ src/together/generated/api/rerank_api.py | 308 ++++ src/together/generated/api_client.py | 758 ++++++++ src/together/generated/api_response.py | 20 + src/together/generated/configuration.py | 583 ++++++ src/together/generated/docs/AudioApi.md | 88 + .../generated/docs/AudioSpeechRequest.md | 34 + .../generated/docs/AudioSpeechRequestModel.md | 27 + .../generated/docs/AudioSpeechRequestVoice.md | 27 + .../generated/docs/AudioSpeechStreamChunk.md | 29 + .../generated/docs/AudioSpeechStreamEvent.md | 27 + .../docs/AudioSpeechStreamResponse.md | 27 + src/together/generated/docs/Autoscaling.md | 29 + src/together/generated/docs/ChatApi.md | 93 + .../ChatCompletionAssistantMessageParam.md | 31 + .../generated/docs/ChatCompletionChoice.md | 30 + .../docs/ChatCompletionChoiceDelta.md | 31 + .../ChatCompletionChoiceDeltaFunctionCall.md | 28 + .../docs/ChatCompletionChoicesDataInner.md | 32 + .../ChatCompletionChoicesDataInnerLogprobs.md | 29 + .../generated/docs/ChatCompletionChunk.md | 33 + .../docs/ChatCompletionChunkChoicesInner.md | 31 + .../generated/docs/ChatCompletionEvent.md | 27 + .../ChatCompletionFunctionMessageParam.md | 29 + 
.../generated/docs/ChatCompletionMessage.md | 30 + .../docs/ChatCompletionMessageFunctionCall.md | 28 + .../docs/ChatCompletionMessageParam.md | 32 + .../generated/docs/ChatCompletionRequest.md | 49 + .../docs/ChatCompletionRequestFunctionCall.md | 27 + .../ChatCompletionRequestFunctionCallOneOf.md | 27 + .../ChatCompletionRequestMessagesInner.md | 28 + .../docs/ChatCompletionRequestModel.md | 27 + .../ChatCompletionRequestResponseFormat.md | 29 + .../docs/ChatCompletionRequestToolChoice.md | 31 + .../generated/docs/ChatCompletionResponse.md | 32 + .../generated/docs/ChatCompletionStream.md | 27 + .../docs/ChatCompletionSystemMessageParam.md | 29 + .../generated/docs/ChatCompletionToken.md | 30 + .../generated/docs/ChatCompletionTool.md | 28 + .../docs/ChatCompletionToolFunction.md | 29 + .../docs/ChatCompletionToolMessageParam.md | 29 + .../docs/ChatCompletionUserMessageParam.md | 29 + src/together/generated/docs/CompletionApi.md | 93 + .../generated/docs/CompletionChoice.md | 27 + .../docs/CompletionChoicesDataInner.md | 30 + .../generated/docs/CompletionChunk.md | 32 + .../generated/docs/CompletionChunkUsage.md | 29 + .../generated/docs/CompletionEvent.md | 27 + .../generated/docs/CompletionRequest.md | 44 + .../generated/docs/CompletionRequestModel.md | 27 + .../docs/CompletionRequestSafetyModel.md | 27 + .../generated/docs/CompletionResponse.md | 33 + .../generated/docs/CompletionStream.md | 27 + .../generated/docs/CompletionToken.md | 30 + .../generated/docs/CreateEndpointRequest.md | 33 + .../generated/docs/DedicatedEndpoint.md | 38 + src/together/generated/docs/EmbeddingsApi.md | 93 + .../generated/docs/EmbeddingsRequest.md | 28 + .../generated/docs/EmbeddingsRequestInput.md | 26 + .../generated/docs/EmbeddingsRequestModel.md | 27 + .../generated/docs/EmbeddingsResponse.md | 29 + .../docs/EmbeddingsResponseDataInner.md | 29 + .../generated/docs/EndpointPricing.md | 28 + src/together/generated/docs/EndpointsApi.md | 416 +++++ 
src/together/generated/docs/ErrorData.md | 27 + src/together/generated/docs/ErrorDataError.md | 30 + .../generated/docs/FileDeleteResponse.md | 28 + src/together/generated/docs/FileList.md | 27 + src/together/generated/docs/FileObject.md | 30 + src/together/generated/docs/FileResponse.md | 35 + src/together/generated/docs/FilesApi.md | 320 ++++ src/together/generated/docs/FineTuneEvent.md | 40 + .../generated/docs/FineTunesPostRequest.md | 45 + .../docs/FineTunesPostRequestTrainOnInputs.md | 27 + .../docs/FineTunesPostRequestTrainingType.md | 31 + src/together/generated/docs/FineTuningApi.md | 488 +++++ .../generated/docs/FinetuneDownloadResult.md | 31 + .../generated/docs/FinetuneEventLevels.md | 18 + .../generated/docs/FinetuneEventType.md | 56 + .../generated/docs/FinetuneJobStatus.md | 24 + src/together/generated/docs/FinetuneList.md | 27 + .../generated/docs/FinetuneListEvents.md | 27 + .../generated/docs/FinetuneResponse.md | 58 + .../docs/FinetuneResponseTrainOnInputs.md | 26 + src/together/generated/docs/FinishReason.md | 16 + .../generated/docs/FullTrainingType.md | 27 + src/together/generated/docs/HardwareApi.md | 88 + .../generated/docs/HardwareAvailability.md | 28 + src/together/generated/docs/HardwareSpec.md | 31 + .../generated/docs/HardwareWithStatus.md | 33 + src/together/generated/docs/ImageResponse.md | 30 + .../generated/docs/ImageResponseDataInner.md | 29 + src/together/generated/docs/ImagesApi.md | 87 + .../docs/ImagesGenerationsPostRequest.md | 39 + ...esGenerationsPostRequestImageLorasInner.md | 28 + .../docs/ImagesGenerationsPostRequestModel.md | 27 + src/together/generated/docs/LRScheduler.md | 28 + .../generated/docs/LinearLRSchedulerArgs.md | 27 + src/together/generated/docs/ListEndpoint.md | 35 + .../docs/ListEndpoints200Response.md | 28 + .../generated/docs/ListHardware200Response.md | 28 + .../docs/ListHardware200ResponseOneOf.md | 29 + .../docs/ListHardware200ResponseOneOf1.md | 29 + .../ListHardware200ResponseOneOf1DataInner.md | 32 
+ .../ListHardware200ResponseOneOfDataInner.md | 32 + .../generated/docs/LoRATrainingType.md | 31 + src/together/generated/docs/LogprobsPart.md | 29 + src/together/generated/docs/ModelInfo.md | 36 + src/together/generated/docs/ModelsApi.md | 87 + src/together/generated/docs/Pricing.md | 31 + .../generated/docs/PromptPartInner.md | 28 + src/together/generated/docs/RerankApi.md | 93 + src/together/generated/docs/RerankRequest.md | 32 + .../generated/docs/RerankRequestDocuments.md | 27 + .../generated/docs/RerankRequestModel.md | 27 + src/together/generated/docs/RerankResponse.md | 31 + .../docs/RerankResponseResultsInner.md | 29 + .../RerankResponseResultsInnerDocument.md | 27 + src/together/generated/docs/StreamSentinel.md | 27 + src/together/generated/docs/ToolChoice.md | 30 + .../generated/docs/ToolChoiceFunction.md | 28 + src/together/generated/docs/ToolsPart.md | 28 + .../generated/docs/ToolsPartFunction.md | 29 + .../generated/docs/UpdateEndpointRequest.md | 29 + src/together/generated/docs/UsageData.md | 29 + src/together/generated/exceptions.py | 220 +++ src/together/generated/models/__init__.py | 197 ++ .../generated/models/audio_speech_request.py | 212 +++ .../models/audio_speech_request_model.py | 158 ++ .../models/audio_speech_request_voice.py | 158 ++ .../models/audio_speech_stream_chunk.py | 98 + .../models/audio_speech_stream_event.py | 95 + .../models/audio_speech_stream_response.py | 169 ++ src/together/generated/models/autoscaling.py | 93 + ...chat_completion_assistant_message_param.py | 130 ++ .../models/chat_completion_choice.py | 112 ++ .../models/chat_completion_choice_delta.py | 134 ++ ...t_completion_choice_delta_function_call.py | 86 + .../chat_completion_choices_data_inner.py | 123 ++ ..._completion_choices_data_inner_logprobs.py | 97 + .../generated/models/chat_completion_chunk.py | 139 ++ .../chat_completion_chunk_choices_inner.py | 112 ++ .../generated/models/chat_completion_event.py | 95 + .../chat_completion_function_message_param.py | 
98 + .../models/chat_completion_message.py | 127 ++ .../chat_completion_message_function_call.py | 86 + .../models/chat_completion_message_param.py | 266 +++ .../models/chat_completion_request.py | 304 +++ .../chat_completion_request_function_call.py | 177 ++ ...completion_request_function_call_one_of.py | 83 + .../chat_completion_request_messages_inner.py | 99 + .../models/chat_completion_request_model.py | 158 ++ ...chat_completion_request_response_format.py | 90 + .../chat_completion_request_tool_choice.py | 166 ++ .../models/chat_completion_response.py | 136 ++ .../models/chat_completion_stream.py | 169 ++ .../chat_completion_system_message_param.py | 98 + .../generated/models/chat_completion_token.py | 100 + .../generated/models/chat_completion_tool.py | 106 ++ .../models/chat_completion_tool_function.py | 91 + .../chat_completion_tool_message_param.py | 98 + .../chat_completion_user_message_param.py | 98 + .../generated/models/completion_choice.py | 83 + .../models/completion_choices_data_inner.py | 101 + .../generated/models/completion_chunk.py | 139 ++ .../models/completion_chunk_usage.py | 95 + .../generated/models/completion_event.py | 95 + .../generated/models/completion_request.py | 212 +++ .../models/completion_request_model.py | 158 ++ .../models/completion_request_safety_model.py | 158 ++ .../generated/models/completion_response.py | 151 ++ .../generated/models/completion_stream.py | 169 ++ .../generated/models/completion_token.py | 100 + .../models/create_endpoint_request.py | 156 ++ .../generated/models/dedicated_endpoint.py | 157 ++ .../generated/models/embeddings_request.py | 105 ++ .../models/embeddings_request_input.py | 171 ++ .../models/embeddings_request_model.py | 158 ++ .../generated/models/embeddings_response.py | 115 ++ .../models/embeddings_response_data_inner.py | 105 ++ .../generated/models/endpoint_pricing.py | 85 + src/together/generated/models/error_data.py | 95 + .../generated/models/error_data_error.py | 93 + 
.../generated/models/file_delete_response.py | 84 + src/together/generated/models/file_list.py | 99 + src/together/generated/models/file_object.py | 93 + .../generated/models/file_response.py | 135 ++ .../generated/models/fine_tune_event.py | 137 ++ .../models/fine_tunes_post_request.py | 233 +++ ...fine_tunes_post_request_train_on_inputs.py | 170 ++ .../fine_tunes_post_request_training_type.py | 172 ++ .../models/finetune_download_result.py | 116 ++ .../generated/models/finetune_event_levels.py | 39 + .../generated/models/finetune_event_type.py | 58 + .../generated/models/finetune_job_status.py | 42 + .../generated/models/finetune_list.py | 99 + .../generated/models/finetune_list_events.py | 99 + .../generated/models/finetune_response.py | 222 +++ .../finetune_response_train_on_inputs.py | 170 ++ .../generated/models/finish_reason.py | 38 + .../generated/models/full_training_type.py | 90 + .../generated/models/hardware_availability.py | 94 + .../generated/models/hardware_spec.py | 100 + .../generated/models/hardware_with_status.py | 140 ++ .../generated/models/image_response.py | 112 ++ .../models/image_response_data_inner.py | 85 + .../models/images_generations_post_request.py | 217 +++ ...erations_post_request_image_loras_inner.py | 88 + .../images_generations_post_request_model.py | 158 ++ .../models/linear_lr_scheduler_args.py | 94 + .../generated/models/list_endpoint.py | 136 ++ .../models/list_endpoints200_response.py | 108 ++ .../models/list_hardware200_response.py | 185 ++ .../list_hardware200_response_one_of.py | 113 ++ .../list_hardware200_response_one_of1.py | 113 ++ ...hardware200_response_one_of1_data_inner.py | 140 ++ ..._hardware200_response_one_of_data_inner.py | 137 ++ .../generated/models/lo_ra_training_type.py | 123 ++ .../generated/models/logprobs_part.py | 97 + src/together/generated/models/lr_scheduler.py | 96 + src/together/generated/models/model_info.py | 135 ++ src/together/generated/models/pricing.py | 101 + 
.../generated/models/prompt_part_inner.py | 97 + .../generated/models/rerank_request.py | 144 ++ .../models/rerank_request_documents.py | 171 ++ .../generated/models/rerank_request_model.py | 158 ++ .../generated/models/rerank_response.py | 127 ++ .../models/rerank_response_results_inner.py | 101 + .../rerank_response_results_inner_document.py | 83 + .../generated/models/stream_sentinel.py | 90 + src/together/generated/models/tool_choice.py | 115 ++ .../generated/models/tool_choice_function.py | 86 + src/together/generated/models/tools_part.py | 97 + .../generated/models/tools_part_function.py | 93 + .../models/update_endpoint_request.py | 115 ++ src/together/generated/models/usage_data.py | 95 + src/together/generated/rest.py | 195 ++ src/together/generated/test/__init__.py | 0 src/together/generated/test/test_audio_api.py | 38 + .../test/test_audio_speech_request.py | 63 + .../test/test_audio_speech_request_model.py | 52 + .../test/test_audio_speech_request_voice.py | 52 + .../test/test_audio_speech_stream_chunk.py | 58 + .../test/test_audio_speech_stream_event.py | 60 + .../test/test_audio_speech_stream_response.py | 56 + .../generated/test/test_autoscaling.py | 56 + src/together/generated/test/test_chat_api.py | 38 + ...chat_completion_assistant_message_param.py | 70 + .../test/test_chat_completion_choice.py | 98 + .../test/test_chat_completion_choice_delta.py | 70 + ...t_completion_choice_delta_function_call.py | 58 + ...test_chat_completion_choices_data_inner.py | 74 + ..._completion_choices_data_inner_logprobs.py | 63 + .../test/test_chat_completion_chunk.py | 108 ++ ...est_chat_completion_chunk_choices_inner.py | 92 + .../test/test_chat_completion_event.py | 112 ++ ..._chat_completion_function_message_param.py | 60 + .../test/test_chat_completion_message.py | 68 + ...t_chat_completion_message_function_call.py | 58 + .../test_chat_completion_message_param.py | 74 + .../test/test_chat_completion_request.py | 98 + ...t_chat_completion_request_function_call.py 
| 56 + ...completion_request_function_call_one_of.py | 56 + ..._chat_completion_request_messages_inner.py | 58 + .../test_chat_completion_request_model.py | 54 + ...chat_completion_request_response_format.py | 58 + ...est_chat_completion_request_tool_choice.py | 66 + .../test/test_chat_completion_response.py | 110 ++ .../test/test_chat_completion_stream.py | 54 + ...st_chat_completion_system_message_param.py | 59 + .../test/test_chat_completion_token.py | 60 + .../test/test_chat_completion_tool.py | 66 + .../test_chat_completion_tool_function.py | 60 + ...test_chat_completion_tool_message_param.py | 60 + ...test_chat_completion_user_message_param.py | 59 + .../generated/test/test_completion_api.py | 38 + .../generated/test/test_completion_choice.py | 53 + .../test_completion_choices_data_inner.py | 67 + .../generated/test/test_completion_chunk.py | 77 + .../test/test_completion_chunk_usage.py | 58 + .../generated/test/test_completion_event.py | 80 + .../generated/test/test_completion_request.py | 74 + .../test/test_completion_request_model.py | 52 + .../test_completion_request_safety_model.py | 54 + .../test/test_completion_response.py | 114 ++ .../generated/test/test_completion_stream.py | 54 + .../generated/test/test_completion_token.py | 60 + .../test/test_create_endpoint_request.py | 66 + .../generated/test/test_dedicated_endpoint.py | 78 + .../generated/test/test_embeddings_api.py | 38 + .../generated/test/test_embeddings_request.py | 56 + .../test/test_embeddings_request_input.py | 52 + .../test/test_embeddings_request_model.py | 52 + .../test/test_embeddings_response.py | 72 + .../test_embeddings_response_data_inner.py | 64 + .../generated/test/test_endpoint_pricing.py | 54 + .../generated/test/test_endpoints_api.py | 66 + .../generated/test/test_error_data.py | 62 + .../generated/test/test_error_data_error.py | 58 + .../test/test_file_delete_response.py | 54 + src/together/generated/test/test_file_list.py | 76 + .../generated/test/test_file_object.py | 56 + 
.../generated/test/test_file_response.py | 70 + src/together/generated/test/test_files_api.py | 59 + .../generated/test/test_fine_tune_event.py | 79 + .../test/test_fine_tunes_post_request.py | 76 + ...fine_tunes_post_request_train_on_inputs.py | 54 + ...t_fine_tunes_post_request_training_type.py | 62 + .../generated/test/test_fine_tuning_api.py | 73 + .../test/test_finetune_download_result.py | 57 + .../test/test_finetune_event_levels.py | 35 + .../test/test_finetune_event_type.py | 35 + .../test/test_finetune_job_status.py | 35 + .../generated/test/test_finetune_list.py | 54 + .../test/test_finetune_list_events.py | 54 + .../generated/test/test_finetune_response.py | 89 + .../test_finetune_response_train_on_inputs.py | 54 + .../generated/test/test_finish_reason.py | 35 + .../generated/test/test_full_training_type.py | 54 + .../generated/test/test_hardware_api.py | 38 + .../test/test_hardware_availability.py | 54 + .../generated/test/test_hardware_spec.py | 60 + .../test/test_hardware_with_status.py | 74 + .../generated/test/test_image_response.py | 70 + .../test/test_image_response_data_inner.py | 56 + .../generated/test/test_images_api.py | 38 + .../test_images_generations_post_request.py | 73 + ...erations_post_request_image_loras_inner.py | 60 + ...t_images_generations_post_request_model.py | 54 + .../test/test_linear_lr_scheduler_args.py | 53 + .../generated/test/test_list_endpoint.py | 68 + .../test/test_list_endpoints200_response.py | 78 + .../test/test_list_hardware200_response.py | 60 + .../test_list_hardware200_response_one_of.py | 62 + .../test_list_hardware200_response_one_of1.py | 62 + ...hardware200_response_one_of1_data_inner.py | 78 + ..._hardware200_response_one_of_data_inner.py | 75 + .../test/test_lo_ra_training_type.py | 60 + .../generated/test/test_logprobs_part.py | 61 + .../generated/test/test_lr_scheduler.py | 56 + .../generated/test/test_model_info.py | 71 + .../generated/test/test_models_api.py | 38 + 
src/together/generated/test/test_pricing.py | 62 + .../generated/test/test_prompt_part_inner.py | 63 + .../generated/test/test_rerank_api.py | 38 + .../generated/test/test_rerank_request.py | 61 + .../test/test_rerank_request_documents.py | 52 + .../test/test_rerank_request_model.py | 52 + .../generated/test/test_rerank_response.py | 60 + .../test_rerank_response_results_inner.py | 62 + ..._rerank_response_results_inner_document.py | 55 + .../generated/test/test_stream_sentinel.py | 54 + .../generated/test/test_tool_choice.py | 64 + .../test/test_tool_choice_function.py | 56 + .../generated/test/test_tools_part.py | 57 + .../test/test_tools_part_function.py | 55 + .../test/test_update_endpoint_request.py | 57 + .../generated/test/test_usage_data.py | 58 + 374 files changed, 34742 insertions(+), 16 deletions(-) delete mode 100644 MANIFEST.in create mode 100644 src/together/generated/__init__.py create mode 100644 src/together/generated/api/__init__.py create mode 100644 src/together/generated/api/audio_api.py create mode 100644 src/together/generated/api/chat_api.py create mode 100644 src/together/generated/api/completion_api.py create mode 100644 src/together/generated/api/embeddings_api.py create mode 100644 src/together/generated/api/endpoints_api.py create mode 100644 src/together/generated/api/files_api.py create mode 100644 src/together/generated/api/fine_tuning_api.py create mode 100644 src/together/generated/api/hardware_api.py create mode 100644 src/together/generated/api/images_api.py create mode 100644 src/together/generated/api/models_api.py create mode 100644 src/together/generated/api/rerank_api.py create mode 100644 src/together/generated/api_client.py create mode 100644 src/together/generated/api_response.py create mode 100644 src/together/generated/configuration.py create mode 100644 src/together/generated/docs/AudioApi.md create mode 100644 src/together/generated/docs/AudioSpeechRequest.md create mode 100644 
src/together/generated/docs/AudioSpeechRequestModel.md create mode 100644 src/together/generated/docs/AudioSpeechRequestVoice.md create mode 100644 src/together/generated/docs/AudioSpeechStreamChunk.md create mode 100644 src/together/generated/docs/AudioSpeechStreamEvent.md create mode 100644 src/together/generated/docs/AudioSpeechStreamResponse.md create mode 100644 src/together/generated/docs/Autoscaling.md create mode 100644 src/together/generated/docs/ChatApi.md create mode 100644 src/together/generated/docs/ChatCompletionAssistantMessageParam.md create mode 100644 src/together/generated/docs/ChatCompletionChoice.md create mode 100644 src/together/generated/docs/ChatCompletionChoiceDelta.md create mode 100644 src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md create mode 100644 src/together/generated/docs/ChatCompletionChoicesDataInner.md create mode 100644 src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md create mode 100644 src/together/generated/docs/ChatCompletionChunk.md create mode 100644 src/together/generated/docs/ChatCompletionChunkChoicesInner.md create mode 100644 src/together/generated/docs/ChatCompletionEvent.md create mode 100644 src/together/generated/docs/ChatCompletionFunctionMessageParam.md create mode 100644 src/together/generated/docs/ChatCompletionMessage.md create mode 100644 src/together/generated/docs/ChatCompletionMessageFunctionCall.md create mode 100644 src/together/generated/docs/ChatCompletionMessageParam.md create mode 100644 src/together/generated/docs/ChatCompletionRequest.md create mode 100644 src/together/generated/docs/ChatCompletionRequestFunctionCall.md create mode 100644 src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md create mode 100644 src/together/generated/docs/ChatCompletionRequestMessagesInner.md create mode 100644 src/together/generated/docs/ChatCompletionRequestModel.md create mode 100644 src/together/generated/docs/ChatCompletionRequestResponseFormat.md 
create mode 100644 src/together/generated/docs/ChatCompletionRequestToolChoice.md create mode 100644 src/together/generated/docs/ChatCompletionResponse.md create mode 100644 src/together/generated/docs/ChatCompletionStream.md create mode 100644 src/together/generated/docs/ChatCompletionSystemMessageParam.md create mode 100644 src/together/generated/docs/ChatCompletionToken.md create mode 100644 src/together/generated/docs/ChatCompletionTool.md create mode 100644 src/together/generated/docs/ChatCompletionToolFunction.md create mode 100644 src/together/generated/docs/ChatCompletionToolMessageParam.md create mode 100644 src/together/generated/docs/ChatCompletionUserMessageParam.md create mode 100644 src/together/generated/docs/CompletionApi.md create mode 100644 src/together/generated/docs/CompletionChoice.md create mode 100644 src/together/generated/docs/CompletionChoicesDataInner.md create mode 100644 src/together/generated/docs/CompletionChunk.md create mode 100644 src/together/generated/docs/CompletionChunkUsage.md create mode 100644 src/together/generated/docs/CompletionEvent.md create mode 100644 src/together/generated/docs/CompletionRequest.md create mode 100644 src/together/generated/docs/CompletionRequestModel.md create mode 100644 src/together/generated/docs/CompletionRequestSafetyModel.md create mode 100644 src/together/generated/docs/CompletionResponse.md create mode 100644 src/together/generated/docs/CompletionStream.md create mode 100644 src/together/generated/docs/CompletionToken.md create mode 100644 src/together/generated/docs/CreateEndpointRequest.md create mode 100644 src/together/generated/docs/DedicatedEndpoint.md create mode 100644 src/together/generated/docs/EmbeddingsApi.md create mode 100644 src/together/generated/docs/EmbeddingsRequest.md create mode 100644 src/together/generated/docs/EmbeddingsRequestInput.md create mode 100644 src/together/generated/docs/EmbeddingsRequestModel.md create mode 100644 
src/together/generated/docs/EmbeddingsResponse.md create mode 100644 src/together/generated/docs/EmbeddingsResponseDataInner.md create mode 100644 src/together/generated/docs/EndpointPricing.md create mode 100644 src/together/generated/docs/EndpointsApi.md create mode 100644 src/together/generated/docs/ErrorData.md create mode 100644 src/together/generated/docs/ErrorDataError.md create mode 100644 src/together/generated/docs/FileDeleteResponse.md create mode 100644 src/together/generated/docs/FileList.md create mode 100644 src/together/generated/docs/FileObject.md create mode 100644 src/together/generated/docs/FileResponse.md create mode 100644 src/together/generated/docs/FilesApi.md create mode 100644 src/together/generated/docs/FineTuneEvent.md create mode 100644 src/together/generated/docs/FineTunesPostRequest.md create mode 100644 src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md create mode 100644 src/together/generated/docs/FineTunesPostRequestTrainingType.md create mode 100644 src/together/generated/docs/FineTuningApi.md create mode 100644 src/together/generated/docs/FinetuneDownloadResult.md create mode 100644 src/together/generated/docs/FinetuneEventLevels.md create mode 100644 src/together/generated/docs/FinetuneEventType.md create mode 100644 src/together/generated/docs/FinetuneJobStatus.md create mode 100644 src/together/generated/docs/FinetuneList.md create mode 100644 src/together/generated/docs/FinetuneListEvents.md create mode 100644 src/together/generated/docs/FinetuneResponse.md create mode 100644 src/together/generated/docs/FinetuneResponseTrainOnInputs.md create mode 100644 src/together/generated/docs/FinishReason.md create mode 100644 src/together/generated/docs/FullTrainingType.md create mode 100644 src/together/generated/docs/HardwareApi.md create mode 100644 src/together/generated/docs/HardwareAvailability.md create mode 100644 src/together/generated/docs/HardwareSpec.md create mode 100644 
src/together/generated/docs/HardwareWithStatus.md create mode 100644 src/together/generated/docs/ImageResponse.md create mode 100644 src/together/generated/docs/ImageResponseDataInner.md create mode 100644 src/together/generated/docs/ImagesApi.md create mode 100644 src/together/generated/docs/ImagesGenerationsPostRequest.md create mode 100644 src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md create mode 100644 src/together/generated/docs/ImagesGenerationsPostRequestModel.md create mode 100644 src/together/generated/docs/LRScheduler.md create mode 100644 src/together/generated/docs/LinearLRSchedulerArgs.md create mode 100644 src/together/generated/docs/ListEndpoint.md create mode 100644 src/together/generated/docs/ListEndpoints200Response.md create mode 100644 src/together/generated/docs/ListHardware200Response.md create mode 100644 src/together/generated/docs/ListHardware200ResponseOneOf.md create mode 100644 src/together/generated/docs/ListHardware200ResponseOneOf1.md create mode 100644 src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md create mode 100644 src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md create mode 100644 src/together/generated/docs/LoRATrainingType.md create mode 100644 src/together/generated/docs/LogprobsPart.md create mode 100644 src/together/generated/docs/ModelInfo.md create mode 100644 src/together/generated/docs/ModelsApi.md create mode 100644 src/together/generated/docs/Pricing.md create mode 100644 src/together/generated/docs/PromptPartInner.md create mode 100644 src/together/generated/docs/RerankApi.md create mode 100644 src/together/generated/docs/RerankRequest.md create mode 100644 src/together/generated/docs/RerankRequestDocuments.md create mode 100644 src/together/generated/docs/RerankRequestModel.md create mode 100644 src/together/generated/docs/RerankResponse.md create mode 100644 src/together/generated/docs/RerankResponseResultsInner.md create mode 100644 
src/together/generated/docs/RerankResponseResultsInnerDocument.md create mode 100644 src/together/generated/docs/StreamSentinel.md create mode 100644 src/together/generated/docs/ToolChoice.md create mode 100644 src/together/generated/docs/ToolChoiceFunction.md create mode 100644 src/together/generated/docs/ToolsPart.md create mode 100644 src/together/generated/docs/ToolsPartFunction.md create mode 100644 src/together/generated/docs/UpdateEndpointRequest.md create mode 100644 src/together/generated/docs/UsageData.md create mode 100644 src/together/generated/exceptions.py create mode 100644 src/together/generated/models/__init__.py create mode 100644 src/together/generated/models/audio_speech_request.py create mode 100644 src/together/generated/models/audio_speech_request_model.py create mode 100644 src/together/generated/models/audio_speech_request_voice.py create mode 100644 src/together/generated/models/audio_speech_stream_chunk.py create mode 100644 src/together/generated/models/audio_speech_stream_event.py create mode 100644 src/together/generated/models/audio_speech_stream_response.py create mode 100644 src/together/generated/models/autoscaling.py create mode 100644 src/together/generated/models/chat_completion_assistant_message_param.py create mode 100644 src/together/generated/models/chat_completion_choice.py create mode 100644 src/together/generated/models/chat_completion_choice_delta.py create mode 100644 src/together/generated/models/chat_completion_choice_delta_function_call.py create mode 100644 src/together/generated/models/chat_completion_choices_data_inner.py create mode 100644 src/together/generated/models/chat_completion_choices_data_inner_logprobs.py create mode 100644 src/together/generated/models/chat_completion_chunk.py create mode 100644 src/together/generated/models/chat_completion_chunk_choices_inner.py create mode 100644 src/together/generated/models/chat_completion_event.py create mode 100644 
src/together/generated/models/chat_completion_function_message_param.py create mode 100644 src/together/generated/models/chat_completion_message.py create mode 100644 src/together/generated/models/chat_completion_message_function_call.py create mode 100644 src/together/generated/models/chat_completion_message_param.py create mode 100644 src/together/generated/models/chat_completion_request.py create mode 100644 src/together/generated/models/chat_completion_request_function_call.py create mode 100644 src/together/generated/models/chat_completion_request_function_call_one_of.py create mode 100644 src/together/generated/models/chat_completion_request_messages_inner.py create mode 100644 src/together/generated/models/chat_completion_request_model.py create mode 100644 src/together/generated/models/chat_completion_request_response_format.py create mode 100644 src/together/generated/models/chat_completion_request_tool_choice.py create mode 100644 src/together/generated/models/chat_completion_response.py create mode 100644 src/together/generated/models/chat_completion_stream.py create mode 100644 src/together/generated/models/chat_completion_system_message_param.py create mode 100644 src/together/generated/models/chat_completion_token.py create mode 100644 src/together/generated/models/chat_completion_tool.py create mode 100644 src/together/generated/models/chat_completion_tool_function.py create mode 100644 src/together/generated/models/chat_completion_tool_message_param.py create mode 100644 src/together/generated/models/chat_completion_user_message_param.py create mode 100644 src/together/generated/models/completion_choice.py create mode 100644 src/together/generated/models/completion_choices_data_inner.py create mode 100644 src/together/generated/models/completion_chunk.py create mode 100644 src/together/generated/models/completion_chunk_usage.py create mode 100644 src/together/generated/models/completion_event.py create mode 100644 
src/together/generated/models/completion_request.py create mode 100644 src/together/generated/models/completion_request_model.py create mode 100644 src/together/generated/models/completion_request_safety_model.py create mode 100644 src/together/generated/models/completion_response.py create mode 100644 src/together/generated/models/completion_stream.py create mode 100644 src/together/generated/models/completion_token.py create mode 100644 src/together/generated/models/create_endpoint_request.py create mode 100644 src/together/generated/models/dedicated_endpoint.py create mode 100644 src/together/generated/models/embeddings_request.py create mode 100644 src/together/generated/models/embeddings_request_input.py create mode 100644 src/together/generated/models/embeddings_request_model.py create mode 100644 src/together/generated/models/embeddings_response.py create mode 100644 src/together/generated/models/embeddings_response_data_inner.py create mode 100644 src/together/generated/models/endpoint_pricing.py create mode 100644 src/together/generated/models/error_data.py create mode 100644 src/together/generated/models/error_data_error.py create mode 100644 src/together/generated/models/file_delete_response.py create mode 100644 src/together/generated/models/file_list.py create mode 100644 src/together/generated/models/file_object.py create mode 100644 src/together/generated/models/file_response.py create mode 100644 src/together/generated/models/fine_tune_event.py create mode 100644 src/together/generated/models/fine_tunes_post_request.py create mode 100644 src/together/generated/models/fine_tunes_post_request_train_on_inputs.py create mode 100644 src/together/generated/models/fine_tunes_post_request_training_type.py create mode 100644 src/together/generated/models/finetune_download_result.py create mode 100644 src/together/generated/models/finetune_event_levels.py create mode 100644 src/together/generated/models/finetune_event_type.py create mode 100644 
src/together/generated/models/finetune_job_status.py create mode 100644 src/together/generated/models/finetune_list.py create mode 100644 src/together/generated/models/finetune_list_events.py create mode 100644 src/together/generated/models/finetune_response.py create mode 100644 src/together/generated/models/finetune_response_train_on_inputs.py create mode 100644 src/together/generated/models/finish_reason.py create mode 100644 src/together/generated/models/full_training_type.py create mode 100644 src/together/generated/models/hardware_availability.py create mode 100644 src/together/generated/models/hardware_spec.py create mode 100644 src/together/generated/models/hardware_with_status.py create mode 100644 src/together/generated/models/image_response.py create mode 100644 src/together/generated/models/image_response_data_inner.py create mode 100644 src/together/generated/models/images_generations_post_request.py create mode 100644 src/together/generated/models/images_generations_post_request_image_loras_inner.py create mode 100644 src/together/generated/models/images_generations_post_request_model.py create mode 100644 src/together/generated/models/linear_lr_scheduler_args.py create mode 100644 src/together/generated/models/list_endpoint.py create mode 100644 src/together/generated/models/list_endpoints200_response.py create mode 100644 src/together/generated/models/list_hardware200_response.py create mode 100644 src/together/generated/models/list_hardware200_response_one_of.py create mode 100644 src/together/generated/models/list_hardware200_response_one_of1.py create mode 100644 src/together/generated/models/list_hardware200_response_one_of1_data_inner.py create mode 100644 src/together/generated/models/list_hardware200_response_one_of_data_inner.py create mode 100644 src/together/generated/models/lo_ra_training_type.py create mode 100644 src/together/generated/models/logprobs_part.py create mode 100644 src/together/generated/models/lr_scheduler.py create mode 
100644 src/together/generated/models/model_info.py create mode 100644 src/together/generated/models/pricing.py create mode 100644 src/together/generated/models/prompt_part_inner.py create mode 100644 src/together/generated/models/rerank_request.py create mode 100644 src/together/generated/models/rerank_request_documents.py create mode 100644 src/together/generated/models/rerank_request_model.py create mode 100644 src/together/generated/models/rerank_response.py create mode 100644 src/together/generated/models/rerank_response_results_inner.py create mode 100644 src/together/generated/models/rerank_response_results_inner_document.py create mode 100644 src/together/generated/models/stream_sentinel.py create mode 100644 src/together/generated/models/tool_choice.py create mode 100644 src/together/generated/models/tool_choice_function.py create mode 100644 src/together/generated/models/tools_part.py create mode 100644 src/together/generated/models/tools_part_function.py create mode 100644 src/together/generated/models/update_endpoint_request.py create mode 100644 src/together/generated/models/usage_data.py create mode 100644 src/together/generated/rest.py create mode 100644 src/together/generated/test/__init__.py create mode 100644 src/together/generated/test/test_audio_api.py create mode 100644 src/together/generated/test/test_audio_speech_request.py create mode 100644 src/together/generated/test/test_audio_speech_request_model.py create mode 100644 src/together/generated/test/test_audio_speech_request_voice.py create mode 100644 src/together/generated/test/test_audio_speech_stream_chunk.py create mode 100644 src/together/generated/test/test_audio_speech_stream_event.py create mode 100644 src/together/generated/test/test_audio_speech_stream_response.py create mode 100644 src/together/generated/test/test_autoscaling.py create mode 100644 src/together/generated/test/test_chat_api.py create mode 100644 
src/together/generated/test/test_chat_completion_assistant_message_param.py create mode 100644 src/together/generated/test/test_chat_completion_choice.py create mode 100644 src/together/generated/test/test_chat_completion_choice_delta.py create mode 100644 src/together/generated/test/test_chat_completion_choice_delta_function_call.py create mode 100644 src/together/generated/test/test_chat_completion_choices_data_inner.py create mode 100644 src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py create mode 100644 src/together/generated/test/test_chat_completion_chunk.py create mode 100644 src/together/generated/test/test_chat_completion_chunk_choices_inner.py create mode 100644 src/together/generated/test/test_chat_completion_event.py create mode 100644 src/together/generated/test/test_chat_completion_function_message_param.py create mode 100644 src/together/generated/test/test_chat_completion_message.py create mode 100644 src/together/generated/test/test_chat_completion_message_function_call.py create mode 100644 src/together/generated/test/test_chat_completion_message_param.py create mode 100644 src/together/generated/test/test_chat_completion_request.py create mode 100644 src/together/generated/test/test_chat_completion_request_function_call.py create mode 100644 src/together/generated/test/test_chat_completion_request_function_call_one_of.py create mode 100644 src/together/generated/test/test_chat_completion_request_messages_inner.py create mode 100644 src/together/generated/test/test_chat_completion_request_model.py create mode 100644 src/together/generated/test/test_chat_completion_request_response_format.py create mode 100644 src/together/generated/test/test_chat_completion_request_tool_choice.py create mode 100644 src/together/generated/test/test_chat_completion_response.py create mode 100644 src/together/generated/test/test_chat_completion_stream.py create mode 100644 
src/together/generated/test/test_chat_completion_system_message_param.py create mode 100644 src/together/generated/test/test_chat_completion_token.py create mode 100644 src/together/generated/test/test_chat_completion_tool.py create mode 100644 src/together/generated/test/test_chat_completion_tool_function.py create mode 100644 src/together/generated/test/test_chat_completion_tool_message_param.py create mode 100644 src/together/generated/test/test_chat_completion_user_message_param.py create mode 100644 src/together/generated/test/test_completion_api.py create mode 100644 src/together/generated/test/test_completion_choice.py create mode 100644 src/together/generated/test/test_completion_choices_data_inner.py create mode 100644 src/together/generated/test/test_completion_chunk.py create mode 100644 src/together/generated/test/test_completion_chunk_usage.py create mode 100644 src/together/generated/test/test_completion_event.py create mode 100644 src/together/generated/test/test_completion_request.py create mode 100644 src/together/generated/test/test_completion_request_model.py create mode 100644 src/together/generated/test/test_completion_request_safety_model.py create mode 100644 src/together/generated/test/test_completion_response.py create mode 100644 src/together/generated/test/test_completion_stream.py create mode 100644 src/together/generated/test/test_completion_token.py create mode 100644 src/together/generated/test/test_create_endpoint_request.py create mode 100644 src/together/generated/test/test_dedicated_endpoint.py create mode 100644 src/together/generated/test/test_embeddings_api.py create mode 100644 src/together/generated/test/test_embeddings_request.py create mode 100644 src/together/generated/test/test_embeddings_request_input.py create mode 100644 src/together/generated/test/test_embeddings_request_model.py create mode 100644 src/together/generated/test/test_embeddings_response.py create mode 100644 
src/together/generated/test/test_embeddings_response_data_inner.py create mode 100644 src/together/generated/test/test_endpoint_pricing.py create mode 100644 src/together/generated/test/test_endpoints_api.py create mode 100644 src/together/generated/test/test_error_data.py create mode 100644 src/together/generated/test/test_error_data_error.py create mode 100644 src/together/generated/test/test_file_delete_response.py create mode 100644 src/together/generated/test/test_file_list.py create mode 100644 src/together/generated/test/test_file_object.py create mode 100644 src/together/generated/test/test_file_response.py create mode 100644 src/together/generated/test/test_files_api.py create mode 100644 src/together/generated/test/test_fine_tune_event.py create mode 100644 src/together/generated/test/test_fine_tunes_post_request.py create mode 100644 src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py create mode 100644 src/together/generated/test/test_fine_tunes_post_request_training_type.py create mode 100644 src/together/generated/test/test_fine_tuning_api.py create mode 100644 src/together/generated/test/test_finetune_download_result.py create mode 100644 src/together/generated/test/test_finetune_event_levels.py create mode 100644 src/together/generated/test/test_finetune_event_type.py create mode 100644 src/together/generated/test/test_finetune_job_status.py create mode 100644 src/together/generated/test/test_finetune_list.py create mode 100644 src/together/generated/test/test_finetune_list_events.py create mode 100644 src/together/generated/test/test_finetune_response.py create mode 100644 src/together/generated/test/test_finetune_response_train_on_inputs.py create mode 100644 src/together/generated/test/test_finish_reason.py create mode 100644 src/together/generated/test/test_full_training_type.py create mode 100644 src/together/generated/test/test_hardware_api.py create mode 100644 src/together/generated/test/test_hardware_availability.py 
create mode 100644 src/together/generated/test/test_hardware_spec.py create mode 100644 src/together/generated/test/test_hardware_with_status.py create mode 100644 src/together/generated/test/test_image_response.py create mode 100644 src/together/generated/test/test_image_response_data_inner.py create mode 100644 src/together/generated/test/test_images_api.py create mode 100644 src/together/generated/test/test_images_generations_post_request.py create mode 100644 src/together/generated/test/test_images_generations_post_request_image_loras_inner.py create mode 100644 src/together/generated/test/test_images_generations_post_request_model.py create mode 100644 src/together/generated/test/test_linear_lr_scheduler_args.py create mode 100644 src/together/generated/test/test_list_endpoint.py create mode 100644 src/together/generated/test/test_list_endpoints200_response.py create mode 100644 src/together/generated/test/test_list_hardware200_response.py create mode 100644 src/together/generated/test/test_list_hardware200_response_one_of.py create mode 100644 src/together/generated/test/test_list_hardware200_response_one_of1.py create mode 100644 src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py create mode 100644 src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py create mode 100644 src/together/generated/test/test_lo_ra_training_type.py create mode 100644 src/together/generated/test/test_logprobs_part.py create mode 100644 src/together/generated/test/test_lr_scheduler.py create mode 100644 src/together/generated/test/test_model_info.py create mode 100644 src/together/generated/test/test_models_api.py create mode 100644 src/together/generated/test/test_pricing.py create mode 100644 src/together/generated/test/test_prompt_part_inner.py create mode 100644 src/together/generated/test/test_rerank_api.py create mode 100644 src/together/generated/test/test_rerank_request.py create mode 100644 
src/together/generated/test/test_rerank_request_documents.py create mode 100644 src/together/generated/test/test_rerank_request_model.py create mode 100644 src/together/generated/test/test_rerank_response.py create mode 100644 src/together/generated/test/test_rerank_response_results_inner.py create mode 100644 src/together/generated/test/test_rerank_response_results_inner_document.py create mode 100644 src/together/generated/test/test_stream_sentinel.py create mode 100644 src/together/generated/test/test_tool_choice.py create mode 100644 src/together/generated/test/test_tool_choice_function.py create mode 100644 src/together/generated/test/test_tools_part.py create mode 100644 src/together/generated/test/test_tools_part_function.py create mode 100644 src/together/generated/test/test_update_endpoint_request.py create mode 100644 src/together/generated/test/test_usage_data.py diff --git a/.github/workflows/_integration_tests.yml b/.github/workflows/_integration_tests.yml index f20d3a68..5b6813b9 100644 --- a/.github/workflows/_integration_tests.yml +++ b/.github/workflows/_integration_tests.yml @@ -39,10 +39,6 @@ jobs: poetry-version: ${{ env.POETRY_VERSION }} cache-key: core - - name: Generate OpenAPI client - run: | - make generate-client-from-existing-spec - - name: Install dependencies shell: bash run: poetry install --with quality,tests diff --git a/.github/workflows/_tests.yml b/.github/workflows/_tests.yml index fd7fcbae..62e42844 100644 --- a/.github/workflows/_tests.yml +++ b/.github/workflows/_tests.yml @@ -39,10 +39,6 @@ jobs: poetry-version: ${{ env.POETRY_VERSION }} cache-key: core - - name: Generate OpenAPI client - run: | - make generate-client-from-existing-spec - - name: Install dependencies shell: bash run: poetry install --with quality,tests diff --git a/.gitignore b/.gitignore index c94c15ff..b6e47617 100644 --- a/.gitignore +++ b/.gitignore @@ -27,9 +27,6 @@ share/python-wheels/ *.egg MANIFEST -# OpenAPI Generator Ignore -src/together/generated/ 
- # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 0c0a3427..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1 +0,0 @@ -recursive-include src/together/generated * diff --git a/pyproject.toml b/pyproject.toml index 470100e0..6301660b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,3 @@ -include = [ - "src/together/generated/**" -] - [build-system] requires = [ "poetry", diff --git a/src/together/generated/__init__.py b/src/together/generated/__init__.py new file mode 100644 index 00000000..cf250519 --- /dev/null +++ b/src/together/generated/__init__.py @@ -0,0 +1,224 @@ +# coding: utf-8 + +# flake8: noqa + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +__version__ = "1.0.0" + +# import apis into sdk package +from together.generated.api.audio_api import AudioApi +from together.generated.api.chat_api import ChatApi +from together.generated.api.completion_api import CompletionApi +from together.generated.api.embeddings_api import EmbeddingsApi +from together.generated.api.endpoints_api import EndpointsApi +from together.generated.api.files_api import FilesApi +from together.generated.api.fine_tuning_api import FineTuningApi +from together.generated.api.hardware_api import HardwareApi +from together.generated.api.images_api import ImagesApi +from together.generated.api.models_api import ModelsApi +from together.generated.api.rerank_api import RerankApi + +# import ApiClient +from together.generated.api_response import ApiResponse +from together.generated.api_client import ApiClient +from together.generated.configuration import Configuration +from together.generated.exceptions import OpenApiException +from together.generated.exceptions import ApiTypeError +from together.generated.exceptions import ApiValueError +from together.generated.exceptions import ApiKeyError +from together.generated.exceptions import ApiAttributeError +from together.generated.exceptions import ApiException + +# import models into sdk package +from together.generated.models.audio_speech_request import AudioSpeechRequest +from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel +from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice +from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk +from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent +from together.generated.models.audio_speech_stream_response import ( + AudioSpeechStreamResponse, +) +from together.generated.models.autoscaling import Autoscaling +from together.generated.models.chat_completion_assistant_message_param import ( + 
ChatCompletionAssistantMessageParam, +) +from together.generated.models.chat_completion_choice import ChatCompletionChoice +from together.generated.models.chat_completion_choice_delta import ( + ChatCompletionChoiceDelta, +) +from together.generated.models.chat_completion_choice_delta_function_call import ( + ChatCompletionChoiceDeltaFunctionCall, +) +from together.generated.models.chat_completion_choices_data_inner import ( + ChatCompletionChoicesDataInner, +) +from together.generated.models.chat_completion_choices_data_inner_logprobs import ( + ChatCompletionChoicesDataInnerLogprobs, +) +from together.generated.models.chat_completion_chunk import ChatCompletionChunk +from together.generated.models.chat_completion_chunk_choices_inner import ( + ChatCompletionChunkChoicesInner, +) +from together.generated.models.chat_completion_event import ChatCompletionEvent +from together.generated.models.chat_completion_function_message_param import ( + ChatCompletionFunctionMessageParam, +) +from together.generated.models.chat_completion_message import ChatCompletionMessage +from together.generated.models.chat_completion_message_function_call import ( + ChatCompletionMessageFunctionCall, +) +from together.generated.models.chat_completion_message_param import ( + ChatCompletionMessageParam, +) +from together.generated.models.chat_completion_request import ChatCompletionRequest +from together.generated.models.chat_completion_request_function_call import ( + ChatCompletionRequestFunctionCall, +) +from together.generated.models.chat_completion_request_function_call_one_of import ( + ChatCompletionRequestFunctionCallOneOf, +) +from together.generated.models.chat_completion_request_messages_inner import ( + ChatCompletionRequestMessagesInner, +) +from together.generated.models.chat_completion_request_model import ( + ChatCompletionRequestModel, +) +from together.generated.models.chat_completion_request_response_format import ( + ChatCompletionRequestResponseFormat, +) +from 
together.generated.models.chat_completion_request_tool_choice import ( + ChatCompletionRequestToolChoice, +) +from together.generated.models.chat_completion_response import ChatCompletionResponse +from together.generated.models.chat_completion_stream import ChatCompletionStream +from together.generated.models.chat_completion_system_message_param import ( + ChatCompletionSystemMessageParam, +) +from together.generated.models.chat_completion_token import ChatCompletionToken +from together.generated.models.chat_completion_tool import ChatCompletionTool +from together.generated.models.chat_completion_tool_function import ( + ChatCompletionToolFunction, +) +from together.generated.models.chat_completion_tool_message_param import ( + ChatCompletionToolMessageParam, +) +from together.generated.models.chat_completion_user_message_param import ( + ChatCompletionUserMessageParam, +) +from together.generated.models.completion_choice import CompletionChoice +from together.generated.models.completion_choices_data_inner import ( + CompletionChoicesDataInner, +) +from together.generated.models.completion_chunk import CompletionChunk +from together.generated.models.completion_chunk_usage import CompletionChunkUsage +from together.generated.models.completion_event import CompletionEvent +from together.generated.models.completion_request import CompletionRequest +from together.generated.models.completion_request_model import CompletionRequestModel +from together.generated.models.completion_request_safety_model import ( + CompletionRequestSafetyModel, +) +from together.generated.models.completion_response import CompletionResponse +from together.generated.models.completion_stream import CompletionStream +from together.generated.models.completion_token import CompletionToken +from together.generated.models.create_endpoint_request import CreateEndpointRequest +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.embeddings_request 
import EmbeddingsRequest +from together.generated.models.embeddings_request_input import EmbeddingsRequestInput +from together.generated.models.embeddings_request_model import EmbeddingsRequestModel +from together.generated.models.embeddings_response import EmbeddingsResponse +from together.generated.models.embeddings_response_data_inner import ( + EmbeddingsResponseDataInner, +) +from together.generated.models.endpoint_pricing import EndpointPricing +from together.generated.models.error_data import ErrorData +from together.generated.models.error_data_error import ErrorDataError +from together.generated.models.file_delete_response import FileDeleteResponse +from together.generated.models.file_list import FileList +from together.generated.models.file_object import FileObject +from together.generated.models.file_response import FileResponse +from together.generated.models.fine_tune_event import FineTuneEvent +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest +from together.generated.models.fine_tunes_post_request_train_on_inputs import ( + FineTunesPostRequestTrainOnInputs, +) +from together.generated.models.fine_tunes_post_request_training_type import ( + FineTunesPostRequestTrainingType, +) +from together.generated.models.finetune_download_result import FinetuneDownloadResult +from together.generated.models.finetune_event_levels import FinetuneEventLevels +from together.generated.models.finetune_event_type import FinetuneEventType +from together.generated.models.finetune_job_status import FinetuneJobStatus +from together.generated.models.finetune_list import FinetuneList +from together.generated.models.finetune_list_events import FinetuneListEvents +from together.generated.models.finetune_response import FinetuneResponse +from together.generated.models.finetune_response_train_on_inputs import ( + FinetuneResponseTrainOnInputs, +) +from together.generated.models.finish_reason import FinishReason +from 
together.generated.models.full_training_type import FullTrainingType +from together.generated.models.hardware_availability import HardwareAvailability +from together.generated.models.hardware_spec import HardwareSpec +from together.generated.models.hardware_with_status import HardwareWithStatus +from together.generated.models.image_response import ImageResponse +from together.generated.models.image_response_data_inner import ImageResponseDataInner +from together.generated.models.images_generations_post_request import ( + ImagesGenerationsPostRequest, +) +from together.generated.models.images_generations_post_request_image_loras_inner import ( + ImagesGenerationsPostRequestImageLorasInner, +) +from together.generated.models.images_generations_post_request_model import ( + ImagesGenerationsPostRequestModel, +) +from together.generated.models.lr_scheduler import LRScheduler +from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs +from together.generated.models.list_endpoint import ListEndpoint +from together.generated.models.list_endpoints200_response import ( + ListEndpoints200Response, +) +from together.generated.models.list_hardware200_response import ListHardware200Response +from together.generated.models.list_hardware200_response_one_of import ( + ListHardware200ResponseOneOf, +) +from together.generated.models.list_hardware200_response_one_of1 import ( + ListHardware200ResponseOneOf1, +) +from together.generated.models.list_hardware200_response_one_of1_data_inner import ( + ListHardware200ResponseOneOf1DataInner, +) +from together.generated.models.list_hardware200_response_one_of_data_inner import ( + ListHardware200ResponseOneOfDataInner, +) +from together.generated.models.lo_ra_training_type import LoRATrainingType +from together.generated.models.logprobs_part import LogprobsPart +from together.generated.models.model_info import ModelInfo +from together.generated.models.pricing import Pricing +from 
together.generated.models.prompt_part_inner import PromptPartInner +from together.generated.models.rerank_request import RerankRequest +from together.generated.models.rerank_request_documents import RerankRequestDocuments +from together.generated.models.rerank_request_model import RerankRequestModel +from together.generated.models.rerank_response import RerankResponse +from together.generated.models.rerank_response_results_inner import ( + RerankResponseResultsInner, +) +from together.generated.models.rerank_response_results_inner_document import ( + RerankResponseResultsInnerDocument, +) +from together.generated.models.stream_sentinel import StreamSentinel +from together.generated.models.tool_choice import ToolChoice +from together.generated.models.tool_choice_function import ToolChoiceFunction +from together.generated.models.tools_part import ToolsPart +from together.generated.models.tools_part_function import ToolsPartFunction +from together.generated.models.update_endpoint_request import UpdateEndpointRequest +from together.generated.models.usage_data import UsageData diff --git a/src/together/generated/api/__init__.py b/src/together/generated/api/__init__.py new file mode 100644 index 00000000..50f8b438 --- /dev/null +++ b/src/together/generated/api/__init__.py @@ -0,0 +1,14 @@ +# flake8: noqa + +# import apis into api package +from together.generated.api.audio_api import AudioApi +from together.generated.api.chat_api import ChatApi +from together.generated.api.completion_api import CompletionApi +from together.generated.api.embeddings_api import EmbeddingsApi +from together.generated.api.endpoints_api import EndpointsApi +from together.generated.api.files_api import FilesApi +from together.generated.api.fine_tuning_api import FineTuningApi +from together.generated.api.hardware_api import HardwareApi +from together.generated.api.images_api import ImagesApi +from together.generated.api.models_api import ModelsApi +from together.generated.api.rerank_api import 
RerankApi diff --git a/src/together/generated/api/audio_api.py b/src/together/generated/api/audio_api.py new file mode 100644 index 00000000..f242af24 --- /dev/null +++ b/src/together/generated/api/audio_api.py @@ -0,0 +1,302 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from pydantic import StrictBytes, StrictStr +from typing import Optional, Tuple, Union +from together.generated.models.audio_speech_request import AudioSpeechRequest + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class AudioApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def audio_speech( + self, + audio_speech_request: Optional[AudioSpeechRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> bytearray: + """Create audio generation request + + Generate audio from input text + + :param audio_speech_request: + :type audio_speech_request: AudioSpeechRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._audio_speech_serialize( + audio_speech_request=audio_speech_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "bytearray", + "400": "ErrorData", + "429": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def audio_speech_with_http_info( + self, + audio_speech_request: Optional[AudioSpeechRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[bytearray]: + """Create audio generation request + + Generate audio from input text + + :param audio_speech_request: + :type audio_speech_request: AudioSpeechRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._audio_speech_serialize( + audio_speech_request=audio_speech_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "bytearray", + "400": "ErrorData", + "429": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def audio_speech_without_preload_content( + self, + audio_speech_request: Optional[AudioSpeechRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create audio generation request + + Generate audio from input text + + :param audio_speech_request: + :type audio_speech_request: AudioSpeechRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._audio_speech_serialize( + audio_speech_request=audio_speech_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "bytearray", + "400": "ErrorData", + "429": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _audio_speech_serialize( + self, + audio_speech_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if audio_speech_request is not None: + _body_params = audio_speech_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + [ + "application/octet-stream", + "audio/wav", + "audio/mpeg", + 
"text/event-stream", + "application/json", + ] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/audio/speech", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/chat_api.py b/src/together/generated/api/chat_api.py new file mode 100644 index 00000000..9c4bb818 --- /dev/null +++ b/src/together/generated/api/chat_api.py @@ -0,0 +1,308 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from typing import Optional +from together.generated.models.chat_completion_request import ChatCompletionRequest +from together.generated.models.chat_completion_response import ChatCompletionResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class ChatApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def chat_completions( + self, + chat_completion_request: Optional[ChatCompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ChatCompletionResponse: + """Create chat completion + + Query a chat model. + + :param chat_completion_request: + :type chat_completion_request: ChatCompletionRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._chat_completions_serialize( + chat_completion_request=chat_completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ChatCompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def chat_completions_with_http_info( + self, + chat_completion_request: Optional[ChatCompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, 
+ ) -> ApiResponse[ChatCompletionResponse]: + """Create chat completion + + Query a chat model. + + :param chat_completion_request: + :type chat_completion_request: ChatCompletionRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._chat_completions_serialize( + chat_completion_request=chat_completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ChatCompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def chat_completions_without_preload_content( + self, + chat_completion_request: Optional[ChatCompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create chat completion + + Query a chat model. + + :param chat_completion_request: + :type chat_completion_request: ChatCompletionRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._chat_completions_serialize( + chat_completion_request=chat_completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ChatCompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _chat_completions_serialize( + self, + chat_completion_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if chat_completion_request is not None: + _body_params = chat_completion_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json", 
"text/event-stream"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/chat/completions", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/completion_api.py b/src/together/generated/api/completion_api.py new file mode 100644 index 00000000..73f5e7fb --- /dev/null +++ b/src/together/generated/api/completion_api.py @@ -0,0 +1,308 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from typing import Optional +from together.generated.models.completion_request import CompletionRequest +from together.generated.models.completion_response import CompletionResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class CompletionApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def completions( + self, + completion_request: Optional[CompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> CompletionResponse: + """Create completion + + Query a language, code, or image model. + + :param completion_request: + :type completion_request: CompletionRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._completions_serialize( + completion_request=completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "CompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def completions_with_http_info( + self, + completion_request: Optional[CompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[CompletionResponse]: + """Create completion + + Query a language, code, or image model. + + :param completion_request: + :type completion_request: CompletionRequest + :param _request_timeout: timeout setting for this request. 
If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._completions_serialize( + completion_request=completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "CompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def completions_without_preload_content( + self, + completion_request: Optional[CompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: 
Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create completion + + Query a language, code, or image model. + + :param completion_request: + :type completion_request: CompletionRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._completions_serialize( + completion_request=completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "CompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _completions_serialize( + self, + completion_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if completion_request is not None: + _body_params = completion_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json", "text/event-stream"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + 
resource_path="/completions", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/embeddings_api.py b/src/together/generated/api/embeddings_api.py new file mode 100644 index 00000000..e2dea123 --- /dev/null +++ b/src/together/generated/api/embeddings_api.py @@ -0,0 +1,308 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from typing import Optional +from together.generated.models.embeddings_request import EmbeddingsRequest +from together.generated.models.embeddings_response import EmbeddingsResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class EmbeddingsApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def embeddings( + self, + embeddings_request: Optional[EmbeddingsRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> EmbeddingsResponse: + """Create embedding + + Query an embedding model for a given string of text. + + :param embeddings_request: + :type embeddings_request: EmbeddingsRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._embeddings_serialize( + embeddings_request=embeddings_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "EmbeddingsResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def embeddings_with_http_info( + self, + embeddings_request: Optional[EmbeddingsRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[EmbeddingsResponse]: + """Create embedding + + Query an embedding model for a given string of text. + + :param embeddings_request: + :type embeddings_request: EmbeddingsRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._embeddings_serialize( + embeddings_request=embeddings_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "EmbeddingsResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def embeddings_without_preload_content( + self, + embeddings_request: Optional[EmbeddingsRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create embedding + + Query an embedding model for a given string of text. + + :param embeddings_request: + :type embeddings_request: EmbeddingsRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. 
It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._embeddings_serialize( + embeddings_request=embeddings_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "EmbeddingsResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _embeddings_serialize( + self, + embeddings_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # 
process the header parameters + # process the form parameters + # process the body parameter + if embeddings_request is not None: + _body_params = embeddings_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/embeddings", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/endpoints_api.py b/src/together/generated/api/endpoints_api.py new file mode 100644 index 00000000..70c8824a --- /dev/null +++ b/src/together/generated/api/endpoints_api.py @@ -0,0 +1,1354 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from pydantic import Field, StrictStr, field_validator +from typing import Optional +from typing_extensions import Annotated +from together.generated.models.create_endpoint_request import CreateEndpointRequest +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.list_endpoints200_response import ( + ListEndpoints200Response, +) +from together.generated.models.update_endpoint_request import UpdateEndpointRequest + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class EndpointsApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def create_endpoint( + self, + create_endpoint_request: CreateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> DedicatedEndpoint: + """Create a dedicated endpoint, it will start automatically + + Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. 
+ + :param create_endpoint_request: (required) + :type create_endpoint_request: CreateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._create_endpoint_serialize( + create_endpoint_request=create_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def create_endpoint_with_http_info( + self, + create_endpoint_request: CreateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[DedicatedEndpoint]: + """Create a dedicated endpoint, it will start automatically + + Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. + + :param create_endpoint_request: (required) + :type create_endpoint_request: CreateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._create_endpoint_serialize( + create_endpoint_request=create_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def create_endpoint_without_preload_content( + self, + create_endpoint_request: CreateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create a dedicated endpoint, it will start automatically + + Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. 
+ + :param create_endpoint_request: (required) + :type create_endpoint_request: CreateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._create_endpoint_serialize( + create_endpoint_request=create_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _create_endpoint_serialize( + self, + create_endpoint_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if create_endpoint_request is not None: + _body_params = create_endpoint_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/endpoints", + path_params=_path_params, + query_params=_query_params, + 
header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def delete_endpoint( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to delete") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> None: + """Delete endpoint + + Permanently deletes an endpoint. This action cannot be undone. + + :param endpoint_id: The ID of the endpoint to delete (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._delete_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "204": None, + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def delete_endpoint_with_http_info( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to delete") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[None]: + """Delete endpoint + + Permanently deletes an endpoint. This action cannot be undone. + + :param endpoint_id: The ID of the endpoint to delete (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._delete_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "204": None, + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def delete_endpoint_without_preload_content( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to delete") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Delete endpoint + + Permanently deletes an endpoint. This action cannot be undone. + + :param endpoint_id: The ID of the endpoint to delete (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._delete_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "204": None, + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _delete_endpoint_serialize( + self, + endpoint_id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if endpoint_id is not None: + _path_params["endpointId"] = endpoint_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body 
parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="DELETE", + resource_path="/endpoints/{endpointId}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def get_endpoint( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to retrieve") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> DedicatedEndpoint: + """Get endpoint by ID + + Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. + + :param endpoint_id: The ID of the endpoint to retrieve (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._get_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def get_endpoint_with_http_info( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to retrieve") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[DedicatedEndpoint]: + """Get endpoint by ID + + Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. + + :param endpoint_id: The ID of the endpoint to retrieve (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. 
If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._get_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def get_endpoint_without_preload_content( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to retrieve") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, 
+ _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Get endpoint by ID + + Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. + + :param endpoint_id: The ID of the endpoint to retrieve (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._get_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _get_endpoint_serialize( + self, + endpoint_id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if endpoint_id is not None: + _path_params["endpointId"] = endpoint_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/endpoints/{endpointId}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def list_endpoints( + self, + type: Annotated[ + Optional[StrictStr], Field(description="Filter 
endpoints by type") + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ListEndpoints200Response: + """List all endpoints, can be filtered by type + + Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). + + :param type: Filter endpoints by type + :type type: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._list_endpoints_serialize( + type=type, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListEndpoints200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def list_endpoints_with_http_info( + self, + type: Annotated[ + Optional[StrictStr], Field(description="Filter endpoints by type") + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[ListEndpoints200Response]: + """List all endpoints, can be filtered by type + + Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). + + :param type: Filter endpoints by type + :type type: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._list_endpoints_serialize( + type=type, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListEndpoints200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def list_endpoints_without_preload_content( + self, + type: Annotated[ + Optional[StrictStr], Field(description="Filter endpoints by type") + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List all endpoints, can be filtered by type + + Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). + + :param type: Filter endpoints by type + :type type: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. 
It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._list_endpoints_serialize( + type=type, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListEndpoints200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _list_endpoints_serialize( + self, + type, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + if type is not None: + + _query_params.append(("type", type)) + + # process the header parameters + # process the form 
parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/endpoints", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def update_endpoint( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to update") + ], + update_endpoint_request: UpdateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> DedicatedEndpoint: + """Update endpoint, this can also be used to start or stop a dedicated endpoint + + Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). + + :param endpoint_id: The ID of the endpoint to update (required) + :type endpoint_id: str + :param update_endpoint_request: (required) + :type update_endpoint_request: UpdateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._update_endpoint_serialize( + endpoint_id=endpoint_id, + update_endpoint_request=update_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def update_endpoint_with_http_info( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to update") + ], + update_endpoint_request: UpdateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: 
Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[DedicatedEndpoint]: + """Update endpoint, this can also be used to start or stop a dedicated endpoint + + Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). + + :param endpoint_id: The ID of the endpoint to update (required) + :type endpoint_id: str + :param update_endpoint_request: (required) + :type update_endpoint_request: UpdateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._update_endpoint_serialize( + endpoint_id=endpoint_id, + update_endpoint_request=update_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def update_endpoint_without_preload_content( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to update") + ], + update_endpoint_request: UpdateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Update endpoint, this can also be used to start or stop a dedicated endpoint + + Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). + + :param endpoint_id: The ID of the endpoint to update (required) + :type endpoint_id: str + :param update_endpoint_request: (required) + :type update_endpoint_request: UpdateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._update_endpoint_serialize( + endpoint_id=endpoint_id, + update_endpoint_request=update_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _update_endpoint_serialize( + self, + endpoint_id, + update_endpoint_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if endpoint_id is not None: + _path_params["endpointId"] = endpoint_id + # process the query parameters 
+ # process the header parameters + # process the form parameters + # process the body parameter + if update_endpoint_request is not None: + _body_params = update_endpoint_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="PATCH", + resource_path="/endpoints/{endpointId}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/files_api.py b/src/together/generated/api/files_api.py new file mode 100644 index 00000000..1981fc9e --- /dev/null +++ b/src/together/generated/api/files_api.py @@ -0,0 +1,996 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from pydantic import StrictStr +from together.generated.models.file_delete_response import FileDeleteResponse +from together.generated.models.file_list import FileList +from together.generated.models.file_object import FileObject +from together.generated.models.file_response import FileResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class FilesApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def files_get( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FileList: + """List all files + + List the metadata for all uploaded data files. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def files_get_with_http_info( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FileList]: + """List all files + + List the metadata for all uploaded data files. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def files_get_without_preload_content( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List all files + + List the metadata for all uploaded data files. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. 
It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _files_get_serialize( + self, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = 
self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/files", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def files_id_content_get( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FileObject: + """Get file contents + + Get the contents of a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. 
+ :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_content_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileObject", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def files_id_content_get_with_http_info( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FileObject]: + """Get file contents + + Get the contents of a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_content_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileObject", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def files_id_content_get_without_preload_content( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Get file contents + + Get the contents of a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_content_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileObject", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _files_id_content_get_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/files/{id}/content", + path_params=_path_params, + 
query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def files_id_delete( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FileDeleteResponse: + """Delete a file + + Delete a previously uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._files_id_delete_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileDeleteResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def files_id_delete_with_http_info( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FileDeleteResponse]: + """Delete a file + + Delete a previously uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_delete_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileDeleteResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def files_id_delete_without_preload_content( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Delete a file + + Delete a previously uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_delete_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileDeleteResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _files_id_delete_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="DELETE", + resource_path="/files/{id}", + path_params=_path_params, + query_params=_query_params, + 
header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def files_id_get( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FileResponse: + """List file + + List the metadata for a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._files_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def files_id_get_with_http_info( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FileResponse]: + """List file + + List the metadata for a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def files_id_get_without_preload_content( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List file + + List the metadata for a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _files_id_get_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/files/{id}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + 
body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/fine_tuning_api.py b/src/together/generated/api/fine_tuning_api.py new file mode 100644 index 00000000..d25eb662 --- /dev/null +++ b/src/together/generated/api/fine_tuning_api.py @@ -0,0 +1,1630 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from pydantic import Field, StrictInt, StrictStr, field_validator +from typing import Optional +from typing_extensions import Annotated +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest +from together.generated.models.finetune_download_result import FinetuneDownloadResult +from together.generated.models.finetune_list import FinetuneList +from together.generated.models.finetune_list_events import FinetuneListEvents +from together.generated.models.finetune_response import FinetuneResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class FineTuningApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def fine_tunes_get( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneList: + """List all jobs + + List the metadata for all fine-tuning jobs. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._fine_tunes_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def fine_tunes_get_with_http_info( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneList]: + """List all jobs + + List the metadata for all fine-tuning jobs. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. 
+ :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def fine_tunes_get_without_preload_content( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List all jobs + + List the metadata for all fine-tuning jobs. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _fine_tunes_get_serialize( + self, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/fine-tunes", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def fine_tunes_id_cancel_post( + 
self, + id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to cancel. A string that starts with `ft-`." + ), + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneResponse: + """Cancel job + + Cancel a currently running fine-tuning job. + + :param id: Fine-tune ID to cancel. A string that starts with `ft-`. (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._fine_tunes_id_cancel_post_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def fine_tunes_id_cancel_post_with_http_info( + self, + id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to cancel. A string that starts with `ft-`." + ), + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneResponse]: + """Cancel job + + Cancel a currently running fine-tuning job. + + :param id: Fine-tune ID to cancel. A string that starts with `ft-`. (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_cancel_post_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def fine_tunes_id_cancel_post_without_preload_content( + self, + id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to cancel. A string that starts with `ft-`." + ), + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Cancel job + + Cancel a currently running fine-tuning job. + + :param id: Fine-tune ID to cancel. A string that starts with `ft-`. (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_cancel_post_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _fine_tunes_id_cancel_post_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" 
not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/fine-tunes/{id}/cancel", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def fine_tunes_id_events_get( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneListEvents: + """List job events + + List the events for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_events_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneListEvents", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def fine_tunes_id_events_get_with_http_info( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneListEvents]: + """List job events + + List the events for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_events_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneListEvents", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def fine_tunes_id_events_get_without_preload_content( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List job events + + List the events for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_events_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneListEvents", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _fine_tunes_id_events_get_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + 
_header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/fine-tunes/{id}/events", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def fine_tunes_id_get( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneResponse: + """List job + + List the metadata for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def fine_tunes_id_get_with_http_info( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneResponse]: + """List job + + List the metadata for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def fine_tunes_id_get_without_preload_content( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List job + + List the metadata for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _fine_tunes_id_get_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + 
resource_path="/fine-tunes/{id}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def fine_tunes_post( + self, + fine_tunes_post_request: FineTunesPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneResponse: + """Create job + + Use a model to create a fine-tuning job. + + :param fine_tunes_post_request: (required) + :type fine_tunes_post_request: FineTunesPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._fine_tunes_post_serialize( + fine_tunes_post_request=fine_tunes_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def fine_tunes_post_with_http_info( + self, + fine_tunes_post_request: FineTunesPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneResponse]: + """Create job + + Use a model to create a fine-tuning job. + + :param fine_tunes_post_request: (required) + :type fine_tunes_post_request: FineTunesPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_post_serialize( + fine_tunes_post_request=fine_tunes_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def fine_tunes_post_without_preload_content( + self, + fine_tunes_post_request: FineTunesPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create job + + Use a model to create a fine-tuning job. + + :param fine_tunes_post_request: (required) + :type fine_tunes_post_request: FineTunesPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_post_serialize( + fine_tunes_post_request=fine_tunes_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _fine_tunes_post_serialize( + self, + fine_tunes_post_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if fine_tunes_post_request is not None: + _body_params = fine_tunes_post_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if 
_content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/fine-tunes", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def finetune_download_get( + self, + ft_id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to download. A string that starts with `ft-`." + ), + ], + checkpoint_step: Annotated[ + Optional[StrictInt], + Field( + description="Specifies step number for checkpoint to download. Ignores `checkpoint` value if set." + ), + ] = None, + checkpoint: Annotated[ + Optional[StrictStr], + Field( + description="Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set." + ), + ] = None, + output: Annotated[ + Optional[StrictStr], + Field( + description="Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`." + ), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneDownloadResult: + """Download model + + Download a compressed fine-tuned model or checkpoint to local disk. 
+ + :param ft_id: Fine-tune ID to download. A string that starts with `ft-`. (required) + :type ft_id: str + :param checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. + :type checkpoint_step: int + :param checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. + :type checkpoint: str + :param output: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. + :type output: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._finetune_download_get_serialize( + ft_id=ft_id, + checkpoint_step=checkpoint_step, + checkpoint=checkpoint, + output=output, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneDownloadResult", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def finetune_download_get_with_http_info( + self, + ft_id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to download. A string that starts with `ft-`." + ), + ], + checkpoint_step: Annotated[ + Optional[StrictInt], + Field( + description="Specifies step number for checkpoint to download. Ignores `checkpoint` value if set." + ), + ] = None, + checkpoint: Annotated[ + Optional[StrictStr], + Field( + description="Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set." + ), + ] = None, + output: Annotated[ + Optional[StrictStr], + Field( + description="Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`." + ), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneDownloadResult]: + """Download model + + Download a compressed fine-tuned model or checkpoint to local disk. + + :param ft_id: Fine-tune ID to download. 
A string that starts with `ft-`. (required) + :type ft_id: str + :param checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. + :type checkpoint_step: int + :param checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. + :type checkpoint: str + :param output: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. + :type output: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._finetune_download_get_serialize( + ft_id=ft_id, + checkpoint_step=checkpoint_step, + checkpoint=checkpoint, + output=output, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneDownloadResult", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def finetune_download_get_without_preload_content( + self, + ft_id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to download. A string that starts with `ft-`." + ), + ], + checkpoint_step: Annotated[ + Optional[StrictInt], + Field( + description="Specifies step number for checkpoint to download. Ignores `checkpoint` value if set." + ), + ] = None, + checkpoint: Annotated[ + Optional[StrictStr], + Field( + description="Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set." + ), + ] = None, + output: Annotated[ + Optional[StrictStr], + Field( + description="Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`." + ), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Download model + + Download a compressed fine-tuned model or checkpoint to local disk. + + :param ft_id: Fine-tune ID to download. 
A string that starts with `ft-`. (required) + :type ft_id: str + :param checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. + :type checkpoint_step: int + :param checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. + :type checkpoint: str + :param output: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. + :type output: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._finetune_download_get_serialize( + ft_id=ft_id, + checkpoint_step=checkpoint_step, + checkpoint=checkpoint, + output=output, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneDownloadResult", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _finetune_download_get_serialize( + self, + ft_id, + checkpoint_step, + checkpoint, + output, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + if ft_id is not None: + + _query_params.append(("ft_id", ft_id)) + + if checkpoint_step is not None: + + _query_params.append(("checkpoint_step", checkpoint_step)) + + if checkpoint is not None: + + _query_params.append(("checkpoint", checkpoint)) + + if output is not None: + + _query_params.append(("output", output)) + + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/finetune/download", + path_params=_path_params, + query_params=_query_params, + 
header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/hardware_api.py b/src/together/generated/api/hardware_api.py new file mode 100644 index 00000000..0f8e78fd --- /dev/null +++ b/src/together/generated/api/hardware_api.py @@ -0,0 +1,298 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from pydantic import Field, StrictStr +from typing import Optional +from typing_extensions import Annotated +from together.generated.models.list_hardware200_response import ListHardware200Response + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class HardwareApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def list_hardware( + self, + model: Annotated[ + Optional[StrictStr], + Field(description="Filter hardware configurations by model compatibility"), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ListHardware200Response: + """List available hardware configurations + + Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + + :param model: Filter hardware configurations by model compatibility + :type model: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. 
+ :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._list_hardware_serialize( + model=model, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListHardware200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def list_hardware_with_http_info( + self, + model: Annotated[ + Optional[StrictStr], + Field(description="Filter hardware configurations by model compatibility"), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[ListHardware200Response]: + """List available hardware configurations + + Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + + :param model: Filter hardware configurations by model compatibility + :type model: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._list_hardware_serialize( + model=model, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListHardware200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def list_hardware_without_preload_content( + self, + model: Annotated[ + Optional[StrictStr], + Field(description="Filter hardware configurations by model compatibility"), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List available hardware 
configurations + + Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + + :param model: Filter hardware configurations by model compatibility + :type model: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._list_hardware_serialize( + model=model, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListHardware200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _list_hardware_serialize( + self, + model, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + if model is not None: + + _query_params.append(("model", model)) + + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/hardware", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/images_api.py b/src/together/generated/api/images_api.py new file mode 100644 index 00000000..a61365e8 --- /dev/null +++ 
b/src/together/generated/api/images_api.py @@ -0,0 +1,291 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from together.generated.models.image_response import ImageResponse +from together.generated.models.images_generations_post_request import ( + ImagesGenerationsPostRequest, +) + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class ImagesApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def images_generations_post( + self, + images_generations_post_request: ImagesGenerationsPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ImageResponse: + """Create image + + Use an image model to generate an image for a given prompt. 
+ + :param images_generations_post_request: (required) + :type images_generations_post_request: ImagesGenerationsPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._images_generations_post_serialize( + images_generations_post_request=images_generations_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ImageResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def images_generations_post_with_http_info( + self, + images_generations_post_request: ImagesGenerationsPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[ImageResponse]: + """Create image + + Use an image model to generate an image for a given prompt. + + :param images_generations_post_request: (required) + :type images_generations_post_request: ImagesGenerationsPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._images_generations_post_serialize( + images_generations_post_request=images_generations_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ImageResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def images_generations_post_without_preload_content( + self, + images_generations_post_request: ImagesGenerationsPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create image + + Use an image model to generate an image for a given prompt. + + :param images_generations_post_request: (required) + :type images_generations_post_request: ImagesGenerationsPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._images_generations_post_serialize( + images_generations_post_request=images_generations_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ImageResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _images_generations_post_serialize( + self, + images_generations_post_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if images_generations_post_request is not 
None: + _body_params = images_generations_post_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/images/generations", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/models_api.py b/src/together/generated/api/models_api.py new file mode 100644 index 00000000..bb2d0e7b --- /dev/null +++ b/src/together/generated/api/models_api.py @@ -0,0 +1,279 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from typing import List +from together.generated.models.model_info import ModelInfo + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class ModelsApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def models( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> List[ModelInfo]: + """List all models + + Lists all of Together's open-source models + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._models_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "List[ModelInfo]", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def models_with_http_info( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[List[ModelInfo]]: + """List all models + + Lists all of Together's open-source models + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._models_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "List[ModelInfo]", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def models_without_preload_content( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List all models + + Lists all of Together's open-source models + + :param _request_timeout: timeout setting for this request. 
If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._models_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "List[ModelInfo]", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _models_serialize( + self, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # 
process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/models", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/rerank_api.py b/src/together/generated/api/rerank_api.py new file mode 100644 index 00000000..2b4f99ae --- /dev/null +++ b/src/together/generated/api/rerank_api.py @@ -0,0 +1,308 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from typing import Optional +from together.generated.models.rerank_request import RerankRequest +from together.generated.models.rerank_response import RerankResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class RerankApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def rerank( + self, + rerank_request: Optional[RerankRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RerankResponse: + """Create a rerank request + + Query a reranker model + + :param rerank_request: + :type rerank_request: RerankRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._rerank_serialize( + rerank_request=rerank_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RerankResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def rerank_with_http_info( + self, + rerank_request: Optional[RerankRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[RerankResponse]: + """Create a rerank request + + Query a reranker model + + :param rerank_request: + :type rerank_request: RerankRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._rerank_serialize( + rerank_request=rerank_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RerankResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def rerank_without_preload_content( + self, + rerank_request: Optional[RerankRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create a rerank request + + Query a reranker model + + :param rerank_request: + :type rerank_request: RerankRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._rerank_serialize( + rerank_request=rerank_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RerankResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _rerank_serialize( + self, + rerank_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter 
+ if rerank_request is not None: + _body_params = rerank_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/rerank", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api_client.py b/src/together/generated/api_client.py new file mode 100644 index 00000000..bfdac8dc --- /dev/null +++ b/src/together/generated/api_client.py @@ -0,0 +1,758 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import datetime +from dateutil.parser import parse +from enum import Enum +import decimal +import json +import mimetypes +import os +import re +import tempfile + +from urllib.parse import quote +from typing import Tuple, Optional, List, Dict, Union +from pydantic import SecretStr + +from together.generated.configuration import Configuration +from together.generated.api_response import ApiResponse, T as ApiResponseT +import together.generated.models +from together.generated import rest +from together.generated.exceptions import ( + ApiValueError, + ApiException, + BadRequestException, + UnauthorizedException, + ForbiddenException, + NotFoundException, + ServiceException, +) + +RequestSerialized = Tuple[str, str, Dict[str, str], Optional[str], List[str]] + + +class ApiClient: + """Generic API client for OpenAPI client library builds. + + OpenAPI generic API client. This client handles the client- + server communication, and is invariant across implementations. Specifics of + the methods and models for each application are generated from the OpenAPI + templates. + + :param configuration: .Configuration object for this client + :param header_name: a header to pass when making calls to the API. + :param header_value: a header value to pass when making calls to + the API. + :param cookie: a cookie to include in the header when making calls + to the API + """ + + PRIMITIVE_TYPES = (float, bool, bytes, str, int) + NATIVE_TYPES_MAPPING = { + "int": int, + "long": int, # TODO remove as only py3 is supported? 
+ "float": float, + "str": str, + "bool": bool, + "date": datetime.date, + "datetime": datetime.datetime, + "decimal": decimal.Decimal, + "object": object, + } + _pool = None + + def __init__( + self, configuration=None, header_name=None, header_value=None, cookie=None + ) -> None: + # use default configuration if none is provided + if configuration is None: + configuration = Configuration.get_default() + self.configuration = configuration + + self.rest_client = rest.RESTClientObject(configuration) + self.default_headers = {} + if header_name is not None: + self.default_headers[header_name] = header_value + self.cookie = cookie + # Set default User-Agent. + self.user_agent = "OpenAPI-Generator/1.0.0/python" + self.client_side_validation = configuration.client_side_validation + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + await self.close() + + async def close(self): + await self.rest_client.close() + + @property + def user_agent(self): + """User agent for this API client""" + return self.default_headers["User-Agent"] + + @user_agent.setter + def user_agent(self, value): + self.default_headers["User-Agent"] = value + + def set_default_header(self, header_name, header_value): + self.default_headers[header_name] = header_value + + _default = None + + @classmethod + def get_default(cls): + """Return new instance of ApiClient. + + This method returns newly created, based on default constructor, + object of ApiClient class or returns a copy of default + ApiClient. + + :return: The ApiClient object. + """ + if cls._default is None: + cls._default = ApiClient() + return cls._default + + @classmethod + def set_default(cls, default): + """Set default instance of ApiClient. + + It stores default ApiClient. + + :param default: object of ApiClient. 
+ """ + cls._default = default + + def param_serialize( + self, + method, + resource_path, + path_params=None, + query_params=None, + header_params=None, + body=None, + post_params=None, + files=None, + auth_settings=None, + collection_formats=None, + _host=None, + _request_auth=None, + ) -> RequestSerialized: + """Builds the HTTP request params needed by the request. + :param method: Method to call. + :param resource_path: Path to method endpoint. + :param path_params: Path parameters in the url. + :param query_params: Query parameters in the url. + :param header_params: Header parameters to be + placed in the request header. + :param body: Request body. + :param post_params dict: Request post form parameters, + for `application/x-www-form-urlencoded`, `multipart/form-data`. + :param auth_settings list: Auth Settings names for the request. + :param files dict: key -> filename, value -> filepath, + for `multipart/form-data`. + :param collection_formats: dict of collection formats for path, query, + header, and post parameters. + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the authentication + in the spec for a single request. 
+ :return: tuple of form (path, http_method, query_params, header_params, + body, post_params, files) + """ + + config = self.configuration + + # header parameters + header_params = header_params or {} + header_params.update(self.default_headers) + if self.cookie: + header_params["Cookie"] = self.cookie + if header_params: + header_params = self.sanitize_for_serialization(header_params) + header_params = dict( + self.parameters_to_tuples(header_params, collection_formats) + ) + + # path parameters + if path_params: + path_params = self.sanitize_for_serialization(path_params) + path_params = self.parameters_to_tuples(path_params, collection_formats) + for k, v in path_params: + # specified safe chars, encode everything + resource_path = resource_path.replace( + "{%s}" % k, quote(str(v), safe=config.safe_chars_for_path_param) + ) + + # post parameters + if post_params or files: + post_params = post_params if post_params else [] + post_params = self.sanitize_for_serialization(post_params) + post_params = self.parameters_to_tuples(post_params, collection_formats) + if files: + post_params.extend(self.files_parameters(files)) + + # auth setting + self.update_params_for_auth( + header_params, + query_params, + auth_settings, + resource_path, + method, + body, + request_auth=_request_auth, + ) + + # body + if body: + body = self.sanitize_for_serialization(body) + + # request url + if _host is None or self.configuration.ignore_operation_servers: + url = self.configuration.host + resource_path + else: + # use server/host defined in path or operation instead + url = _host + resource_path + + # query parameters + if query_params: + query_params = self.sanitize_for_serialization(query_params) + url_query = self.parameters_to_url_query(query_params, collection_formats) + url += "?" 
+ url_query
+
+        return method, url, header_params, body, post_params
+
+    async def call_api(
+        self,
+        method,
+        url,
+        header_params=None,
+        body=None,
+        post_params=None,
+        _request_timeout=None,
+    ) -> rest.RESTResponse:
+        """Makes the HTTP request (asynchronous)
+        :param method: Method to call.
+        :param url: Path to method endpoint.
+        :param header_params: Header parameters to be
+            placed in the request header.
+        :param body: Request body.
+        :param post_params dict: Request post form parameters,
+            for `application/x-www-form-urlencoded`, `multipart/form-data`.
+        :param _request_timeout: timeout setting for this request.
+        :return: RESTResponse
+        """
+
+        try:
+            # perform request and return response
+            response_data = await self.rest_client.request(
+                method,
+                url,
+                headers=header_params,
+                body=body,
+                post_params=post_params,
+                _request_timeout=_request_timeout,
+            )
+
+        except ApiException as e:
+            raise e
+
+        return response_data
+
+    def response_deserialize(
+        self,
+        response_data: rest.RESTResponse,
+        response_types_map: Optional[Dict[str, ApiResponseT]] = None,
+    ) -> ApiResponse[ApiResponseT]:
+        """Deserializes response into an object.
+        :param response_data: RESTResponse object to be deserialized.
+        :param response_types_map: dict of response types.
+        :return: ApiResponse
+        """
+
+        msg = "RESTResponse.read() must be called before passing it to response_deserialize()"
+        assert response_data.data is not None, msg
+
+        response_type = response_types_map.get(str(response_data.status), None)
+        if (
+            not response_type
+            and isinstance(response_data.status, int)
+            and 100 <= response_data.status <= 599
+        ):
+            # if not found, look for '1XX', '2XX', etc.
+ response_type = response_types_map.get( + str(response_data.status)[0] + "XX", None + ) + + # deserialize response data + response_text = None + return_data = None + try: + if response_type == "bytearray": + return_data = response_data.data + elif response_type == "file": + return_data = self.__deserialize_file(response_data) + elif response_type is not None: + match = None + content_type = response_data.getheader("content-type") + if content_type is not None: + match = re.search(r"charset=([a-zA-Z\-\d]+)[\s;]?", content_type) + encoding = match.group(1) if match else "utf-8" + response_text = response_data.data.decode(encoding) + return_data = self.deserialize( + response_text, response_type, content_type + ) + finally: + if not 200 <= response_data.status <= 299: + raise ApiException.from_response( + http_resp=response_data, + body=response_text, + data=return_data, + ) + + return ApiResponse( + status_code=response_data.status, + data=return_data, + headers=response_data.getheaders(), + raw_data=response_data.data, + ) + + def sanitize_for_serialization(self, obj): + """Builds a JSON POST object. + + If obj is None, return None. + If obj is SecretStr, return obj.get_secret_value() + If obj is str, int, long, float, bool, return directly. + If obj is datetime.datetime, datetime.date + convert to string in iso8601 format. + If obj is decimal.Decimal return string representation. + If obj is list, sanitize each element in the list. + If obj is dict, return the dict. + If obj is OpenAPI model, return the properties dict. + + :param obj: The data to serialize. + :return: The serialized form of data. 
+ """ + if obj is None: + return None + elif isinstance(obj, Enum): + return obj.value + elif isinstance(obj, SecretStr): + return obj.get_secret_value() + elif isinstance(obj, self.PRIMITIVE_TYPES): + return obj + elif isinstance(obj, list): + return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] + elif isinstance(obj, tuple): + return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj) + elif isinstance(obj, (datetime.datetime, datetime.date)): + return obj.isoformat() + elif isinstance(obj, decimal.Decimal): + return str(obj) + + elif isinstance(obj, dict): + obj_dict = obj + else: + # Convert model obj to dict except + # attributes `openapi_types`, `attribute_map` + # and attributes which value is not None. + # Convert attribute name to json key in + # model definition for request. + if hasattr(obj, "to_dict") and callable(getattr(obj, "to_dict")): + obj_dict = obj.to_dict() + else: + obj_dict = obj.__dict__ + + return { + key: self.sanitize_for_serialization(val) for key, val in obj_dict.items() + } + + def deserialize( + self, response_text: str, response_type: str, content_type: Optional[str] + ): + """Deserializes response into an object. + + :param response: RESTResponse object to be deserialized. + :param response_type: class literal for + deserialized object, or string of class name. + :param content_type: content type of response. + + :return: deserialized object. 
+ """ + + # fetch data from response object + if content_type is None: + try: + data = json.loads(response_text) + except ValueError: + data = response_text + elif re.match( + r"^application/(json|[\w!#$&.+-^_]+\+json)\s*(;|$)", + content_type, + re.IGNORECASE, + ): + if response_text == "": + data = "" + else: + data = json.loads(response_text) + elif re.match(r"^text\/[a-z.+-]+\s*(;|$)", content_type, re.IGNORECASE): + data = response_text + else: + raise ApiException( + status=0, reason="Unsupported content type: {0}".format(content_type) + ) + + return self.__deserialize(data, response_type) + + def __deserialize(self, data, klass): + """Deserializes dict, list, str into an object. + + :param data: dict, list or str. + :param klass: class literal, or string of class name. + + :return: object. + """ + if data is None: + return None + + if isinstance(klass, str): + if klass.startswith("List["): + m = re.match(r"List\[(.*)]", klass) + assert m is not None, "Malformed List type definition" + sub_kls = m.group(1) + return [self.__deserialize(sub_data, sub_kls) for sub_data in data] + + if klass.startswith("Dict["): + m = re.match(r"Dict\[([^,]*), (.*)]", klass) + assert m is not None, "Malformed Dict type definition" + sub_kls = m.group(2) + return {k: self.__deserialize(v, sub_kls) for k, v in data.items()} + + # convert str to class + if klass in self.NATIVE_TYPES_MAPPING: + klass = self.NATIVE_TYPES_MAPPING[klass] + else: + klass = getattr(together.generated.models, klass) + + if klass in self.PRIMITIVE_TYPES: + return self.__deserialize_primitive(data, klass) + elif klass == object: + return self.__deserialize_object(data) + elif klass == datetime.date: + return self.__deserialize_date(data) + elif klass == datetime.datetime: + return self.__deserialize_datetime(data) + elif klass == decimal.Decimal: + return decimal.Decimal(data) + elif issubclass(klass, Enum): + return self.__deserialize_enum(data, klass) + else: + return self.__deserialize_model(data, klass) 
+ + def parameters_to_tuples(self, params, collection_formats): + """Get parameters as list of tuples, formatting collections. + + :param params: Parameters as dict or list of two-tuples + :param dict collection_formats: Parameter collection formats + :return: Parameters as list of tuples, collections formatted + """ + new_params: List[Tuple[str, str]] = [] + if collection_formats is None: + collection_formats = {} + for k, v in params.items() if isinstance(params, dict) else params: + if k in collection_formats: + collection_format = collection_formats[k] + if collection_format == "multi": + new_params.extend((k, value) for value in v) + else: + if collection_format == "ssv": + delimiter = " " + elif collection_format == "tsv": + delimiter = "\t" + elif collection_format == "pipes": + delimiter = "|" + else: # csv is the default + delimiter = "," + new_params.append((k, delimiter.join(str(value) for value in v))) + else: + new_params.append((k, v)) + return new_params + + def parameters_to_url_query(self, params, collection_formats): + """Get parameters as list of tuples, formatting collections. + + :param params: Parameters as dict or list of two-tuples + :param dict collection_formats: Parameter collection formats + :return: URL query string (e.g. 
a=Hello%20World&b=123) + """ + new_params: List[Tuple[str, str]] = [] + if collection_formats is None: + collection_formats = {} + for k, v in params.items() if isinstance(params, dict) else params: + if isinstance(v, bool): + v = str(v).lower() + if isinstance(v, (int, float)): + v = str(v) + if isinstance(v, dict): + v = json.dumps(v) + + if k in collection_formats: + collection_format = collection_formats[k] + if collection_format == "multi": + new_params.extend((k, quote(str(value))) for value in v) + else: + if collection_format == "ssv": + delimiter = " " + elif collection_format == "tsv": + delimiter = "\t" + elif collection_format == "pipes": + delimiter = "|" + else: # csv is the default + delimiter = "," + new_params.append( + (k, delimiter.join(quote(str(value)) for value in v)) + ) + else: + new_params.append((k, quote(str(v)))) + + return "&".join(["=".join(map(str, item)) for item in new_params]) + + def files_parameters( + self, + files: Dict[str, Union[str, bytes, List[str], List[bytes], Tuple[str, bytes]]], + ): + """Builds form parameters. + + :param files: File parameters. + :return: Form parameters with files. + """ + params = [] + for k, v in files.items(): + if isinstance(v, str): + with open(v, "rb") as f: + filename = os.path.basename(f.name) + filedata = f.read() + elif isinstance(v, bytes): + filename = k + filedata = v + elif isinstance(v, tuple): + filename, filedata = v + elif isinstance(v, list): + for file_param in v: + params.extend(self.files_parameters({k: file_param})) + continue + else: + raise ValueError("Unsupported file value") + mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream" + params.append(tuple([k, tuple([filename, filedata, mimetype])])) + return params + + def select_header_accept(self, accepts: List[str]) -> Optional[str]: + """Returns `Accept` based on an array of accepts provided. + + :param accepts: List of headers. + :return: Accept (e.g. application/json). 
+ """ + if not accepts: + return None + + for accept in accepts: + if re.search("json", accept, re.IGNORECASE): + return accept + + return accepts[0] + + def select_header_content_type(self, content_types): + """Returns `Content-Type` based on an array of content_types provided. + + :param content_types: List of content-types. + :return: Content-Type (e.g. application/json). + """ + if not content_types: + return None + + for content_type in content_types: + if re.search("json", content_type, re.IGNORECASE): + return content_type + + return content_types[0] + + def update_params_for_auth( + self, + headers, + queries, + auth_settings, + resource_path, + method, + body, + request_auth=None, + ) -> None: + """Updates header and query params based on authentication setting. + + :param headers: Header parameters dict to be updated. + :param queries: Query parameters tuple list to be updated. + :param auth_settings: Authentication setting identifiers list. + :resource_path: A string representation of the HTTP request resource path. + :method: A string representation of the HTTP request method. + :body: A object representing the body of the HTTP request. + The object type is the return value of sanitize_for_serialization(). + :param request_auth: if set, the provided settings will + override the token in the configuration. + """ + if not auth_settings: + return + + if request_auth: + self._apply_auth_params( + headers, queries, resource_path, method, body, request_auth + ) + else: + for auth in auth_settings: + auth_setting = self.configuration.auth_settings().get(auth) + if auth_setting: + self._apply_auth_params( + headers, queries, resource_path, method, body, auth_setting + ) + + def _apply_auth_params( + self, headers, queries, resource_path, method, body, auth_setting + ) -> None: + """Updates the request parameters based on a single auth_setting + + :param headers: Header parameters dict to be updated. + :param queries: Query parameters tuple list to be updated. 
+ :resource_path: A string representation of the HTTP request resource path. + :method: A string representation of the HTTP request method. + :body: A object representing the body of the HTTP request. + The object type is the return value of sanitize_for_serialization(). + :param auth_setting: auth settings for the endpoint + """ + if auth_setting["in"] == "cookie": + headers["Cookie"] = auth_setting["value"] + elif auth_setting["in"] == "header": + if auth_setting["type"] != "http-signature": + headers[auth_setting["key"]] = auth_setting["value"] + elif auth_setting["in"] == "query": + queries.append((auth_setting["key"], auth_setting["value"])) + else: + raise ApiValueError("Authentication token must be in `query` or `header`") + + def __deserialize_file(self, response): + """Deserializes body to file + + Saves response body into a file in a temporary folder, + using the filename from the `Content-Disposition` header if provided. + + handle file downloading + save response body into a tmp file and return the instance + + :param response: RESTResponse. + :return: file path. + """ + fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path) + os.close(fd) + os.remove(path) + + content_disposition = response.getheader("Content-Disposition") + if content_disposition: + m = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition) + assert m is not None, "Unexpected 'content-disposition' header value" + filename = m.group(1) + path = os.path.join(os.path.dirname(path), filename) + + with open(path, "wb") as f: + f.write(response.data) + + return path + + def __deserialize_primitive(self, data, klass): + """Deserializes string to primitive type. + + :param data: str. + :param klass: class literal. + + :return: int, long, float, str, bool. + """ + try: + return klass(data) + except UnicodeEncodeError: + return str(data) + except TypeError: + return data + + def __deserialize_object(self, value): + """Return an original value. + + :return: object. 
+ """ + return value + + def __deserialize_date(self, string): + """Deserializes string to date. + + :param string: str. + :return: date. + """ + try: + return parse(string).date() + except ImportError: + return string + except ValueError: + raise rest.ApiException( + status=0, reason="Failed to parse `{0}` as date object".format(string) + ) + + def __deserialize_datetime(self, string): + """Deserializes string to datetime. + + The string should be in iso8601 datetime format. + + :param string: str. + :return: datetime. + """ + try: + return parse(string) + except ImportError: + return string + except ValueError: + raise rest.ApiException( + status=0, + reason=("Failed to parse `{0}` as datetime object".format(string)), + ) + + def __deserialize_enum(self, data, klass): + """Deserializes primitive type to enum. + + :param data: primitive type. + :param klass: class literal. + :return: enum value. + """ + try: + return klass(data) + except ValueError: + raise rest.ApiException( + status=0, reason=("Failed to parse `{0}` as `{1}`".format(data, klass)) + ) + + def __deserialize_model(self, data, klass): + """Deserializes list or dict to model. + + :param data: dict, list. + :param klass: class literal. + :return: model object. 
+ """ + + return klass.from_dict(data) diff --git a/src/together/generated/api_response.py b/src/together/generated/api_response.py new file mode 100644 index 00000000..1ce13729 --- /dev/null +++ b/src/together/generated/api_response.py @@ -0,0 +1,20 @@ +"""API response object.""" + +from __future__ import annotations +from typing import Optional, Generic, Mapping, TypeVar +from pydantic import Field, StrictInt, StrictBytes, BaseModel + +T = TypeVar("T") + + +class ApiResponse(BaseModel, Generic[T]): + """ + API response object + """ + + status_code: StrictInt = Field(description="HTTP status code") + headers: Optional[Mapping[str, str]] = Field(None, description="HTTP headers") + data: T = Field(description="Deserialized data given the data type") + raw_data: StrictBytes = Field(description="Raw data (HTTP response body)") + + model_config = {"arbitrary_types_allowed": True} diff --git a/src/together/generated/configuration.py b/src/together/generated/configuration.py new file mode 100644 index 00000000..603014b0 --- /dev/null +++ b/src/together/generated/configuration.py @@ -0,0 +1,583 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import copy +import http.client as httplib +import logging +from logging import FileHandler +import sys +from typing import Any, ClassVar, Dict, List, Literal, Optional, TypedDict +from typing_extensions import NotRequired, Self + +import urllib3 + + +JSON_SCHEMA_VALIDATION_KEYWORDS = { + "multipleOf", + "maximum", + "exclusiveMaximum", + "minimum", + "exclusiveMinimum", + "maxLength", + "minLength", + "pattern", + "maxItems", + "minItems", +} + +ServerVariablesT = Dict[str, str] + +GenericAuthSetting = TypedDict( + "GenericAuthSetting", + { + "type": str, + "in": str, + "key": str, + "value": str, + }, +) + + +OAuth2AuthSetting = TypedDict( + "OAuth2AuthSetting", + { + "type": Literal["oauth2"], + "in": Literal["header"], + "key": Literal["Authorization"], + "value": str, + }, +) + + +APIKeyAuthSetting = TypedDict( + "APIKeyAuthSetting", + { + "type": Literal["api_key"], + "in": str, + "key": str, + "value": Optional[str], + }, +) + + +BasicAuthSetting = TypedDict( + "BasicAuthSetting", + { + "type": Literal["basic"], + "in": Literal["header"], + "key": Literal["Authorization"], + "value": Optional[str], + }, +) + + +BearerFormatAuthSetting = TypedDict( + "BearerFormatAuthSetting", + { + "type": Literal["bearer"], + "in": Literal["header"], + "format": Literal["JWT"], + "key": Literal["Authorization"], + "value": str, + }, +) + + +BearerAuthSetting = TypedDict( + "BearerAuthSetting", + { + "type": Literal["bearer"], + "in": Literal["header"], + "key": Literal["Authorization"], + "value": str, + }, +) + + +HTTPSignatureAuthSetting = TypedDict( + "HTTPSignatureAuthSetting", + { + "type": Literal["http-signature"], + "in": Literal["header"], + "key": Literal["Authorization"], + "value": None, + }, +) + + +AuthSettings = TypedDict( + "AuthSettings", + { + "bearerAuth": BearerAuthSetting, + }, + total=False, +) + + +class HostSettingVariable(TypedDict): + description: str + default_value: str + enum_values: List[str] + + +class 
HostSetting(TypedDict): + url: str + description: str + variables: NotRequired[Dict[str, HostSettingVariable]] + + +class Configuration: + """This class contains various settings of the API client. + + :param host: Base url. + :param ignore_operation_servers + Boolean to ignore operation servers for the API client. + Config will use `host` as the base url regardless of the operation servers. + :param api_key: Dict to store API key(s). + Each entry in the dict specifies an API key. + The dict key is the name of the security scheme in the OAS specification. + The dict value is the API key secret. + :param api_key_prefix: Dict to store API prefix (e.g. Bearer). + The dict key is the name of the security scheme in the OAS specification. + The dict value is an API key prefix when generating the auth data. + :param username: Username for HTTP basic authentication. + :param password: Password for HTTP basic authentication. + :param access_token: Access token. + :param server_index: Index to servers configuration. + :param server_variables: Mapping with string values to replace variables in + templated server configuration. The validation of enums is performed for + variables with defined enum values before. + :param server_operation_index: Mapping from operation ID to an index to server + configuration. + :param server_operation_variables: Mapping from operation ID to a mapping with + string values to replace variables in templated server configuration. + The validation of enums is performed for variables with defined enum + values before. + :param ssl_ca_cert: str - the path to a file of concatenated CA certificates + in PEM format. + :param retries: Number of retries for API requests. 
+ + :Example: + """ + + _default: ClassVar[Optional[Self]] = None + + def __init__( + self, + host: Optional[str] = None, + api_key: Optional[Dict[str, str]] = None, + api_key_prefix: Optional[Dict[str, str]] = None, + username: Optional[str] = None, + password: Optional[str] = None, + access_token: Optional[str] = None, + server_index: Optional[int] = None, + server_variables: Optional[ServerVariablesT] = None, + server_operation_index: Optional[Dict[int, int]] = None, + server_operation_variables: Optional[Dict[int, ServerVariablesT]] = None, + ignore_operation_servers: bool = False, + ssl_ca_cert: Optional[str] = None, + retries: Optional[int] = None, + *, + debug: Optional[bool] = None, + ) -> None: + """Constructor""" + self._base_path = "https://api.together.xyz/v1" if host is None else host + """Default Base url + """ + self.server_index = 0 if server_index is None and host is None else server_index + self.server_operation_index = server_operation_index or {} + """Default server index + """ + self.server_variables = server_variables or {} + self.server_operation_variables = server_operation_variables or {} + """Default server variables + """ + self.ignore_operation_servers = ignore_operation_servers + """Ignore operation servers + """ + self.temp_folder_path = None + """Temp file folder for downloading files + """ + # Authentication Settings + self.api_key = {} + if api_key: + self.api_key = api_key + """dict to store API key(s) + """ + self.api_key_prefix = {} + if api_key_prefix: + self.api_key_prefix = api_key_prefix + """dict to store API prefix (e.g. 
Bearer) + """ + self.refresh_api_key_hook = None + """function hook to refresh API key if expired + """ + self.username = username + """Username for HTTP basic authentication + """ + self.password = password + """Password for HTTP basic authentication + """ + self.access_token = access_token + """Access token + """ + self.logger = {} + """Logging Settings + """ + self.logger["package_logger"] = logging.getLogger("together.generated") + self.logger["urllib3_logger"] = logging.getLogger("urllib3") + self.logger_format = "%(asctime)s %(levelname)s %(message)s" + """Log format + """ + self.logger_stream_handler = None + """Log stream handler + """ + self.logger_file_handler: Optional[FileHandler] = None + """Log file handler + """ + self.logger_file = None + """Debug file location + """ + if debug is not None: + self.debug = debug + else: + self.__debug = False + """Debug switch + """ + + self.verify_ssl = True + """SSL/TLS verification + Set this to false to skip verifying SSL certificate when calling API + from https server. + """ + self.ssl_ca_cert = ssl_ca_cert + """Set this to customize the certificate file to verify the peer. + """ + self.cert_file = None + """client certificate file + """ + self.key_file = None + """client key file + """ + self.assert_hostname = None + """Set this to True/False to enable/disable SSL hostname verification. + """ + self.tls_server_name = None + """SSL/TLS Server Name Indication (SNI) + Set this to the SNI value expected by the server. + """ + + self.connection_pool_maxsize = 100 + """This value is passed to the aiohttp to limit simultaneous connections. + Default values is 100, None means no-limit. 
+ """ + + self.proxy: Optional[str] = None + """Proxy URL + """ + self.proxy_headers = None + """Proxy headers + """ + self.safe_chars_for_path_param = "" + """Safe chars for path_param + """ + self.retries = retries + """Adding retries to override urllib3 default value 3 + """ + # Enable client side validation + self.client_side_validation = True + + self.socket_options = None + """Options to pass down to the underlying urllib3 socket + """ + + self.datetime_format = "%Y-%m-%dT%H:%M:%S.%f%z" + """datetime format + """ + + self.date_format = "%Y-%m-%d" + """date format + """ + + def __deepcopy__(self, memo: Dict[int, Any]) -> Self: + cls = self.__class__ + result = cls.__new__(cls) + memo[id(self)] = result + for k, v in self.__dict__.items(): + if k not in ("logger", "logger_file_handler"): + setattr(result, k, copy.deepcopy(v, memo)) + # shallow copy of loggers + result.logger = copy.copy(self.logger) + # use setters to configure loggers + result.logger_file = self.logger_file + result.debug = self.debug + return result + + def __setattr__(self, name: str, value: Any) -> None: + object.__setattr__(self, name, value) + + @classmethod + def set_default(cls, default: Optional[Self]) -> None: + """Set default instance of configuration. + + It stores default configuration, which can be + returned by get_default_copy method. + + :param default: object of Configuration + """ + cls._default = default + + @classmethod + def get_default_copy(cls) -> Self: + """Deprecated. Please use `get_default` instead. + + Deprecated. Please use `get_default` instead. + + :return: The configuration object. + """ + return cls.get_default() + + @classmethod + def get_default(cls) -> Self: + """Return the default configuration. + + This method returns newly created, based on default constructor, + object of Configuration class or returns a copy of default + configuration. + + :return: The configuration object. 
+ """ + if cls._default is None: + cls._default = cls() + return cls._default + + @property + def logger_file(self) -> Optional[str]: + """The logger file. + + If the logger_file is None, then add stream handler and remove file + handler. Otherwise, add file handler and remove stream handler. + + :param value: The logger_file path. + :type: str + """ + return self.__logger_file + + @logger_file.setter + def logger_file(self, value: Optional[str]) -> None: + """The logger file. + + If the logger_file is None, then add stream handler and remove file + handler. Otherwise, add file handler and remove stream handler. + + :param value: The logger_file path. + :type: str + """ + self.__logger_file = value + if self.__logger_file: + # If set logging file, + # then add file handler and remove stream handler. + self.logger_file_handler = logging.FileHandler(self.__logger_file) + self.logger_file_handler.setFormatter(self.logger_formatter) + for _, logger in self.logger.items(): + logger.addHandler(self.logger_file_handler) + + @property + def debug(self) -> bool: + """Debug status + + :param value: The debug status, True or False. + :type: bool + """ + return self.__debug + + @debug.setter + def debug(self, value: bool) -> None: + """Debug status + + :param value: The debug status, True or False. + :type: bool + """ + self.__debug = value + if self.__debug: + # if debug status is True, turn on debug logging + for _, logger in self.logger.items(): + logger.setLevel(logging.DEBUG) + # turn on httplib debug + httplib.HTTPConnection.debuglevel = 1 + else: + # if debug status is False, turn off debug logging, + # setting log level to default `logging.WARNING` + for _, logger in self.logger.items(): + logger.setLevel(logging.WARNING) + # turn off httplib debug + httplib.HTTPConnection.debuglevel = 0 + + @property + def logger_format(self) -> str: + """The logger format. + + The logger_formatter will be updated when sets logger_format. + + :param value: The format string. 
+ :type: str + """ + return self.__logger_format + + @logger_format.setter + def logger_format(self, value: str) -> None: + """The logger format. + + The logger_formatter will be updated when sets logger_format. + + :param value: The format string. + :type: str + """ + self.__logger_format = value + self.logger_formatter = logging.Formatter(self.__logger_format) + + def get_api_key_with_prefix( + self, identifier: str, alias: Optional[str] = None + ) -> Optional[str]: + """Gets API key (with prefix if set). + + :param identifier: The identifier of apiKey. + :param alias: The alternative identifier of apiKey. + :return: The token for api key authentication. + """ + if self.refresh_api_key_hook is not None: + self.refresh_api_key_hook(self) + key = self.api_key.get( + identifier, self.api_key.get(alias) if alias is not None else None + ) + if key: + prefix = self.api_key_prefix.get(identifier) + if prefix: + return "%s %s" % (prefix, key) + else: + return key + + return None + + def get_basic_auth_token(self) -> Optional[str]: + """Gets HTTP basic authentication header (string). + + :return: The token for basic HTTP authentication. + """ + username = "" + if self.username is not None: + username = self.username + password = "" + if self.password is not None: + password = self.password + return urllib3.util.make_headers(basic_auth=username + ":" + password).get( + "authorization" + ) + + def auth_settings(self) -> AuthSettings: + """Gets Auth Settings dict for api client. + + :return: The Auth Settings information dict. + """ + auth: AuthSettings = {} + if self.access_token is not None: + auth["bearerAuth"] = { + "type": "bearer", + "in": "header", + "key": "Authorization", + "value": "Bearer " + self.access_token, + } + return auth + + def to_debug_report(self) -> str: + """Gets the essential information for debugging. + + :return: The report for debugging. 
+ """ + return ( + "Python SDK Debug Report:\n" + "OS: {env}\n" + "Python Version: {pyversion}\n" + "Version of the API: 2.0.0\n" + "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version) + ) + + def get_host_settings(self) -> List[HostSetting]: + """Gets an array of host settings + + :return: An array of host settings + """ + return [ + { + "url": "https://api.together.xyz/v1", + "description": "No description provided", + } + ] + + def get_host_from_settings( + self, + index: Optional[int], + variables: Optional[ServerVariablesT] = None, + servers: Optional[List[HostSetting]] = None, + ) -> str: + """Gets host URL based on the index and variables + :param index: array index of the host settings + :param variables: hash of variable and the corresponding value + :param servers: an array of host settings or None + :return: URL based on host settings + """ + if index is None: + return self._base_path + + variables = {} if variables is None else variables + servers = self.get_host_settings() if servers is None else servers + + try: + server = servers[index] + except IndexError: + raise ValueError( + "Invalid index {0} when selecting the host settings. " + "Must be less than {1}".format(index, len(servers)) + ) + + url = server["url"] + + # go through variables and replace placeholders + for variable_name, variable in server.get("variables", {}).items(): + used_value = variables.get(variable_name, variable["default_value"]) + + if "enum_values" in variable and used_value not in variable["enum_values"]: + raise ValueError( + "The variable `{0}` in the host URL has invalid value " + "{1}. 
Must be {2}.".format( + variable_name, variables[variable_name], variable["enum_values"] + ) + ) + + url = url.replace("{" + variable_name + "}", used_value) + + return url + + @property + def host(self) -> str: + """Return generated host.""" + return self.get_host_from_settings( + self.server_index, variables=self.server_variables + ) + + @host.setter + def host(self, value: str) -> None: + """Fix base path.""" + self._base_path = value + self.server_index = None diff --git a/src/together/generated/docs/AudioApi.md b/src/together/generated/docs/AudioApi.md new file mode 100644 index 00000000..adf6ebb4 --- /dev/null +++ b/src/together/generated/docs/AudioApi.md @@ -0,0 +1,88 @@ +# together.generated.AudioApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**audio_speech**](AudioApi.md#audio_speech) | **POST** /audio/speech | Create audio generation request + + +# **audio_speech** +> bytearray audio_speech(audio_speech_request=audio_speech_request) + +Create audio generation request + +Generate audio from input text + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.audio_speech_request import AudioSpeechRequest +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.AudioApi(api_client) + audio_speech_request = together.generated.AudioSpeechRequest() # AudioSpeechRequest | (optional) + + try: + # Create audio generation request + api_response = await api_instance.audio_speech(audio_speech_request=audio_speech_request) + print("The response of AudioApi->audio_speech:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling AudioApi->audio_speech: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **audio_speech_request** | [**AudioSpeechRequest**](AudioSpeechRequest.md)| | [optional] + +### Return type + +**bytearray** + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/octet-stream, audio/wav, audio/mpeg, text/event-stream, application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**400** | BadRequest | - | +**429** | RateLimit | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechRequest.md b/src/together/generated/docs/AudioSpeechRequest.md new file mode 100644 index 00000000..23ef3ade --- /dev/null +++ b/src/together/generated/docs/AudioSpeechRequest.md @@ -0,0 +1,34 @@ +# AudioSpeechRequest + + +## Properties + +Name | Type | Description | Notes +------------ | 
------------- | ------------- | ------------- +**model** | [**AudioSpeechRequestModel**](AudioSpeechRequestModel.md) | | +**input** | **str** | Input text to generate the audio for | +**voice** | [**AudioSpeechRequestVoice**](AudioSpeechRequestVoice.md) | | +**response_format** | **str** | The format of audio output | [optional] [default to 'wav'] +**language** | **str** | Language of input text | [optional] [default to 'en'] +**response_encoding** | **str** | Audio encoding of response | [optional] [default to 'pcm_f32le'] +**sample_rate** | **float** | Sampling rate to use for the output audio | [optional] [default to 44100] +**stream** | **bool** | If true, output is streamed for several characters at a time instead of waiting for the full response. The stream terminates with `data: [DONE]`. If false, return the encoded audio as octet stream | [optional] [default to False] + +## Example + +```python +from together.generated.models.audio_speech_request import AudioSpeechRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechRequest from a JSON string +audio_speech_request_instance = AudioSpeechRequest.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechRequest.to_json()) + +# convert the object into a dict +audio_speech_request_dict = audio_speech_request_instance.to_dict() +# create an instance of AudioSpeechRequest from a dict +audio_speech_request_from_dict = AudioSpeechRequest.from_dict(audio_speech_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechRequestModel.md b/src/together/generated/docs/AudioSpeechRequestModel.md new file mode 100644 index 00000000..41febc9a --- /dev/null +++ b/src/together/generated/docs/AudioSpeechRequestModel.md @@ -0,0 +1,27 @@ +# AudioSpeechRequestModel + +The name of the 
model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#audio-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechRequestModel from a JSON string +audio_speech_request_model_instance = AudioSpeechRequestModel.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechRequestModel.to_json()) + +# convert the object into a dict +audio_speech_request_model_dict = audio_speech_request_model_instance.to_dict() +# create an instance of AudioSpeechRequestModel from a dict +audio_speech_request_model_from_dict = AudioSpeechRequestModel.from_dict(audio_speech_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechRequestVoice.md b/src/together/generated/docs/AudioSpeechRequestVoice.md new file mode 100644 index 00000000..9ad16586 --- /dev/null +++ b/src/together/generated/docs/AudioSpeechRequestVoice.md @@ -0,0 +1,27 @@ +# AudioSpeechRequestVoice + +The voice to use for generating the audio. [View all supported voices here](https://docs.together.ai/docs/text-to-speech#voices-available). 
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechRequestVoice from a JSON string +audio_speech_request_voice_instance = AudioSpeechRequestVoice.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechRequestVoice.to_json()) + +# convert the object into a dict +audio_speech_request_voice_dict = audio_speech_request_voice_instance.to_dict() +# create an instance of AudioSpeechRequestVoice from a dict +audio_speech_request_voice_from_dict = AudioSpeechRequestVoice.from_dict(audio_speech_request_voice_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechStreamChunk.md b/src/together/generated/docs/AudioSpeechStreamChunk.md new file mode 100644 index 00000000..7d9f5558 --- /dev/null +++ b/src/together/generated/docs/AudioSpeechStreamChunk.md @@ -0,0 +1,29 @@ +# AudioSpeechStreamChunk + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**model** | **str** | | +**b64** | **str** | base64 encoded audio stream | + +## Example + +```python +from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechStreamChunk from a JSON string +audio_speech_stream_chunk_instance = AudioSpeechStreamChunk.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechStreamChunk.to_json()) + +# convert the object into a dict +audio_speech_stream_chunk_dict = 
audio_speech_stream_chunk_instance.to_dict() +# create an instance of AudioSpeechStreamChunk from a dict +audio_speech_stream_chunk_from_dict = AudioSpeechStreamChunk.from_dict(audio_speech_stream_chunk_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechStreamEvent.md b/src/together/generated/docs/AudioSpeechStreamEvent.md new file mode 100644 index 00000000..9c2d9f7a --- /dev/null +++ b/src/together/generated/docs/AudioSpeechStreamEvent.md @@ -0,0 +1,27 @@ +# AudioSpeechStreamEvent + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**AudioSpeechStreamChunk**](AudioSpeechStreamChunk.md) | | + +## Example + +```python +from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechStreamEvent from a JSON string +audio_speech_stream_event_instance = AudioSpeechStreamEvent.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechStreamEvent.to_json()) + +# convert the object into a dict +audio_speech_stream_event_dict = audio_speech_stream_event_instance.to_dict() +# create an instance of AudioSpeechStreamEvent from a dict +audio_speech_stream_event_from_dict = AudioSpeechStreamEvent.from_dict(audio_speech_stream_event_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechStreamResponse.md b/src/together/generated/docs/AudioSpeechStreamResponse.md new file mode 100644 index 00000000..eda7c0b0 --- /dev/null +++ b/src/together/generated/docs/AudioSpeechStreamResponse.md @@ -0,0 +1,27 @@ +# 
AudioSpeechStreamResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | **str** | | + +## Example + +```python +from together.generated.models.audio_speech_stream_response import AudioSpeechStreamResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechStreamResponse from a JSON string +audio_speech_stream_response_instance = AudioSpeechStreamResponse.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechStreamResponse.to_json()) + +# convert the object into a dict +audio_speech_stream_response_dict = audio_speech_stream_response_instance.to_dict() +# create an instance of AudioSpeechStreamResponse from a dict +audio_speech_stream_response_from_dict = AudioSpeechStreamResponse.from_dict(audio_speech_stream_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/Autoscaling.md b/src/together/generated/docs/Autoscaling.md new file mode 100644 index 00000000..b1ee0b95 --- /dev/null +++ b/src/together/generated/docs/Autoscaling.md @@ -0,0 +1,29 @@ +# Autoscaling + +Configuration for automatic scaling of replicas based on demand. 
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**min_replicas** | **int** | The minimum number of replicas to maintain, even when there is no load | +**max_replicas** | **int** | The maximum number of replicas to scale up to under load | + +## Example + +```python +from together.generated.models.autoscaling import Autoscaling + +# TODO update the JSON string below +json = "{}" +# create an instance of Autoscaling from a JSON string +autoscaling_instance = Autoscaling.from_json(json) +# print the JSON string representation of the object +print(Autoscaling.to_json()) + +# convert the object into a dict +autoscaling_dict = autoscaling_instance.to_dict() +# create an instance of Autoscaling from a dict +autoscaling_from_dict = Autoscaling.from_dict(autoscaling_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatApi.md b/src/together/generated/docs/ChatApi.md new file mode 100644 index 00000000..dd0e179d --- /dev/null +++ b/src/together/generated/docs/ChatApi.md @@ -0,0 +1,93 @@ +# together.generated.ChatApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**chat_completions**](ChatApi.md#chat_completions) | **POST** /chat/completions | Create chat completion + + +# **chat_completions** +> ChatCompletionResponse chat_completions(chat_completion_request=chat_completion_request) + +Create chat completion + +Query a chat model. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.chat_completion_request import ChatCompletionRequest +from together.generated.models.chat_completion_response import ChatCompletionResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.ChatApi(api_client) + chat_completion_request = together.generated.ChatCompletionRequest() # ChatCompletionRequest | (optional) + + try: + # Create chat completion + api_response = await api_instance.chat_completions(chat_completion_request=chat_completion_request) + print("The response of ChatApi->chat_completions:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling ChatApi->chat_completions: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **chat_completion_request** | [**ChatCompletionRequest**](ChatCompletionRequest.md)| | [optional] + +### Return type + +[**ChatCompletionResponse**](ChatCompletionResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### 
HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json, text/event-stream + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**400** | BadRequest | - | +**401** | Unauthorized | - | +**404** | NotFound | - | +**429** | RateLimit | - | +**503** | Overloaded | - | +**504** | Timeout | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionAssistantMessageParam.md b/src/together/generated/docs/ChatCompletionAssistantMessageParam.md new file mode 100644 index 00000000..5281fe5c --- /dev/null +++ b/src/together/generated/docs/ChatCompletionAssistantMessageParam.md @@ -0,0 +1,31 @@ +# ChatCompletionAssistantMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**content** | **str** | | [optional] +**role** | **str** | | +**name** | **str** | | [optional] +**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] +**function_call** | [**ChatCompletionMessageFunctionCall**](ChatCompletionMessageFunctionCall.md) | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionAssistantMessageParam from a JSON string +chat_completion_assistant_message_param_instance = ChatCompletionAssistantMessageParam.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionAssistantMessageParam.to_json()) + +# convert the object into a dict +chat_completion_assistant_message_param_dict = chat_completion_assistant_message_param_instance.to_dict() +# create an 
instance of ChatCompletionAssistantMessageParam from a dict +chat_completion_assistant_message_param_from_dict = ChatCompletionAssistantMessageParam.from_dict(chat_completion_assistant_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoice.md b/src/together/generated/docs/ChatCompletionChoice.md new file mode 100644 index 00000000..b75becc7 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChoice.md @@ -0,0 +1,30 @@ +# ChatCompletionChoice + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **int** | | +**finish_reason** | [**FinishReason**](FinishReason.md) | | +**logprobs** | [**LogprobsPart**](LogprobsPart.md) | | [optional] +**delta** | [**ChatCompletionChoiceDelta**](ChatCompletionChoiceDelta.md) | | + +## Example + +```python +from together.generated.models.chat_completion_choice import ChatCompletionChoice + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChoice from a JSON string +chat_completion_choice_instance = ChatCompletionChoice.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChoice.to_json()) + +# convert the object into a dict +chat_completion_choice_dict = chat_completion_choice_instance.to_dict() +# create an instance of ChatCompletionChoice from a dict +chat_completion_choice_from_dict = ChatCompletionChoice.from_dict(chat_completion_choice_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoiceDelta.md b/src/together/generated/docs/ChatCompletionChoiceDelta.md new file mode 100644 index 00000000..865b8090 
--- /dev/null +++ b/src/together/generated/docs/ChatCompletionChoiceDelta.md @@ -0,0 +1,31 @@ +# ChatCompletionChoiceDelta + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**token_id** | **int** | | [optional] +**role** | **str** | | +**content** | **str** | | [optional] +**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] +**function_call** | [**ChatCompletionChoiceDeltaFunctionCall**](ChatCompletionChoiceDeltaFunctionCall.md) | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_choice_delta import ChatCompletionChoiceDelta + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChoiceDelta from a JSON string +chat_completion_choice_delta_instance = ChatCompletionChoiceDelta.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChoiceDelta.to_json()) + +# convert the object into a dict +chat_completion_choice_delta_dict = chat_completion_choice_delta_instance.to_dict() +# create an instance of ChatCompletionChoiceDelta from a dict +chat_completion_choice_delta_from_dict = ChatCompletionChoiceDelta.from_dict(chat_completion_choice_delta_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md b/src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md new file mode 100644 index 00000000..e6e861f0 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md @@ -0,0 +1,28 @@ +# ChatCompletionChoiceDeltaFunctionCall + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**arguments** | **str** | | +**name** | **str** | | + +## Example + +```python +from 
together.generated.models.chat_completion_choice_delta_function_call import ChatCompletionChoiceDeltaFunctionCall + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChoiceDeltaFunctionCall from a JSON string +chat_completion_choice_delta_function_call_instance = ChatCompletionChoiceDeltaFunctionCall.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChoiceDeltaFunctionCall.to_json()) + +# convert the object into a dict +chat_completion_choice_delta_function_call_dict = chat_completion_choice_delta_function_call_instance.to_dict() +# create an instance of ChatCompletionChoiceDeltaFunctionCall from a dict +chat_completion_choice_delta_function_call_from_dict = ChatCompletionChoiceDeltaFunctionCall.from_dict(chat_completion_choice_delta_function_call_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoicesDataInner.md b/src/together/generated/docs/ChatCompletionChoicesDataInner.md new file mode 100644 index 00000000..56fe92b1 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChoicesDataInner.md @@ -0,0 +1,32 @@ +# ChatCompletionChoicesDataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **str** | | [optional] +**index** | **int** | | [optional] +**seed** | **int** | | [optional] +**finish_reason** | [**FinishReason**](FinishReason.md) | | [optional] +**message** | [**ChatCompletionMessage**](ChatCompletionMessage.md) | | [optional] +**logprobs** | [**ChatCompletionChoicesDataInnerLogprobs**](ChatCompletionChoicesDataInnerLogprobs.md) | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_choices_data_inner import ChatCompletionChoicesDataInner + +# TODO update the JSON string below 
+json = "{}" +# create an instance of ChatCompletionChoicesDataInner from a JSON string +chat_completion_choices_data_inner_instance = ChatCompletionChoicesDataInner.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChoicesDataInner.to_json()) + +# convert the object into a dict +chat_completion_choices_data_inner_dict = chat_completion_choices_data_inner_instance.to_dict() +# create an instance of ChatCompletionChoicesDataInner from a dict +chat_completion_choices_data_inner_from_dict = ChatCompletionChoicesDataInner.from_dict(chat_completion_choices_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md b/src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md new file mode 100644 index 00000000..72320aab --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md @@ -0,0 +1,29 @@ +# ChatCompletionChoicesDataInnerLogprobs + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**token_ids** | **List[float]** | List of token IDs corresponding to the logprobs | [optional] +**tokens** | **List[str]** | List of token strings | [optional] +**token_logprobs** | **List[float]** | List of token log probabilities | [optional] + +## Example + +```python +from together.generated.models.chat_completion_choices_data_inner_logprobs import ChatCompletionChoicesDataInnerLogprobs + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChoicesDataInnerLogprobs from a JSON string +chat_completion_choices_data_inner_logprobs_instance = ChatCompletionChoicesDataInnerLogprobs.from_json(json) +# print the JSON string representation of the object 
+print(ChatCompletionChoicesDataInnerLogprobs.to_json()) + +# convert the object into a dict +chat_completion_choices_data_inner_logprobs_dict = chat_completion_choices_data_inner_logprobs_instance.to_dict() +# create an instance of ChatCompletionChoicesDataInnerLogprobs from a dict +chat_completion_choices_data_inner_logprobs_from_dict = ChatCompletionChoicesDataInnerLogprobs.from_dict(chat_completion_choices_data_inner_logprobs_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChunk.md b/src/together/generated/docs/ChatCompletionChunk.md new file mode 100644 index 00000000..d42484b9 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChunk.md @@ -0,0 +1,33 @@ +# ChatCompletionChunk + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**object** | **str** | | +**created** | **int** | | +**system_fingerprint** | **str** | | [optional] +**model** | **str** | | +**choices** | [**List[ChatCompletionChunkChoicesInner]**](ChatCompletionChunkChoicesInner.md) | | +**usage** | [**CompletionChunkUsage**](CompletionChunkUsage.md) | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_chunk import ChatCompletionChunk + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChunk from a JSON string +chat_completion_chunk_instance = ChatCompletionChunk.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChunk.to_json()) + +# convert the object into a dict +chat_completion_chunk_dict = chat_completion_chunk_instance.to_dict() +# create an instance of ChatCompletionChunk from a dict +chat_completion_chunk_from_dict = ChatCompletionChunk.from_dict(chat_completion_chunk_dict) +``` +[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChunkChoicesInner.md b/src/together/generated/docs/ChatCompletionChunkChoicesInner.md new file mode 100644 index 00000000..b33bff02 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChunkChoicesInner.md @@ -0,0 +1,31 @@ +# ChatCompletionChunkChoicesInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **int** | | +**finish_reason** | [**FinishReason**](FinishReason.md) | | +**logprobs** | **float** | | [optional] +**seed** | **int** | | [optional] +**delta** | [**ChatCompletionChoiceDelta**](ChatCompletionChoiceDelta.md) | | + +## Example + +```python +from together.generated.models.chat_completion_chunk_choices_inner import ChatCompletionChunkChoicesInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChunkChoicesInner from a JSON string +chat_completion_chunk_choices_inner_instance = ChatCompletionChunkChoicesInner.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChunkChoicesInner.to_json()) + +# convert the object into a dict +chat_completion_chunk_choices_inner_dict = chat_completion_chunk_choices_inner_instance.to_dict() +# create an instance of ChatCompletionChunkChoicesInner from a dict +chat_completion_chunk_choices_inner_from_dict = ChatCompletionChunkChoicesInner.from_dict(chat_completion_chunk_choices_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionEvent.md b/src/together/generated/docs/ChatCompletionEvent.md new file mode 100644 index 00000000..49c1046a --- /dev/null +++ 
b/src/together/generated/docs/ChatCompletionEvent.md @@ -0,0 +1,27 @@ +# ChatCompletionEvent + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**ChatCompletionChunk**](ChatCompletionChunk.md) | | + +## Example + +```python +from together.generated.models.chat_completion_event import ChatCompletionEvent + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionEvent from a JSON string +chat_completion_event_instance = ChatCompletionEvent.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionEvent.to_json()) + +# convert the object into a dict +chat_completion_event_dict = chat_completion_event_instance.to_dict() +# create an instance of ChatCompletionEvent from a dict +chat_completion_event_from_dict = ChatCompletionEvent.from_dict(chat_completion_event_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionFunctionMessageParam.md b/src/together/generated/docs/ChatCompletionFunctionMessageParam.md new file mode 100644 index 00000000..1f89e299 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionFunctionMessageParam.md @@ -0,0 +1,29 @@ +# ChatCompletionFunctionMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**role** | **str** | | +**content** | **str** | | +**name** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_function_message_param import ChatCompletionFunctionMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionFunctionMessageParam from a JSON string +chat_completion_function_message_param_instance = ChatCompletionFunctionMessageParam.from_json(json) +# print 
the JSON string representation of the object +print(ChatCompletionFunctionMessageParam.to_json()) + +# convert the object into a dict +chat_completion_function_message_param_dict = chat_completion_function_message_param_instance.to_dict() +# create an instance of ChatCompletionFunctionMessageParam from a dict +chat_completion_function_message_param_from_dict = ChatCompletionFunctionMessageParam.from_dict(chat_completion_function_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionMessage.md b/src/together/generated/docs/ChatCompletionMessage.md new file mode 100644 index 00000000..d3d814a1 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionMessage.md @@ -0,0 +1,30 @@ +# ChatCompletionMessage + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**content** | **str** | | +**role** | **str** | | +**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] +**function_call** | [**ChatCompletionMessageFunctionCall**](ChatCompletionMessageFunctionCall.md) | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_message import ChatCompletionMessage + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionMessage from a JSON string +chat_completion_message_instance = ChatCompletionMessage.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionMessage.to_json()) + +# convert the object into a dict +chat_completion_message_dict = chat_completion_message_instance.to_dict() +# create an instance of ChatCompletionMessage from a dict +chat_completion_message_from_dict = ChatCompletionMessage.from_dict(chat_completion_message_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back 
to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionMessageFunctionCall.md b/src/together/generated/docs/ChatCompletionMessageFunctionCall.md new file mode 100644 index 00000000..177d9d1d --- /dev/null +++ b/src/together/generated/docs/ChatCompletionMessageFunctionCall.md @@ -0,0 +1,28 @@ +# ChatCompletionMessageFunctionCall + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**arguments** | **str** | | +**name** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_message_function_call import ChatCompletionMessageFunctionCall + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionMessageFunctionCall from a JSON string +chat_completion_message_function_call_instance = ChatCompletionMessageFunctionCall.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionMessageFunctionCall.to_json()) + +# convert the object into a dict +chat_completion_message_function_call_dict = chat_completion_message_function_call_instance.to_dict() +# create an instance of ChatCompletionMessageFunctionCall from a dict +chat_completion_message_function_call_from_dict = ChatCompletionMessageFunctionCall.from_dict(chat_completion_message_function_call_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionMessageParam.md b/src/together/generated/docs/ChatCompletionMessageParam.md new file mode 100644 index 00000000..c8e6136d --- /dev/null +++ b/src/together/generated/docs/ChatCompletionMessageParam.md @@ -0,0 +1,32 @@ +# ChatCompletionMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | 
------------- +**content** | **str** | | +**role** | **str** | | +**name** | **str** | | +**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] +**function_call** | [**ChatCompletionMessageFunctionCall**](ChatCompletionMessageFunctionCall.md) | | [optional] +**tool_call_id** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_message_param import ChatCompletionMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionMessageParam from a JSON string +chat_completion_message_param_instance = ChatCompletionMessageParam.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionMessageParam.to_json()) + +# convert the object into a dict +chat_completion_message_param_dict = chat_completion_message_param_instance.to_dict() +# create an instance of ChatCompletionMessageParam from a dict +chat_completion_message_param_from_dict = ChatCompletionMessageParam.from_dict(chat_completion_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequest.md b/src/together/generated/docs/ChatCompletionRequest.md new file mode 100644 index 00000000..f0200b3f --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequest.md @@ -0,0 +1,49 @@ +# ChatCompletionRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**messages** | [**List[ChatCompletionRequestMessagesInner]**](ChatCompletionRequestMessagesInner.md) | A list of messages comprising the conversation so far. | +**model** | [**ChatCompletionRequestModel**](ChatCompletionRequestModel.md) | | +**max_tokens** | **int** | The maximum number of tokens to generate. 
| [optional] +**stop** | **List[str]** | A list of string sequences that will truncate (stop) inference text output. For example, \"</s>\" will stop generation as soon as the model generates the given token. | [optional] +**temperature** | **float** | A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output. | [optional] +**top_p** | **float** | A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text. | [optional] +**top_k** | **int** | An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. | [optional] +**context_length_exceeded_behavior** | **str** | Defined the behavior of the API when max_tokens exceed the maximum context length of the model. When set to 'error', API will return 400 with appropriate error message. When set to 'truncate', override the max_tokens with maximum context length of the model. | [optional] [default to 'error'] +**repetition_penalty** | **float** | A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. | [optional] +**stream** | **bool** | If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. 
The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results. | [optional] +**logprobs** | **int** | Determines the number of most likely tokens to return at each token position log probabilities to return. | [optional] +**echo** | **bool** | If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs. | [optional] +**n** | **int** | The number of completions to generate for each prompt. | [optional] +**min_p** | **float** | A number between 0 and 1 that can be used as an alternative to top_p and top-k. | [optional] +**presence_penalty** | **float** | A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics. | [optional] +**frequency_penalty** | **float** | A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned. | [optional] +**logit_bias** | **Dict[str, float]** | Adjusts the likelihood of specific tokens appearing in the generated output. | [optional] +**seed** | **int** | Seed value for reproducibility. | [optional] +**function_call** | [**ChatCompletionRequestFunctionCall**](ChatCompletionRequestFunctionCall.md) | | [optional] +**response_format** | [**ChatCompletionRequestResponseFormat**](ChatCompletionRequestResponseFormat.md) | | [optional] +**tools** | [**List[ToolsPart]**](ToolsPart.md) | A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. | [optional] +**tool_choice** | [**ChatCompletionRequestToolChoice**](ChatCompletionRequestToolChoice.md) | | [optional] +**safety_model** | **str** | The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). 
| [optional] + +## Example + +```python +from together.generated.models.chat_completion_request import ChatCompletionRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequest from a JSON string +chat_completion_request_instance = ChatCompletionRequest.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequest.to_json()) + +# convert the object into a dict +chat_completion_request_dict = chat_completion_request_instance.to_dict() +# create an instance of ChatCompletionRequest from a dict +chat_completion_request_from_dict = ChatCompletionRequest.from_dict(chat_completion_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestFunctionCall.md b/src/together/generated/docs/ChatCompletionRequestFunctionCall.md new file mode 100644 index 00000000..dbeddd2a --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestFunctionCall.md @@ -0,0 +1,27 @@ +# ChatCompletionRequestFunctionCall + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_request_function_call import ChatCompletionRequestFunctionCall + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestFunctionCall from a JSON string +chat_completion_request_function_call_instance = ChatCompletionRequestFunctionCall.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestFunctionCall.to_json()) + +# convert the object into a dict +chat_completion_request_function_call_dict = chat_completion_request_function_call_instance.to_dict() +# create an instance of ChatCompletionRequestFunctionCall from a 
dict +chat_completion_request_function_call_from_dict = ChatCompletionRequestFunctionCall.from_dict(chat_completion_request_function_call_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md b/src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md new file mode 100644 index 00000000..bb0a34f7 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md @@ -0,0 +1,27 @@ +# ChatCompletionRequestFunctionCallOneOf + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_request_function_call_one_of import ChatCompletionRequestFunctionCallOneOf + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestFunctionCallOneOf from a JSON string +chat_completion_request_function_call_one_of_instance = ChatCompletionRequestFunctionCallOneOf.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestFunctionCallOneOf.to_json()) + +# convert the object into a dict +chat_completion_request_function_call_one_of_dict = chat_completion_request_function_call_one_of_instance.to_dict() +# create an instance of ChatCompletionRequestFunctionCallOneOf from a dict +chat_completion_request_function_call_one_of_from_dict = ChatCompletionRequestFunctionCallOneOf.from_dict(chat_completion_request_function_call_one_of_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestMessagesInner.md 
b/src/together/generated/docs/ChatCompletionRequestMessagesInner.md new file mode 100644 index 00000000..8512ef45 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestMessagesInner.md @@ -0,0 +1,28 @@ +# ChatCompletionRequestMessagesInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**role** | **str** | The role of the messages author. Choice between: system, user, or assistant. | +**content** | **str** | The content of the message, which can either be a simple string or a structured format. | + +## Example + +```python +from together.generated.models.chat_completion_request_messages_inner import ChatCompletionRequestMessagesInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestMessagesInner from a JSON string +chat_completion_request_messages_inner_instance = ChatCompletionRequestMessagesInner.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestMessagesInner.to_json()) + +# convert the object into a dict +chat_completion_request_messages_inner_dict = chat_completion_request_messages_inner_instance.to_dict() +# create an instance of ChatCompletionRequestMessagesInner from a dict +chat_completion_request_messages_inner_from_dict = ChatCompletionRequestMessagesInner.from_dict(chat_completion_request_messages_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestModel.md b/src/together/generated/docs/ChatCompletionRequestModel.md new file mode 100644 index 00000000..c9387ca0 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestModel.md @@ -0,0 +1,27 @@ +# ChatCompletionRequestModel + +The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.chat_completion_request_model import ChatCompletionRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestModel from a JSON string +chat_completion_request_model_instance = ChatCompletionRequestModel.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestModel.to_json()) + +# convert the object into a dict +chat_completion_request_model_dict = chat_completion_request_model_instance.to_dict() +# create an instance of ChatCompletionRequestModel from a dict +chat_completion_request_model_from_dict = ChatCompletionRequestModel.from_dict(chat_completion_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestResponseFormat.md b/src/together/generated/docs/ChatCompletionRequestResponseFormat.md new file mode 100644 index 00000000..aa94fd0a --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestResponseFormat.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestResponseFormat + +An object specifying the format that the model must output. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | The type of the response format. | [optional] +**var_schema** | **Dict[str, str]** | The schema of the response format. 
| [optional] + +## Example + +```python +from together.generated.models.chat_completion_request_response_format import ChatCompletionRequestResponseFormat + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestResponseFormat from a JSON string +chat_completion_request_response_format_instance = ChatCompletionRequestResponseFormat.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestResponseFormat.to_json()) + +# convert the object into a dict +chat_completion_request_response_format_dict = chat_completion_request_response_format_instance.to_dict() +# create an instance of ChatCompletionRequestResponseFormat from a dict +chat_completion_request_response_format_from_dict = ChatCompletionRequestResponseFormat.from_dict(chat_completion_request_response_format_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestToolChoice.md b/src/together/generated/docs/ChatCompletionRequestToolChoice.md new file mode 100644 index 00000000..c6a50e08 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestToolChoice.md @@ -0,0 +1,31 @@ +# ChatCompletionRequestToolChoice + +Controls which (if any) function is called by the model. By default uses `auto`, which lets the model pick between generating a message or calling a function. 
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **float** | | +**id** | **str** | | +**type** | **str** | | +**function** | [**ToolChoiceFunction**](ToolChoiceFunction.md) | | + +## Example + +```python +from together.generated.models.chat_completion_request_tool_choice import ChatCompletionRequestToolChoice + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestToolChoice from a JSON string +chat_completion_request_tool_choice_instance = ChatCompletionRequestToolChoice.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestToolChoice.to_json()) + +# convert the object into a dict +chat_completion_request_tool_choice_dict = chat_completion_request_tool_choice_instance.to_dict() +# create an instance of ChatCompletionRequestToolChoice from a dict +chat_completion_request_tool_choice_from_dict = ChatCompletionRequestToolChoice.from_dict(chat_completion_request_tool_choice_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionResponse.md b/src/together/generated/docs/ChatCompletionResponse.md new file mode 100644 index 00000000..bff9c23b --- /dev/null +++ b/src/together/generated/docs/ChatCompletionResponse.md @@ -0,0 +1,32 @@ +# ChatCompletionResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**choices** | [**List[ChatCompletionChoicesDataInner]**](ChatCompletionChoicesDataInner.md) | | +**usage** | [**UsageData**](UsageData.md) | | [optional] +**created** | **int** | | +**model** | **str** | | +**object** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_response import ChatCompletionResponse + 
+# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionResponse from a JSON string +chat_completion_response_instance = ChatCompletionResponse.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionResponse.to_json()) + +# convert the object into a dict +chat_completion_response_dict = chat_completion_response_instance.to_dict() +# create an instance of ChatCompletionResponse from a dict +chat_completion_response_from_dict = ChatCompletionResponse.from_dict(chat_completion_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionStream.md b/src/together/generated/docs/ChatCompletionStream.md new file mode 100644 index 00000000..0425981a --- /dev/null +++ b/src/together/generated/docs/ChatCompletionStream.md @@ -0,0 +1,27 @@ +# ChatCompletionStream + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_stream import ChatCompletionStream + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionStream from a JSON string +chat_completion_stream_instance = ChatCompletionStream.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionStream.to_json()) + +# convert the object into a dict +chat_completion_stream_dict = chat_completion_stream_instance.to_dict() +# create an instance of ChatCompletionStream from a dict +chat_completion_stream_from_dict = ChatCompletionStream.from_dict(chat_completion_stream_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git 
a/src/together/generated/docs/ChatCompletionSystemMessageParam.md b/src/together/generated/docs/ChatCompletionSystemMessageParam.md new file mode 100644 index 00000000..b1b6ee2e --- /dev/null +++ b/src/together/generated/docs/ChatCompletionSystemMessageParam.md @@ -0,0 +1,29 @@ +# ChatCompletionSystemMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**content** | **str** | | +**role** | **str** | | +**name** | **str** | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_system_message_param import ChatCompletionSystemMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionSystemMessageParam from a JSON string +chat_completion_system_message_param_instance = ChatCompletionSystemMessageParam.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionSystemMessageParam.to_json()) + +# convert the object into a dict +chat_completion_system_message_param_dict = chat_completion_system_message_param_instance.to_dict() +# create an instance of ChatCompletionSystemMessageParam from a dict +chat_completion_system_message_param_from_dict = ChatCompletionSystemMessageParam.from_dict(chat_completion_system_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionToken.md b/src/together/generated/docs/ChatCompletionToken.md new file mode 100644 index 00000000..159ba763 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionToken.md @@ -0,0 +1,30 @@ +# ChatCompletionToken + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **int** | | +**text** | **str** | | +**logprob** | **float** | | +**special** | **bool** | | + +## 
Example + +```python +from together.generated.models.chat_completion_token import ChatCompletionToken + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionToken from a JSON string +chat_completion_token_instance = ChatCompletionToken.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionToken.to_json()) + +# convert the object into a dict +chat_completion_token_dict = chat_completion_token_instance.to_dict() +# create an instance of ChatCompletionToken from a dict +chat_completion_token_from_dict = ChatCompletionToken.from_dict(chat_completion_token_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionTool.md b/src/together/generated/docs/ChatCompletionTool.md new file mode 100644 index 00000000..ff0a341f --- /dev/null +++ b/src/together/generated/docs/ChatCompletionTool.md @@ -0,0 +1,28 @@ +# ChatCompletionTool + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | | +**function** | [**ChatCompletionToolFunction**](ChatCompletionToolFunction.md) | | + +## Example + +```python +from together.generated.models.chat_completion_tool import ChatCompletionTool + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionTool from a JSON string +chat_completion_tool_instance = ChatCompletionTool.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionTool.to_json()) + +# convert the object into a dict +chat_completion_tool_dict = chat_completion_tool_instance.to_dict() +# create an instance of ChatCompletionTool from a dict +chat_completion_tool_from_dict = ChatCompletionTool.from_dict(chat_completion_tool_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) 
[[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionToolFunction.md b/src/together/generated/docs/ChatCompletionToolFunction.md new file mode 100644 index 00000000..a84fb6e5 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionToolFunction.md @@ -0,0 +1,29 @@ +# ChatCompletionToolFunction + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**description** | **str** | | [optional] +**name** | **str** | | +**parameters** | **Dict[str, object]** | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_tool_function import ChatCompletionToolFunction + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionToolFunction from a JSON string +chat_completion_tool_function_instance = ChatCompletionToolFunction.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionToolFunction.to_json()) + +# convert the object into a dict +chat_completion_tool_function_dict = chat_completion_tool_function_instance.to_dict() +# create an instance of ChatCompletionToolFunction from a dict +chat_completion_tool_function_from_dict = ChatCompletionToolFunction.from_dict(chat_completion_tool_function_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionToolMessageParam.md b/src/together/generated/docs/ChatCompletionToolMessageParam.md new file mode 100644 index 00000000..6e45af91 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionToolMessageParam.md @@ -0,0 +1,29 @@ +# ChatCompletionToolMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**role** | **str** | 
| +**content** | **str** | | +**tool_call_id** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_tool_message_param import ChatCompletionToolMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionToolMessageParam from a JSON string +chat_completion_tool_message_param_instance = ChatCompletionToolMessageParam.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionToolMessageParam.to_json()) + +# convert the object into a dict +chat_completion_tool_message_param_dict = chat_completion_tool_message_param_instance.to_dict() +# create an instance of ChatCompletionToolMessageParam from a dict +chat_completion_tool_message_param_from_dict = ChatCompletionToolMessageParam.from_dict(chat_completion_tool_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionUserMessageParam.md b/src/together/generated/docs/ChatCompletionUserMessageParam.md new file mode 100644 index 00000000..1720154b --- /dev/null +++ b/src/together/generated/docs/ChatCompletionUserMessageParam.md @@ -0,0 +1,29 @@ +# ChatCompletionUserMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**content** | **str** | | +**role** | **str** | | +**name** | **str** | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_user_message_param import ChatCompletionUserMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionUserMessageParam from a JSON string +chat_completion_user_message_param_instance = ChatCompletionUserMessageParam.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionUserMessageParam.to_json()) + +# 
convert the object into a dict +chat_completion_user_message_param_dict = chat_completion_user_message_param_instance.to_dict() +# create an instance of ChatCompletionUserMessageParam from a dict +chat_completion_user_message_param_from_dict = ChatCompletionUserMessageParam.from_dict(chat_completion_user_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionApi.md b/src/together/generated/docs/CompletionApi.md new file mode 100644 index 00000000..3e0ef088 --- /dev/null +++ b/src/together/generated/docs/CompletionApi.md @@ -0,0 +1,93 @@ +# together.generated.CompletionApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**completions**](CompletionApi.md#completions) | **POST** /completions | Create completion + + +# **completions** +> CompletionResponse completions(completion_request=completion_request) + +Create completion + +Query a language, code, or image model. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.completion_request import CompletionRequest +from together.generated.models.completion_response import CompletionResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.CompletionApi(api_client) + completion_request = together.generated.CompletionRequest() # CompletionRequest | (optional) + + try: + # Create completion + api_response = await api_instance.completions(completion_request=completion_request) + print("The response of CompletionApi->completions:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling CompletionApi->completions: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **completion_request** | [**CompletionRequest**](CompletionRequest.md)| | [optional] + +### Return type + +[**CompletionResponse**](CompletionResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json, text/event-stream + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**400** | BadRequest | - | +**401** | Unauthorized | - | +**404** | NotFound | - | +**429** | RateLimit | - | +**503** | Overloaded | - | +**504** | Timeout | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChoice.md b/src/together/generated/docs/CompletionChoice.md new file mode 100644 index 00000000..8c8f978c --- /dev/null +++ b/src/together/generated/docs/CompletionChoice.md @@ -0,0 +1,27 @@ +# CompletionChoice + + 
+## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **str** | | [optional] + +## Example + +```python +from together.generated.models.completion_choice import CompletionChoice + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionChoice from a JSON string +completion_choice_instance = CompletionChoice.from_json(json) +# print the JSON string representation of the object +print(CompletionChoice.to_json()) + +# convert the object into a dict +completion_choice_dict = completion_choice_instance.to_dict() +# create an instance of CompletionChoice from a dict +completion_choice_from_dict = CompletionChoice.from_dict(completion_choice_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChoicesDataInner.md b/src/together/generated/docs/CompletionChoicesDataInner.md new file mode 100644 index 00000000..370136e4 --- /dev/null +++ b/src/together/generated/docs/CompletionChoicesDataInner.md @@ -0,0 +1,30 @@ +# CompletionChoicesDataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **str** | | [optional] +**seed** | **int** | | [optional] +**finish_reason** | [**FinishReason**](FinishReason.md) | | [optional] +**logprobs** | [**LogprobsPart**](.md) | | [optional] + +## Example + +```python +from together.generated.models.completion_choices_data_inner import CompletionChoicesDataInner + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionChoicesDataInner from a JSON string +completion_choices_data_inner_instance = CompletionChoicesDataInner.from_json(json) +# print the JSON string representation of the object +print(CompletionChoicesDataInner.to_json()) + +# convert the object into a 
dict +completion_choices_data_inner_dict = completion_choices_data_inner_instance.to_dict() +# create an instance of CompletionChoicesDataInner from a dict +completion_choices_data_inner_from_dict = CompletionChoicesDataInner.from_dict(completion_choices_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChunk.md b/src/together/generated/docs/CompletionChunk.md new file mode 100644 index 00000000..2d9fe0e5 --- /dev/null +++ b/src/together/generated/docs/CompletionChunk.md @@ -0,0 +1,32 @@ +# CompletionChunk + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**token** | [**CompletionToken**](CompletionToken.md) | | +**choices** | [**List[CompletionChoice]**](CompletionChoice.md) | | +**usage** | [**CompletionChunkUsage**](CompletionChunkUsage.md) | | +**seed** | **int** | | [optional] +**finish_reason** | [**FinishReason**](FinishReason.md) | | + +## Example + +```python +from together.generated.models.completion_chunk import CompletionChunk + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionChunk from a JSON string +completion_chunk_instance = CompletionChunk.from_json(json) +# print the JSON string representation of the object +print(CompletionChunk.to_json()) + +# convert the object into a dict +completion_chunk_dict = completion_chunk_instance.to_dict() +# create an instance of CompletionChunk from a dict +completion_chunk_from_dict = CompletionChunk.from_dict(completion_chunk_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChunkUsage.md 
b/src/together/generated/docs/CompletionChunkUsage.md new file mode 100644 index 00000000..3e74c9f4 --- /dev/null +++ b/src/together/generated/docs/CompletionChunkUsage.md @@ -0,0 +1,29 @@ +# CompletionChunkUsage + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**prompt_tokens** | **int** | | +**completion_tokens** | **int** | | +**total_tokens** | **int** | | + +## Example + +```python +from together.generated.models.completion_chunk_usage import CompletionChunkUsage + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionChunkUsage from a JSON string +completion_chunk_usage_instance = CompletionChunkUsage.from_json(json) +# print the JSON string representation of the object +print(CompletionChunkUsage.to_json()) + +# convert the object into a dict +completion_chunk_usage_dict = completion_chunk_usage_instance.to_dict() +# create an instance of CompletionChunkUsage from a dict +completion_chunk_usage_from_dict = CompletionChunkUsage.from_dict(completion_chunk_usage_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionEvent.md b/src/together/generated/docs/CompletionEvent.md new file mode 100644 index 00000000..92acef57 --- /dev/null +++ b/src/together/generated/docs/CompletionEvent.md @@ -0,0 +1,27 @@ +# CompletionEvent + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**CompletionChunk**](CompletionChunk.md) | | + +## Example + +```python +from together.generated.models.completion_event import CompletionEvent + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionEvent from a JSON string +completion_event_instance = CompletionEvent.from_json(json) +# print the JSON string 
representation of the object +print(CompletionEvent.to_json()) + +# convert the object into a dict +completion_event_dict = completion_event_instance.to_dict() +# create an instance of CompletionEvent from a dict +completion_event_from_dict = CompletionEvent.from_dict(completion_event_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionRequest.md b/src/together/generated/docs/CompletionRequest.md new file mode 100644 index 00000000..0af19ec6 --- /dev/null +++ b/src/together/generated/docs/CompletionRequest.md @@ -0,0 +1,44 @@ +# CompletionRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**prompt** | **str** | A string providing context for the model to complete. | +**model** | [**CompletionRequestModel**](CompletionRequestModel.md) | | +**max_tokens** | **int** | The maximum number of tokens to generate. | [optional] +**stop** | **List[str]** | A list of string sequences that will truncate (stop) inference text output. For example, \"</s>\" will stop generation as soon as the model generates the given token. | [optional] +**temperature** | **float** | A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output. | [optional] +**top_p** | **float** | A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text. 
| [optional] +**top_k** | **int** | An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. | [optional] +**repetition_penalty** | **float** | A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. | [optional] +**stream** | **bool** | If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results. | [optional] +**logprobs** | **int** | Determines the number of most likely tokens to return at each token position log probabilities to return. | [optional] +**echo** | **bool** | If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs. | [optional] +**n** | **int** | The number of completions to generate for each prompt. | [optional] +**safety_model** | [**CompletionRequestSafetyModel**](CompletionRequestSafetyModel.md) | | [optional] +**min_p** | **float** | A number between 0 and 1 that can be used as an alternative to top-p and top-k. | [optional] +**presence_penalty** | **float** | A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics. | [optional] +**frequency_penalty** | **float** | A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned. | [optional] +**logit_bias** | **Dict[str, float]** | Adjusts the likelihood of specific tokens appearing in the generated output. | [optional] +**seed** | **int** | Seed value for reproducibility. 
| [optional] + +## Example + +```python +from together.generated.models.completion_request import CompletionRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionRequest from a JSON string +completion_request_instance = CompletionRequest.from_json(json) +# print the JSON string representation of the object +print(CompletionRequest.to_json()) + +# convert the object into a dict +completion_request_dict = completion_request_instance.to_dict() +# create an instance of CompletionRequest from a dict +completion_request_from_dict = CompletionRequest.from_dict(completion_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionRequestModel.md b/src/together/generated/docs/CompletionRequestModel.md new file mode 100644 index 00000000..15040351 --- /dev/null +++ b/src/together/generated/docs/CompletionRequestModel.md @@ -0,0 +1,27 @@ +# CompletionRequestModel + +The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.completion_request_model import CompletionRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionRequestModel from a JSON string +completion_request_model_instance = CompletionRequestModel.from_json(json) +# print the JSON string representation of the object +print(CompletionRequestModel.to_json()) + +# convert the object into a dict +completion_request_model_dict = completion_request_model_instance.to_dict() +# create an instance of CompletionRequestModel from a dict +completion_request_model_from_dict = CompletionRequestModel.from_dict(completion_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionRequestSafetyModel.md b/src/together/generated/docs/CompletionRequestSafetyModel.md new file mode 100644 index 00000000..a5b83b73 --- /dev/null +++ b/src/together/generated/docs/CompletionRequestSafetyModel.md @@ -0,0 +1,27 @@ +# CompletionRequestSafetyModel + +The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). 
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.completion_request_safety_model import CompletionRequestSafetyModel + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionRequestSafetyModel from a JSON string +completion_request_safety_model_instance = CompletionRequestSafetyModel.from_json(json) +# print the JSON string representation of the object +print(CompletionRequestSafetyModel.to_json()) + +# convert the object into a dict +completion_request_safety_model_dict = completion_request_safety_model_instance.to_dict() +# create an instance of CompletionRequestSafetyModel from a dict +completion_request_safety_model_from_dict = CompletionRequestSafetyModel.from_dict(completion_request_safety_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionResponse.md b/src/together/generated/docs/CompletionResponse.md new file mode 100644 index 00000000..6170558e --- /dev/null +++ b/src/together/generated/docs/CompletionResponse.md @@ -0,0 +1,33 @@ +# CompletionResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**choices** | [**List[CompletionChoicesDataInner]**](CompletionChoicesDataInner.md) | | +**prompt** | [**List[PromptPartInner]**](PromptPartInner.md) | | [optional] +**usage** | [**UsageData**](UsageData.md) | | +**created** | **int** | | +**model** | **str** | | +**object** | **str** | | + +## Example + +```python +from together.generated.models.completion_response import CompletionResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionResponse from a JSON string +completion_response_instance = 
CompletionResponse.from_json(json) +# print the JSON string representation of the object +print(CompletionResponse.to_json()) + +# convert the object into a dict +completion_response_dict = completion_response_instance.to_dict() +# create an instance of CompletionResponse from a dict +completion_response_from_dict = CompletionResponse.from_dict(completion_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionStream.md b/src/together/generated/docs/CompletionStream.md new file mode 100644 index 00000000..aa6342a9 --- /dev/null +++ b/src/together/generated/docs/CompletionStream.md @@ -0,0 +1,27 @@ +# CompletionStream + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | **str** | | + +## Example + +```python +from together.generated.models.completion_stream import CompletionStream + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionStream from a JSON string +completion_stream_instance = CompletionStream.from_json(json) +# print the JSON string representation of the object +print(CompletionStream.to_json()) + +# convert the object into a dict +completion_stream_dict = completion_stream_instance.to_dict() +# create an instance of CompletionStream from a dict +completion_stream_from_dict = CompletionStream.from_dict(completion_stream_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionToken.md b/src/together/generated/docs/CompletionToken.md new file mode 100644 index 00000000..f4d5f6b9 --- /dev/null +++ b/src/together/generated/docs/CompletionToken.md @@ -0,0 +1,30 @@ +# CompletionToken + + +## Properties + +Name 
| Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **int** | | +**text** | **str** | | +**logprob** | **float** | | +**special** | **bool** | | + +## Example + +```python +from together.generated.models.completion_token import CompletionToken + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionToken from a JSON string +completion_token_instance = CompletionToken.from_json(json) +# print the JSON string representation of the object +print(CompletionToken.to_json()) + +# convert the object into a dict +completion_token_dict = completion_token_instance.to_dict() +# create an instance of CompletionToken from a dict +completion_token_from_dict = CompletionToken.from_dict(completion_token_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CreateEndpointRequest.md b/src/together/generated/docs/CreateEndpointRequest.md new file mode 100644 index 00000000..add9632f --- /dev/null +++ b/src/together/generated/docs/CreateEndpointRequest.md @@ -0,0 +1,33 @@ +# CreateEndpointRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**display_name** | **str** | A human-readable name for the endpoint | [optional] +**model** | **str** | The model to deploy on this endpoint | +**hardware** | **str** | The hardware configuration to use for this endpoint | +**autoscaling** | [**Autoscaling**](Autoscaling.md) | Configuration for automatic scaling of the endpoint | +**disable_prompt_cache** | **bool** | Whether to disable the prompt cache for this endpoint | [optional] [default to False] +**disable_speculative_decoding** | **bool** | Whether to disable speculative decoding for this endpoint | [optional] [default to False] +**state** | **str** | The desired state of the 
endpoint | [optional] [default to 'STARTED'] + +## Example + +```python +from together.generated.models.create_endpoint_request import CreateEndpointRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of CreateEndpointRequest from a JSON string +create_endpoint_request_instance = CreateEndpointRequest.from_json(json) +# print the JSON string representation of the object +print(CreateEndpointRequest.to_json()) + +# convert the object into a dict +create_endpoint_request_dict = create_endpoint_request_instance.to_dict() +# create an instance of CreateEndpointRequest from a dict +create_endpoint_request_from_dict = CreateEndpointRequest.from_dict(create_endpoint_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/DedicatedEndpoint.md b/src/together/generated/docs/DedicatedEndpoint.md new file mode 100644 index 00000000..eb9079f0 --- /dev/null +++ b/src/together/generated/docs/DedicatedEndpoint.md @@ -0,0 +1,38 @@ +# DedicatedEndpoint + +Details about a dedicated endpoint deployment + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | The type of object | +**id** | **str** | Unique identifier for the endpoint | +**name** | **str** | System name for the endpoint | +**display_name** | **str** | Human-readable name for the endpoint | +**model** | **str** | The model deployed on this endpoint | +**hardware** | **str** | The hardware configuration used for this endpoint | +**type** | **str** | The type of endpoint | +**owner** | **str** | The owner of this endpoint | +**state** | **str** | Current state of the endpoint | +**autoscaling** | [**Autoscaling**](Autoscaling.md) | Configuration for automatic scaling of the endpoint | +**created_at** | **datetime** | Timestamp when the endpoint was 
created | + +## Example + +```python +from together.generated.models.dedicated_endpoint import DedicatedEndpoint + +# TODO update the JSON string below +json = "{}" +# create an instance of DedicatedEndpoint from a JSON string +dedicated_endpoint_instance = DedicatedEndpoint.from_json(json) +# print the JSON string representation of the object +print(DedicatedEndpoint.to_json()) + +# convert the object into a dict +dedicated_endpoint_dict = dedicated_endpoint_instance.to_dict() +# create an instance of DedicatedEndpoint from a dict +dedicated_endpoint_from_dict = DedicatedEndpoint.from_dict(dedicated_endpoint_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsApi.md b/src/together/generated/docs/EmbeddingsApi.md new file mode 100644 index 00000000..3035a5ac --- /dev/null +++ b/src/together/generated/docs/EmbeddingsApi.md @@ -0,0 +1,93 @@ +# together.generated.EmbeddingsApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**embeddings**](EmbeddingsApi.md#embeddings) | **POST** /embeddings | Create embedding + + +# **embeddings** +> EmbeddingsResponse embeddings(embeddings_request=embeddings_request) + +Create embedding + +Query an embedding model for a given string of text. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.embeddings_request import EmbeddingsRequest +from together.generated.models.embeddings_response import EmbeddingsResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EmbeddingsApi(api_client) + embeddings_request = together.generated.EmbeddingsRequest() # EmbeddingsRequest | (optional) + + try: + # Create embedding + api_response = await api_instance.embeddings(embeddings_request=embeddings_request) + print("The response of EmbeddingsApi->embeddings:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling EmbeddingsApi->embeddings: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **embeddings_request** | [**EmbeddingsRequest**](EmbeddingsRequest.md)| | [optional] + +### Return type + +[**EmbeddingsResponse**](EmbeddingsResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**400** | BadRequest | - | +**401** | Unauthorized | - | +**404** | NotFound | - | +**429** | RateLimit | - | +**503** | Overloaded | - | +**504** | Timeout | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsRequest.md b/src/together/generated/docs/EmbeddingsRequest.md new file mode 100644 index 00000000..f14bc778 --- /dev/null +++ b/src/together/generated/docs/EmbeddingsRequest.md @@ -0,0 +1,28 @@ +# EmbeddingsRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**model** | [**EmbeddingsRequestModel**](EmbeddingsRequestModel.md) | | +**input** | [**EmbeddingsRequestInput**](EmbeddingsRequestInput.md) | | + +## Example + +```python +from together.generated.models.embeddings_request import EmbeddingsRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of EmbeddingsRequest from a JSON string +embeddings_request_instance = EmbeddingsRequest.from_json(json) +# print the JSON string representation of the object +print(EmbeddingsRequest.to_json()) + +# convert the object into a dict +embeddings_request_dict = embeddings_request_instance.to_dict() +# create an instance of EmbeddingsRequest from a dict +embeddings_request_from_dict = EmbeddingsRequest.from_dict(embeddings_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsRequestInput.md b/src/together/generated/docs/EmbeddingsRequestInput.md new file mode 100644 index 00000000..e3b4af93 --- /dev/null +++ b/src/together/generated/docs/EmbeddingsRequestInput.md @@ -0,0 +1,26 @@ +# EmbeddingsRequestInput + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.embeddings_request_input import EmbeddingsRequestInput + +# TODO update the JSON string below +json = "{}" +# create an instance of 
EmbeddingsRequestInput from a JSON string +embeddings_request_input_instance = EmbeddingsRequestInput.from_json(json) +# print the JSON string representation of the object +print(EmbeddingsRequestInput.to_json()) + +# convert the object into a dict +embeddings_request_input_dict = embeddings_request_input_instance.to_dict() +# create an instance of EmbeddingsRequestInput from a dict +embeddings_request_input_from_dict = EmbeddingsRequestInput.from_dict(embeddings_request_input_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsRequestModel.md b/src/together/generated/docs/EmbeddingsRequestModel.md new file mode 100644 index 00000000..6376e042 --- /dev/null +++ b/src/together/generated/docs/EmbeddingsRequestModel.md @@ -0,0 +1,27 @@ +# EmbeddingsRequestModel + +The name of the embedding model to use.

[See all of Together AI's embedding models](https://docs.together.ai/docs/serverless-models#embedding-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.embeddings_request_model import EmbeddingsRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of EmbeddingsRequestModel from a JSON string +embeddings_request_model_instance = EmbeddingsRequestModel.from_json(json) +# print the JSON string representation of the object +print(EmbeddingsRequestModel.to_json()) + +# convert the object into a dict +embeddings_request_model_dict = embeddings_request_model_instance.to_dict() +# create an instance of EmbeddingsRequestModel from a dict +embeddings_request_model_from_dict = EmbeddingsRequestModel.from_dict(embeddings_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsResponse.md b/src/together/generated/docs/EmbeddingsResponse.md new file mode 100644 index 00000000..0e08c129 --- /dev/null +++ b/src/together/generated/docs/EmbeddingsResponse.md @@ -0,0 +1,29 @@ +# EmbeddingsResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**model** | **str** | | +**data** | [**List[EmbeddingsResponseDataInner]**](EmbeddingsResponseDataInner.md) | | + +## Example + +```python +from together.generated.models.embeddings_response import EmbeddingsResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of EmbeddingsResponse from a JSON string +embeddings_response_instance = EmbeddingsResponse.from_json(json) +# print the JSON string representation of the object +print(EmbeddingsResponse.to_json()) + +# convert the 
object into a dict +embeddings_response_dict = embeddings_response_instance.to_dict() +# create an instance of EmbeddingsResponse from a dict +embeddings_response_from_dict = EmbeddingsResponse.from_dict(embeddings_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsResponseDataInner.md b/src/together/generated/docs/EmbeddingsResponseDataInner.md new file mode 100644 index 00000000..cc11de78 --- /dev/null +++ b/src/together/generated/docs/EmbeddingsResponseDataInner.md @@ -0,0 +1,29 @@ +# EmbeddingsResponseDataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**embedding** | **List[float]** | | +**index** | **int** | | + +## Example + +```python +from together.generated.models.embeddings_response_data_inner import EmbeddingsResponseDataInner + +# TODO update the JSON string below +json = "{}" +# create an instance of EmbeddingsResponseDataInner from a JSON string +embeddings_response_data_inner_instance = EmbeddingsResponseDataInner.from_json(json) +# print the JSON string representation of the object +print(EmbeddingsResponseDataInner.to_json()) + +# convert the object into a dict +embeddings_response_data_inner_dict = embeddings_response_data_inner_instance.to_dict() +# create an instance of EmbeddingsResponseDataInner from a dict +embeddings_response_data_inner_from_dict = EmbeddingsResponseDataInner.from_dict(embeddings_response_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EndpointPricing.md b/src/together/generated/docs/EndpointPricing.md new file mode 100644 index 00000000..e557beff --- /dev/null +++ 
b/src/together/generated/docs/EndpointPricing.md @@ -0,0 +1,28 @@ +# EndpointPricing + +Pricing details for using an endpoint + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**cents_per_minute** | **float** | Cost per minute of endpoint uptime in cents | + +## Example + +```python +from together.generated.models.endpoint_pricing import EndpointPricing + +# TODO update the JSON string below +json = "{}" +# create an instance of EndpointPricing from a JSON string +endpoint_pricing_instance = EndpointPricing.from_json(json) +# print the JSON string representation of the object +print(EndpointPricing.to_json()) + +# convert the object into a dict +endpoint_pricing_dict = endpoint_pricing_instance.to_dict() +# create an instance of EndpointPricing from a dict +endpoint_pricing_from_dict = EndpointPricing.from_dict(endpoint_pricing_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EndpointsApi.md b/src/together/generated/docs/EndpointsApi.md new file mode 100644 index 00000000..74fc70f1 --- /dev/null +++ b/src/together/generated/docs/EndpointsApi.md @@ -0,0 +1,416 @@ +# together.generated.EndpointsApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**create_endpoint**](EndpointsApi.md#create_endpoint) | **POST** /endpoints | Create a dedicated endpoint, it will start automatically +[**delete_endpoint**](EndpointsApi.md#delete_endpoint) | **DELETE** /endpoints/{endpointId} | Delete endpoint +[**get_endpoint**](EndpointsApi.md#get_endpoint) | **GET** /endpoints/{endpointId} | Get endpoint by ID +[**list_endpoints**](EndpointsApi.md#list_endpoints) | **GET** /endpoints | List all endpoints, can be filtered by type 
+[**update_endpoint**](EndpointsApi.md#update_endpoint) | **PATCH** /endpoints/{endpointId} | Update endpoint, this can also be used to start or stop a dedicated endpoint + + +# **create_endpoint** +> DedicatedEndpoint create_endpoint(create_endpoint_request) + +Create a dedicated endpoint, it will start automatically + +Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.create_endpoint_request import CreateEndpointRequest +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EndpointsApi(api_client) + create_endpoint_request = together.generated.CreateEndpointRequest() # CreateEndpointRequest | + + try: + # Create a dedicated endpoint, it will start automatically + api_response = await api_instance.create_endpoint(create_endpoint_request) + print("The response of EndpointsApi->create_endpoint:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling EndpointsApi->create_endpoint: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **create_endpoint_request** | [**CreateEndpointRequest**](CreateEndpointRequest.md)| | + +### Return type + +[**DedicatedEndpoint**](DedicatedEndpoint.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**403** | Unauthorized | - | +**500** | Internal error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **delete_endpoint** +> delete_endpoint(endpoint_id) + +Delete endpoint + +Permanently deletes an endpoint. This action cannot be undone. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EndpointsApi(api_client) + endpoint_id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7' # str | The ID of the endpoint to delete + + try: + # Delete endpoint + await api_instance.delete_endpoint(endpoint_id) + except Exception as e: + print("Exception when calling EndpointsApi->delete_endpoint: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **endpoint_id** | **str**| The ID of the endpoint to delete | + +### Return type + +void (empty response body) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**204** | No Content - Endpoint successfully deleted | - | +**403** | Unauthorized | - | +**404** | Not Found | - | +**500** | Internal error | - 
| + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **get_endpoint** +> DedicatedEndpoint get_endpoint(endpoint_id) + +Get endpoint by ID + +Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EndpointsApi(api_client) + endpoint_id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7' # str | The ID of the endpoint to retrieve + + try: + # Get endpoint by ID + api_response = await api_instance.get_endpoint(endpoint_id) + print("The response of EndpointsApi->get_endpoint:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling EndpointsApi->get_endpoint: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **endpoint_id** | **str**| The ID of the endpoint to retrieve | + +### Return type + +[**DedicatedEndpoint**](DedicatedEndpoint.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**403** | Unauthorized | - | +**404** | Not Found | - | +**500** | Internal error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **list_endpoints** +> ListEndpoints200Response list_endpoints(type=type) + +List all endpoints, can be filtered by type + +Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.list_endpoints200_response import ListEndpoints200Response +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EndpointsApi(api_client) + type = 'dedicated' # str | Filter endpoints by type (optional) + + try: + # List all endpoints, can be filtered by type + api_response = await api_instance.list_endpoints(type=type) + print("The response of EndpointsApi->list_endpoints:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling EndpointsApi->list_endpoints: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **type** | **str**| Filter endpoints by type | [optional] + +### Return type + +[**ListEndpoints200Response**](ListEndpoints200Response.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response 
headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**403** | Unauthorized | - | +**500** | Internal error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **update_endpoint** +> DedicatedEndpoint update_endpoint(endpoint_id, update_endpoint_request) + +Update endpoint, this can also be used to start or stop a dedicated endpoint + +Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.update_endpoint_request import UpdateEndpointRequest +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EndpointsApi(api_client) + endpoint_id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7' # str | The ID of the endpoint to update + update_endpoint_request = together.generated.UpdateEndpointRequest() # UpdateEndpointRequest | + + try: + # Update endpoint, this can also be used to start or stop a dedicated endpoint + api_response = await api_instance.update_endpoint(endpoint_id, update_endpoint_request) + print("The response of EndpointsApi->update_endpoint:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling EndpointsApi->update_endpoint: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **endpoint_id** | **str**| The ID of the endpoint to update | + **update_endpoint_request** | [**UpdateEndpointRequest**](UpdateEndpointRequest.md)| | + +### Return type + +[**DedicatedEndpoint**](DedicatedEndpoint.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**403** | Unauthorized | - | +**404** | Not Found | - | +**500** | Internal error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ErrorData.md b/src/together/generated/docs/ErrorData.md new file mode 100644 
index 00000000..d29d8ec3 --- /dev/null +++ b/src/together/generated/docs/ErrorData.md @@ -0,0 +1,27 @@ +# ErrorData + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**error** | [**ErrorDataError**](ErrorDataError.md) | | + +## Example + +```python +from together.generated.models.error_data import ErrorData + +# TODO update the JSON string below +json = "{}" +# create an instance of ErrorData from a JSON string +error_data_instance = ErrorData.from_json(json) +# print the JSON string representation of the object +print(error_data_instance.to_json()) + +# convert the object into a dict +error_data_dict = error_data_instance.to_dict() +# create an instance of ErrorData from a dict +error_data_from_dict = ErrorData.from_dict(error_data_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ErrorDataError.md b/src/together/generated/docs/ErrorDataError.md new file mode 100644 index 00000000..d4990950 --- /dev/null +++ b/src/together/generated/docs/ErrorDataError.md @@ -0,0 +1,30 @@ +# ErrorDataError + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**message** | **str** | | +**type** | **str** | | +**param** | **str** | | [optional] +**code** | **str** | | [optional] + +## Example + +```python +from together.generated.models.error_data_error import ErrorDataError + +# TODO update the JSON string below +json = "{}" +# create an instance of ErrorDataError from a JSON string +error_data_error_instance = ErrorDataError.from_json(json) +# print the JSON string representation of the object +print(error_data_error_instance.to_json()) + +# convert the object into a dict +error_data_error_dict = error_data_error_instance.to_dict() +# create an instance of ErrorDataError from a dict 
+error_data_error_from_dict = ErrorDataError.from_dict(error_data_error_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileDeleteResponse.md b/src/together/generated/docs/FileDeleteResponse.md new file mode 100644 index 00000000..e20ba15d --- /dev/null +++ b/src/together/generated/docs/FileDeleteResponse.md @@ -0,0 +1,28 @@ +# FileDeleteResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | [optional] +**deleted** | **bool** | | [optional] + +## Example + +```python +from together.generated.models.file_delete_response import FileDeleteResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of FileDeleteResponse from a JSON string +file_delete_response_instance = FileDeleteResponse.from_json(json) +# print the JSON string representation of the object +print(file_delete_response_instance.to_json()) + +# convert the object into a dict +file_delete_response_dict = file_delete_response_instance.to_dict() +# create an instance of FileDeleteResponse from a dict +file_delete_response_from_dict = FileDeleteResponse.from_dict(file_delete_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileList.md b/src/together/generated/docs/FileList.md new file mode 100644 index 00000000..83bc78a6 --- /dev/null +++ b/src/together/generated/docs/FileList.md @@ -0,0 +1,27 @@ +# FileList + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**List[FileResponse]**](FileResponse.md) | | + +## Example + +```python +from together.generated.models.file_list import FileList + 
+# TODO update the JSON string below +json = "{}" +# create an instance of FileList from a JSON string +file_list_instance = FileList.from_json(json) +# print the JSON string representation of the object +print(file_list_instance.to_json()) + +# convert the object into a dict +file_list_dict = file_list_instance.to_dict() +# create an instance of FileList from a dict +file_list_from_dict = FileList.from_dict(file_list_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileObject.md b/src/together/generated/docs/FileObject.md new file mode 100644 index 00000000..a95e1fe1 --- /dev/null +++ b/src/together/generated/docs/FileObject.md @@ -0,0 +1,30 @@ +# FileObject + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | [optional] +**id** | **str** | | [optional] +**filename** | **str** | | [optional] +**size** | **int** | | [optional] + +## Example + +```python +from together.generated.models.file_object import FileObject + +# TODO update the JSON string below +json = "{}" +# create an instance of FileObject from a JSON string +file_object_instance = FileObject.from_json(json) +# print the JSON string representation of the object +print(file_object_instance.to_json()) + +# convert the object into a dict +file_object_dict = file_object_instance.to_dict() +# create an instance of FileObject from a dict +file_object_from_dict = FileObject.from_dict(file_object_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileResponse.md b/src/together/generated/docs/FileResponse.md new file mode 100644 index 00000000..88317020 --- /dev/null +++ b/src/together/generated/docs/FileResponse.md @@ 
-0,0 +1,35 @@ +# FileResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**object** | **str** | | +**created_at** | **int** | | +**filename** | **str** | | +**bytes** | **int** | | +**purpose** | **str** | | +**processed** | **bool** | | +**file_type** | **str** | | +**line_count** | **int** | | + +## Example + +```python +from together.generated.models.file_response import FileResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of FileResponse from a JSON string +file_response_instance = FileResponse.from_json(json) +# print the JSON string representation of the object +print(file_response_instance.to_json()) + +# convert the object into a dict +file_response_dict = file_response_instance.to_dict() +# create an instance of FileResponse from a dict +file_response_from_dict = FileResponse.from_dict(file_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FilesApi.md b/src/together/generated/docs/FilesApi.md new file mode 100644 index 00000000..5d0e6962 --- /dev/null +++ b/src/together/generated/docs/FilesApi.md @@ -0,0 +1,320 @@ +# together.generated.FilesApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**files_get**](FilesApi.md#files_get) | **GET** /files | List all files +[**files_id_content_get**](FilesApi.md#files_id_content_get) | **GET** /files/{id}/content | Get file contents +[**files_id_delete**](FilesApi.md#files_id_delete) | **DELETE** /files/{id} | Delete a file +[**files_id_get**](FilesApi.md#files_id_get) | **GET** /files/{id} | List file + + +# **files_get** +> FileList files_get() + +List all files + +List the metadata for all uploaded data files. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.file_list import FileList +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FilesApi(api_client) + + try: + # List all files + api_response = await api_instance.files_get() + print("The response of FilesApi->files_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FilesApi->files_get: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. 
+ +### Return type + +[**FileList**](FileList.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | List of files | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **files_id_content_get** +> FileObject files_id_content_get(id) + +Get file contents + +Get the contents of a single uploaded data file. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.file_object import FileObject +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FilesApi(api_client) + id = 'id_example' # str | + + try: + # Get file contents + api_response = await api_instance.files_id_content_get(id) + print("The response of FilesApi->files_id_content_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FilesApi->files_id_content_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| | + +### Return type + +[**FileObject**](FileObject.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | File content retrieved successfully | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **files_id_delete** +> FileDeleteResponse files_id_delete(id) + +Delete a file + +Delete a previously uploaded data file. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.file_delete_response import FileDeleteResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FilesApi(api_client) + id = 'id_example' # str | + + try: + # Delete a file + api_response = await api_instance.files_id_delete(id) + print("The response of FilesApi->files_id_delete:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FilesApi->files_id_delete: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| | + +### Return type + +[**FileDeleteResponse**](FileDeleteResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | File deleted successfully | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **files_id_get** +> FileResponse files_id_get(id) + +List file + +List the metadata for a single uploaded data file. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.file_response import FileResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FilesApi(api_client) + id = 'id_example' # str | + + try: + # List file + api_response = await api_instance.files_id_get(id) + print("The response of FilesApi->files_id_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FilesApi->files_id_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| | + +### Return type + +[**FileResponse**](FileResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | File retrieved successfully | - | + +[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTuneEvent.md b/src/together/generated/docs/FineTuneEvent.md new file mode 100644 index 00000000..23eea549 --- /dev/null +++ b/src/together/generated/docs/FineTuneEvent.md @@ -0,0 +1,40 @@ +# FineTuneEvent + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**created_at** | **str** | | +**level** | [**FinetuneEventLevels**](FinetuneEventLevels.md) | | [optional] +**message** | **str** | | +**type** | [**FinetuneEventType**](FinetuneEventType.md) | | +**param_count** | **int** | | +**token_count** | **int** | | +**total_steps** | **int** | | +**wandb_url** | **str** | | +**step** | **int** | | +**checkpoint_path** | **str** | | +**model_path** | **str** | | +**training_offset** | **int** | | +**hash** | **str** | | + +## Example + +```python +from together.generated.models.fine_tune_event import FineTuneEvent + +# TODO update the JSON string below +json = "{}" +# create an instance of FineTuneEvent from a JSON string +fine_tune_event_instance = FineTuneEvent.from_json(json) +# print the JSON string representation of the object +print(fine_tune_event_instance.to_json()) + +# convert the object into a dict +fine_tune_event_dict = fine_tune_event_instance.to_dict() +# create an instance of FineTuneEvent from a dict +fine_tune_event_from_dict = FineTuneEvent.from_dict(fine_tune_event_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTunesPostRequest.md b/src/together/generated/docs/FineTunesPostRequest.md new file mode 100644 index 00000000..f75f37cd --- /dev/null +++ b/src/together/generated/docs/FineTunesPostRequest.md @@ -0,0 +1,45 
@@ +# FineTunesPostRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**training_file** | **str** | File-ID of a training file uploaded to the Together API | +**validation_file** | **str** | File-ID of a validation file uploaded to the Together API | [optional] +**model** | **str** | Name of the base model to run fine-tune job on | +**n_epochs** | **int** | Number of epochs for fine-tuning | [optional] [default to 1] +**n_checkpoints** | **int** | Number of checkpoints to save during fine-tuning | [optional] [default to 1] +**n_evals** | **int** | Number of evaluations to be run on a given validation set during training | [optional] [default to 0] +**batch_size** | **int** | Batch size for fine-tuning | [optional] [default to 32] +**learning_rate** | **float** | Learning rate multiplier to use for training | [optional] [default to 0.000010] +**lr_scheduler** | [**LRScheduler**](.md) | | [optional] +**warmup_ratio** | **float** | The percent of steps at the start of training to linearly increase the learning rate. | [optional] [default to 0.0] +**max_grad_norm** | **float** | Max gradient norm to be used for gradient clipping. Set to 0 to disable. | [optional] [default to 1.0] +**weight_decay** | **float** | Weight decay | [optional] [default to 0.0] +**suffix** | **str** | Suffix that will be added to your fine-tuned model name | [optional] +**wandb_api_key** | **str** | API key for Weights & Biases integration | [optional] +**wandb_base_url** | **str** | The base URL of a dedicated Weights & Biases instance. | [optional] +**wandb_project_name** | **str** | The Weights & Biases project for your run. If not specified, will use `together` as the project name. | [optional] +**wandb_name** | **str** | The Weights & Biases name for your run. 
| [optional] +**train_on_inputs** | [**FineTunesPostRequestTrainOnInputs**](FineTunesPostRequestTrainOnInputs.md) | | [optional] [default to False] +**training_type** | [**FineTunesPostRequestTrainingType**](FineTunesPostRequestTrainingType.md) | | [optional] + +## Example + +```python +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of FineTunesPostRequest from a JSON string +fine_tunes_post_request_instance = FineTunesPostRequest.from_json(json) +# print the JSON string representation of the object +print(fine_tunes_post_request_instance.to_json()) + +# convert the object into a dict +fine_tunes_post_request_dict = fine_tunes_post_request_instance.to_dict() +# create an instance of FineTunesPostRequest from a dict +fine_tunes_post_request_from_dict = FineTunesPostRequest.from_dict(fine_tunes_post_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md b/src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md new file mode 100644 index 00000000..554e32a3 --- /dev/null +++ b/src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md @@ -0,0 +1,27 @@ +# FineTunesPostRequestTrainOnInputs + +Whether to mask the user messages in conversational data or prompts in instruction data. 
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.fine_tunes_post_request_train_on_inputs import FineTunesPostRequestTrainOnInputs + +# TODO update the JSON string below +json = "{}" +# create an instance of FineTunesPostRequestTrainOnInputs from a JSON string +fine_tunes_post_request_train_on_inputs_instance = FineTunesPostRequestTrainOnInputs.from_json(json) +# print the JSON string representation of the object +print(fine_tunes_post_request_train_on_inputs_instance.to_json()) + +# convert the object into a dict +fine_tunes_post_request_train_on_inputs_dict = fine_tunes_post_request_train_on_inputs_instance.to_dict() +# create an instance of FineTunesPostRequestTrainOnInputs from a dict +fine_tunes_post_request_train_on_inputs_from_dict = FineTunesPostRequestTrainOnInputs.from_dict(fine_tunes_post_request_train_on_inputs_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTunesPostRequestTrainingType.md b/src/together/generated/docs/FineTunesPostRequestTrainingType.md new file mode 100644 index 00000000..92af3191 --- /dev/null +++ b/src/together/generated/docs/FineTunesPostRequestTrainingType.md @@ -0,0 +1,31 @@ +# FineTunesPostRequestTrainingType + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | | +**lora_r** | **int** | | +**lora_alpha** | **int** | | +**lora_dropout** | **float** | | [optional] [default to 0.0] +**lora_trainable_modules** | **str** | | [optional] [default to 'all-linear'] + +## Example + +```python +from together.generated.models.fine_tunes_post_request_training_type import FineTunesPostRequestTrainingType + +# TODO update the JSON string below +json = "{}" +# create an instance of 
FineTunesPostRequestTrainingType from a JSON string +fine_tunes_post_request_training_type_instance = FineTunesPostRequestTrainingType.from_json(json) +# print the JSON string representation of the object +print(fine_tunes_post_request_training_type_instance.to_json()) + +# convert the object into a dict +fine_tunes_post_request_training_type_dict = fine_tunes_post_request_training_type_instance.to_dict() +# create an instance of FineTunesPostRequestTrainingType from a dict +fine_tunes_post_request_training_type_from_dict = FineTunesPostRequestTrainingType.from_dict(fine_tunes_post_request_training_type_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTuningApi.md b/src/together/generated/docs/FineTuningApi.md new file mode 100644 index 00000000..465f9925 --- /dev/null +++ b/src/together/generated/docs/FineTuningApi.md @@ -0,0 +1,488 @@ +# together.generated.FineTuningApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**fine_tunes_get**](FineTuningApi.md#fine_tunes_get) | **GET** /fine-tunes | List all jobs +[**fine_tunes_id_cancel_post**](FineTuningApi.md#fine_tunes_id_cancel_post) | **POST** /fine-tunes/{id}/cancel | Cancel job +[**fine_tunes_id_events_get**](FineTuningApi.md#fine_tunes_id_events_get) | **GET** /fine-tunes/{id}/events | List job events +[**fine_tunes_id_get**](FineTuningApi.md#fine_tunes_id_get) | **GET** /fine-tunes/{id} | List job +[**fine_tunes_post**](FineTuningApi.md#fine_tunes_post) | **POST** /fine-tunes | Create job +[**finetune_download_get**](FineTuningApi.md#finetune_download_get) | **GET** /finetune/download | Download model + + +# **fine_tunes_get** +> FinetuneList fine_tunes_get() + +List all jobs + +List the metadata for all fine-tuning jobs. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.finetune_list import FinetuneList +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + + try: + # List all jobs + api_response = await api_instance.fine_tunes_get() + print("The response of FineTuningApi->fine_tunes_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->fine_tunes_get: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. 
+ +### Return type + +[**FinetuneList**](FinetuneList.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | List of fine-tune jobs | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **fine_tunes_id_cancel_post** +> FinetuneResponse fine_tunes_id_cancel_post(id) + +Cancel job + +Cancel a currently running fine-tuning job. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.finetune_response import FinetuneResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + id = 'id_example' # str | Fine-tune ID to cancel. A string that starts with `ft-`. 
+ + try: + # Cancel job + api_response = await api_instance.fine_tunes_id_cancel_post(id) + print("The response of FineTuningApi->fine_tunes_id_cancel_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->fine_tunes_id_cancel_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| Fine-tune ID to cancel. A string that starts with `ft-`. | + +### Return type + +[**FinetuneResponse**](FinetuneResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | Successfully cancelled the fine-tuning job. | - | +**400** | Invalid request parameters. | - | +**404** | Fine-tune ID not found. | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **fine_tunes_id_events_get** +> FinetuneListEvents fine_tunes_id_events_get(id) + +List job events + +List the events for a single fine-tuning job. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.finetune_list_events import FinetuneListEvents +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. 
+# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + id = 'id_example' # str | + + try: + # List job events + api_response = await api_instance.fine_tunes_id_events_get(id) + print("The response of FineTuningApi->fine_tunes_id_events_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->fine_tunes_id_events_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| | + +### Return type + +[**FinetuneListEvents**](FinetuneListEvents.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | List of fine-tune events | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **fine_tunes_id_get** +> FinetuneResponse fine_tunes_id_get(id) + +List job + +List the metadata for a single fine-tuning job. 
+
+### Example
+
+* Bearer Authentication (bearerAuth):
+
+```python
+import together.generated
+from together.generated.models.finetune_response import FinetuneResponse
+from together.generated.rest import ApiException
+from pprint import pprint
+import os
+# Defining the host is optional and defaults to https://api.together.xyz/v1
+# See configuration.py for a list of all supported configuration parameters.
+configuration = together.generated.Configuration(
+    host = "https://api.together.xyz/v1"
+)
+
+# The client must configure the authentication and authorization parameters
+# in accordance with the API server security policy.
+# Examples for each auth method are provided below, use the example that
+# satisfies your auth use case.
+
+# Configure Bearer authorization: bearerAuth
+configuration = together.generated.Configuration(
+    access_token = os.environ["BEARER_TOKEN"]
+)
+
+# Enter a context with an instance of the API client
+async with together.generated.ApiClient(configuration) as api_client:
+    # Create an instance of the API class
+    api_instance = together.generated.FineTuningApi(api_client)
+    id = 'id_example' # str |
+
+    try:
+        # List job
+        api_response = await api_instance.fine_tunes_id_get(id)
+        print("The response of FineTuningApi->fine_tunes_id_get:\n")
+        pprint(api_response)
+    except Exception as e:
+        print("Exception when calling FineTuningApi->fine_tunes_id_get: %s\n" % e)
+```
+
+
+
+### Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+ **id** | **str**| |
+
+### Return type
+
+[**FinetuneResponse**](FinetuneResponse.md)
+
+### Authorization
+
+[bearerAuth](../README.md#bearerAuth)
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+### HTTP response details
+
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | Fine-tune job details retrieved successfully | - |
+
+[[Back to top]](#)
[[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **fine_tunes_post** +> FinetuneResponse fine_tunes_post(fine_tunes_post_request) + +Create job + +Use a model to create a fine-tuning job. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest +from together.generated.models.finetune_response import FinetuneResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + fine_tunes_post_request = together.generated.FineTunesPostRequest() # FineTunesPostRequest | + + try: + # Create job + api_response = await api_instance.fine_tunes_post(fine_tunes_post_request) + print("The response of FineTuningApi->fine_tunes_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->fine_tunes_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **fine_tunes_post_request** | [**FineTunesPostRequest**](FineTunesPostRequest.md)| | + +### Return type + +[**FinetuneResponse**](FinetuneResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | Fine-tuning job initiated successfully | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **finetune_download_get** +> FinetuneDownloadResult finetune_download_get(ft_id, checkpoint_step=checkpoint_step, checkpoint=checkpoint, output=output) + +Download model + +Download a compressed fine-tuned model or checkpoint to local disk. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.finetune_download_result import FinetuneDownloadResult +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + ft_id = 'ft_id_example' # str | Fine-tune ID to download. A string that starts with `ft-`. + checkpoint_step = 56 # int | Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. (optional) + checkpoint = 'checkpoint_example' # str | Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. (optional) + output = 'output_example' # str | Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. 
(optional) + + try: + # Download model + api_response = await api_instance.finetune_download_get(ft_id, checkpoint_step=checkpoint_step, checkpoint=checkpoint, output=output) + print("The response of FineTuningApi->finetune_download_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->finetune_download_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **ft_id** | **str**| Fine-tune ID to download. A string that starts with `ft-`. | + **checkpoint_step** | **int**| Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. | [optional] + **checkpoint** | **str**| Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. | [optional] + **output** | **str**| Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. | [optional] + +### Return type + +[**FinetuneDownloadResult**](FinetuneDownloadResult.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | Successfully downloaded the fine-tuned model or checkpoint. | - | +**400** | Invalid request parameters. | - | +**404** | Fine-tune ID not found. 
| - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneDownloadResult.md b/src/together/generated/docs/FinetuneDownloadResult.md new file mode 100644 index 00000000..36bce63b --- /dev/null +++ b/src/together/generated/docs/FinetuneDownloadResult.md @@ -0,0 +1,31 @@ +# FinetuneDownloadResult + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | [optional] +**id** | **str** | | [optional] +**checkpoint_step** | **int** | | [optional] +**filename** | **str** | | [optional] +**size** | **int** | | [optional] + +## Example + +```python +from together.generated.models.finetune_download_result import FinetuneDownloadResult + +# TODO update the JSON string below +json = "{}" +# create an instance of FinetuneDownloadResult from a JSON string +finetune_download_result_instance = FinetuneDownloadResult.from_json(json) +# print the JSON string representation of the object +print(FinetuneDownloadResult.to_json()) + +# convert the object into a dict +finetune_download_result_dict = finetune_download_result_instance.to_dict() +# create an instance of FinetuneDownloadResult from a dict +finetune_download_result_from_dict = FinetuneDownloadResult.from_dict(finetune_download_result_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneEventLevels.md b/src/together/generated/docs/FinetuneEventLevels.md new file mode 100644 index 00000000..0056898d --- /dev/null +++ b/src/together/generated/docs/FinetuneEventLevels.md @@ -0,0 +1,18 @@ +# FinetuneEventLevels + + +## Enum + +* `INFO` (value: `'info'`) + +* `WARNING` (value: `'warning'`) + +* `ERROR` 
(value: `'error'`) + +* `LEGACY_INFO` (value: `'legacy_info'`) + +* `LEGACY_IWARNING` (value: `'legacy_iwarning'`) + +* `LEGACY_IERROR` (value: `'legacy_ierror'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneEventType.md b/src/together/generated/docs/FinetuneEventType.md new file mode 100644 index 00000000..6f936a60 --- /dev/null +++ b/src/together/generated/docs/FinetuneEventType.md @@ -0,0 +1,56 @@ +# FinetuneEventType + + +## Enum + +* `JOB_PENDING` (value: `'job_pending'`) + +* `JOB_START` (value: `'job_start'`) + +* `JOB_STOPPED` (value: `'job_stopped'`) + +* `MODEL_DOWNLOADING` (value: `'model_downloading'`) + +* `MODEL_DOWNLOAD_COMPLETE` (value: `'model_download_complete'`) + +* `TRAINING_DATA_DOWNLOADING` (value: `'training_data_downloading'`) + +* `TRAINING_DATA_DOWNLOAD_COMPLETE` (value: `'training_data_download_complete'`) + +* `VALIDATION_DATA_DOWNLOADING` (value: `'validation_data_downloading'`) + +* `VALIDATION_DATA_DOWNLOAD_COMPLETE` (value: `'validation_data_download_complete'`) + +* `WANDB_INIT` (value: `'wandb_init'`) + +* `TRAINING_START` (value: `'training_start'`) + +* `CHECKPOINT_SAVE` (value: `'checkpoint_save'`) + +* `BILLING_LIMIT` (value: `'billing_limit'`) + +* `EPOCH_COMPLETE` (value: `'epoch_complete'`) + +* `TRAINING_COMPLETE` (value: `'training_complete'`) + +* `MODEL_COMPRESSING` (value: `'model_compressing'`) + +* `MODEL_COMPRESSION_COMPLETE` (value: `'model_compression_complete'`) + +* `MODEL_UPLOADING` (value: `'model_uploading'`) + +* `MODEL_UPLOAD_COMPLETE` (value: `'model_upload_complete'`) + +* `JOB_COMPLETE` (value: `'job_complete'`) + +* `JOB_ERROR` (value: `'job_error'`) + +* `CANCEL_REQUESTED` (value: `'cancel_requested'`) + +* `JOB_RESTARTED` (value: `'job_restarted'`) + +* `REFUND` (value: `'refund'`) + +* `WARNING` (value: `'warning'`) + +[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneJobStatus.md b/src/together/generated/docs/FinetuneJobStatus.md new file mode 100644 index 00000000..750038be --- /dev/null +++ b/src/together/generated/docs/FinetuneJobStatus.md @@ -0,0 +1,24 @@ +# FinetuneJobStatus + + +## Enum + +* `PENDING` (value: `'pending'`) + +* `QUEUED` (value: `'queued'`) + +* `RUNNING` (value: `'running'`) + +* `COMPRESSING` (value: `'compressing'`) + +* `UPLOADING` (value: `'uploading'`) + +* `CANCEL_REQUESTED` (value: `'cancel_requested'`) + +* `CANCELLED` (value: `'cancelled'`) + +* `ERROR` (value: `'error'`) + +* `COMPLETED` (value: `'completed'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneList.md b/src/together/generated/docs/FinetuneList.md new file mode 100644 index 00000000..4785467b --- /dev/null +++ b/src/together/generated/docs/FinetuneList.md @@ -0,0 +1,27 @@ +# FinetuneList + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**List[FinetuneResponse]**](FinetuneResponse.md) | | + +## Example + +```python +from together.generated.models.finetune_list import FinetuneList + +# TODO update the JSON string below +json = "{}" +# create an instance of FinetuneList from a JSON string +finetune_list_instance = FinetuneList.from_json(json) +# print the JSON string representation of the object +print(FinetuneList.to_json()) + +# convert the object into a dict +finetune_list_dict = finetune_list_instance.to_dict() +# create an instance of FinetuneList from a dict +finetune_list_from_dict = FinetuneList.from_dict(finetune_list_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) 
[[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneListEvents.md b/src/together/generated/docs/FinetuneListEvents.md new file mode 100644 index 00000000..2fa6ed43 --- /dev/null +++ b/src/together/generated/docs/FinetuneListEvents.md @@ -0,0 +1,27 @@ +# FinetuneListEvents + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**List[FineTuneEvent]**](FineTuneEvent.md) | | + +## Example + +```python +from together.generated.models.finetune_list_events import FinetuneListEvents + +# TODO update the JSON string below +json = "{}" +# create an instance of FinetuneListEvents from a JSON string +finetune_list_events_instance = FinetuneListEvents.from_json(json) +# print the JSON string representation of the object +print(FinetuneListEvents.to_json()) + +# convert the object into a dict +finetune_list_events_dict = finetune_list_events_instance.to_dict() +# create an instance of FinetuneListEvents from a dict +finetune_list_events_from_dict = FinetuneListEvents.from_dict(finetune_list_events_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneResponse.md b/src/together/generated/docs/FinetuneResponse.md new file mode 100644 index 00000000..68dc10c6 --- /dev/null +++ b/src/together/generated/docs/FinetuneResponse.md @@ -0,0 +1,58 @@ +# FinetuneResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**training_file** | **str** | | [optional] +**validation_file** | **str** | | [optional] +**model** | **str** | | [optional] +**model_output_name** | **str** | | [optional] +**model_output_path** | **str** | | [optional] +**trainingfile_numlines** | 
**int** | | [optional] +**trainingfile_size** | **int** | | [optional] +**created_at** | **str** | | [optional] +**updated_at** | **str** | | [optional] +**n_epochs** | **int** | | [optional] +**n_checkpoints** | **int** | | [optional] +**n_evals** | **int** | | [optional] +**batch_size** | **int** | | [optional] +**learning_rate** | **float** | | [optional] +**lr_scheduler** | [**LRScheduler**](.md) | | [optional] +**warmup_ratio** | **float** | | [optional] +**max_grad_norm** | **float** | | [optional] +**weight_decay** | **float** | | [optional] +**eval_steps** | **int** | | [optional] +**train_on_inputs** | [**FinetuneResponseTrainOnInputs**](FinetuneResponseTrainOnInputs.md) | | [optional] +**training_type** | [**FineTunesPostRequestTrainingType**](FineTunesPostRequestTrainingType.md) | | [optional] +**status** | [**FinetuneJobStatus**](FinetuneJobStatus.md) | | +**job_id** | **str** | | [optional] +**events** | [**List[FineTuneEvent]**](FineTuneEvent.md) | | [optional] +**token_count** | **int** | | [optional] +**param_count** | **int** | | [optional] +**total_price** | **int** | | [optional] +**epochs_completed** | **int** | | [optional] +**queue_depth** | **int** | | [optional] +**wandb_project_name** | **str** | | [optional] +**wandb_url** | **str** | | [optional] + +## Example + +```python +from together.generated.models.finetune_response import FinetuneResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of FinetuneResponse from a JSON string +finetune_response_instance = FinetuneResponse.from_json(json) +# print the JSON string representation of the object +print(FinetuneResponse.to_json()) + +# convert the object into a dict +finetune_response_dict = finetune_response_instance.to_dict() +# create an instance of FinetuneResponse from a dict +finetune_response_from_dict = FinetuneResponse.from_dict(finetune_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneResponseTrainOnInputs.md b/src/together/generated/docs/FinetuneResponseTrainOnInputs.md new file mode 100644 index 00000000..0ea8e32c --- /dev/null +++ b/src/together/generated/docs/FinetuneResponseTrainOnInputs.md @@ -0,0 +1,26 @@ +# FinetuneResponseTrainOnInputs + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.finetune_response_train_on_inputs import FinetuneResponseTrainOnInputs + +# TODO update the JSON string below +json = "{}" +# create an instance of FinetuneResponseTrainOnInputs from a JSON string +finetune_response_train_on_inputs_instance = FinetuneResponseTrainOnInputs.from_json(json) +# print the JSON string representation of the object +print(FinetuneResponseTrainOnInputs.to_json()) + +# convert the object into a dict +finetune_response_train_on_inputs_dict = finetune_response_train_on_inputs_instance.to_dict() +# create an instance of FinetuneResponseTrainOnInputs from a dict +finetune_response_train_on_inputs_from_dict = FinetuneResponseTrainOnInputs.from_dict(finetune_response_train_on_inputs_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinishReason.md b/src/together/generated/docs/FinishReason.md new file mode 100644 index 00000000..e6907d83 --- /dev/null +++ b/src/together/generated/docs/FinishReason.md @@ -0,0 +1,16 @@ +# FinishReason + + +## Enum + +* `STOP` (value: `'stop'`) + +* `EOS` (value: `'eos'`) + +* `LENGTH` (value: `'length'`) + +* `TOOL_CALLS` (value: `'tool_calls'`) + +* `FUNCTION_CALL` (value: `'function_call'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FullTrainingType.md b/src/together/generated/docs/FullTrainingType.md new file mode 100644 index 00000000..4b40ee0f --- /dev/null +++ b/src/together/generated/docs/FullTrainingType.md @@ -0,0 +1,27 @@ +# FullTrainingType + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | | + +## Example + +```python +from together.generated.models.full_training_type import FullTrainingType + +# TODO update the JSON string below +json = "{}" +# create an instance of FullTrainingType from a JSON string +full_training_type_instance = FullTrainingType.from_json(json) +# print the JSON string representation of the object +print(FullTrainingType.to_json()) + +# convert the object into a dict +full_training_type_dict = full_training_type_instance.to_dict() +# create an instance of FullTrainingType from a dict +full_training_type_from_dict = FullTrainingType.from_dict(full_training_type_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/HardwareApi.md b/src/together/generated/docs/HardwareApi.md new file mode 100644 index 00000000..b631d038 --- /dev/null +++ b/src/together/generated/docs/HardwareApi.md @@ -0,0 +1,88 @@ +# together.generated.HardwareApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**list_hardware**](HardwareApi.md#list_hardware) | **GET** /hardware | List available hardware configurations + + +# **list_hardware** +> ListHardware200Response list_hardware(model=model) + +List available hardware configurations + +Returns a list of available hardware configurations for deploying models. 
When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.list_hardware200_response import ListHardware200Response +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.HardwareApi(api_client) + model = 'meta-llama/Llama-3-70b-chat-hf' # str | Filter hardware configurations by model compatibility (optional) + + try: + # List available hardware configurations + api_response = await api_instance.list_hardware(model=model) + print("The response of HardwareApi->list_hardware:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling HardwareApi->list_hardware: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **model** | **str**| Filter hardware configurations by model compatibility | [optional] + +### Return type + +[**ListHardware200Response**](ListHardware200Response.md) + +### 
Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | List of available hardware configurations | - | +**403** | Unauthorized | - | +**500** | Internal error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/HardwareAvailability.md b/src/together/generated/docs/HardwareAvailability.md new file mode 100644 index 00000000..6ff309ee --- /dev/null +++ b/src/together/generated/docs/HardwareAvailability.md @@ -0,0 +1,28 @@ +# HardwareAvailability + +Indicates the current availability status of a hardware configuration + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**status** | **str** | The availability status of the hardware configuration | + +## Example + +```python +from together.generated.models.hardware_availability import HardwareAvailability + +# TODO update the JSON string below +json = "{}" +# create an instance of HardwareAvailability from a JSON string +hardware_availability_instance = HardwareAvailability.from_json(json) +# print the JSON string representation of the object +print(HardwareAvailability.to_json()) + +# convert the object into a dict +hardware_availability_dict = hardware_availability_instance.to_dict() +# create an instance of HardwareAvailability from a dict +hardware_availability_from_dict = HardwareAvailability.from_dict(hardware_availability_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git 
a/src/together/generated/docs/HardwareSpec.md b/src/together/generated/docs/HardwareSpec.md new file mode 100644 index 00000000..9967c6f2 --- /dev/null +++ b/src/together/generated/docs/HardwareSpec.md @@ -0,0 +1,31 @@ +# HardwareSpec + +Detailed specifications of a hardware configuration + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**gpu_type** | **str** | The type/model of GPU | +**gpu_link** | **str** | The GPU interconnect technology | +**gpu_memory** | **float** | Amount of GPU memory in GB | +**gpu_count** | **int** | Number of GPUs in this configuration | + +## Example + +```python +from together.generated.models.hardware_spec import HardwareSpec + +# TODO update the JSON string below +json = "{}" +# create an instance of HardwareSpec from a JSON string +hardware_spec_instance = HardwareSpec.from_json(json) +# print the JSON string representation of the object +print(HardwareSpec.to_json()) + +# convert the object into a dict +hardware_spec_dict = hardware_spec_instance.to_dict() +# create an instance of HardwareSpec from a dict +hardware_spec_from_dict = HardwareSpec.from_dict(hardware_spec_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/HardwareWithStatus.md b/src/together/generated/docs/HardwareWithStatus.md new file mode 100644 index 00000000..6435273e --- /dev/null +++ b/src/together/generated/docs/HardwareWithStatus.md @@ -0,0 +1,33 @@ +# HardwareWithStatus + +Hardware configuration details including current availability status + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**name** | **str** | Unique identifier for the hardware configuration | +**pricing** | [**EndpointPricing**](EndpointPricing.md) | | +**specs** | 
[**HardwareSpec**](HardwareSpec.md) | | +**availability** | [**HardwareAvailability**](HardwareAvailability.md) | | [optional] +**updated_at** | **datetime** | Timestamp of when the hardware status was last updated | + +## Example + +```python +from together.generated.models.hardware_with_status import HardwareWithStatus + +# TODO update the JSON string below +json = "{}" +# create an instance of HardwareWithStatus from a JSON string +hardware_with_status_instance = HardwareWithStatus.from_json(json) +# print the JSON string representation of the object +print(HardwareWithStatus.to_json()) + +# convert the object into a dict +hardware_with_status_dict = hardware_with_status_instance.to_dict() +# create an instance of HardwareWithStatus from a dict +hardware_with_status_from_dict = HardwareWithStatus.from_dict(hardware_with_status_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImageResponse.md b/src/together/generated/docs/ImageResponse.md new file mode 100644 index 00000000..eca8ec0c --- /dev/null +++ b/src/together/generated/docs/ImageResponse.md @@ -0,0 +1,30 @@ +# ImageResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**model** | **str** | | +**object** | **str** | | +**data** | [**List[ImageResponseDataInner]**](ImageResponseDataInner.md) | | + +## Example + +```python +from together.generated.models.image_response import ImageResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of ImageResponse from a JSON string +image_response_instance = ImageResponse.from_json(json) +# print the JSON string representation of the object +print(ImageResponse.to_json()) + +# convert the object into a dict +image_response_dict = image_response_instance.to_dict() +# create an instance 
of ImageResponse from a dict +image_response_from_dict = ImageResponse.from_dict(image_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImageResponseDataInner.md b/src/together/generated/docs/ImageResponseDataInner.md new file mode 100644 index 00000000..f529b63c --- /dev/null +++ b/src/together/generated/docs/ImageResponseDataInner.md @@ -0,0 +1,29 @@ +# ImageResponseDataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **int** | | +**b64_json** | **str** | | [optional] +**url** | **str** | | [optional] + +## Example + +```python +from together.generated.models.image_response_data_inner import ImageResponseDataInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ImageResponseDataInner from a JSON string +image_response_data_inner_instance = ImageResponseDataInner.from_json(json) +# print the JSON string representation of the object +print(ImageResponseDataInner.to_json()) + +# convert the object into a dict +image_response_data_inner_dict = image_response_data_inner_instance.to_dict() +# create an instance of ImageResponseDataInner from a dict +image_response_data_inner_from_dict = ImageResponseDataInner.from_dict(image_response_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesApi.md b/src/together/generated/docs/ImagesApi.md new file mode 100644 index 00000000..d7db520e --- /dev/null +++ b/src/together/generated/docs/ImagesApi.md @@ -0,0 +1,87 @@ +# together.generated.ImagesApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | 
------------- | ------------- +[**images_generations_post**](ImagesApi.md#images_generations_post) | **POST** /images/generations | Create image + + +# **images_generations_post** +> ImageResponse images_generations_post(images_generations_post_request) + +Create image + +Use an image model to generate an image for a given prompt. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.image_response import ImageResponse +from together.generated.models.images_generations_post_request import ImagesGenerationsPostRequest +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.ImagesApi(api_client) + images_generations_post_request = together.generated.ImagesGenerationsPostRequest() # ImagesGenerationsPostRequest | + + try: + # Create image + api_response = await api_instance.images_generations_post(images_generations_post_request) + print("The response of ImagesApi->images_generations_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling ImagesApi->images_generations_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **images_generations_post_request** | [**ImagesGenerationsPostRequest**](ImagesGenerationsPostRequest.md)| | + +### Return type + +[**ImageResponse**](ImageResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | Image generated successfully | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesGenerationsPostRequest.md b/src/together/generated/docs/ImagesGenerationsPostRequest.md new file mode 100644 index 00000000..4263429b --- /dev/null +++ b/src/together/generated/docs/ImagesGenerationsPostRequest.md @@ -0,0 +1,39 @@ +# ImagesGenerationsPostRequest + + +## Properties + +Name | Type | Description | Notes 
+------------ | ------------- | ------------- | ------------- +**prompt** | **str** | A description of the desired images. Maximum length varies by model. | +**model** | [**ImagesGenerationsPostRequestModel**](ImagesGenerationsPostRequestModel.md) | | +**steps** | **int** | Number of generation steps. | [optional] [default to 20] +**image_url** | **str** | URL of an image to use for image models that support it. | [optional] +**seed** | **int** | Seed used for generation. Can be used to reproduce image generations. | [optional] +**n** | **int** | Number of image results to generate. | [optional] [default to 1] +**height** | **int** | Height of the image to generate in number of pixels. | [optional] [default to 1024] +**width** | **int** | Width of the image to generate in number of pixels. | [optional] [default to 1024] +**negative_prompt** | **str** | The prompt or prompts not to guide the image generation. | [optional] +**response_format** | **str** | Format of the image response. Can be either a base64 string or a URL. | [optional] +**guidance** | **float** | Adjusts the alignment of the generated image with the input prompt. Higher values (e.g., 8-10) make the output more faithful to the prompt, while lower values (e.g., 1-5) encourage more creative freedom. | [optional] [default to 3.5] +**output_format** | **str** | The format of the image response. Can be either be `jpeg` or `png`. Defaults to `jpeg`. | [optional] [default to 'jpeg'] +**image_loras** | [**List[ImagesGenerationsPostRequestImageLorasInner]**](ImagesGenerationsPostRequestImageLorasInner.md) | An array of objects that define LoRAs (Low-Rank Adaptations) to influence the generated image. 
| [optional] + +## Example + +```python +from together.generated.models.images_generations_post_request import ImagesGenerationsPostRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of ImagesGenerationsPostRequest from a JSON string +images_generations_post_request_instance = ImagesGenerationsPostRequest.from_json(json) +# print the JSON string representation of the object +print(ImagesGenerationsPostRequest.to_json()) + +# convert the object into a dict +images_generations_post_request_dict = images_generations_post_request_instance.to_dict() +# create an instance of ImagesGenerationsPostRequest from a dict +images_generations_post_request_from_dict = ImagesGenerationsPostRequest.from_dict(images_generations_post_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md b/src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md new file mode 100644 index 00000000..6bb54e5e --- /dev/null +++ b/src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md @@ -0,0 +1,28 @@ +# ImagesGenerationsPostRequestImageLorasInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**path** | **str** | The URL of the LoRA to apply (e.g. https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA). | +**scale** | **float** | The strength of the LoRA's influence. Most LoRA's recommend a value of 1. 
| + +## Example + +```python +from together.generated.models.images_generations_post_request_image_loras_inner import ImagesGenerationsPostRequestImageLorasInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ImagesGenerationsPostRequestImageLorasInner from a JSON string +images_generations_post_request_image_loras_inner_instance = ImagesGenerationsPostRequestImageLorasInner.from_json(json) +# print the JSON string representation of the object +print(ImagesGenerationsPostRequestImageLorasInner.to_json()) + +# convert the object into a dict +images_generations_post_request_image_loras_inner_dict = images_generations_post_request_image_loras_inner_instance.to_dict() +# create an instance of ImagesGenerationsPostRequestImageLorasInner from a dict +images_generations_post_request_image_loras_inner_from_dict = ImagesGenerationsPostRequestImageLorasInner.from_dict(images_generations_post_request_image_loras_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesGenerationsPostRequestModel.md b/src/together/generated/docs/ImagesGenerationsPostRequestModel.md new file mode 100644 index 00000000..333020df --- /dev/null +++ b/src/together/generated/docs/ImagesGenerationsPostRequestModel.md @@ -0,0 +1,27 @@ +# ImagesGenerationsPostRequestModel + +The model to use for image generation.

[See all of Together AI's image models](https://docs.together.ai/docs/serverless-models#image-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.images_generations_post_request_model import ImagesGenerationsPostRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of ImagesGenerationsPostRequestModel from a JSON string +images_generations_post_request_model_instance = ImagesGenerationsPostRequestModel.from_json(json) +# print the JSON string representation of the object +print(ImagesGenerationsPostRequestModel.to_json()) + +# convert the object into a dict +images_generations_post_request_model_dict = images_generations_post_request_model_instance.to_dict() +# create an instance of ImagesGenerationsPostRequestModel from a dict +images_generations_post_request_model_from_dict = ImagesGenerationsPostRequestModel.from_dict(images_generations_post_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LRScheduler.md b/src/together/generated/docs/LRScheduler.md new file mode 100644 index 00000000..6580bafd --- /dev/null +++ b/src/together/generated/docs/LRScheduler.md @@ -0,0 +1,28 @@ +# LRScheduler + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**lr_scheduler_type** | **str** | | +**lr_scheduler_args** | [**LinearLRSchedulerArgs**](LinearLRSchedulerArgs.md) | | [optional] + +## Example + +```python +from together.generated.models.lr_scheduler import LRScheduler + +# TODO update the JSON string below +json = "{}" +# create an instance of LRScheduler from a JSON string +lr_scheduler_instance = LRScheduler.from_json(json) +# print the JSON string representation of the object 
+print(LRScheduler.to_json()) + +# convert the object into a dict +lr_scheduler_dict = lr_scheduler_instance.to_dict() +# create an instance of LRScheduler from a dict +lr_scheduler_from_dict = LRScheduler.from_dict(lr_scheduler_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LinearLRSchedulerArgs.md b/src/together/generated/docs/LinearLRSchedulerArgs.md new file mode 100644 index 00000000..82240c87 --- /dev/null +++ b/src/together/generated/docs/LinearLRSchedulerArgs.md @@ -0,0 +1,27 @@ +# LinearLRSchedulerArgs + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**min_lr_ratio** | **float** | The ratio of the final learning rate to the peak learning rate | [optional] [default to 0.0] + +## Example + +```python +from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs + +# TODO update the JSON string below +json = "{}" +# create an instance of LinearLRSchedulerArgs from a JSON string +linear_lr_scheduler_args_instance = LinearLRSchedulerArgs.from_json(json) +# print the JSON string representation of the object +print(LinearLRSchedulerArgs.to_json()) + +# convert the object into a dict +linear_lr_scheduler_args_dict = linear_lr_scheduler_args_instance.to_dict() +# create an instance of LinearLRSchedulerArgs from a dict +linear_lr_scheduler_args_from_dict = LinearLRSchedulerArgs.from_dict(linear_lr_scheduler_args_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListEndpoint.md b/src/together/generated/docs/ListEndpoint.md new file mode 100644 index 00000000..c7a226ea --- /dev/null +++ b/src/together/generated/docs/ListEndpoint.md @@ -0,0 +1,35 @@ 
+# ListEndpoint + +Details about an endpoint when listed via the list endpoint + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | The type of object | +**id** | **str** | Unique identifier for the endpoint | +**name** | **str** | System name for the endpoint | +**model** | **str** | The model deployed on this endpoint | +**type** | **str** | The type of endpoint | +**owner** | **str** | The owner of this endpoint | +**state** | **str** | Current state of the endpoint | +**created_at** | **datetime** | Timestamp when the endpoint was created | + +## Example + +```python +from together.generated.models.list_endpoint import ListEndpoint + +# TODO update the JSON string below +json = "{}" +# create an instance of ListEndpoint from a JSON string +list_endpoint_instance = ListEndpoint.from_json(json) +# print the JSON string representation of the object +print(ListEndpoint.to_json()) + +# convert the object into a dict +list_endpoint_dict = list_endpoint_instance.to_dict() +# create an instance of ListEndpoint from a dict +list_endpoint_from_dict = ListEndpoint.from_dict(list_endpoint_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListEndpoints200Response.md b/src/together/generated/docs/ListEndpoints200Response.md new file mode 100644 index 00000000..16babfb5 --- /dev/null +++ b/src/together/generated/docs/ListEndpoints200Response.md @@ -0,0 +1,28 @@ +# ListEndpoints200Response + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**data** | [**List[ListEndpoint]**](ListEndpoint.md) | | + +## Example + +```python +from together.generated.models.list_endpoints200_response import ListEndpoints200Response + +# TODO update the JSON 
string below +json = "{}" +# create an instance of ListEndpoints200Response from a JSON string +list_endpoints200_response_instance = ListEndpoints200Response.from_json(json) +# print the JSON string representation of the object +print(ListEndpoints200Response.to_json()) + +# convert the object into a dict +list_endpoints200_response_dict = list_endpoints200_response_instance.to_dict() +# create an instance of ListEndpoints200Response from a dict +list_endpoints200_response_from_dict = ListEndpoints200Response.from_dict(list_endpoints200_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200Response.md b/src/together/generated/docs/ListHardware200Response.md new file mode 100644 index 00000000..7621d170 --- /dev/null +++ b/src/together/generated/docs/ListHardware200Response.md @@ -0,0 +1,28 @@ +# ListHardware200Response + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**data** | [**List[ListHardware200ResponseOneOf1DataInner]**](ListHardware200ResponseOneOf1DataInner.md) | | + +## Example + +```python +from together.generated.models.list_hardware200_response import ListHardware200Response + +# TODO update the JSON string below +json = "{}" +# create an instance of ListHardware200Response from a JSON string +list_hardware200_response_instance = ListHardware200Response.from_json(json) +# print the JSON string representation of the object +print(ListHardware200Response.to_json()) + +# convert the object into a dict +list_hardware200_response_dict = list_hardware200_response_instance.to_dict() +# create an instance of ListHardware200Response from a dict +list_hardware200_response_from_dict = ListHardware200Response.from_dict(list_hardware200_response_dict) +``` +[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOf.md b/src/together/generated/docs/ListHardware200ResponseOneOf.md new file mode 100644 index 00000000..4c4472d7 --- /dev/null +++ b/src/together/generated/docs/ListHardware200ResponseOneOf.md @@ -0,0 +1,29 @@ +# ListHardware200ResponseOneOf + +Response when no model filter is provided + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**data** | [**List[ListHardware200ResponseOneOfDataInner]**](ListHardware200ResponseOneOfDataInner.md) | | + +## Example + +```python +from together.generated.models.list_hardware200_response_one_of import ListHardware200ResponseOneOf + +# TODO update the JSON string below +json = "{}" +# create an instance of ListHardware200ResponseOneOf from a JSON string +list_hardware200_response_one_of_instance = ListHardware200ResponseOneOf.from_json(json) +# print the JSON string representation of the object +print(ListHardware200ResponseOneOf.to_json()) + +# convert the object into a dict +list_hardware200_response_one_of_dict = list_hardware200_response_one_of_instance.to_dict() +# create an instance of ListHardware200ResponseOneOf from a dict +list_hardware200_response_one_of_from_dict = ListHardware200ResponseOneOf.from_dict(list_hardware200_response_one_of_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOf1.md b/src/together/generated/docs/ListHardware200ResponseOneOf1.md new file mode 100644 index 00000000..e93eeca5 --- /dev/null +++ b/src/together/generated/docs/ListHardware200ResponseOneOf1.md @@ -0,0 +1,29 @@ +# ListHardware200ResponseOneOf1 + 
+Response when model filter is provided + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**data** | [**List[ListHardware200ResponseOneOf1DataInner]**](ListHardware200ResponseOneOf1DataInner.md) | | + +## Example + +```python +from together.generated.models.list_hardware200_response_one_of1 import ListHardware200ResponseOneOf1 + +# TODO update the JSON string below +json = "{}" +# create an instance of ListHardware200ResponseOneOf1 from a JSON string +list_hardware200_response_one_of1_instance = ListHardware200ResponseOneOf1.from_json(json) +# print the JSON string representation of the object +print(ListHardware200ResponseOneOf1.to_json()) + +# convert the object into a dict +list_hardware200_response_one_of1_dict = list_hardware200_response_one_of1_instance.to_dict() +# create an instance of ListHardware200ResponseOneOf1 from a dict +list_hardware200_response_one_of1_from_dict = ListHardware200ResponseOneOf1.from_dict(list_hardware200_response_one_of1_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md b/src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md new file mode 100644 index 00000000..ffe4d491 --- /dev/null +++ b/src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md @@ -0,0 +1,32 @@ +# ListHardware200ResponseOneOf1DataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**name** | **str** | Unique identifier for the hardware configuration | +**pricing** | [**EndpointPricing**](EndpointPricing.md) | | +**specs** | [**HardwareSpec**](HardwareSpec.md) | | +**availability** | [**HardwareAvailability**](HardwareAvailability.md) | | 
+**updated_at** | **datetime** | Timestamp of when the hardware status was last updated | + +## Example + +```python +from together.generated.models.list_hardware200_response_one_of1_data_inner import ListHardware200ResponseOneOf1DataInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ListHardware200ResponseOneOf1DataInner from a JSON string +list_hardware200_response_one_of1_data_inner_instance = ListHardware200ResponseOneOf1DataInner.from_json(json) +# print the JSON string representation of the object +print(ListHardware200ResponseOneOf1DataInner.to_json()) + +# convert the object into a dict +list_hardware200_response_one_of1_data_inner_dict = list_hardware200_response_one_of1_data_inner_instance.to_dict() +# create an instance of ListHardware200ResponseOneOf1DataInner from a dict +list_hardware200_response_one_of1_data_inner_from_dict = ListHardware200ResponseOneOf1DataInner.from_dict(list_hardware200_response_one_of1_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md b/src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md new file mode 100644 index 00000000..75586f0d --- /dev/null +++ b/src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md @@ -0,0 +1,32 @@ +# ListHardware200ResponseOneOfDataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**name** | **str** | Unique identifier for the hardware configuration | +**pricing** | [**EndpointPricing**](EndpointPricing.md) | | +**specs** | [**HardwareSpec**](HardwareSpec.md) | | +**availability** | **object** | | [optional] +**updated_at** | **datetime** | Timestamp of when the hardware status was last updated | + +## Example + +```python +from 
together.generated.models.list_hardware200_response_one_of_data_inner import ListHardware200ResponseOneOfDataInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ListHardware200ResponseOneOfDataInner from a JSON string +list_hardware200_response_one_of_data_inner_instance = ListHardware200ResponseOneOfDataInner.from_json(json) +# print the JSON string representation of the object +print(ListHardware200ResponseOneOfDataInner.to_json()) + +# convert the object into a dict +list_hardware200_response_one_of_data_inner_dict = list_hardware200_response_one_of_data_inner_instance.to_dict() +# create an instance of ListHardware200ResponseOneOfDataInner from a dict +list_hardware200_response_one_of_data_inner_from_dict = ListHardware200ResponseOneOfDataInner.from_dict(list_hardware200_response_one_of_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LoRATrainingType.md b/src/together/generated/docs/LoRATrainingType.md new file mode 100644 index 00000000..e977d18b --- /dev/null +++ b/src/together/generated/docs/LoRATrainingType.md @@ -0,0 +1,31 @@ +# LoRATrainingType + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | | +**lora_r** | **int** | | +**lora_alpha** | **int** | | +**lora_dropout** | **float** | | [optional] [default to 0.0] +**lora_trainable_modules** | **str** | | [optional] [default to 'all-linear'] + +## Example + +```python +from together.generated.models.lo_ra_training_type import LoRATrainingType + +# TODO update the JSON string below +json = "{}" +# create an instance of LoRATrainingType from a JSON string +lo_ra_training_type_instance = LoRATrainingType.from_json(json) +# print the JSON string representation of the object +print(LoRATrainingType.to_json()) + +# 
convert the object into a dict +lo_ra_training_type_dict = lo_ra_training_type_instance.to_dict() +# create an instance of LoRATrainingType from a dict +lo_ra_training_type_from_dict = LoRATrainingType.from_dict(lo_ra_training_type_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LogprobsPart.md b/src/together/generated/docs/LogprobsPart.md new file mode 100644 index 00000000..d489ad95 --- /dev/null +++ b/src/together/generated/docs/LogprobsPart.md @@ -0,0 +1,29 @@ +# LogprobsPart + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**token_ids** | **List[float]** | List of token IDs corresponding to the logprobs | [optional] +**tokens** | **List[str]** | List of token strings | [optional] +**token_logprobs** | **List[float]** | List of token log probabilities | [optional] + +## Example + +```python +from together.generated.models.logprobs_part import LogprobsPart + +# TODO update the JSON string below +json = "{}" +# create an instance of LogprobsPart from a JSON string +logprobs_part_instance = LogprobsPart.from_json(json) +# print the JSON string representation of the object +print(LogprobsPart.to_json()) + +# convert the object into a dict +logprobs_part_dict = logprobs_part_instance.to_dict() +# create an instance of LogprobsPart from a dict +logprobs_part_from_dict = LogprobsPart.from_dict(logprobs_part_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ModelInfo.md b/src/together/generated/docs/ModelInfo.md new file mode 100644 index 00000000..06bac6fe --- /dev/null +++ b/src/together/generated/docs/ModelInfo.md @@ -0,0 +1,36 @@ +# ModelInfo + + +## Properties + +Name | 
Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**object** | **str** | | +**created** | **int** | | +**type** | **str** | | +**display_name** | **str** | | [optional] +**organization** | **str** | | [optional] +**link** | **str** | | [optional] +**license** | **str** | | [optional] +**context_length** | **int** | | [optional] +**pricing** | [**Pricing**](Pricing.md) | | [optional] + +## Example + +```python +from together.generated.models.model_info import ModelInfo + +# TODO update the JSON string below +json = "{}" +# create an instance of ModelInfo from a JSON string +model_info_instance = ModelInfo.from_json(json) +# print the JSON string representation of the object +print(ModelInfo.to_json()) + +# convert the object into a dict +model_info_dict = model_info_instance.to_dict() +# create an instance of ModelInfo from a dict +model_info_from_dict = ModelInfo.from_dict(model_info_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ModelsApi.md b/src/together/generated/docs/ModelsApi.md new file mode 100644 index 00000000..d5584a59 --- /dev/null +++ b/src/together/generated/docs/ModelsApi.md @@ -0,0 +1,87 @@ +# together.generated.ModelsApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**models**](ModelsApi.md#models) | **GET** /models | List all models + + +# **models** +> List[ModelInfo] models() + +List all models + +Lists all of Together's open-source models + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.model_info import ModelInfo +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to 
https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.ModelsApi(api_client) + + try: + # List all models + api_response = await api_instance.models() + print("The response of ModelsApi->models:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling ModelsApi->models: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. 
+ +### Return type + +[**List[ModelInfo]**](ModelInfo.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**400** | BadRequest | - | +**401** | Unauthorized | - | +**404** | NotFound | - | +**429** | RateLimit | - | +**504** | Timeout | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/Pricing.md b/src/together/generated/docs/Pricing.md new file mode 100644 index 00000000..24a5d7b8 --- /dev/null +++ b/src/together/generated/docs/Pricing.md @@ -0,0 +1,31 @@ +# Pricing + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**hourly** | **float** | | +**input** | **float** | | +**output** | **float** | | +**base** | **float** | | +**finetune** | **float** | | + +## Example + +```python +from together.generated.models.pricing import Pricing + +# TODO update the JSON string below +json = "{}" +# create an instance of Pricing from a JSON string +pricing_instance = Pricing.from_json(json) +# print the JSON string representation of the object +print(Pricing.to_json()) + +# convert the object into a dict +pricing_dict = pricing_instance.to_dict() +# create an instance of Pricing from a dict +pricing_from_dict = Pricing.from_dict(pricing_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/PromptPartInner.md b/src/together/generated/docs/PromptPartInner.md new file mode 100644 index 00000000..e1270712 --- /dev/null +++ 
b/src/together/generated/docs/PromptPartInner.md @@ -0,0 +1,28 @@ +# PromptPartInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **str** | | [optional] +**logprobs** | [**LogprobsPart**](LogprobsPart.md) | | [optional] + +## Example + +```python +from together.generated.models.prompt_part_inner import PromptPartInner + +# TODO update the JSON string below +json = "{}" +# create an instance of PromptPartInner from a JSON string +prompt_part_inner_instance = PromptPartInner.from_json(json) +# print the JSON string representation of the object +print(PromptPartInner.to_json()) + +# convert the object into a dict +prompt_part_inner_dict = prompt_part_inner_instance.to_dict() +# create an instance of PromptPartInner from a dict +prompt_part_inner_from_dict = PromptPartInner.from_dict(prompt_part_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankApi.md b/src/together/generated/docs/RerankApi.md new file mode 100644 index 00000000..2990c1f9 --- /dev/null +++ b/src/together/generated/docs/RerankApi.md @@ -0,0 +1,93 @@ +# together.generated.RerankApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**rerank**](RerankApi.md#rerank) | **POST** /rerank | Create a rerank request + + +# **rerank** +> RerankResponse rerank(rerank_request=rerank_request) + +Create a rerank request + +Query a reranker model + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.rerank_request import RerankRequest +from together.generated.models.rerank_response import RerankResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining 
the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.RerankApi(api_client) + rerank_request = together.generated.RerankRequest() # RerankRequest | (optional) + + try: + # Create a rerank request + api_response = await api_instance.rerank(rerank_request=rerank_request) + print("The response of RerankApi->rerank:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling RerankApi->rerank: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **rerank_request** | [**RerankRequest**](RerankRequest.md)| | [optional] + +### Return type + +[**RerankResponse**](RerankResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**400** | BadRequest | - | +**401** | Unauthorized | - | +**404** | NotFound | - | +**429** | RateLimit | - | +**503** | Overloaded | - | +**504** | Timeout | - | + +[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankRequest.md b/src/together/generated/docs/RerankRequest.md new file mode 100644 index 00000000..4c7ce08a --- /dev/null +++ b/src/together/generated/docs/RerankRequest.md @@ -0,0 +1,32 @@ +# RerankRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**model** | [**RerankRequestModel**](RerankRequestModel.md) | | +**query** | **str** | The search query to be used for ranking. | +**documents** | [**RerankRequestDocuments**](RerankRequestDocuments.md) | | +**top_n** | **int** | The number of top results to return. | [optional] +**return_documents** | **bool** | Whether to return supplied documents with the response. | [optional] +**rank_fields** | **List[str]** | List of keys in the JSON Object document to rank by. Defaults to use all supplied keys for ranking. 
| [optional] + +## Example + +```python +from together.generated.models.rerank_request import RerankRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankRequest from a JSON string +rerank_request_instance = RerankRequest.from_json(json) +# print the JSON string representation of the object +print(rerank_request_instance.to_json()) + +# convert the object into a dict +rerank_request_dict = rerank_request_instance.to_dict() +# create an instance of RerankRequest from a dict +rerank_request_from_dict = RerankRequest.from_dict(rerank_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankRequestDocuments.md b/src/together/generated/docs/RerankRequestDocuments.md new file mode 100644 index 00000000..51411574 --- /dev/null +++ b/src/together/generated/docs/RerankRequestDocuments.md @@ -0,0 +1,27 @@ +# RerankRequestDocuments + +List of documents, which can be either strings or objects.
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.rerank_request_documents import RerankRequestDocuments + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankRequestDocuments from a JSON string +rerank_request_documents_instance = RerankRequestDocuments.from_json(json) +# print the JSON string representation of the object +print(rerank_request_documents_instance.to_json()) + +# convert the object into a dict +rerank_request_documents_dict = rerank_request_documents_instance.to_dict() +# create an instance of RerankRequestDocuments from a dict +rerank_request_documents_from_dict = RerankRequestDocuments.from_dict(rerank_request_documents_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankRequestModel.md b/src/together/generated/docs/RerankRequestModel.md new file mode 100644 index 00000000..8d99f7bd --- /dev/null +++ b/src/together/generated/docs/RerankRequestModel.md @@ -0,0 +1,27 @@ +# RerankRequestModel + +The model to be used for the rerank request.

[See all of Together AI's rerank models](https://docs.together.ai/docs/serverless-models#rerank-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.rerank_request_model import RerankRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankRequestModel from a JSON string +rerank_request_model_instance = RerankRequestModel.from_json(json) +# print the JSON string representation of the object +print(RerankRequestModel.to_json()) + +# convert the object into a dict +rerank_request_model_dict = rerank_request_model_instance.to_dict() +# create an instance of RerankRequestModel from a dict +rerank_request_model_from_dict = RerankRequestModel.from_dict(rerank_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankResponse.md b/src/together/generated/docs/RerankResponse.md new file mode 100644 index 00000000..a40aa152 --- /dev/null +++ b/src/together/generated/docs/RerankResponse.md @@ -0,0 +1,31 @@ +# RerankResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | Object type | +**id** | **str** | Request ID | [optional] +**model** | **str** | The model to be used for the rerank request. 
| +**results** | [**List[RerankResponseResultsInner]**](RerankResponseResultsInner.md) | | +**usage** | [**UsageData**](UsageData.md) | | [optional] + +## Example + +```python +from together.generated.models.rerank_response import RerankResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankResponse from a JSON string +rerank_response_instance = RerankResponse.from_json(json) +# print the JSON string representation of the object +print(RerankResponse.to_json()) + +# convert the object into a dict +rerank_response_dict = rerank_response_instance.to_dict() +# create an instance of RerankResponse from a dict +rerank_response_from_dict = RerankResponse.from_dict(rerank_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankResponseResultsInner.md b/src/together/generated/docs/RerankResponseResultsInner.md new file mode 100644 index 00000000..0f245895 --- /dev/null +++ b/src/together/generated/docs/RerankResponseResultsInner.md @@ -0,0 +1,29 @@ +# RerankResponseResultsInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **int** | | +**relevance_score** | **float** | | +**document** | [**RerankResponseResultsInnerDocument**](RerankResponseResultsInnerDocument.md) | | + +## Example + +```python +from together.generated.models.rerank_response_results_inner import RerankResponseResultsInner + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankResponseResultsInner from a JSON string +rerank_response_results_inner_instance = RerankResponseResultsInner.from_json(json) +# print the JSON string representation of the object +print(RerankResponseResultsInner.to_json()) + +# convert the object into a dict +rerank_response_results_inner_dict = 
rerank_response_results_inner_instance.to_dict() +# create an instance of RerankResponseResultsInner from a dict +rerank_response_results_inner_from_dict = RerankResponseResultsInner.from_dict(rerank_response_results_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankResponseResultsInnerDocument.md b/src/together/generated/docs/RerankResponseResultsInnerDocument.md new file mode 100644 index 00000000..75ea6439 --- /dev/null +++ b/src/together/generated/docs/RerankResponseResultsInnerDocument.md @@ -0,0 +1,27 @@ +# RerankResponseResultsInnerDocument + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **str** | | [optional] + +## Example + +```python +from together.generated.models.rerank_response_results_inner_document import RerankResponseResultsInnerDocument + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankResponseResultsInnerDocument from a JSON string +rerank_response_results_inner_document_instance = RerankResponseResultsInnerDocument.from_json(json) +# print the JSON string representation of the object +print(RerankResponseResultsInnerDocument.to_json()) + +# convert the object into a dict +rerank_response_results_inner_document_dict = rerank_response_results_inner_document_instance.to_dict() +# create an instance of RerankResponseResultsInnerDocument from a dict +rerank_response_results_inner_document_from_dict = RerankResponseResultsInnerDocument.from_dict(rerank_response_results_inner_document_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/StreamSentinel.md b/src/together/generated/docs/StreamSentinel.md new 
file mode 100644 index 00000000..aeb5f6f4 --- /dev/null +++ b/src/together/generated/docs/StreamSentinel.md @@ -0,0 +1,27 @@ +# StreamSentinel + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | **str** | | + +## Example + +```python +from together.generated.models.stream_sentinel import StreamSentinel + +# TODO update the JSON string below +json = "{}" +# create an instance of StreamSentinel from a JSON string +stream_sentinel_instance = StreamSentinel.from_json(json) +# print the JSON string representation of the object +print(StreamSentinel.to_json()) + +# convert the object into a dict +stream_sentinel_dict = stream_sentinel_instance.to_dict() +# create an instance of StreamSentinel from a dict +stream_sentinel_from_dict = StreamSentinel.from_dict(stream_sentinel_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolChoice.md b/src/together/generated/docs/ToolChoice.md new file mode 100644 index 00000000..e0d6a775 --- /dev/null +++ b/src/together/generated/docs/ToolChoice.md @@ -0,0 +1,30 @@ +# ToolChoice + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **float** | | +**id** | **str** | | +**type** | **str** | | +**function** | [**ToolChoiceFunction**](ToolChoiceFunction.md) | | + +## Example + +```python +from together.generated.models.tool_choice import ToolChoice + +# TODO update the JSON string below +json = "{}" +# create an instance of ToolChoice from a JSON string +tool_choice_instance = ToolChoice.from_json(json) +# print the JSON string representation of the object +print(ToolChoice.to_json()) + +# convert the object into a dict +tool_choice_dict = tool_choice_instance.to_dict() +# create an instance of ToolChoice from a dict 
+tool_choice_from_dict = ToolChoice.from_dict(tool_choice_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolChoiceFunction.md b/src/together/generated/docs/ToolChoiceFunction.md new file mode 100644 index 00000000..a740c34e --- /dev/null +++ b/src/together/generated/docs/ToolChoiceFunction.md @@ -0,0 +1,28 @@ +# ToolChoiceFunction + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | | +**arguments** | **str** | | + +## Example + +```python +from together.generated.models.tool_choice_function import ToolChoiceFunction + +# TODO update the JSON string below +json = "{}" +# create an instance of ToolChoiceFunction from a JSON string +tool_choice_function_instance = ToolChoiceFunction.from_json(json) +# print the JSON string representation of the object +print(ToolChoiceFunction.to_json()) + +# convert the object into a dict +tool_choice_function_dict = tool_choice_function_instance.to_dict() +# create an instance of ToolChoiceFunction from a dict +tool_choice_function_from_dict = ToolChoiceFunction.from_dict(tool_choice_function_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolsPart.md b/src/together/generated/docs/ToolsPart.md new file mode 100644 index 00000000..733e311e --- /dev/null +++ b/src/together/generated/docs/ToolsPart.md @@ -0,0 +1,28 @@ +# ToolsPart + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | | [optional] +**function** | [**ToolsPartFunction**](ToolsPartFunction.md) | | [optional] + +## Example + +```python +from 
together.generated.models.tools_part import ToolsPart + +# TODO update the JSON string below +json = "{}" +# create an instance of ToolsPart from a JSON string +tools_part_instance = ToolsPart.from_json(json) +# print the JSON string representation of the object +print(ToolsPart.to_json()) + +# convert the object into a dict +tools_part_dict = tools_part_instance.to_dict() +# create an instance of ToolsPart from a dict +tools_part_from_dict = ToolsPart.from_dict(tools_part_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolsPartFunction.md b/src/together/generated/docs/ToolsPartFunction.md new file mode 100644 index 00000000..27d59e43 --- /dev/null +++ b/src/together/generated/docs/ToolsPartFunction.md @@ -0,0 +1,29 @@ +# ToolsPartFunction + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**description** | **str** | | [optional] +**name** | **str** | | [optional] +**parameters** | **Dict[str, object]** | A map of parameter names to their values. 
| [optional] + +## Example + +```python +from together.generated.models.tools_part_function import ToolsPartFunction + +# TODO update the JSON string below +json = "{}" +# create an instance of ToolsPartFunction from a JSON string +tools_part_function_instance = ToolsPartFunction.from_json(json) +# print the JSON string representation of the object +print(ToolsPartFunction.to_json()) + +# convert the object into a dict +tools_part_function_dict = tools_part_function_instance.to_dict() +# create an instance of ToolsPartFunction from a dict +tools_part_function_from_dict = ToolsPartFunction.from_dict(tools_part_function_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/UpdateEndpointRequest.md b/src/together/generated/docs/UpdateEndpointRequest.md new file mode 100644 index 00000000..76fb7a8e --- /dev/null +++ b/src/together/generated/docs/UpdateEndpointRequest.md @@ -0,0 +1,29 @@ +# UpdateEndpointRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**display_name** | **str** | A human-readable name for the endpoint | [optional] +**state** | **str** | The desired state of the endpoint | [optional] +**autoscaling** | [**Autoscaling**](Autoscaling.md) | New autoscaling configuration for the endpoint | [optional] + +## Example + +```python +from together.generated.models.update_endpoint_request import UpdateEndpointRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of UpdateEndpointRequest from a JSON string +update_endpoint_request_instance = UpdateEndpointRequest.from_json(json) +# print the JSON string representation of the object +print(UpdateEndpointRequest.to_json()) + +# convert the object into a dict +update_endpoint_request_dict = update_endpoint_request_instance.to_dict() +# create an instance 
of UpdateEndpointRequest from a dict +update_endpoint_request_from_dict = UpdateEndpointRequest.from_dict(update_endpoint_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/UsageData.md b/src/together/generated/docs/UsageData.md new file mode 100644 index 00000000..0a0f4692 --- /dev/null +++ b/src/together/generated/docs/UsageData.md @@ -0,0 +1,29 @@ +# UsageData + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**prompt_tokens** | **int** | | +**completion_tokens** | **int** | | +**total_tokens** | **int** | | + +## Example + +```python +from together.generated.models.usage_data import UsageData + +# TODO update the JSON string below +json = "{}" +# create an instance of UsageData from a JSON string +usage_data_instance = UsageData.from_json(json) +# print the JSON string representation of the object +print(UsageData.to_json()) + +# convert the object into a dict +usage_data_dict = usage_data_instance.to_dict() +# create an instance of UsageData from a dict +usage_data_from_dict = UsageData.from_dict(usage_data_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/exceptions.py b/src/together/generated/exceptions.py new file mode 100644 index 00000000..ade3cc31 --- /dev/null +++ b/src/together/generated/exceptions.py @@ -0,0 +1,220 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +from typing import Any, Optional +from typing_extensions import Self + + +class OpenApiException(Exception): + """The base exception class for all OpenAPIExceptions""" + + +class ApiTypeError(OpenApiException, TypeError): + def __init__( + self, msg, path_to_item=None, valid_classes=None, key_type=None + ) -> None: + """Raises an exception for TypeErrors + + Args: + msg (str): the exception message + + Keyword Args: + path_to_item (list): a list of keys an indices to get to the + current_item + None if unset + valid_classes (tuple): the primitive classes that current item + should be an instance of + None if unset + key_type (bool): False if our value is a value in a dict + True if it is a key in a dict + False if our item is an item in a list + None if unset + """ + self.path_to_item = path_to_item + self.valid_classes = valid_classes + self.key_type = key_type + full_msg = msg + if path_to_item: + full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) + super(ApiTypeError, self).__init__(full_msg) + + +class ApiValueError(OpenApiException, ValueError): + def __init__(self, msg, path_to_item=None) -> None: + """ + Args: + msg (str): the exception message + + Keyword Args: + path_to_item (list) the path to the exception in the + received_data dict. None if unset + """ + + self.path_to_item = path_to_item + full_msg = msg + if path_to_item: + full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) + super(ApiValueError, self).__init__(full_msg) + + +class ApiAttributeError(OpenApiException, AttributeError): + def __init__(self, msg, path_to_item=None) -> None: + """ + Raised when an attribute reference or assignment fails. 
+ + Args: + msg (str): the exception message + + Keyword Args: + path_to_item (None/list) the path to the exception in the + received_data dict + """ + self.path_to_item = path_to_item + full_msg = msg + if path_to_item: + full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) + super(ApiAttributeError, self).__init__(full_msg) + + +class ApiKeyError(OpenApiException, KeyError): + def __init__(self, msg, path_to_item=None) -> None: + """ + Args: + msg (str): the exception message + + Keyword Args: + path_to_item (None/list) the path to the exception in the + received_data dict + """ + self.path_to_item = path_to_item + full_msg = msg + if path_to_item: + full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) + super(ApiKeyError, self).__init__(full_msg) + + +class ApiException(OpenApiException): + + def __init__( + self, + status=None, + reason=None, + http_resp=None, + *, + body: Optional[str] = None, + data: Optional[Any] = None, + ) -> None: + self.status = status + self.reason = reason + self.body = body + self.data = data + self.headers = None + + if http_resp: + if self.status is None: + self.status = http_resp.status + if self.reason is None: + self.reason = http_resp.reason + if self.body is None: + try: + self.body = http_resp.data.decode("utf-8") + except Exception: + pass + self.headers = http_resp.getheaders() + + @classmethod + def from_response( + cls, + *, + http_resp, + body: Optional[str], + data: Optional[Any], + ) -> Self: + if http_resp.status == 400: + raise BadRequestException(http_resp=http_resp, body=body, data=data) + + if http_resp.status == 401: + raise UnauthorizedException(http_resp=http_resp, body=body, data=data) + + if http_resp.status == 403: + raise ForbiddenException(http_resp=http_resp, body=body, data=data) + + if http_resp.status == 404: + raise NotFoundException(http_resp=http_resp, body=body, data=data) + + # Added new conditions for 409 and 422 + if http_resp.status == 409: + raise 
ConflictException(http_resp=http_resp, body=body, data=data) + + if http_resp.status == 422: + raise UnprocessableEntityException( + http_resp=http_resp, body=body, data=data + ) + + if 500 <= http_resp.status <= 599: + raise ServiceException(http_resp=http_resp, body=body, data=data) + raise ApiException(http_resp=http_resp, body=body, data=data) + + def __str__(self): + """Custom error messages for exception""" + error_message = "({0})\n" "Reason: {1}\n".format(self.status, self.reason) + if self.headers: + error_message += "HTTP response headers: {0}\n".format(self.headers) + + if self.data or self.body: + error_message += "HTTP response body: {0}\n".format(self.data or self.body) + + return error_message + + +class BadRequestException(ApiException): + pass + + +class NotFoundException(ApiException): + pass + + +class UnauthorizedException(ApiException): + pass + + +class ForbiddenException(ApiException): + pass + + +class ServiceException(ApiException): + pass + + +class ConflictException(ApiException): + """Exception for HTTP 409 Conflict.""" + + pass + + +class UnprocessableEntityException(ApiException): + """Exception for HTTP 422 Unprocessable Entity.""" + + pass + + +def render_path(path_to_item): + """Returns a string representation of a path""" + result = "" + for pth in path_to_item: + if isinstance(pth, int): + result += "[{0}]".format(pth) + else: + result += "['{0}']".format(pth) + return result diff --git a/src/together/generated/models/__init__.py b/src/together/generated/models/__init__.py new file mode 100644 index 00000000..7b50c345 --- /dev/null +++ b/src/together/generated/models/__init__.py @@ -0,0 +1,197 @@ +# coding: utf-8 + +# flake8: noqa +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +# import models into model package +from together.generated.models.audio_speech_request import AudioSpeechRequest +from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel +from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice +from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk +from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent +from together.generated.models.audio_speech_stream_response import ( + AudioSpeechStreamResponse, +) +from together.generated.models.autoscaling import Autoscaling +from together.generated.models.chat_completion_assistant_message_param import ( + ChatCompletionAssistantMessageParam, +) +from together.generated.models.chat_completion_choice import ChatCompletionChoice +from together.generated.models.chat_completion_choice_delta import ( + ChatCompletionChoiceDelta, +) +from together.generated.models.chat_completion_choice_delta_function_call import ( + ChatCompletionChoiceDeltaFunctionCall, +) +from together.generated.models.chat_completion_choices_data_inner import ( + ChatCompletionChoicesDataInner, +) +from together.generated.models.chat_completion_choices_data_inner_logprobs import ( + ChatCompletionChoicesDataInnerLogprobs, +) +from together.generated.models.chat_completion_chunk import ChatCompletionChunk +from together.generated.models.chat_completion_chunk_choices_inner import ( + ChatCompletionChunkChoicesInner, +) +from together.generated.models.chat_completion_event import ChatCompletionEvent +from together.generated.models.chat_completion_function_message_param import ( + ChatCompletionFunctionMessageParam, +) +from together.generated.models.chat_completion_message import ChatCompletionMessage +from together.generated.models.chat_completion_message_function_call import ( + ChatCompletionMessageFunctionCall, +) +from 
together.generated.models.chat_completion_message_param import ( + ChatCompletionMessageParam, +) +from together.generated.models.chat_completion_request import ChatCompletionRequest +from together.generated.models.chat_completion_request_function_call import ( + ChatCompletionRequestFunctionCall, +) +from together.generated.models.chat_completion_request_function_call_one_of import ( + ChatCompletionRequestFunctionCallOneOf, +) +from together.generated.models.chat_completion_request_messages_inner import ( + ChatCompletionRequestMessagesInner, +) +from together.generated.models.chat_completion_request_model import ( + ChatCompletionRequestModel, +) +from together.generated.models.chat_completion_request_response_format import ( + ChatCompletionRequestResponseFormat, +) +from together.generated.models.chat_completion_request_tool_choice import ( + ChatCompletionRequestToolChoice, +) +from together.generated.models.chat_completion_response import ChatCompletionResponse +from together.generated.models.chat_completion_stream import ChatCompletionStream +from together.generated.models.chat_completion_system_message_param import ( + ChatCompletionSystemMessageParam, +) +from together.generated.models.chat_completion_token import ChatCompletionToken +from together.generated.models.chat_completion_tool import ChatCompletionTool +from together.generated.models.chat_completion_tool_function import ( + ChatCompletionToolFunction, +) +from together.generated.models.chat_completion_tool_message_param import ( + ChatCompletionToolMessageParam, +) +from together.generated.models.chat_completion_user_message_param import ( + ChatCompletionUserMessageParam, +) +from together.generated.models.completion_choice import CompletionChoice +from together.generated.models.completion_choices_data_inner import ( + CompletionChoicesDataInner, +) +from together.generated.models.completion_chunk import CompletionChunk +from together.generated.models.completion_chunk_usage import 
CompletionChunkUsage +from together.generated.models.completion_event import CompletionEvent +from together.generated.models.completion_request import CompletionRequest +from together.generated.models.completion_request_model import CompletionRequestModel +from together.generated.models.completion_request_safety_model import ( + CompletionRequestSafetyModel, +) +from together.generated.models.completion_response import CompletionResponse +from together.generated.models.completion_stream import CompletionStream +from together.generated.models.completion_token import CompletionToken +from together.generated.models.create_endpoint_request import CreateEndpointRequest +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.embeddings_request import EmbeddingsRequest +from together.generated.models.embeddings_request_input import EmbeddingsRequestInput +from together.generated.models.embeddings_request_model import EmbeddingsRequestModel +from together.generated.models.embeddings_response import EmbeddingsResponse +from together.generated.models.embeddings_response_data_inner import ( + EmbeddingsResponseDataInner, +) +from together.generated.models.endpoint_pricing import EndpointPricing +from together.generated.models.error_data import ErrorData +from together.generated.models.error_data_error import ErrorDataError +from together.generated.models.file_delete_response import FileDeleteResponse +from together.generated.models.file_list import FileList +from together.generated.models.file_object import FileObject +from together.generated.models.file_response import FileResponse +from together.generated.models.fine_tune_event import FineTuneEvent +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest +from together.generated.models.fine_tunes_post_request_train_on_inputs import ( + FineTunesPostRequestTrainOnInputs, +) +from together.generated.models.fine_tunes_post_request_training_type 
import ( + FineTunesPostRequestTrainingType, +) +from together.generated.models.finetune_download_result import FinetuneDownloadResult +from together.generated.models.finetune_event_levels import FinetuneEventLevels +from together.generated.models.finetune_event_type import FinetuneEventType +from together.generated.models.finetune_job_status import FinetuneJobStatus +from together.generated.models.finetune_list import FinetuneList +from together.generated.models.finetune_list_events import FinetuneListEvents +from together.generated.models.finetune_response import FinetuneResponse +from together.generated.models.finetune_response_train_on_inputs import ( + FinetuneResponseTrainOnInputs, +) +from together.generated.models.finish_reason import FinishReason +from together.generated.models.full_training_type import FullTrainingType +from together.generated.models.hardware_availability import HardwareAvailability +from together.generated.models.hardware_spec import HardwareSpec +from together.generated.models.hardware_with_status import HardwareWithStatus +from together.generated.models.image_response import ImageResponse +from together.generated.models.image_response_data_inner import ImageResponseDataInner +from together.generated.models.images_generations_post_request import ( + ImagesGenerationsPostRequest, +) +from together.generated.models.images_generations_post_request_image_loras_inner import ( + ImagesGenerationsPostRequestImageLorasInner, +) +from together.generated.models.images_generations_post_request_model import ( + ImagesGenerationsPostRequestModel, +) +from together.generated.models.lr_scheduler import LRScheduler +from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs +from together.generated.models.list_endpoint import ListEndpoint +from together.generated.models.list_endpoints200_response import ( + ListEndpoints200Response, +) +from together.generated.models.list_hardware200_response import ListHardware200Response 
+from together.generated.models.list_hardware200_response_one_of import ( + ListHardware200ResponseOneOf, +) +from together.generated.models.list_hardware200_response_one_of1 import ( + ListHardware200ResponseOneOf1, +) +from together.generated.models.list_hardware200_response_one_of1_data_inner import ( + ListHardware200ResponseOneOf1DataInner, +) +from together.generated.models.list_hardware200_response_one_of_data_inner import ( + ListHardware200ResponseOneOfDataInner, +) +from together.generated.models.lo_ra_training_type import LoRATrainingType +from together.generated.models.logprobs_part import LogprobsPart +from together.generated.models.model_info import ModelInfo +from together.generated.models.pricing import Pricing +from together.generated.models.prompt_part_inner import PromptPartInner +from together.generated.models.rerank_request import RerankRequest +from together.generated.models.rerank_request_documents import RerankRequestDocuments +from together.generated.models.rerank_request_model import RerankRequestModel +from together.generated.models.rerank_response import RerankResponse +from together.generated.models.rerank_response_results_inner import ( + RerankResponseResultsInner, +) +from together.generated.models.rerank_response_results_inner_document import ( + RerankResponseResultsInnerDocument, +) +from together.generated.models.stream_sentinel import StreamSentinel +from together.generated.models.tool_choice import ToolChoice +from together.generated.models.tool_choice_function import ToolChoiceFunction +from together.generated.models.tools_part import ToolsPart +from together.generated.models.tools_part_function import ToolsPartFunction +from together.generated.models.update_endpoint_request import UpdateEndpointRequest +from together.generated.models.usage_data import UsageData diff --git a/src/together/generated/models/audio_speech_request.py b/src/together/generated/models/audio_speech_request.py new file mode 100644 index 
00000000..af1a11ef --- /dev/null +++ b/src/together/generated/models/audio_speech_request.py @@ -0,0 +1,212 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Optional, Union +from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel +from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice +from typing import Optional, Set +from typing_extensions import Self + + +class AudioSpeechRequest(BaseModel): + """ + AudioSpeechRequest + """ # noqa: E501 + + model: AudioSpeechRequestModel + input: StrictStr = Field(description="Input text to generate the audio for") + voice: AudioSpeechRequestVoice + response_format: Optional[StrictStr] = Field( + default="wav", description="The format of audio output" + ) + language: Optional[StrictStr] = Field( + default="en", description="Language of input text" + ) + response_encoding: Optional[StrictStr] = Field( + default="pcm_f32le", description="Audio encoding of response" + ) + sample_rate: Optional[Union[StrictFloat, StrictInt]] = Field( + default=44100, description="Sampling rate to use for the output audio" + ) + stream: Optional[StrictBool] = Field( + default=False, + description="If true, output is streamed for several characters at a time instead of waiting for the full response. The stream terminates with `data: [DONE]`. 
If false, return the encoded audio as octet stream", + ) + __properties: ClassVar[List[str]] = [ + "model", + "input", + "voice", + "response_format", + "language", + "response_encoding", + "sample_rate", + "stream", + ] + + @field_validator("response_format") + def response_format_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["mp3", "wav", "raw"]): + raise ValueError("must be one of enum values ('mp3', 'wav', 'raw')") + return value + + @field_validator("language") + def language_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set( + [ + "en", + "de", + "fr", + "es", + "hi", + "it", + "ja", + "ko", + "nl", + "pl", + "pt", + "ru", + "sv", + "tr", + "zh", + ] + ): + raise ValueError( + "must be one of enum values ('en', 'de', 'fr', 'es', 'hi', 'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ru', 'sv', 'tr', 'zh')" + ) + return value + + @field_validator("response_encoding") + def response_encoding_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["pcm_f32le", "pcm_s16le", "pcm_mulaw", "pcm_alaw"]): + raise ValueError( + "must be one of enum values ('pcm_f32le', 'pcm_s16le', 'pcm_mulaw', 'pcm_alaw')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of AudioSpeechRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def 
to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of model + if self.model: + _dict["model"] = self.model.to_dict() + # override the default output from pydantic by calling `to_dict()` of voice + if self.voice: + _dict["voice"] = self.voice.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of AudioSpeechRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "model": ( + AudioSpeechRequestModel.from_dict(obj["model"]) + if obj.get("model") is not None + else None + ), + "input": obj.get("input"), + "voice": ( + AudioSpeechRequestVoice.from_dict(obj["voice"]) + if obj.get("voice") is not None + else None + ), + "response_format": ( + obj.get("response_format") + if obj.get("response_format") is not None + else "wav" + ), + "language": ( + obj.get("language") if obj.get("language") is not None else "en" + ), + "response_encoding": ( + obj.get("response_encoding") + if obj.get("response_encoding") is not None + else "pcm_f32le" + ), + "sample_rate": ( + obj.get("sample_rate") + if obj.get("sample_rate") is not None + else 44100 + ), + "stream": obj.get("stream") if obj.get("stream") is not None else False, + } + ) + return _obj diff --git a/src/together/generated/models/audio_speech_request_model.py b/src/together/generated/models/audio_speech_request_model.py new file mode 
100644 index 00000000..4ab613b5 --- /dev/null +++ b/src/together/generated/models/audio_speech_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +AUDIOSPEECHREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class AudioSpeechRequestModel(BaseModel): + """ + The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#audio-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = AudioSpeechRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in AudioSpeechRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into AudioSpeechRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/audio_speech_request_voice.py b/src/together/generated/models/audio_speech_request_voice.py new file mode 100644 index 00000000..81c1f689 --- /dev/null +++ b/src/together/generated/models/audio_speech_request_voice.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +AUDIOSPEECHREQUESTVOICE_ANY_OF_SCHEMAS = ["str"] + + +class AudioSpeechRequestVoice(BaseModel): + """ + The voice to use for generating the audio. [View all supported voices here](https://docs.together.ai/docs/text-to-speech#voices-available). + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = AudioSpeechRequestVoice.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in AudioSpeechRequestVoice with anyOf schemas: str. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into AudioSpeechRequestVoice with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/audio_speech_stream_chunk.py b/src/together/generated/models/audio_speech_stream_chunk.py new file mode 100644 index 00000000..27627dd2 --- /dev/null +++ b/src/together/generated/models/audio_speech_stream_chunk.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class AudioSpeechStreamChunk(BaseModel): + """ + AudioSpeechStreamChunk + """ # noqa: E501 + + object: StrictStr + model: StrictStr + b64: StrictStr = Field(description="base64 encoded audio stream") + __properties: ClassVar[List[str]] = ["object", "model", "b64"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["audio.tts.chunk"]): + raise ValueError("must be one of enum values ('audio.tts.chunk')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of AudioSpeechStreamChunk from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of AudioSpeechStreamChunk from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "model": obj.get("model"), + "b64": obj.get("b64"), + } + ) + return _obj diff --git a/src/together/generated/models/audio_speech_stream_event.py b/src/together/generated/models/audio_speech_stream_event.py new file mode 100644 index 00000000..9c11b923 --- /dev/null +++ b/src/together/generated/models/audio_speech_stream_event.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk +from typing import Optional, Set +from typing_extensions import Self + + +class AudioSpeechStreamEvent(BaseModel): + """ + AudioSpeechStreamEvent + """ # noqa: E501 + + data: AudioSpeechStreamChunk + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of AudioSpeechStreamEvent from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of data + if self.data: + _dict["data"] = self.data.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of AudioSpeechStreamEvent from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + AudioSpeechStreamChunk.from_dict(obj["data"]) + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/audio_speech_stream_response.py b/src/together/generated/models/audio_speech_stream_response.py new file mode 100644 index 00000000..b573857d --- /dev/null +++ b/src/together/generated/models/audio_speech_stream_response.py @@ -0,0 +1,169 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent +from together.generated.models.stream_sentinel import StreamSentinel +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +AUDIOSPEECHSTREAMRESPONSE_ONE_OF_SCHEMAS = ["AudioSpeechStreamEvent", "StreamSentinel"] + + +class AudioSpeechStreamResponse(BaseModel): + """ + AudioSpeechStreamResponse + """ + + # data type: AudioSpeechStreamEvent + oneof_schema_1_validator: Optional[AudioSpeechStreamEvent] = None + # data type: StreamSentinel + oneof_schema_2_validator: Optional[StreamSentinel] = None + actual_instance: Optional[Union[AudioSpeechStreamEvent, StreamSentinel]] = None + one_of_schemas: Set[str] = {"AudioSpeechStreamEvent", "StreamSentinel"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = AudioSpeechStreamResponse.model_construct() + error_messages = [] + match = 0 + # validate data type: AudioSpeechStreamEvent + if not isinstance(v, AudioSpeechStreamEvent): + error_messages.append( + f"Error! 
Input type `{type(v)}` is not `AudioSpeechStreamEvent`" + ) + else: + match += 1 + # validate data type: StreamSentinel + if not isinstance(v, StreamSentinel): + error_messages.append( + f"Error! Input type `{type(v)}` is not `StreamSentinel`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into AudioSpeechStreamEvent + try: + instance.actual_instance = AudioSpeechStreamEvent.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into StreamSentinel + try: + instance.actual_instance = StreamSentinel.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[Union[Dict[str, Any], AudioSpeechStreamEvent, StreamSentinel]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/autoscaling.py b/src/together/generated/models/autoscaling.py new file mode 100644 index 00000000..fb79d4f5 --- /dev/null +++ b/src/together/generated/models/autoscaling.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictInt +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class Autoscaling(BaseModel): + """ + Configuration for automatic scaling of replicas based on demand. 
+ """ # noqa: E501 + + min_replicas: StrictInt = Field( + description="The minimum number of replicas to maintain, even when there is no load" + ) + max_replicas: StrictInt = Field( + description="The maximum number of replicas to scale up to under load" + ) + __properties: ClassVar[List[str]] = ["min_replicas", "max_replicas"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of Autoscaling from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of Autoscaling from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "min_replicas": obj.get("min_replicas"), + "max_replicas": obj.get("max_replicas"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_assistant_message_param.py b/src/together/generated/models/chat_completion_assistant_message_param.py new file mode 100644 index 00000000..dbb10cfe --- /dev/null +++ b/src/together/generated/models/chat_completion_assistant_message_param.py @@ -0,0 +1,130 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_message_function_call import ( + ChatCompletionMessageFunctionCall, +) +from together.generated.models.tool_choice import ToolChoice +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionAssistantMessageParam(BaseModel): + """ + ChatCompletionAssistantMessageParam + """ # noqa: E501 + + content: Optional[StrictStr] = None + role: StrictStr + name: Optional[StrictStr] = None + tool_calls: Optional[List[ToolChoice]] = None + function_call: Optional[ChatCompletionMessageFunctionCall] = None + __properties: ClassVar[List[str]] = [ + "content", + "role", + "name", + "tool_calls", + "function_call", + ] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["assistant"]): + raise ValueError("must be one of enum values ('assistant')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionAssistantMessageParam from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in tool_calls (list) + _items = [] + if self.tool_calls: + for _item_tool_calls in self.tool_calls: + if _item_tool_calls: + _items.append(_item_tool_calls.to_dict()) + _dict["tool_calls"] = _items + # override the default output from pydantic by calling `to_dict()` of function_call + if self.function_call: + _dict["function_call"] = self.function_call.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionAssistantMessageParam from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "content": obj.get("content"), + "role": obj.get("role"), + "name": obj.get("name"), + "tool_calls": ( + [ToolChoice.from_dict(_item) for _item in obj["tool_calls"]] + if obj.get("tool_calls") is not None + else None + ), + "function_call": ( + ChatCompletionMessageFunctionCall.from_dict(obj["function_call"]) + if obj.get("function_call") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_choice.py b/src/together/generated/models/chat_completion_choice.py new file mode 100644 index 00000000..3cd51127 --- /dev/null +++ b/src/together/generated/models/chat_completion_choice.py @@ -0,0 +1,112 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_choice_delta import ( + ChatCompletionChoiceDelta, +) +from together.generated.models.finish_reason import FinishReason +from together.generated.models.logprobs_part import LogprobsPart +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChoice(BaseModel): + """ + ChatCompletionChoice + """ # noqa: E501 + + index: StrictInt + finish_reason: FinishReason + logprobs: Optional[LogprobsPart] = None + delta: ChatCompletionChoiceDelta + __properties: ClassVar[List[str]] = ["index", "finish_reason", "logprobs", "delta"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChoice from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. 
Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of logprobs + if self.logprobs: + _dict["logprobs"] = self.logprobs.to_dict() + # override the default output from pydantic by calling `to_dict()` of delta + if self.delta: + _dict["delta"] = self.delta.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChoice from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "index": obj.get("index"), + "finish_reason": obj.get("finish_reason"), + "logprobs": ( + LogprobsPart.from_dict(obj["logprobs"]) + if obj.get("logprobs") is not None + else None + ), + "delta": ( + ChatCompletionChoiceDelta.from_dict(obj["delta"]) + if obj.get("delta") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_choice_delta.py b/src/together/generated/models/chat_completion_choice_delta.py new file mode 100644 index 00000000..be8bde6b --- /dev/null +++ b/src/together/generated/models/chat_completion_choice_delta.py @@ -0,0 +1,134 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_choice_delta_function_call import ( + ChatCompletionChoiceDeltaFunctionCall, +) +from together.generated.models.tool_choice import ToolChoice +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChoiceDelta(BaseModel): + """ + ChatCompletionChoiceDelta + """ # noqa: E501 + + token_id: Optional[StrictInt] = None + role: StrictStr + content: Optional[StrictStr] = None + tool_calls: Optional[List[ToolChoice]] = None + function_call: Optional[ChatCompletionChoiceDeltaFunctionCall] = None + __properties: ClassVar[List[str]] = [ + "token_id", + "role", + "content", + "tool_calls", + "function_call", + ] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["system", "user", "assistant", "function", "tool"]): + raise ValueError( + "must be one of enum values ('system', 'user', 'assistant', 'function', 'tool')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChoiceDelta from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation 
of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in tool_calls (list) + _items = [] + if self.tool_calls: + for _item_tool_calls in self.tool_calls: + if _item_tool_calls: + _items.append(_item_tool_calls.to_dict()) + _dict["tool_calls"] = _items + # override the default output from pydantic by calling `to_dict()` of function_call + if self.function_call: + _dict["function_call"] = self.function_call.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChoiceDelta from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "token_id": obj.get("token_id"), + "role": obj.get("role"), + "content": obj.get("content"), + "tool_calls": ( + [ToolChoice.from_dict(_item) for _item in obj["tool_calls"]] + if obj.get("tool_calls") is not None + else None + ), + "function_call": ( + ChatCompletionChoiceDeltaFunctionCall.from_dict( + obj["function_call"] + ) + if obj.get("function_call") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_choice_delta_function_call.py b/src/together/generated/models/chat_completion_choice_delta_function_call.py new file mode 100644 index 00000000..4d1c4079 --- /dev/null +++ b/src/together/generated/models/chat_completion_choice_delta_function_call.py @@ -0,0 +1,86 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChoiceDeltaFunctionCall(BaseModel): + """ + ChatCompletionChoiceDeltaFunctionCall + """ # noqa: E501 + + arguments: StrictStr + name: StrictStr + __properties: ClassVar[List[str]] = ["arguments", "name"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChoiceDeltaFunctionCall from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChoiceDeltaFunctionCall from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + {"arguments": obj.get("arguments"), "name": obj.get("name")} + ) + return _obj diff --git a/src/together/generated/models/chat_completion_choices_data_inner.py b/src/together/generated/models/chat_completion_choices_data_inner.py new file mode 100644 index 00000000..042cd5f3 --- /dev/null +++ b/src/together/generated/models/chat_completion_choices_data_inner.py @@ -0,0 +1,123 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_choices_data_inner_logprobs import ( + ChatCompletionChoicesDataInnerLogprobs, +) +from together.generated.models.chat_completion_message import ChatCompletionMessage +from together.generated.models.finish_reason import FinishReason +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChoicesDataInner(BaseModel): + """ + ChatCompletionChoicesDataInner + """ # noqa: E501 + + text: Optional[StrictStr] = None + index: Optional[StrictInt] = None + seed: Optional[StrictInt] = None + finish_reason: Optional[FinishReason] = None + message: Optional[ChatCompletionMessage] = None + logprobs: Optional[ChatCompletionChoicesDataInnerLogprobs] = None + __properties: ClassVar[List[str]] = [ + "text", + "index", + "seed", + "finish_reason", + "message", + "logprobs", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChoicesDataInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of message + if self.message: + _dict["message"] = self.message.to_dict() + # override the default output from pydantic by calling `to_dict()` of logprobs + if self.logprobs: + _dict["logprobs"] = self.logprobs.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChoicesDataInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "text": obj.get("text"), + "index": obj.get("index"), + "seed": obj.get("seed"), + "finish_reason": obj.get("finish_reason"), + "message": ( + ChatCompletionMessage.from_dict(obj["message"]) + if obj.get("message") is not None + else None + ), + "logprobs": ( + ChatCompletionChoicesDataInnerLogprobs.from_dict(obj["logprobs"]) + if obj.get("logprobs") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_choices_data_inner_logprobs.py b/src/together/generated/models/chat_completion_choices_data_inner_logprobs.py new file mode 100644 index 00000000..608b6c10 --- /dev/null +++ b/src/together/generated/models/chat_completion_choices_data_inner_logprobs.py @@ -0,0 +1,97 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional, Union +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChoicesDataInnerLogprobs(BaseModel): + """ + ChatCompletionChoicesDataInnerLogprobs + """ # noqa: E501 + + token_ids: Optional[List[Union[StrictFloat, StrictInt]]] = Field( + default=None, description="List of token IDs corresponding to the logprobs" + ) + tokens: Optional[List[StrictStr]] = Field( + default=None, description="List of token strings" + ) + token_logprobs: Optional[List[Union[StrictFloat, StrictInt]]] = Field( + default=None, description="List of token log probabilities" + ) + __properties: ClassVar[List[str]] = ["token_ids", "tokens", "token_logprobs"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChoicesDataInnerLogprobs from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChoicesDataInnerLogprobs from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "token_ids": obj.get("token_ids"), + "tokens": obj.get("tokens"), + "token_logprobs": obj.get("token_logprobs"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_chunk.py b/src/together/generated/models/chat_completion_chunk.py new file mode 100644 index 00000000..5d1dd9de --- /dev/null +++ b/src/together/generated/models/chat_completion_chunk.py @@ -0,0 +1,139 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_chunk_choices_inner import ( + ChatCompletionChunkChoicesInner, +) +from together.generated.models.completion_chunk_usage import CompletionChunkUsage +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChunk(BaseModel): + """ + ChatCompletionChunk + """ # noqa: E501 + + id: StrictStr + object: StrictStr + created: StrictInt + system_fingerprint: Optional[StrictStr] = None + model: StrictStr + choices: List[ChatCompletionChunkChoicesInner] + usage: Optional[CompletionChunkUsage] = None + __properties: ClassVar[List[str]] = [ + "id", + "object", + "created", + "system_fingerprint", + "model", + "choices", + "usage", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["chat.completion.chunk"]): + raise ValueError("must be one of enum values ('chat.completion.chunk')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChunk from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in choices (list) + _items = [] + if self.choices: + for _item_choices in self.choices: + if _item_choices: + _items.append(_item_choices.to_dict()) + _dict["choices"] = _items + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChunk from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "object": obj.get("object"), + "created": obj.get("created"), + "system_fingerprint": obj.get("system_fingerprint"), + "model": obj.get("model"), + "choices": ( + [ + ChatCompletionChunkChoicesInner.from_dict(_item) + for _item in obj["choices"] + ] + if obj.get("choices") is not None + else None + ), + "usage": ( + CompletionChunkUsage.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_chunk_choices_inner.py b/src/together/generated/models/chat_completion_chunk_choices_inner.py new file mode 100644 index 00000000..fd5a9ef9 --- /dev/null +++ b/src/together/generated/models/chat_completion_chunk_choices_inner.py @@ -0,0 +1,112 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt +from typing import Any, ClassVar, Dict, List, Optional, Union +from together.generated.models.chat_completion_choice_delta import ( + ChatCompletionChoiceDelta, +) +from together.generated.models.finish_reason import FinishReason +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChunkChoicesInner(BaseModel): + """ + ChatCompletionChunkChoicesInner + """ # noqa: E501 + + index: StrictInt + finish_reason: FinishReason + logprobs: Optional[Union[StrictFloat, StrictInt]] = None + seed: Optional[StrictInt] = None + delta: ChatCompletionChoiceDelta + __properties: ClassVar[List[str]] = [ + "index", + "finish_reason", + "logprobs", + "seed", + "delta", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChunkChoicesInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of delta + if self.delta: + _dict["delta"] = self.delta.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChunkChoicesInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "index": obj.get("index"), + "finish_reason": obj.get("finish_reason"), + "logprobs": obj.get("logprobs"), + "seed": obj.get("seed"), + "delta": ( + ChatCompletionChoiceDelta.from_dict(obj["delta"]) + if obj.get("delta") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_event.py b/src/together/generated/models/chat_completion_event.py new file mode 100644 index 00000000..49c84ba7 --- /dev/null +++ b/src/together/generated/models/chat_completion_event.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.chat_completion_chunk import ChatCompletionChunk +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionEvent(BaseModel): + """ + ChatCompletionEvent + """ # noqa: E501 + + data: ChatCompletionChunk + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionEvent from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of data + if self.data: + _dict["data"] = self.data.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionEvent from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + ChatCompletionChunk.from_dict(obj["data"]) + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_function_message_param.py b/src/together/generated/models/chat_completion_function_message_param.py new file mode 100644 index 00000000..b6679430 --- /dev/null +++ b/src/together/generated/models/chat_completion_function_message_param.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
# NOTE(review): OpenAPI-generated model — regenerate rather than hand-edit.
class ChatCompletionFunctionMessageParam(BaseModel):
    """
    ChatCompletionFunctionMessageParam

    Chat message with the fixed role ``"function"``; ``content`` holds the
    function result and ``name`` identifies the function.
    """  # noqa: E501

    role: StrictStr
    content: StrictStr
    name: StrictStr
    # Declared property names, used by the generated (de)serialization helpers.
    __properties: ClassVar[List[str]] = ["role", "content", "name"]

    @field_validator("role")
    def role_validate_enum(cls, value):
        """Validates the enum"""
        # Only the literal "function" role is accepted for this message type.
        if value not in set(["function"]):
            raise ValueError("must be one of enum values ('function')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of ChatCompletionFunctionMessageParam from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of ChatCompletionFunctionMessageParam from a dict"""
        if obj is None:
            return None

        # Non-dict input goes straight to pydantic validation.
        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate(
            {
                "role": obj.get("role"),
                "content": obj.get("content"),
                "name": obj.get("name"),
            }
        )
        return _obj
# NOTE(review): OpenAPI-generated model — regenerate rather than hand-edit.
class ChatCompletionMessage(BaseModel):
    """
    ChatCompletionMessage

    Assistant-role message returned by the API; may carry tool calls and/or a
    deprecated-style ``function_call``.
    """  # noqa: E501

    content: StrictStr
    role: StrictStr
    # NOTE(review): items are typed as ToolChoice by the generator — verify this
    # matches the spec's tool-call schema when regenerating.
    tool_calls: Optional[List[ToolChoice]] = None
    function_call: Optional[ChatCompletionMessageFunctionCall] = None
    # Declared property names, used by the generated (de)serialization helpers.
    __properties: ClassVar[List[str]] = [
        "content",
        "role",
        "tool_calls",
        "function_call",
    ]

    @field_validator("role")
    def role_validate_enum(cls, value):
        """Validates the enum"""
        # Only the literal "assistant" role is accepted for this message type.
        if value not in set(["assistant"]):
            raise ValueError("must be one of enum values ('assistant')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of ChatCompletionMessage from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of each item in tool_calls (list)
        _items = []
        if self.tool_calls:
            for _item_tool_calls in self.tool_calls:
                if _item_tool_calls:
                    _items.append(_item_tool_calls.to_dict())
            _dict["tool_calls"] = _items
        # override the default output from pydantic by calling `to_dict()` of function_call
        if self.function_call:
            _dict["function_call"] = self.function_call.to_dict()
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of ChatCompletionMessage from a dict"""
        if obj is None:
            return None

        # Non-dict input goes straight to pydantic validation.
        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate(
            {
                "content": obj.get("content"),
                "role": obj.get("role"),
                "tool_calls": (
                    [ToolChoice.from_dict(_item) for _item in obj["tool_calls"]]
                    if obj.get("tool_calls") is not None
                    else None
                ),
                "function_call": (
                    ChatCompletionMessageFunctionCall.from_dict(obj["function_call"])
                    if obj.get("function_call") is not None
                    else None
                ),
            }
        )
        return _obj
# NOTE(review): OpenAPI-generated model — regenerate rather than hand-edit.
class ChatCompletionMessageFunctionCall(BaseModel):
    """
    ChatCompletionMessageFunctionCall

    A function call emitted by the model: the function ``name`` plus its raw
    ``arguments`` string (presumably JSON-encoded; not parsed here — confirm
    against the spec).
    """  # noqa: E501

    arguments: StrictStr
    name: StrictStr
    # Declared property names, used by the generated (de)serialization helpers.
    __properties: ClassVar[List[str]] = ["arguments", "name"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of ChatCompletionMessageFunctionCall from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of ChatCompletionMessageFunctionCall from a dict"""
        if obj is None:
            return None

        # Non-dict input goes straight to pydantic validation.
        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate(
            {"arguments": obj.get("arguments"), "name": obj.get("name")}
        )
        return _obj
# NOTE(review): OpenAPI-generated oneOf wrapper — regenerate rather than hand-edit.
class ChatCompletionMessageParam(BaseModel):
    """
    ChatCompletionMessageParam

    oneOf wrapper: exactly one of the five message-param schemas is held in
    ``actual_instance``; the ``oneof_schema_*_validator`` fields exist only for
    pydantic-side type checking.
    """

    # data type: ChatCompletionSystemMessageParam
    oneof_schema_1_validator: Optional[ChatCompletionSystemMessageParam] = None
    # data type: ChatCompletionUserMessageParam
    oneof_schema_2_validator: Optional[ChatCompletionUserMessageParam] = None
    # data type: ChatCompletionAssistantMessageParam
    oneof_schema_3_validator: Optional[ChatCompletionAssistantMessageParam] = None
    # data type: ChatCompletionToolMessageParam
    oneof_schema_4_validator: Optional[ChatCompletionToolMessageParam] = None
    # data type: ChatCompletionFunctionMessageParam
    oneof_schema_5_validator: Optional[ChatCompletionFunctionMessageParam] = None
    actual_instance: Optional[
        Union[
            ChatCompletionAssistantMessageParam,
            ChatCompletionFunctionMessageParam,
            ChatCompletionSystemMessageParam,
            ChatCompletionToolMessageParam,
            ChatCompletionUserMessageParam,
        ]
    ] = None
    one_of_schemas: Set[str] = {
        "ChatCompletionAssistantMessageParam",
        "ChatCompletionFunctionMessageParam",
        "ChatCompletionSystemMessageParam",
        "ChatCompletionToolMessageParam",
        "ChatCompletionUserMessageParam",
    }

    model_config = ConfigDict(
        validate_assignment=True,
        protected_namespaces=(),
    )

    def __init__(self, *args, **kwargs) -> None:
        # Accepts either a single positional argument (the actual instance) or
        # keyword arguments, but never both.
        if args:
            if len(args) > 1:
                raise ValueError(
                    "If a position argument is used, only 1 is allowed to set `actual_instance`"
                )
            if kwargs:
                raise ValueError(
                    "If a position argument is used, keyword arguments cannot be used."
                )
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)

    @field_validator("actual_instance")
    def actual_instance_must_validate_oneof(cls, v):
        # Enforces the oneOf contract: the value must match exactly one of the
        # five schemas (isinstance checks; counts matches and rejects 0 or >1).
        instance = ChatCompletionMessageParam.model_construct()
        error_messages = []
        match = 0
        # validate data type: ChatCompletionSystemMessageParam
        if not isinstance(v, ChatCompletionSystemMessageParam):
            error_messages.append(
                f"Error! Input type `{type(v)}` is not `ChatCompletionSystemMessageParam`"
            )
        else:
            match += 1
        # validate data type: ChatCompletionUserMessageParam
        if not isinstance(v, ChatCompletionUserMessageParam):
            error_messages.append(
                f"Error! Input type `{type(v)}` is not `ChatCompletionUserMessageParam`"
            )
        else:
            match += 1
        # validate data type: ChatCompletionAssistantMessageParam
        if not isinstance(v, ChatCompletionAssistantMessageParam):
            error_messages.append(
                f"Error! Input type `{type(v)}` is not `ChatCompletionAssistantMessageParam`"
            )
        else:
            match += 1
        # validate data type: ChatCompletionToolMessageParam
        if not isinstance(v, ChatCompletionToolMessageParam):
            error_messages.append(
                f"Error! Input type `{type(v)}` is not `ChatCompletionToolMessageParam`"
            )
        else:
            match += 1
        # validate data type: ChatCompletionFunctionMessageParam
        if not isinstance(v, ChatCompletionFunctionMessageParam):
            error_messages.append(
                f"Error! Input type `{type(v)}` is not `ChatCompletionFunctionMessageParam`"
            )
        else:
            match += 1
        if match > 1:
            # more than 1 match
            raise ValueError(
                "Multiple matches found when setting `actual_instance` in ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. Details: "
                + ", ".join(error_messages)
            )
        elif match == 0:
            # no match
            raise ValueError(
                "No match found when setting `actual_instance` in ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. Details: "
                + ", ".join(error_messages)
            )
        else:
            return v

    @classmethod
    def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self:
        # Round-trips through JSON so the oneOf resolution in from_json applies.
        return cls.from_json(json.dumps(obj))

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Returns the object represented by the json string"""
        # Tries each candidate schema in turn; exactly one must deserialize.
        # NOTE(review): on multiple successes, actual_instance holds the LAST
        # successful parse when the error is raised (generated behavior).
        instance = cls.model_construct()
        error_messages = []
        match = 0

        # deserialize data into ChatCompletionSystemMessageParam
        try:
            instance.actual_instance = ChatCompletionSystemMessageParam.from_json(
                json_str
            )
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into ChatCompletionUserMessageParam
        try:
            instance.actual_instance = ChatCompletionUserMessageParam.from_json(
                json_str
            )
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into ChatCompletionAssistantMessageParam
        try:
            instance.actual_instance = ChatCompletionAssistantMessageParam.from_json(
                json_str
            )
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into ChatCompletionToolMessageParam
        try:
            instance.actual_instance = ChatCompletionToolMessageParam.from_json(
                json_str
            )
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into ChatCompletionFunctionMessageParam
        try:
            instance.actual_instance = ChatCompletionFunctionMessageParam.from_json(
                json_str
            )
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))

        if match > 1:
            # more than 1 match
            raise ValueError(
                "Multiple matches found when deserializing the JSON string into ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. Details: "
                + ", ".join(error_messages)
            )
        elif match == 0:
            # no match
            raise ValueError(
                "No match found when deserializing the JSON string into ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. Details: "
                + ", ".join(error_messages)
            )
        else:
            return instance

    def to_json(self) -> str:
        """Returns the JSON representation of the actual instance"""
        if self.actual_instance is None:
            return "null"

        if hasattr(self.actual_instance, "to_json") and callable(
            self.actual_instance.to_json
        ):
            return self.actual_instance.to_json()
        else:
            return json.dumps(self.actual_instance)

    def to_dict(
        self,
    ) -> Optional[
        Union[
            Dict[str, Any],
            ChatCompletionAssistantMessageParam,
            ChatCompletionFunctionMessageParam,
            ChatCompletionSystemMessageParam,
            ChatCompletionToolMessageParam,
            ChatCompletionUserMessageParam,
        ]
    ]:
        """Returns the dict representation of the actual instance"""
        if self.actual_instance is None:
            return None

        if hasattr(self.actual_instance, "to_dict") and callable(
            self.actual_instance.to_dict
        ):
            return self.actual_instance.to_dict()
        else:
            # primitive type
            return self.actual_instance

    def to_str(self) -> str:
        """Returns the string representation of the actual instance"""
        return pprint.pformat(self.model_dump())
# NOTE(review): OpenAPI-generated model — regenerate rather than hand-edit.
class ChatCompletionRequest(BaseModel):
    """
    ChatCompletionRequest

    Request body for the chat-completions endpoint: the conversation
    ``messages``, the target ``model``, and the sampling / streaming /
    tool-calling options defined by the spec.
    """  # noqa: E501

    messages: List[ChatCompletionRequestMessagesInner] = Field(
        description="A list of messages comprising the conversation so far."
    )
    model: ChatCompletionRequestModel
    max_tokens: Optional[StrictInt] = Field(
        default=None, description="The maximum number of tokens to generate."
    )
    # NOTE(review): the example in this description reads '""' — looks like an
    # end-of-sequence token tag was stripped from the spec text; confirm against
    # the upstream OpenAPI document before regenerating.
    stop: Optional[List[StrictStr]] = Field(
        default=None,
        description='A list of string sequences that will truncate (stop) inference text output. For example, "" will stop generation as soon as the model generates the given token.',
    )
    temperature: Optional[Union[StrictFloat, StrictInt]] = Field(
        default=None,
        description="A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output.",
    )
    top_p: Optional[Union[StrictFloat, StrictInt]] = Field(
        default=None,
        description="A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text.",
    )
    top_k: Optional[StrictInt] = Field(
        default=None,
        description="An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.",
    )
    context_length_exceeded_behavior: Optional[StrictStr] = Field(
        default="error",
        description="Defined the behavior of the API when max_tokens exceed the maximum context length of the model. When set to 'error', API will return 400 with appropriate error message. When set to 'truncate', override the max_tokens with maximum context length of the model.",
    )
    repetition_penalty: Optional[Union[StrictFloat, StrictInt]] = Field(
        default=None,
        description="A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition.",
    )
    stream: Optional[StrictBool] = Field(
        default=None,
        description="If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results.",
    )
    logprobs: Optional[Annotated[int, Field(le=1, strict=True, ge=0)]] = Field(
        default=None,
        description="Determines the number of most likely tokens to return at each token position log probabilities to return.",
    )
    echo: Optional[StrictBool] = Field(
        default=None,
        description="If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs.",
    )
    n: Optional[Annotated[int, Field(le=128, strict=True, ge=1)]] = Field(
        default=None,
        description="The number of completions to generate for each prompt.",
    )
    min_p: Optional[Union[StrictFloat, StrictInt]] = Field(
        default=None,
        description="A number between 0 and 1 that can be used as an alternative to top_p and top-k.",
    )
    presence_penalty: Optional[Union[StrictFloat, StrictInt]] = Field(
        default=None,
        description="A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics.",
    )
    frequency_penalty: Optional[Union[StrictFloat, StrictInt]] = Field(
        default=None,
        description="A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned.",
    )
    logit_bias: Optional[Dict[str, Union[StrictFloat, StrictInt]]] = Field(
        default=None,
        description="Adjusts the likelihood of specific tokens appearing in the generated output.",
    )
    seed: Optional[StrictInt] = Field(
        default=None, description="Seed value for reproducibility."
    )
    function_call: Optional[ChatCompletionRequestFunctionCall] = None
    response_format: Optional[ChatCompletionRequestResponseFormat] = None
    tools: Optional[List[ToolsPart]] = Field(
        default=None,
        description="A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.",
    )
    tool_choice: Optional[ChatCompletionRequestToolChoice] = None
    safety_model: Optional[StrictStr] = Field(
        default=None,
        description="The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models).",
    )
    # Declared property names, used by the generated (de)serialization helpers.
    __properties: ClassVar[List[str]] = [
        "messages",
        "model",
        "max_tokens",
        "stop",
        "temperature",
        "top_p",
        "top_k",
        "context_length_exceeded_behavior",
        "repetition_penalty",
        "stream",
        "logprobs",
        "echo",
        "n",
        "min_p",
        "presence_penalty",
        "frequency_penalty",
        "logit_bias",
        "seed",
        "function_call",
        "response_format",
        "tools",
        "tool_choice",
        "safety_model",
    ]

    @field_validator("context_length_exceeded_behavior")
    def context_length_exceeded_behavior_validate_enum(cls, value):
        """Validates the enum"""
        # None is allowed (optional field); otherwise must be one of the two modes.
        if value is None:
            return value

        if value not in set(["truncate", "error"]):
            raise ValueError("must be one of enum values ('truncate', 'error')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of ChatCompletionRequest from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of each item in messages (list)
        _items = []
        if self.messages:
            for _item_messages in self.messages:
                if _item_messages:
                    _items.append(_item_messages.to_dict())
            _dict["messages"] = _items
        # override the default output from pydantic by calling `to_dict()` of model
        if self.model:
            _dict["model"] = self.model.to_dict()
        # override the default output from pydantic by calling `to_dict()` of function_call
        if self.function_call:
            _dict["function_call"] = self.function_call.to_dict()
        # override the default output from pydantic by calling `to_dict()` of response_format
        if self.response_format:
            _dict["response_format"] = self.response_format.to_dict()
        # override the default output from pydantic by calling `to_dict()` of each item in tools (list)
        _items = []
        if self.tools:
            for _item_tools in self.tools:
                if _item_tools:
                    _items.append(_item_tools.to_dict())
            _dict["tools"] = _items
        # override the default output from pydantic by calling `to_dict()` of tool_choice
        if self.tool_choice:
            _dict["tool_choice"] = self.tool_choice.to_dict()
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of ChatCompletionRequest from a dict"""
        if obj is None:
            return None

        # Non-dict input goes straight to pydantic validation.
        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate(
            {
                "messages": (
                    [
                        ChatCompletionRequestMessagesInner.from_dict(_item)
                        for _item in obj["messages"]
                    ]
                    if obj.get("messages") is not None
                    else None
                ),
                "model": (
                    ChatCompletionRequestModel.from_dict(obj["model"])
                    if obj.get("model") is not None
                    else None
                ),
                "max_tokens": obj.get("max_tokens"),
                "stop": obj.get("stop"),
                "temperature": obj.get("temperature"),
                "top_p": obj.get("top_p"),
                "top_k": obj.get("top_k"),
                "context_length_exceeded_behavior": (
                    obj.get("context_length_exceeded_behavior")
                    if obj.get("context_length_exceeded_behavior") is not None
                    else "error"
                ),
                "repetition_penalty": obj.get("repetition_penalty"),
                "stream": obj.get("stream"),
                "logprobs": obj.get("logprobs"),
                "echo": obj.get("echo"),
                "n": obj.get("n"),
                "min_p": obj.get("min_p"),
                "presence_penalty": obj.get("presence_penalty"),
                "frequency_penalty": obj.get("frequency_penalty"),
                "logit_bias": obj.get("logit_bias"),
                "seed": obj.get("seed"),
                "function_call": (
                    ChatCompletionRequestFunctionCall.from_dict(obj["function_call"])
                    if obj.get("function_call") is not None
                    else None
                ),
                "response_format": (
                    ChatCompletionRequestResponseFormat.from_dict(
                        obj["response_format"]
                    )
                    if obj.get("response_format") is not None
                    else None
                ),
                "tools": (
                    [ToolsPart.from_dict(_item) for _item in obj["tools"]]
                    if obj.get("tools") is not None
                    else None
                ),
                "tool_choice": (
                    ChatCompletionRequestToolChoice.from_dict(obj["tool_choice"])
                    if obj.get("tool_choice") is not None
                    else None
                ),
                "safety_model": obj.get("safety_model"),
            }
        )
        return _obj
# NOTE(review): OpenAPI-generated oneOf wrapper — regenerate rather than hand-edit.
class ChatCompletionRequestFunctionCall(BaseModel):
    """
    ChatCompletionRequestFunctionCall

    oneOf wrapper: ``actual_instance`` is either a plain string or a
    ChatCompletionRequestFunctionCallOneOf object.
    """

    # data type: str
    oneof_schema_1_validator: Optional[StrictStr] = None
    # data type: ChatCompletionRequestFunctionCallOneOf
    oneof_schema_2_validator: Optional[ChatCompletionRequestFunctionCallOneOf] = None
    actual_instance: Optional[Union[ChatCompletionRequestFunctionCallOneOf, str]] = None
    one_of_schemas: Set[str] = {"ChatCompletionRequestFunctionCallOneOf", "str"}

    model_config = ConfigDict(
        validate_assignment=True,
        protected_namespaces=(),
    )

    def __init__(self, *args, **kwargs) -> None:
        # Accepts either a single positional argument (the actual instance) or
        # keyword arguments, but never both.
        if args:
            if len(args) > 1:
                raise ValueError(
                    "If a position argument is used, only 1 is allowed to set `actual_instance`"
                )
            if kwargs:
                raise ValueError(
                    "If a position argument is used, keyword arguments cannot be used."
                )
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)

    @field_validator("actual_instance")
    def actual_instance_must_validate_oneof(cls, v):
        # Enforces the oneOf contract: v must validate as exactly one of
        # {str, ChatCompletionRequestFunctionCallOneOf}. The str branch relies
        # on validate_assignment to raise on a non-string value.
        instance = ChatCompletionRequestFunctionCall.model_construct()
        error_messages = []
        match = 0
        # validate data type: str
        try:
            instance.oneof_schema_1_validator = v
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # validate data type: ChatCompletionRequestFunctionCallOneOf
        if not isinstance(v, ChatCompletionRequestFunctionCallOneOf):
            error_messages.append(
                f"Error! Input type `{type(v)}` is not `ChatCompletionRequestFunctionCallOneOf`"
            )
        else:
            match += 1
        if match > 1:
            # more than 1 match
            raise ValueError(
                "Multiple matches found when setting `actual_instance` in ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. Details: "
                + ", ".join(error_messages)
            )
        elif match == 0:
            # no match
            raise ValueError(
                "No match found when setting `actual_instance` in ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. Details: "
                + ", ".join(error_messages)
            )
        else:
            return v

    @classmethod
    def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self:
        # Round-trips through JSON so the oneOf resolution in from_json applies.
        return cls.from_json(json.dumps(obj))

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Returns the object represented by the json string"""
        instance = cls.model_construct()
        error_messages = []
        match = 0

        # deserialize data into str
        try:
            # validation
            instance.oneof_schema_1_validator = json.loads(json_str)
            # assign value to actual_instance
            instance.actual_instance = instance.oneof_schema_1_validator
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into ChatCompletionRequestFunctionCallOneOf
        try:
            instance.actual_instance = ChatCompletionRequestFunctionCallOneOf.from_json(
                json_str
            )
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))

        if match > 1:
            # more than 1 match
            raise ValueError(
                "Multiple matches found when deserializing the JSON string into ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. Details: "
                + ", ".join(error_messages)
            )
        elif match == 0:
            # no match
            raise ValueError(
                "No match found when deserializing the JSON string into ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. Details: "
                + ", ".join(error_messages)
            )
        else:
            return instance

    def to_json(self) -> str:
        """Returns the JSON representation of the actual instance"""
        if self.actual_instance is None:
            return "null"

        if hasattr(self.actual_instance, "to_json") and callable(
            self.actual_instance.to_json
        ):
            return self.actual_instance.to_json()
        else:
            return json.dumps(self.actual_instance)

    def to_dict(
        self,
    ) -> Optional[Union[Dict[str, Any], ChatCompletionRequestFunctionCallOneOf, str]]:
        """Returns the dict representation of the actual instance"""
        if self.actual_instance is None:
            return None

        if hasattr(self.actual_instance, "to_dict") and callable(
            self.actual_instance.to_dict
        ):
            return self.actual_instance.to_dict()
        else:
            # primitive type
            return self.actual_instance

    def to_str(self) -> str:
        """Returns the string representation of the actual instance"""
        return pprint.pformat(self.model_dump())
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionRequestFunctionCallOneOf(BaseModel): + """ + ChatCompletionRequestFunctionCallOneOf + """ # noqa: E501 + + name: StrictStr + __properties: ClassVar[List[str]] = ["name"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionRequestFunctionCallOneOf from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionRequestFunctionCallOneOf from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"name": obj.get("name")}) + return _obj diff --git a/src/together/generated/models/chat_completion_request_messages_inner.py b/src/together/generated/models/chat_completion_request_messages_inner.py new file mode 100644 index 00000000..2c9802b9 --- /dev/null +++ b/src/together/generated/models/chat_completion_request_messages_inner.py @@ -0,0 +1,99 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionRequestMessagesInner(BaseModel): + """ + ChatCompletionRequestMessagesInner + """ # noqa: E501 + + role: StrictStr = Field( + description="The role of the messages author. Choice between: system, user, or assistant." + ) + content: StrictStr = Field( + description="The content of the message, which can either be a simple string or a structured format." 
+ ) + __properties: ClassVar[List[str]] = ["role", "content"] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["system", "user", "assistant", "tool"]): + raise ValueError( + "must be one of enum values ('system', 'user', 'assistant', 'tool')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionRequestMessagesInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionRequestMessagesInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + {"role": obj.get("role"), "content": obj.get("content")} + ) + return _obj diff --git a/src/together/generated/models/chat_completion_request_model.py b/src/together/generated/models/chat_completion_request_model.py new file mode 100644 index 00000000..f5cd61a8 --- /dev/null +++ b/src/together/generated/models/chat_completion_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +CHATCOMPLETIONREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class ChatCompletionRequestModel(BaseModel): + """ + The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = ChatCompletionRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in ChatCompletionRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ChatCompletionRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_request_response_format.py b/src/together/generated/models/chat_completion_request_response_format.py new file mode 100644 index 00000000..69ab762f --- /dev/null +++ b/src/together/generated/models/chat_completion_request_response_format.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionRequestResponseFormat(BaseModel): + """ + An object specifying the format that the model must output. 
+ """ # noqa: E501 + + type: Optional[StrictStr] = Field( + default=None, description="The type of the response format." + ) + var_schema: Optional[Dict[str, StrictStr]] = Field( + default=None, description="The schema of the response format.", alias="schema" + ) + __properties: ClassVar[List[str]] = ["type", "schema"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionRequestResponseFormat from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionRequestResponseFormat from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + {"type": obj.get("type"), "schema": obj.get("schema")} + ) + return _obj diff --git a/src/together/generated/models/chat_completion_request_tool_choice.py b/src/together/generated/models/chat_completion_request_tool_choice.py new file mode 100644 index 00000000..d78632af --- /dev/null +++ b/src/together/generated/models/chat_completion_request_tool_choice.py @@ -0,0 +1,166 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.tool_choice import ToolChoice +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +CHATCOMPLETIONREQUESTTOOLCHOICE_ONE_OF_SCHEMAS = ["ToolChoice", "str"] + + +class ChatCompletionRequestToolChoice(BaseModel): + """ + Controls which (if any) function is called by the model. By default uses `auto`, which lets the model pick between generating a message or calling a function. 
+ """ + + # data type: str + oneof_schema_1_validator: Optional[StrictStr] = None + # data type: ToolChoice + oneof_schema_2_validator: Optional[ToolChoice] = None + actual_instance: Optional[Union[ToolChoice, str]] = None + one_of_schemas: Set[str] = {"ToolChoice", "str"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = ChatCompletionRequestToolChoice.model_construct() + error_messages = [] + match = 0 + # validate data type: str + try: + instance.oneof_schema_1_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: ToolChoice + if not isinstance(v, ToolChoice): + error_messages.append(f"Error! Input type `{type(v)}` is not `ToolChoice`") + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into str + try: + # validation + instance.oneof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_1_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into ToolChoice + try: + instance.actual_instance = ToolChoice.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], ToolChoice, str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_response.py b/src/together/generated/models/chat_completion_response.py new file mode 100644 index 00000000..1a023acb --- /dev/null +++ b/src/together/generated/models/chat_completion_response.py @@ -0,0 +1,136 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_choices_data_inner import ( + ChatCompletionChoicesDataInner, +) +from together.generated.models.usage_data import UsageData +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionResponse(BaseModel): + """ + ChatCompletionResponse + """ # noqa: E501 + + id: StrictStr + choices: List[ChatCompletionChoicesDataInner] + usage: Optional[UsageData] = None + created: StrictInt + model: StrictStr + object: StrictStr + __properties: ClassVar[List[str]] = [ + "id", + "choices", + "usage", + "created", + "model", + "object", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["chat.completion"]): + raise ValueError("must be one of enum values ('chat.completion')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in choices (list) + _items = [] + if self.choices: + for _item_choices in self.choices: + if _item_choices: + _items.append(_item_choices.to_dict()) + _dict["choices"] = _items + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "choices": ( + [ + ChatCompletionChoicesDataInner.from_dict(_item) + for _item in obj["choices"] + ] + if obj.get("choices") is not None + else None + ), + "usage": ( + UsageData.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None + ), + "created": obj.get("created"), + "model": obj.get("model"), + "object": obj.get("object"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_stream.py b/src/together/generated/models/chat_completion_stream.py new file mode 100644 index 00000000..7f3c93de --- /dev/null +++ b/src/together/generated/models/chat_completion_stream.py @@ -0,0 +1,169 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.chat_completion_event import ChatCompletionEvent +from together.generated.models.stream_sentinel import StreamSentinel +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +CHATCOMPLETIONSTREAM_ONE_OF_SCHEMAS = ["ChatCompletionEvent", "StreamSentinel"] + + +class ChatCompletionStream(BaseModel): + """ + ChatCompletionStream + """ + + # data type: ChatCompletionEvent + oneof_schema_1_validator: Optional[ChatCompletionEvent] = None + # data type: StreamSentinel + oneof_schema_2_validator: Optional[StreamSentinel] = None + actual_instance: Optional[Union[ChatCompletionEvent, StreamSentinel]] = None + one_of_schemas: Set[str] = {"ChatCompletionEvent", "StreamSentinel"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = ChatCompletionStream.model_construct() + error_messages = [] + match = 0 + # validate data type: ChatCompletionEvent + if not isinstance(v, ChatCompletionEvent): + error_messages.append( + f"Error! 
Input type `{type(v)}` is not `ChatCompletionEvent`" + ) + else: + match += 1 + # validate data type: StreamSentinel + if not isinstance(v, StreamSentinel): + error_messages.append( + f"Error! Input type `{type(v)}` is not `StreamSentinel`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into ChatCompletionEvent + try: + instance.actual_instance = ChatCompletionEvent.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into StreamSentinel + try: + instance.actual_instance = StreamSentinel.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[Union[Dict[str, Any], ChatCompletionEvent, StreamSentinel]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_system_message_param.py b/src/together/generated/models/chat_completion_system_message_param.py new file mode 100644 index 00000000..02c80038 --- /dev/null +++ b/src/together/generated/models/chat_completion_system_message_param.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionSystemMessageParam(BaseModel): + """ + ChatCompletionSystemMessageParam + """ # noqa: E501 + + content: StrictStr + role: StrictStr + name: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["content", "role", "name"] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["system"]): + raise ValueError("must be one of enum values ('system')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionSystemMessageParam from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionSystemMessageParam from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "content": obj.get("content"), + "role": obj.get("role"), + "name": obj.get("name"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_token.py b/src/together/generated/models/chat_completion_token.py new file mode 100644 index 00000000..60862fec --- /dev/null +++ b/src/together/generated/models/chat_completion_token.py @@ -0,0 +1,100 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, +) +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionToken(BaseModel): + """ + ChatCompletionToken + """ # noqa: E501 + + id: StrictInt + text: StrictStr + logprob: Union[StrictFloat, StrictInt] + special: StrictBool + __properties: ClassVar[List[str]] = ["id", "text", "logprob", "special"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionToken from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionToken from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "text": obj.get("text"), + "logprob": obj.get("logprob"), + "special": obj.get("special"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_tool.py b/src/together/generated/models/chat_completion_tool.py new file mode 100644 index 00000000..05d66c4d --- /dev/null +++ b/src/together/generated/models/chat_completion_tool.py @@ -0,0 +1,106 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.chat_completion_tool_function import ( + ChatCompletionToolFunction, +) +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionTool(BaseModel): + """ + ChatCompletionTool + """ # noqa: E501 + + type: StrictStr + function: ChatCompletionToolFunction + __properties: ClassVar[List[str]] = ["type", "function"] + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["function"]): + raise ValueError("must be one of enum values ('function')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionTool from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of function + if self.function: + _dict["function"] = self.function.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionTool from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "type": obj.get("type"), + "function": ( + ChatCompletionToolFunction.from_dict(obj["function"]) + if obj.get("function") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_tool_function.py b/src/together/generated/models/chat_completion_tool_function.py new file mode 100644 index 00000000..3ebaa385 --- /dev/null +++ b/src/together/generated/models/chat_completion_tool_function.py @@ -0,0 +1,91 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionToolFunction(BaseModel): + """ + ChatCompletionToolFunction + """ # noqa: E501 + + description: Optional[StrictStr] = None + name: StrictStr + parameters: Optional[Dict[str, Any]] = None + __properties: ClassVar[List[str]] = ["description", "name", "parameters"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionToolFunction from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionToolFunction from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "description": obj.get("description"), + "name": obj.get("name"), + "parameters": obj.get("parameters"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_tool_message_param.py b/src/together/generated/models/chat_completion_tool_message_param.py new file mode 100644 index 00000000..115b718b --- /dev/null +++ b/src/together/generated/models/chat_completion_tool_message_param.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionToolMessageParam(BaseModel): + """ + ChatCompletionToolMessageParam + """ # noqa: E501 + + role: StrictStr + content: StrictStr + tool_call_id: StrictStr + __properties: ClassVar[List[str]] = ["role", "content", "tool_call_id"] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["tool"]): + raise ValueError("must be one of enum values ('tool')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionToolMessageParam from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionToolMessageParam from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "role": obj.get("role"), + "content": obj.get("content"), + "tool_call_id": obj.get("tool_call_id"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_user_message_param.py b/src/together/generated/models/chat_completion_user_message_param.py new file mode 100644 index 00000000..e02a998c --- /dev/null +++ b/src/together/generated/models/chat_completion_user_message_param.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionUserMessageParam(BaseModel): + """ + ChatCompletionUserMessageParam + """ # noqa: E501 + + content: StrictStr + role: StrictStr + name: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["content", "role", "name"] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["user"]): + raise ValueError("must be one of enum values ('user')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionUserMessageParam from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionUserMessageParam from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "content": obj.get("content"), + "role": obj.get("role"), + "name": obj.get("name"), + } + ) + return _obj diff --git a/src/together/generated/models/completion_choice.py b/src/together/generated/models/completion_choice.py new file mode 100644 index 00000000..0b1ac0a1 --- /dev/null +++ b/src/together/generated/models/completion_choice.py @@ -0,0 +1,83 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionChoice(BaseModel): + """ + CompletionChoice + """ # noqa: E501 + + text: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["text"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionChoice from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionChoice from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"text": obj.get("text")}) + return _obj diff --git a/src/together/generated/models/completion_choices_data_inner.py b/src/together/generated/models/completion_choices_data_inner.py new file mode 100644 index 00000000..b8cb4e7e --- /dev/null +++ b/src/together/generated/models/completion_choices_data_inner.py @@ -0,0 +1,101 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.finish_reason import FinishReason +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionChoicesDataInner(BaseModel): + """ + CompletionChoicesDataInner + """ # noqa: E501 + + text: Optional[StrictStr] = None + seed: Optional[StrictInt] = None + finish_reason: Optional[FinishReason] = None + logprobs: Optional[Dict[str, Any]] = None + __properties: ClassVar[List[str]] = ["text", "seed", "finish_reason", "logprobs"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionChoicesDataInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of logprobs + if self.logprobs: + _dict["logprobs"] = self.logprobs.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionChoicesDataInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "text": obj.get("text"), + "seed": obj.get("seed"), + "finish_reason": obj.get("finish_reason"), + "logprobs": ( + LogprobsPart.from_dict(obj["logprobs"]) + if obj.get("logprobs") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/completion_chunk.py b/src/together/generated/models/completion_chunk.py new file mode 100644 index 00000000..7a0b16bd --- /dev/null +++ b/src/together/generated/models/completion_chunk.py @@ -0,0 +1,139 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.completion_choice import CompletionChoice +from together.generated.models.completion_chunk_usage import CompletionChunkUsage +from together.generated.models.completion_token import CompletionToken +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionChunk(BaseModel): + """ + CompletionChunk + """ # noqa: E501 + + id: StrictStr + token: CompletionToken + choices: List[CompletionChoice] + usage: CompletionChunkUsage + seed: Optional[StrictInt] = None + finish_reason: Any + __properties: ClassVar[List[str]] = [ + "id", + "token", + "choices", + "usage", + "seed", + "finish_reason", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionChunk from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of token + if self.token: + _dict["token"] = self.token.to_dict() + # override the default output from pydantic by calling `to_dict()` of each item in choices (list) + _items = [] + if self.choices: + for _item_choices in self.choices: + if _item_choices: + _items.append(_item_choices.to_dict()) + _dict["choices"] = _items + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + # override the default output from pydantic by calling `to_dict()` of finish_reason + if self.finish_reason: + _dict["finish_reason"] = self.finish_reason.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionChunk from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "token": ( + CompletionToken.from_dict(obj["token"]) + if obj.get("token") is not None + else None + ), + "choices": ( + [CompletionChoice.from_dict(_item) for _item in obj["choices"]] + if obj.get("choices") is not None + else None + ), + "usage": ( + CompletionChunkUsage.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None + ), + "seed": obj.get("seed"), + "finish_reason": ( + FinishReason.from_dict(obj["finish_reason"]) + if obj.get("finish_reason") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/completion_chunk_usage.py b/src/together/generated/models/completion_chunk_usage.py new file mode 100644 index 00000000..df7a48f6 --- /dev/null +++ b/src/together/generated/models/completion_chunk_usage.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + Together APIs + + The 
class CompletionChunkUsage(BaseModel):
    """Token accounting for a completion chunk: prompt, completion and total counts."""  # noqa: E501

    prompt_tokens: StrictInt
    completion_tokens: StrictInt
    total_tokens: StrictInt
    __properties: ClassVar[List[str]] = [
        "prompt_tokens",
        "completion_tokens",
        "total_tokens",
    ]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Pretty-printed, alias-keyed representation of the model."""
        dumped = self.model_dump(by_alias=True)
        return pprint.pformat(dumped)

    def to_json(self) -> str:
        """Serialize the model to a JSON string, honoring field aliases."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Build an instance from a JSON string."""
        parsed = json.loads(json_str)
        return cls.from_dict(parsed)

    def to_dict(self) -> Dict[str, Any]:
        """Alias-keyed dict form of the model.

        Unlike a plain ``model_dump(by_alias=True)``, fields whose value is
        ``None`` are dropped unless they are nullable fields that were set at
        model initialization.
        """
        omit: Set[str] = set()
        return self.model_dump(
            by_alias=True,
            exclude=omit,
            exclude_none=True,
        )

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Build an instance from a plain dict; ``None`` passes through."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return cls.model_validate(obj)
        payload = {
            key: obj.get(key)
            for key in ("prompt_tokens", "completion_tokens", "total_tokens")
        }
        return cls.model_validate(payload)
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.completion_chunk import CompletionChunk +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionEvent(BaseModel): + """ + CompletionEvent + """ # noqa: E501 + + data: CompletionChunk + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionEvent from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of data + if self.data: + _dict["data"] = self.data.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionEvent from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + CompletionChunk.from_dict(obj["data"]) + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/completion_request.py b/src/together/generated/models/completion_request.py new file mode 100644 index 00000000..065bef8b --- /dev/null +++ b/src/together/generated/models/completion_request.py @@ -0,0 +1,212 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, +) +from typing import Any, ClassVar, Dict, List, Optional, Union +from typing_extensions import Annotated +from together.generated.models.completion_request_model import CompletionRequestModel +from together.generated.models.completion_request_safety_model import ( + CompletionRequestSafetyModel, +) +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionRequest(BaseModel): + """ + CompletionRequest + """ # noqa: E501 + + prompt: StrictStr = Field( + description="A string providing context for the model to complete." + ) + model: CompletionRequestModel + max_tokens: Optional[StrictInt] = Field( + default=None, description="The maximum number of tokens to generate." + ) + stop: Optional[List[StrictStr]] = Field( + default=None, + description='A list of string sequences that will truncate (stop) inference text output. For example, "" will stop generation as soon as the model generates the given token.', + ) + temperature: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output.", + ) + top_p: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. 
This technique helps maintain diversity and generate more fluent and natural-sounding text.", + ) + top_k: Optional[StrictInt] = Field( + default=None, + description="An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.", + ) + repetition_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition.", + ) + stream: Optional[StrictBool] = Field( + default=None, + description="If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results.", + ) + logprobs: Optional[Annotated[int, Field(le=1, strict=True, ge=0)]] = Field( + default=None, + description="Determines the number of most likely tokens to return at each token position log probabilities to return.", + ) + echo: Optional[StrictBool] = Field( + default=None, + description="If true, the response will contain the prompt. 
Can be used with `logprobs` to return prompt logprobs.", + ) + n: Optional[Annotated[int, Field(le=128, strict=True, ge=1)]] = Field( + default=None, + description="The number of completions to generate for each prompt.", + ) + safety_model: Optional[CompletionRequestSafetyModel] = None + min_p: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number between 0 and 1 that can be used as an alternative to top-p and top-k.", + ) + presence_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics.", + ) + frequency_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned.", + ) + logit_bias: Optional[Dict[str, Union[StrictFloat, StrictInt]]] = Field( + default=None, + description="Adjusts the likelihood of specific tokens appearing in the generated output.", + ) + seed: Optional[StrictInt] = Field( + default=None, description="Seed value for reproducibility." 
+ ) + __properties: ClassVar[List[str]] = [ + "prompt", + "model", + "max_tokens", + "stop", + "temperature", + "top_p", + "top_k", + "repetition_penalty", + "stream", + "logprobs", + "echo", + "n", + "safety_model", + "min_p", + "presence_penalty", + "frequency_penalty", + "logit_bias", + "seed", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of model + if self.model: + _dict["model"] = self.model.to_dict() + # override the default output from pydantic by calling `to_dict()` of safety_model + if self.safety_model: + _dict["safety_model"] = self.safety_model.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "prompt": obj.get("prompt"), + "model": ( + CompletionRequestModel.from_dict(obj["model"]) + if obj.get("model") is not None + else None + ), + "max_tokens": obj.get("max_tokens"), + "stop": obj.get("stop"), + "temperature": obj.get("temperature"), + "top_p": obj.get("top_p"), + "top_k": obj.get("top_k"), + "repetition_penalty": obj.get("repetition_penalty"), + "stream": obj.get("stream"), + "logprobs": obj.get("logprobs"), + "echo": obj.get("echo"), + "n": obj.get("n"), + "safety_model": ( + CompletionRequestSafetyModel.from_dict(obj["safety_model"]) + if obj.get("safety_model") is not None + else None + ), + "min_p": obj.get("min_p"), + "presence_penalty": obj.get("presence_penalty"), + "frequency_penalty": obj.get("frequency_penalty"), + "logit_bias": obj.get("logit_bias"), + "seed": obj.get("seed"), + } + ) + return _obj diff --git a/src/together/generated/models/completion_request_model.py b/src/together/generated/models/completion_request_model.py new file mode 100644 index 00000000..38d25705 --- /dev/null +++ b/src/together/generated/models/completion_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +COMPLETIONREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class CompletionRequestModel(BaseModel): + """ + The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = CompletionRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in CompletionRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into CompletionRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/completion_request_safety_model.py b/src/together/generated/models/completion_request_safety_model.py new file mode 100644 index 00000000..981ee1e4 --- /dev/null +++ b/src/together/generated/models/completion_request_safety_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +COMPLETIONREQUESTSAFETYMODEL_ANY_OF_SCHEMAS = ["str"] + + +class CompletionRequestSafetyModel(BaseModel): + """ + The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = CompletionRequestSafetyModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in CompletionRequestSafetyModel with anyOf schemas: str. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into CompletionRequestSafetyModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/completion_response.py b/src/together/generated/models/completion_response.py new file mode 100644 index 00000000..ac858b6d --- /dev/null +++ b/src/together/generated/models/completion_response.py @@ -0,0 +1,151 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.completion_choices_data_inner import ( + CompletionChoicesDataInner, +) +from together.generated.models.prompt_part_inner import PromptPartInner +from together.generated.models.usage_data import UsageData +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionResponse(BaseModel): + """ + CompletionResponse + """ # noqa: E501 + + id: StrictStr + choices: List[CompletionChoicesDataInner] + prompt: Optional[List[PromptPartInner]] = None + usage: UsageData + created: StrictInt + model: StrictStr + object: StrictStr + __properties: ClassVar[List[str]] = [ + "id", + "choices", + "prompt", + "usage", + "created", + "model", + "object", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["text_completion"]): + raise ValueError("must be one of enum values ('text_completion')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in choices (list) + _items = [] + if self.choices: + for _item_choices in self.choices: + if _item_choices: + _items.append(_item_choices.to_dict()) + _dict["choices"] = _items + # override the default output from pydantic by calling `to_dict()` of each item in prompt (list) + _items = [] + if self.prompt: + for _item_prompt in self.prompt: + if _item_prompt: + _items.append(_item_prompt.to_dict()) + _dict["prompt"] = _items + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "choices": ( + [ + CompletionChoicesDataInner.from_dict(_item) + for _item in obj["choices"] + ] + if obj.get("choices") is not None + else None + ), + "prompt": ( + [PromptPartInner.from_dict(_item) for _item in obj["prompt"]] + if obj.get("prompt") is not None + else None + ), + "usage": ( + UsageData.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None + ), + "created": obj.get("created"), + "model": obj.get("model"), + "object": obj.get("object"), + } + ) + return _obj diff --git a/src/together/generated/models/completion_stream.py b/src/together/generated/models/completion_stream.py new file mode 100644 
index 00000000..12e58c46 --- /dev/null +++ b/src/together/generated/models/completion_stream.py @@ -0,0 +1,169 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.completion_event import CompletionEvent +from together.generated.models.stream_sentinel import StreamSentinel +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +COMPLETIONSTREAM_ONE_OF_SCHEMAS = ["CompletionEvent", "StreamSentinel"] + + +class CompletionStream(BaseModel): + """ + CompletionStream + """ + + # data type: CompletionEvent + oneof_schema_1_validator: Optional[CompletionEvent] = None + # data type: StreamSentinel + oneof_schema_2_validator: Optional[StreamSentinel] = None + actual_instance: Optional[Union[CompletionEvent, StreamSentinel]] = None + one_of_schemas: Set[str] = {"CompletionEvent", "StreamSentinel"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = CompletionStream.model_construct() + error_messages = [] + match = 0 + # validate data type: CompletionEvent + if not isinstance(v, CompletionEvent): + error_messages.append( + f"Error! Input type `{type(v)}` is not `CompletionEvent`" + ) + else: + match += 1 + # validate data type: StreamSentinel + if not isinstance(v, StreamSentinel): + error_messages.append( + f"Error! Input type `{type(v)}` is not `StreamSentinel`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into CompletionEvent + try: + instance.actual_instance = CompletionEvent.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into StreamSentinel + try: + instance.actual_instance = StreamSentinel.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[Union[Dict[str, Any], CompletionEvent, StreamSentinel]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/completion_token.py b/src/together/generated/models/completion_token.py new file mode 100644 index 00000000..de9a208d --- /dev/null +++ b/src/together/generated/models/completion_token.py @@ -0,0 +1,100 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, +) +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionToken(BaseModel): + """ + CompletionToken + """ # noqa: E501 + + id: StrictInt + text: StrictStr + logprob: Union[StrictFloat, StrictInt] + special: StrictBool + __properties: ClassVar[List[str]] = ["id", "text", "logprob", "special"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionToken from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionToken from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "text": obj.get("text"), + "logprob": obj.get("logprob"), + "special": obj.get("special"), + } + ) + return _obj diff --git a/src/together/generated/models/create_endpoint_request.py b/src/together/generated/models/create_endpoint_request.py new file mode 100644 index 00000000..70fc97b4 --- /dev/null +++ b/src/together/generated/models/create_endpoint_request.py @@ -0,0 +1,156 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.autoscaling import Autoscaling +from typing import Optional, Set +from typing_extensions import Self + + +class CreateEndpointRequest(BaseModel): + """ + CreateEndpointRequest + """ # noqa: E501 + + display_name: Optional[StrictStr] = Field( + default=None, description="A human-readable name for the endpoint" + ) + model: StrictStr = Field(description="The model to deploy on this endpoint") + hardware: StrictStr = Field( + description="The hardware configuration to use for this endpoint" + ) + autoscaling: Autoscaling = Field( + description="Configuration for automatic scaling of the endpoint" + ) + disable_prompt_cache: Optional[StrictBool] = Field( + default=False, + description="Whether to disable the prompt cache for this endpoint", + ) + disable_speculative_decoding: Optional[StrictBool] = Field( + default=False, + description="Whether to disable speculative decoding for this endpoint", + ) + state: Optional[StrictStr] = Field( + default="STARTED", description="The desired state of the endpoint" + ) + __properties: ClassVar[List[str]] = [ + "display_name", + "model", + "hardware", + "autoscaling", + "disable_prompt_cache", + "disable_speculative_decoding", + "state", + ] + + @field_validator("state") + def state_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["STARTED", "STOPPED"]): + raise ValueError("must be one of enum values ('STARTED', 'STOPPED')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return 
pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CreateEndpointRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of autoscaling + if self.autoscaling: + _dict["autoscaling"] = self.autoscaling.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CreateEndpointRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "display_name": obj.get("display_name"), + "model": obj.get("model"), + "hardware": obj.get("hardware"), + "autoscaling": ( + Autoscaling.from_dict(obj["autoscaling"]) + if obj.get("autoscaling") is not None + else None + ), + "disable_prompt_cache": ( + obj.get("disable_prompt_cache") + if obj.get("disable_prompt_cache") is not None + else False + ), + "disable_speculative_decoding": ( + obj.get("disable_speculative_decoding") + if obj.get("disable_speculative_decoding") is not None + else False + ), + "state": ( + obj.get("state") if obj.get("state") is not None else 
"STARTED" + ), + } + ) + return _obj diff --git a/src/together/generated/models/dedicated_endpoint.py b/src/together/generated/models/dedicated_endpoint.py new file mode 100644 index 00000000..87ef87c5 --- /dev/null +++ b/src/together/generated/models/dedicated_endpoint.py @@ -0,0 +1,157 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from datetime import datetime +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.autoscaling import Autoscaling +from typing import Optional, Set +from typing_extensions import Self + + +class DedicatedEndpoint(BaseModel): + """ + Details about a dedicated endpoint deployment + """ # noqa: E501 + + object: StrictStr = Field(description="The type of object") + id: StrictStr = Field(description="Unique identifier for the endpoint") + name: StrictStr = Field(description="System name for the endpoint") + display_name: StrictStr = Field(description="Human-readable name for the endpoint") + model: StrictStr = Field(description="The model deployed on this endpoint") + hardware: StrictStr = Field( + description="The hardware configuration used for this endpoint" + ) + type: StrictStr = Field(description="The type of endpoint") + owner: StrictStr = Field(description="The owner of this endpoint") + state: StrictStr = Field(description="Current state of the endpoint") + autoscaling: Autoscaling = Field( + description="Configuration for automatic scaling of the endpoint" + ) + created_at: datetime = Field(description="Timestamp when the endpoint was created") + __properties: ClassVar[List[str]] = [ + "object", 
+ "id", + "name", + "display_name", + "model", + "hardware", + "type", + "owner", + "state", + "autoscaling", + "created_at", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["endpoint"]): + raise ValueError("must be one of enum values ('endpoint')") + return value + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["dedicated"]): + raise ValueError("must be one of enum values ('dedicated')") + return value + + @field_validator("state") + def state_validate_enum(cls, value): + """Validates the enum""" + if value not in set( + ["PENDING", "STARTING", "STARTED", "STOPPING", "STOPPED", "ERROR"] + ): + raise ValueError( + "must be one of enum values ('PENDING', 'STARTING', 'STARTED', 'STOPPING', 'STOPPED', 'ERROR')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of DedicatedEndpoint from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of autoscaling + if self.autoscaling: + _dict["autoscaling"] = self.autoscaling.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of DedicatedEndpoint from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "id": obj.get("id"), + "name": obj.get("name"), + "display_name": obj.get("display_name"), + "model": obj.get("model"), + "hardware": obj.get("hardware"), + "type": obj.get("type"), + "owner": obj.get("owner"), + "state": obj.get("state"), + "autoscaling": ( + Autoscaling.from_dict(obj["autoscaling"]) + if obj.get("autoscaling") is not None + else None + ), + "created_at": obj.get("created_at"), + } + ) + return _obj diff --git a/src/together/generated/models/embeddings_request.py b/src/together/generated/models/embeddings_request.py new file mode 100644 index 00000000..bad5473c --- /dev/null +++ b/src/together/generated/models/embeddings_request.py @@ -0,0 +1,105 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.embeddings_request_input import EmbeddingsRequestInput +from together.generated.models.embeddings_request_model import EmbeddingsRequestModel +from typing import Optional, Set +from typing_extensions import Self + + +class EmbeddingsRequest(BaseModel): + """ + EmbeddingsRequest + """ # noqa: E501 + + model: EmbeddingsRequestModel + input: EmbeddingsRequestInput + __properties: ClassVar[List[str]] = ["model", "input"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of EmbeddingsRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of model + if self.model: + _dict["model"] = self.model.to_dict() + # override the default output from pydantic by calling `to_dict()` of input + if self.input: + _dict["input"] = self.input.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of EmbeddingsRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "model": ( + EmbeddingsRequestModel.from_dict(obj["model"]) + if obj.get("model") is not None + else None + ), + "input": ( + EmbeddingsRequestInput.from_dict(obj["input"]) + if obj.get("input") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/embeddings_request_input.py b/src/together/generated/models/embeddings_request_input.py new file mode 100644 index 00000000..3a4ec919 --- /dev/null +++ b/src/together/generated/models/embeddings_request_input.py @@ -0,0 +1,171 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +EMBEDDINGSREQUESTINPUT_ONE_OF_SCHEMAS = ["List[str]", "str"] + + +class EmbeddingsRequestInput(BaseModel): + """ + EmbeddingsRequestInput + """ + + # data type: str + oneof_schema_1_validator: Optional[StrictStr] = Field( + default=None, description="A string providing the text for the model to embed." + ) + # data type: List[str] + oneof_schema_2_validator: Optional[List[StrictStr]] = None + actual_instance: Optional[Union[List[str], str]] = None + one_of_schemas: Set[str] = {"List[str]", "str"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = EmbeddingsRequestInput.model_construct() + error_messages = [] + match = 0 + # validate data type: str + try: + instance.oneof_schema_1_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: List[str] + try: + instance.oneof_schema_2_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into str + try: + # validation + instance.oneof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_1_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into List[str] + try: + # validation + instance.oneof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_2_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 
match + raise ValueError( + "Multiple matches found when deserializing the JSON string into EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], List[str], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/embeddings_request_model.py b/src/together/generated/models/embeddings_request_model.py new file mode 100644 index 00000000..5a40eb92 --- /dev/null +++ b/src/together/generated/models/embeddings_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +EMBEDDINGSREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class EmbeddingsRequestModel(BaseModel): + """ + The name of the embedding model to use.

[See all of Together AI's embedding models](https://docs.together.ai/docs/serverless-models#embedding-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = EmbeddingsRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in EmbeddingsRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into EmbeddingsRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/embeddings_response.py b/src/together/generated/models/embeddings_response.py new file mode 100644 index 00000000..cdf15928 --- /dev/null +++ b/src/together/generated/models/embeddings_response.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.embeddings_response_data_inner import ( + EmbeddingsResponseDataInner, +) +from typing import Optional, Set +from typing_extensions import Self + + +class EmbeddingsResponse(BaseModel): + """ + EmbeddingsResponse + """ # noqa: E501 + + object: StrictStr + model: StrictStr + data: List[EmbeddingsResponseDataInner] + __properties: ClassVar[List[str]] = ["object", "model", "data"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["list"]): + raise ValueError("must be one of enum values ('list')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of EmbeddingsResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of EmbeddingsResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "model": obj.get("model"), + "data": ( + [ + EmbeddingsResponseDataInner.from_dict(_item) + for _item in obj["data"] + ] + if obj.get("data") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/embeddings_response_data_inner.py b/src/together/generated/models/embeddings_response_data_inner.py new file mode 100644 index 00000000..68e816ac --- /dev/null +++ b/src/together/generated/models/embeddings_response_data_inner.py @@ -0,0 +1,105 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + StrictFloat, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class EmbeddingsResponseDataInner(BaseModel): + """ + EmbeddingsResponseDataInner + """ # noqa: E501 + + object: StrictStr + embedding: List[Union[StrictFloat, StrictInt]] + index: StrictInt + __properties: ClassVar[List[str]] = ["object", "embedding", "index"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["embedding"]): + raise ValueError("must be one of enum values ('embedding')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of EmbeddingsResponseDataInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of EmbeddingsResponseDataInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "embedding": obj.get("embedding"), + "index": obj.get("index"), + } + ) + return _obj diff --git a/src/together/generated/models/endpoint_pricing.py b/src/together/generated/models/endpoint_pricing.py new file mode 100644 index 00000000..847be535 --- /dev/null +++ b/src/together/generated/models/endpoint_pricing.py @@ -0,0 +1,85 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class EndpointPricing(BaseModel): + """ + Pricing details for using an endpoint + """ # noqa: E501 + + cents_per_minute: Union[StrictFloat, StrictInt] = Field( + description="Cost per minute of endpoint uptime in cents" + ) + __properties: ClassVar[List[str]] = ["cents_per_minute"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of EndpointPricing from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of EndpointPricing from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"cents_per_minute": obj.get("cents_per_minute")}) + return _obj diff --git a/src/together/generated/models/error_data.py b/src/together/generated/models/error_data.py new file mode 100644 index 00000000..0dd9d3ec --- /dev/null +++ b/src/together/generated/models/error_data.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.error_data_error import ErrorDataError +from typing import Optional, Set +from typing_extensions import Self + + +class ErrorData(BaseModel): + """ + ErrorData + """ # noqa: E501 + + error: ErrorDataError + __properties: ClassVar[List[str]] = ["error"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def 
from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ErrorData from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of error + if self.error: + _dict["error"] = self.error.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ErrorData from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "error": ( + ErrorDataError.from_dict(obj["error"]) + if obj.get("error") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/error_data_error.py b/src/together/generated/models/error_data_error.py new file mode 100644 index 00000000..f43533ea --- /dev/null +++ b/src/together/generated/models/error_data_error.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ErrorDataError(BaseModel): + """ + ErrorDataError + """ # noqa: E501 + + message: StrictStr + type: StrictStr + param: Optional[StrictStr] = None + code: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["message", "type", "param", "code"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ErrorDataError from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ErrorDataError from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "message": obj.get("message"), + "type": obj.get("type"), + "param": obj.get("param"), + "code": obj.get("code"), + } + ) + return _obj diff --git a/src/together/generated/models/file_delete_response.py b/src/together/generated/models/file_delete_response.py new file mode 100644 index 00000000..07ad867e --- /dev/null +++ b/src/together/generated/models/file_delete_response.py @@ -0,0 +1,84 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictBool, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class FileDeleteResponse(BaseModel): + """ + FileDeleteResponse + """ # noqa: E501 + + id: Optional[StrictStr] = None + deleted: Optional[StrictBool] = None + __properties: ClassVar[List[str]] = ["id", "deleted"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FileDeleteResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FileDeleteResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"id": obj.get("id"), "deleted": obj.get("deleted")}) + return _obj diff --git a/src/together/generated/models/file_list.py b/src/together/generated/models/file_list.py new file mode 100644 index 00000000..ca596491 --- /dev/null +++ b/src/together/generated/models/file_list.py @@ -0,0 +1,99 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.file_response import FileResponse +from typing import Optional, Set +from typing_extensions import Self + + +class FileList(BaseModel): + """ + FileList + """ # noqa: E501 + + data: List[FileResponse] + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def 
from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FileList from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FileList from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + [FileResponse.from_dict(_item) for _item in obj["data"]] + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/file_object.py b/src/together/generated/models/file_object.py new file mode 100644 index 00000000..55a8b422 --- /dev/null +++ b/src/together/generated/models/file_object.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class FileObject(BaseModel): + """ + FileObject + """ # noqa: E501 + + object: Optional[StrictStr] = None + id: Optional[StrictStr] = None + filename: Optional[StrictStr] = None + size: Optional[StrictInt] = None + __properties: ClassVar[List[str]] = ["object", "id", "filename", "size"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FileObject from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FileObject from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "id": obj.get("id"), + "filename": obj.get("filename"), + "size": obj.get("size"), + } + ) + return _obj diff --git a/src/together/generated/models/file_response.py b/src/together/generated/models/file_response.py new file mode 100644 index 00000000..7fe105e4 --- /dev/null +++ b/src/together/generated/models/file_response.py @@ -0,0 +1,135 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class FileResponse(BaseModel): + """ + FileResponse + """ # noqa: E501 + + id: StrictStr + object: StrictStr + created_at: StrictInt + filename: StrictStr + bytes: StrictInt + purpose: StrictStr + processed: StrictBool = Field(alias="Processed") + file_type: StrictStr = Field(alias="FileType") + line_count: StrictInt = Field(alias="LineCount") + __properties: ClassVar[List[str]] = [ + "id", + "object", + "created_at", + "filename", + "bytes", + "purpose", + "Processed", + "FileType", + "LineCount", + ] + + @field_validator("purpose") + def purpose_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["fine-tune"]): + raise ValueError("must be one of enum values ('fine-tune')") + return value + + @field_validator("file_type") + def file_type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["jsonl", "parquet"]): + raise ValueError("must be one of enum values ('jsonl', 'parquet')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FileResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) 
-> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FileResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "object": obj.get("object"), + "created_at": obj.get("created_at"), + "filename": obj.get("filename"), + "bytes": obj.get("bytes"), + "purpose": obj.get("purpose"), + "Processed": obj.get("Processed"), + "FileType": obj.get("FileType"), + "LineCount": obj.get("LineCount"), + } + ) + return _obj diff --git a/src/together/generated/models/fine_tune_event.py b/src/together/generated/models/fine_tune_event.py new file mode 100644 index 00000000..638272ab --- /dev/null +++ b/src/together/generated/models/fine_tune_event.py @@ -0,0 +1,137 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.finetune_event_levels import FinetuneEventLevels +from together.generated.models.finetune_event_type import FinetuneEventType +from typing import Optional, Set +from typing_extensions import Self + + +class FineTuneEvent(BaseModel): + """ + FineTuneEvent + """ # noqa: E501 + + object: StrictStr + created_at: StrictStr + level: Optional[FinetuneEventLevels] = None + message: StrictStr + type: FinetuneEventType + param_count: StrictInt + token_count: StrictInt + total_steps: StrictInt + wandb_url: StrictStr + step: StrictInt + checkpoint_path: StrictStr + model_path: StrictStr + training_offset: StrictInt + hash: StrictStr + __properties: ClassVar[List[str]] = [ + "object", + "created_at", + "level", + "message", + "type", + "param_count", + "token_count", + "total_steps", + "wandb_url", + "step", + "checkpoint_path", + "model_path", + "training_offset", + "hash", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["fine-tune-event"]): + raise ValueError("must be one of enum values ('fine-tune-event')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FineTuneEvent from a JSON 
string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FineTuneEvent from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "created_at": obj.get("created_at"), + "level": obj.get("level"), + "message": obj.get("message"), + "type": obj.get("type"), + "param_count": obj.get("param_count"), + "token_count": obj.get("token_count"), + "total_steps": obj.get("total_steps"), + "wandb_url": obj.get("wandb_url"), + "step": obj.get("step"), + "checkpoint_path": obj.get("checkpoint_path"), + "model_path": obj.get("model_path"), + "training_offset": obj.get("training_offset"), + "hash": obj.get("hash"), + } + ) + return _obj diff --git a/src/together/generated/models/fine_tunes_post_request.py b/src/together/generated/models/fine_tunes_post_request.py new file mode 100644 index 00000000..215ca080 --- /dev/null +++ b/src/together/generated/models/fine_tunes_post_request.py @@ -0,0 +1,233 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional, Union +from together.generated.models.fine_tunes_post_request_train_on_inputs import ( + FineTunesPostRequestTrainOnInputs, +) +from together.generated.models.fine_tunes_post_request_training_type import ( + FineTunesPostRequestTrainingType, +) +from typing import Optional, Set +from typing_extensions import Self + + +class FineTunesPostRequest(BaseModel): + """ + FineTunesPostRequest + """ # noqa: E501 + + training_file: StrictStr = Field( + description="File-ID of a training file uploaded to the Together API" + ) + validation_file: Optional[StrictStr] = Field( + default=None, + description="File-ID of a validation file uploaded to the Together API", + ) + model: StrictStr = Field( + description="Name of the base model to run fine-tune job on" + ) + n_epochs: Optional[StrictInt] = Field( + default=1, description="Number of epochs for fine-tuning" + ) + n_checkpoints: Optional[StrictInt] = Field( + default=1, description="Number of checkpoints to save during fine-tuning" + ) + n_evals: Optional[StrictInt] = Field( + default=0, + description="Number of evaluations to be run on a given validation set during training", + ) + batch_size: Optional[StrictInt] = Field( + default=32, description="Batch size for fine-tuning" + ) + learning_rate: Optional[Union[StrictFloat, StrictInt]] = Field( + default=0.000010, description="Learning rate multiplier to use for training" + ) + lr_scheduler: Optional[Dict[str, Any]] = None + warmup_ratio: Optional[Union[StrictFloat, StrictInt]] = Field( + default=0.0, + description="The percent of steps at the start of training to linearly increase the learning rate.", + ) + max_grad_norm: Optional[Union[StrictFloat, StrictInt]] = Field( + default=1.0, + description="Max gradient norm to be 
used for gradient clipping. Set to 0 to disable.", + ) + weight_decay: Optional[Union[StrictFloat, StrictInt]] = Field( + default=0.0, description="Weight decay" + ) + suffix: Optional[StrictStr] = Field( + default=None, + description="Suffix that will be added to your fine-tuned model name", + ) + wandb_api_key: Optional[StrictStr] = Field( + default=None, description="API key for Weights & Biases integration" + ) + wandb_base_url: Optional[StrictStr] = Field( + default=None, + description="The base URL of a dedicated Weights & Biases instance.", + ) + wandb_project_name: Optional[StrictStr] = Field( + default=None, + description="The Weights & Biases project for your run. If not specified, will use `together` as the project name.", + ) + wandb_name: Optional[StrictStr] = Field( + default=None, description="The Weights & Biases name for your run." + ) + train_on_inputs: Optional[FineTunesPostRequestTrainOnInputs] = False + training_type: Optional[FineTunesPostRequestTrainingType] = None + __properties: ClassVar[List[str]] = [ + "training_file", + "validation_file", + "model", + "n_epochs", + "n_checkpoints", + "n_evals", + "batch_size", + "learning_rate", + "lr_scheduler", + "warmup_ratio", + "max_grad_norm", + "weight_decay", + "suffix", + "wandb_api_key", + "wandb_base_url", + "wandb_project_name", + "wandb_name", + "train_on_inputs", + "training_type", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FineTunesPostRequest from a JSON 
string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of lr_scheduler + if self.lr_scheduler: + _dict["lr_scheduler"] = self.lr_scheduler.to_dict() + # override the default output from pydantic by calling `to_dict()` of train_on_inputs + if self.train_on_inputs: + _dict["train_on_inputs"] = self.train_on_inputs.to_dict() + # override the default output from pydantic by calling `to_dict()` of training_type + if self.training_type: + _dict["training_type"] = self.training_type.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FineTunesPostRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "training_file": obj.get("training_file"), + "validation_file": obj.get("validation_file"), + "model": obj.get("model"), + "n_epochs": ( + obj.get("n_epochs") if obj.get("n_epochs") is not None else 1 + ), + "n_checkpoints": ( + obj.get("n_checkpoints") + if obj.get("n_checkpoints") is not None + else 1 + ), + "n_evals": obj.get("n_evals") if obj.get("n_evals") is not None else 0, + "batch_size": ( + obj.get("batch_size") if obj.get("batch_size") is not None else 32 + ), + "learning_rate": ( + obj.get("learning_rate") + if obj.get("learning_rate") is not None + else 0.000010 + ), + "lr_scheduler": ( + 
LRScheduler.from_dict(obj["lr_scheduler"]) + if obj.get("lr_scheduler") is not None + else None + ), + "warmup_ratio": ( + obj.get("warmup_ratio") + if obj.get("warmup_ratio") is not None + else 0.0 + ), + "max_grad_norm": ( + obj.get("max_grad_norm") + if obj.get("max_grad_norm") is not None + else 1.0 + ), + "weight_decay": ( + obj.get("weight_decay") + if obj.get("weight_decay") is not None + else 0.0 + ), + "suffix": obj.get("suffix"), + "wandb_api_key": obj.get("wandb_api_key"), + "wandb_base_url": obj.get("wandb_base_url"), + "wandb_project_name": obj.get("wandb_project_name"), + "wandb_name": obj.get("wandb_name"), + "train_on_inputs": ( + FineTunesPostRequestTrainOnInputs.from_dict(obj["train_on_inputs"]) + if obj.get("train_on_inputs") is not None + else None + ), + "training_type": ( + FineTunesPostRequestTrainingType.from_dict(obj["training_type"]) + if obj.get("training_type") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/fine_tunes_post_request_train_on_inputs.py b/src/together/generated/models/fine_tunes_post_request_train_on_inputs.py new file mode 100644 index 00000000..4c5e7c3c --- /dev/null +++ b/src/together/generated/models/fine_tunes_post_request_train_on_inputs.py @@ -0,0 +1,170 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +FINETUNESPOSTREQUESTTRAINONINPUTS_ONE_OF_SCHEMAS = ["bool", "str"] + + +class FineTunesPostRequestTrainOnInputs(BaseModel): + """ + Whether to mask the user messages in conversational data or prompts in instruction data. + """ + + # data type: bool + oneof_schema_1_validator: Optional[StrictBool] = None + # data type: str + oneof_schema_2_validator: Optional[StrictStr] = None + actual_instance: Optional[Union[bool, str]] = None + one_of_schemas: Set[str] = {"bool", "str"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = FineTunesPostRequestTrainOnInputs.model_construct() + error_messages = [] + match = 0 + # validate data type: bool + try: + instance.oneof_schema_1_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.oneof_schema_2_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into bool + try: + # validation + instance.oneof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_1_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.oneof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_2_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], bool, str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/fine_tunes_post_request_training_type.py b/src/together/generated/models/fine_tunes_post_request_training_type.py new file mode 100644 index 00000000..8d4b6906 --- /dev/null +++ b/src/together/generated/models/fine_tunes_post_request_training_type.py @@ -0,0 +1,172 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.full_training_type import FullTrainingType +from together.generated.models.lo_ra_training_type import LoRATrainingType +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +FINETUNESPOSTREQUESTTRAININGTYPE_ONE_OF_SCHEMAS = [ + "FullTrainingType", + "LoRATrainingType", +] + + +class FineTunesPostRequestTrainingType(BaseModel): + """ + FineTunesPostRequestTrainingType + """ + + # data type: FullTrainingType + oneof_schema_1_validator: Optional[FullTrainingType] = None + # data type: LoRATrainingType + oneof_schema_2_validator: Optional[LoRATrainingType] = None + actual_instance: Optional[Union[FullTrainingType, LoRATrainingType]] = None + one_of_schemas: Set[str] = {"FullTrainingType", "LoRATrainingType"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = FineTunesPostRequestTrainingType.model_construct() + error_messages = [] + match = 0 + # validate data type: FullTrainingType + if not isinstance(v, FullTrainingType): + error_messages.append( + f"Error! 
Input type `{type(v)}` is not `FullTrainingType`" + ) + else: + match += 1 + # validate data type: LoRATrainingType + if not isinstance(v, LoRATrainingType): + error_messages.append( + f"Error! Input type `{type(v)}` is not `LoRATrainingType`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into FullTrainingType + try: + instance.actual_instance = FullTrainingType.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into LoRATrainingType + try: + instance.actual_instance = LoRATrainingType.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[Union[Dict[str, Any], FullTrainingType, LoRATrainingType]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/finetune_download_result.py b/src/together/generated/models/finetune_download_result.py new file mode 100644 index 00000000..ddebbc70 --- /dev/null +++ b/src/together/generated/models/finetune_download_result.py @@ -0,0 +1,116 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class FinetuneDownloadResult(BaseModel): + """ + FinetuneDownloadResult + """ # noqa: E501 + + object: Optional[StrictStr] = None + id: Optional[StrictStr] = None + checkpoint_step: Optional[StrictInt] = None + filename: Optional[StrictStr] = None + size: Optional[StrictInt] = None + __properties: ClassVar[List[str]] = [ + "object", + "id", + "checkpoint_step", + "filename", + "size", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["local"]): + raise ValueError("must be one of enum values ('local')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FinetuneDownloadResult from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # set to None if object (nullable) is None + # and model_fields_set contains the field + if self.object is None and "object" in self.model_fields_set: + _dict["object"] = None + + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FinetuneDownloadResult from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "id": obj.get("id"), + "checkpoint_step": obj.get("checkpoint_step"), + "filename": obj.get("filename"), + "size": obj.get("size"), + } + ) + return _obj diff --git a/src/together/generated/models/finetune_event_levels.py b/src/together/generated/models/finetune_event_levels.py new file mode 100644 index 00000000..ee263088 --- /dev/null +++ b/src/together/generated/models/finetune_event_levels.py @@ -0,0 +1,39 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +from enum import Enum +from typing_extensions import Self + + +class FinetuneEventLevels(str, Enum): + """ + FinetuneEventLevels + """ + + """ + allowed enum values + """ + INFO = "info" + WARNING = "warning" + ERROR = "error" + LEGACY_INFO = "legacy_info" + LEGACY_IWARNING = "legacy_iwarning" + LEGACY_IERROR = "legacy_ierror" + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Create an instance of FinetuneEventLevels from a JSON string""" + return cls(json.loads(json_str)) diff --git a/src/together/generated/models/finetune_event_type.py b/src/together/generated/models/finetune_event_type.py new file mode 100644 index 00000000..8f65293b --- /dev/null +++ b/src/together/generated/models/finetune_event_type.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +from enum import Enum +from typing_extensions import Self + + +class FinetuneEventType(str, Enum): + """ + FinetuneEventType + """ + + """ + allowed enum values + """ + JOB_PENDING = "job_pending" + JOB_START = "job_start" + JOB_STOPPED = "job_stopped" + MODEL_DOWNLOADING = "model_downloading" + MODEL_DOWNLOAD_COMPLETE = "model_download_complete" + TRAINING_DATA_DOWNLOADING = "training_data_downloading" + TRAINING_DATA_DOWNLOAD_COMPLETE = "training_data_download_complete" + VALIDATION_DATA_DOWNLOADING = "validation_data_downloading" + VALIDATION_DATA_DOWNLOAD_COMPLETE = "validation_data_download_complete" + WANDB_INIT = "wandb_init" + TRAINING_START = "training_start" + CHECKPOINT_SAVE = "checkpoint_save" + BILLING_LIMIT = "billing_limit" + EPOCH_COMPLETE = "epoch_complete" + TRAINING_COMPLETE = "training_complete" + MODEL_COMPRESSING = "model_compressing" + MODEL_COMPRESSION_COMPLETE = "model_compression_complete" + MODEL_UPLOADING = "model_uploading" + MODEL_UPLOAD_COMPLETE = "model_upload_complete" + JOB_COMPLETE = "job_complete" + JOB_ERROR = "job_error" + CANCEL_REQUESTED = "cancel_requested" + JOB_RESTARTED = "job_restarted" + REFUND = "refund" + WARNING = "warning" + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Create an instance of FinetuneEventType from a JSON string""" + return cls(json.loads(json_str)) diff --git a/src/together/generated/models/finetune_job_status.py b/src/together/generated/models/finetune_job_status.py new file mode 100644 index 00000000..97b5dd1c --- /dev/null +++ b/src/together/generated/models/finetune_job_status.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +from enum import Enum +from typing_extensions import Self + + +class FinetuneJobStatus(str, Enum): + """ + FinetuneJobStatus + """ + + """ + allowed enum values + """ + PENDING = "pending" + QUEUED = "queued" + RUNNING = "running" + COMPRESSING = "compressing" + UPLOADING = "uploading" + CANCEL_REQUESTED = "cancel_requested" + CANCELLED = "cancelled" + ERROR = "error" + COMPLETED = "completed" + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Create an instance of FinetuneJobStatus from a JSON string""" + return cls(json.loads(json_str)) diff --git a/src/together/generated/models/finetune_list.py b/src/together/generated/models/finetune_list.py new file mode 100644 index 00000000..2a20ba4c --- /dev/null +++ b/src/together/generated/models/finetune_list.py @@ -0,0 +1,99 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.finetune_response import FinetuneResponse +from typing import Optional, Set +from typing_extensions import Self + + +class FinetuneList(BaseModel): + """ + FinetuneList + """ # noqa: E501 + + data: List[FinetuneResponse] + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FinetuneList from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FinetuneList from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + [FinetuneResponse.from_dict(_item) for _item in obj["data"]] + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/finetune_list_events.py b/src/together/generated/models/finetune_list_events.py new file mode 100644 index 00000000..c4266c1c --- /dev/null +++ b/src/together/generated/models/finetune_list_events.py @@ -0,0 +1,99 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.fine_tune_event import FineTuneEvent +from typing import Optional, Set +from typing_extensions import Self + + +class FinetuneListEvents(BaseModel): + """ + FinetuneListEvents + """ # noqa: E501 + + data: List[FineTuneEvent] + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FinetuneListEvents from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FinetuneListEvents from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + [FineTuneEvent.from_dict(_item) for _item in obj["data"]] + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/finetune_response.py b/src/together/generated/models/finetune_response.py new file mode 100644 index 00000000..a1055827 --- /dev/null +++ b/src/together/generated/models/finetune_response.py @@ -0,0 +1,222 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional, Union +from together.generated.models.fine_tune_event import FineTuneEvent +from together.generated.models.fine_tunes_post_request_training_type import ( + FineTunesPostRequestTrainingType, +) +from together.generated.models.finetune_job_status import FinetuneJobStatus +from together.generated.models.finetune_response_train_on_inputs import ( + FinetuneResponseTrainOnInputs, +) +from typing import Optional, Set +from typing_extensions import Self + + +class FinetuneResponse(BaseModel): + """ + FinetuneResponse + """ # noqa: E501 + + id: StrictStr + training_file: Optional[StrictStr] = None + validation_file: Optional[StrictStr] = None + model: Optional[StrictStr] = None + model_output_name: Optional[StrictStr] = None + model_output_path: Optional[StrictStr] = None + trainingfile_numlines: Optional[StrictInt] = None + trainingfile_size: Optional[StrictInt] = None + created_at: Optional[StrictStr] = None + updated_at: Optional[StrictStr] = None + n_epochs: Optional[StrictInt] = None + n_checkpoints: Optional[StrictInt] = None + n_evals: Optional[StrictInt] = None + batch_size: Optional[StrictInt] = None + learning_rate: Optional[Union[StrictFloat, StrictInt]] = None + lr_scheduler: Optional[Dict[str, Any]] = None + warmup_ratio: Optional[Union[StrictFloat, StrictInt]] = None + max_grad_norm: Optional[Union[StrictFloat, StrictInt]] = None + weight_decay: Optional[Union[StrictFloat, StrictInt]] = None + eval_steps: Optional[StrictInt] = None + train_on_inputs: Optional[FinetuneResponseTrainOnInputs] = None + training_type: Optional[FineTunesPostRequestTrainingType] = None + status: FinetuneJobStatus + job_id: Optional[StrictStr] = None + events: Optional[List[FineTuneEvent]] = None + token_count: Optional[StrictInt] = None + 
param_count: Optional[StrictInt] = None + total_price: Optional[StrictInt] = None + epochs_completed: Optional[StrictInt] = None + queue_depth: Optional[StrictInt] = None + wandb_project_name: Optional[StrictStr] = None + wandb_url: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = [ + "id", + "training_file", + "validation_file", + "model", + "model_output_name", + "model_output_path", + "trainingfile_numlines", + "trainingfile_size", + "created_at", + "updated_at", + "n_epochs", + "n_checkpoints", + "n_evals", + "batch_size", + "learning_rate", + "lr_scheduler", + "warmup_ratio", + "max_grad_norm", + "weight_decay", + "eval_steps", + "train_on_inputs", + "training_type", + "status", + "job_id", + "events", + "token_count", + "param_count", + "total_price", + "epochs_completed", + "queue_depth", + "wandb_project_name", + "wandb_url", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FinetuneResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of lr_scheduler + if self.lr_scheduler: + _dict["lr_scheduler"] = self.lr_scheduler.to_dict() + # override the default output from pydantic by calling `to_dict()` of train_on_inputs + if self.train_on_inputs: + _dict["train_on_inputs"] = self.train_on_inputs.to_dict() + # override the default output from pydantic by calling `to_dict()` of training_type + if self.training_type: + _dict["training_type"] = self.training_type.to_dict() + # override the default output from pydantic by calling `to_dict()` of each item in events (list) + _items = [] + if self.events: + for _item_events in self.events: + if _item_events: + _items.append(_item_events.to_dict()) + _dict["events"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FinetuneResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "training_file": obj.get("training_file"), + "validation_file": obj.get("validation_file"), + "model": obj.get("model"), + "model_output_name": obj.get("model_output_name"), + "model_output_path": obj.get("model_output_path"), + "trainingfile_numlines": obj.get("trainingfile_numlines"), + "trainingfile_size": obj.get("trainingfile_size"), + "created_at": obj.get("created_at"), + "updated_at": obj.get("updated_at"), + "n_epochs": obj.get("n_epochs"), + "n_checkpoints": obj.get("n_checkpoints"), + "n_evals": obj.get("n_evals"), + "batch_size": obj.get("batch_size"), + "learning_rate": obj.get("learning_rate"), + "lr_scheduler": ( + LRScheduler.from_dict(obj["lr_scheduler"]) + if obj.get("lr_scheduler") is not None + else None + ), + "warmup_ratio": 
obj.get("warmup_ratio"), + "max_grad_norm": obj.get("max_grad_norm"), + "weight_decay": obj.get("weight_decay"), + "eval_steps": obj.get("eval_steps"), + "train_on_inputs": ( + FinetuneResponseTrainOnInputs.from_dict(obj["train_on_inputs"]) + if obj.get("train_on_inputs") is not None + else None + ), + "training_type": ( + FineTunesPostRequestTrainingType.from_dict(obj["training_type"]) + if obj.get("training_type") is not None + else None + ), + "status": obj.get("status"), + "job_id": obj.get("job_id"), + "events": ( + [FineTuneEvent.from_dict(_item) for _item in obj["events"]] + if obj.get("events") is not None + else None + ), + "token_count": obj.get("token_count"), + "param_count": obj.get("param_count"), + "total_price": obj.get("total_price"), + "epochs_completed": obj.get("epochs_completed"), + "queue_depth": obj.get("queue_depth"), + "wandb_project_name": obj.get("wandb_project_name"), + "wandb_url": obj.get("wandb_url"), + } + ) + return _obj diff --git a/src/together/generated/models/finetune_response_train_on_inputs.py b/src/together/generated/models/finetune_response_train_on_inputs.py new file mode 100644 index 00000000..44ff7e8a --- /dev/null +++ b/src/together/generated/models/finetune_response_train_on_inputs.py @@ -0,0 +1,170 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +FINETUNERESPONSETRAINONINPUTS_ONE_OF_SCHEMAS = ["bool", "str"] + + +class FinetuneResponseTrainOnInputs(BaseModel): + """ + FinetuneResponseTrainOnInputs + """ + + # data type: bool + oneof_schema_1_validator: Optional[StrictBool] = None + # data type: str + oneof_schema_2_validator: Optional[StrictStr] = None + actual_instance: Optional[Union[bool, str]] = None + one_of_schemas: Set[str] = {"bool", "str"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = FinetuneResponseTrainOnInputs.model_construct() + error_messages = [] + match = 0 + # validate data type: bool + try: + instance.oneof_schema_1_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.oneof_schema_2_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. 
Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into bool + try: + # validation + instance.oneof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_1_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.oneof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_2_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], bool, str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/finish_reason.py b/src/together/generated/models/finish_reason.py new file mode 100644 index 00000000..4d88eced --- /dev/null +++ b/src/together/generated/models/finish_reason.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +from enum import Enum +from typing_extensions import Self + + +class FinishReason(str, Enum): + """ + FinishReason + """ + + """ + allowed enum values + """ + STOP = "stop" + EOS = "eos" + LENGTH = "length" + TOOL_CALLS = "tool_calls" + FUNCTION_CALL = "function_call" + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Create an instance of FinishReason from a JSON string""" + return cls(json.loads(json_str)) diff --git a/src/together/generated/models/full_training_type.py b/src/together/generated/models/full_training_type.py new file mode 100644 index 00000000..6999096a --- /dev/null +++ b/src/together/generated/models/full_training_type.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class FullTrainingType(BaseModel): + """ + FullTrainingType + """ # noqa: E501 + + type: StrictStr + __properties: ClassVar[List[str]] = ["type"] + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["Full"]): + raise ValueError("must be one of enum values ('Full')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FullTrainingType from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FullTrainingType from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"type": obj.get("type")}) + return _obj diff --git a/src/together/generated/models/hardware_availability.py b/src/together/generated/models/hardware_availability.py new file mode 100644 index 00000000..53a166cf --- /dev/null +++ b/src/together/generated/models/hardware_availability.py @@ -0,0 +1,94 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class HardwareAvailability(BaseModel): + """ + Indicates the current availability status of a hardware configuration + """ # noqa: E501 + + status: StrictStr = Field( + description="The availability status of the hardware configuration" + ) + __properties: ClassVar[List[str]] = ["status"] + + @field_validator("status") + def status_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["available", "unavailable", "insufficient"]): + raise ValueError( + "must be one of enum values ('available', 'unavailable', 'insufficient')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of HardwareAvailability from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of HardwareAvailability from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"status": obj.get("status")}) + return _obj diff --git a/src/together/generated/models/hardware_spec.py b/src/together/generated/models/hardware_spec.py new file mode 100644 index 00000000..10d0058a --- /dev/null +++ b/src/together/generated/models/hardware_spec.py @@ -0,0 +1,100 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class HardwareSpec(BaseModel): + """ + Detailed specifications of a hardware configuration + """ # noqa: E501 + + gpu_type: StrictStr = Field(description="The type/model of GPU") + gpu_link: StrictStr = Field(description="The GPU interconnect technology") + gpu_memory: Union[StrictFloat, StrictInt] = Field( + description="Amount of GPU memory in GB" + ) + gpu_count: StrictInt = Field(description="Number of GPUs in this configuration") + __properties: ClassVar[List[str]] = [ + "gpu_type", + "gpu_link", + "gpu_memory", + "gpu_count", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> 
str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of HardwareSpec from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of HardwareSpec from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "gpu_type": obj.get("gpu_type"), + "gpu_link": obj.get("gpu_link"), + "gpu_memory": obj.get("gpu_memory"), + "gpu_count": obj.get("gpu_count"), + } + ) + return _obj diff --git a/src/together/generated/models/hardware_with_status.py b/src/together/generated/models/hardware_with_status.py new file mode 100644 index 00000000..46680485 --- /dev/null +++ b/src/together/generated/models/hardware_with_status.py @@ -0,0 +1,140 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
class HardwareWithStatus(BaseModel):
    """Hardware configuration details including current availability status."""  # noqa: E501

    object: StrictStr
    name: StrictStr = Field(
        description="Unique identifier for the hardware configuration"
    )
    pricing: EndpointPricing
    specs: HardwareSpec
    availability: Optional[HardwareAvailability] = None
    updated_at: datetime = Field(
        description="Timestamp of when the hardware status was last updated"
    )
    __properties: ClassVar[List[str]] = [
        "object",
        "name",
        "pricing",
        "specs",
        "availability",
        "updated_at",
    ]

    @field_validator("object")
    def object_validate_enum(cls, value):
        """Only the literal 'hardware' is accepted."""
        if value != "hardware":
            raise ValueError("must be one of enum values ('hardware')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Pretty-printed string form of the model (alias keys)."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """JSON string form of the model (alias keys)."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Build a HardwareWithStatus from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Alias-keyed dict form of the model.

        Unlike a plain `self.model_dump(by_alias=True)`, `None` is only kept
        for nullable fields that were set at model initialization, and the
        nested models are serialized through their own `to_dict()`.
        """
        payload = self.model_dump(
            by_alias=True,
            exclude=set(),
            exclude_none=True,
        )
        # Route nested models through their customized serializers.
        if self.pricing:
            payload["pricing"] = self.pricing.to_dict()
        if self.specs:
            payload["specs"] = self.specs.to_dict()
        if self.availability:
            payload["availability"] = self.availability.to_dict()
        return payload

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Build a HardwareWithStatus from a plain dict."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return cls.model_validate(obj)
        pricing = obj.get("pricing")
        specs = obj.get("specs")
        availability = obj.get("availability")
        return cls.model_validate(
            {
                "object": obj.get("object"),
                "name": obj.get("name"),
                "pricing": (
                    EndpointPricing.from_dict(pricing) if pricing is not None else None
                ),
                "specs": HardwareSpec.from_dict(specs) if specs is not None else None,
                "availability": (
                    HardwareAvailability.from_dict(availability)
                    if availability is not None
                    else None
                ),
                "updated_at": obj.get("updated_at"),
            }
        )
class ImageResponse(BaseModel):
    """Response envelope for an image-generation request."""  # noqa: E501

    id: StrictStr
    model: StrictStr
    object: StrictStr
    data: List[Optional[ImageResponseDataInner]]
    __properties: ClassVar[List[str]] = ["id", "model", "object", "data"]

    @field_validator("object")
    def object_validate_enum(cls, value):
        """Only the literal 'list' is accepted."""
        if value != "list":
            raise ValueError("must be one of enum values ('list')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Pretty-printed string form of the model (alias keys)."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """JSON string form of the model (alias keys)."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Build an ImageResponse from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Alias-keyed dict form of the model.

        Unlike a plain `self.model_dump(by_alias=True)`, `None` is only kept
        for nullable fields that were set at model initialization, and each
        entry in `data` is serialized through its own `to_dict()`.
        """
        payload = self.model_dump(
            by_alias=True,
            exclude=set(),
            exclude_none=True,
        )
        if self.data:
            payload["data"] = [entry.to_dict() for entry in self.data if entry]
        return payload

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Build an ImageResponse from a plain dict."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return cls.model_validate(obj)
        entries = obj.get("data")
        return cls.model_validate(
            {
                "id": obj.get("id"),
                "model": obj.get("model"),
                "object": obj.get("object"),
                "data": (
                    [ImageResponseDataInner.from_dict(entry) for entry in obj["data"]]
                    if entries is not None
                    else None
                ),
            }
        )
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ImageResponseDataInner(BaseModel): + """ + ImageResponseDataInner + """ # noqa: E501 + + index: StrictInt + b64_json: Optional[StrictStr] = None + url: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = [] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ImageResponseDataInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ImageResponseDataInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({}) + return _obj diff --git a/src/together/generated/models/images_generations_post_request.py b/src/together/generated/models/images_generations_post_request.py new file mode 100644 index 00000000..9ccc6fe9 --- /dev/null +++ b/src/together/generated/models/images_generations_post_request.py @@ -0,0 +1,217 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictFloat, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Optional, Union +from together.generated.models.images_generations_post_request_image_loras_inner import ( + ImagesGenerationsPostRequestImageLorasInner, +) +from together.generated.models.images_generations_post_request_model import ( + ImagesGenerationsPostRequestModel, +) +from typing import Optional, Set +from typing_extensions import Self + + +class ImagesGenerationsPostRequest(BaseModel): + """ + ImagesGenerationsPostRequest + """ # noqa: E501 + + prompt: StrictStr = Field( + description="A description of the desired images. Maximum length varies by model." 
class ImagesGenerationsPostRequest(BaseModel):
    """Request payload for the images/generations endpoint."""  # noqa: E501

    prompt: StrictStr = Field(
        description="A description of the desired images. Maximum length varies by model."
    )
    model: ImagesGenerationsPostRequestModel
    steps: Optional[StrictInt] = Field(
        default=20, description="Number of generation steps."
    )
    image_url: Optional[StrictStr] = Field(
        default=None,
        description="URL of an image to use for image models that support it.",
    )
    seed: Optional[StrictInt] = Field(
        default=None,
        description="Seed used for generation. Can be used to reproduce image generations.",
    )
    n: Optional[StrictInt] = Field(
        default=1, description="Number of image results to generate."
    )
    height: Optional[StrictInt] = Field(
        default=1024, description="Height of the image to generate in number of pixels."
    )
    width: Optional[StrictInt] = Field(
        default=1024, description="Width of the image to generate in number of pixels."
    )
    negative_prompt: Optional[StrictStr] = Field(
        default=None,
        description="The prompt or prompts not to guide the image generation.",
    )
    response_format: Optional[StrictStr] = Field(
        default=None,
        description="Format of the image response. Can be either a base64 string or a URL.",
    )
    guidance: Optional[Union[StrictFloat, StrictInt]] = Field(
        default=3.5,
        description="Adjusts the alignment of the generated image with the input prompt. Higher values (e.g., 8-10) make the output more faithful to the prompt, while lower values (e.g., 1-5) encourage more creative freedom.",
    )
    output_format: Optional[StrictStr] = Field(
        default="jpeg",
        description="The format of the image response. Can be either be `jpeg` or `png`. Defaults to `jpeg`.",
    )
    image_loras: Optional[List[ImagesGenerationsPostRequestImageLorasInner]] = Field(
        default=None,
        description="An array of objects that define LoRAs (Low-Rank Adaptations) to influence the generated image.",
    )
    __properties: ClassVar[List[str]] = [
        "prompt",
        "model",
        "steps",
        "image_url",
        "seed",
        "n",
        "height",
        "width",
        "negative_prompt",
        "response_format",
        "guidance",
        "output_format",
        "image_loras",
    ]

    @field_validator("response_format")
    def response_format_validate_enum(cls, value):
        """None passes through; otherwise only 'base64' or 'url' is accepted."""
        if value is not None and value not in ("base64", "url"):
            raise ValueError("must be one of enum values ('base64', 'url')")
        return value

    @field_validator("output_format")
    def output_format_validate_enum(cls, value):
        """None passes through; otherwise only 'jpeg' or 'png' is accepted."""
        if value is not None and value not in ("jpeg", "png"):
            raise ValueError("must be one of enum values ('jpeg', 'png')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Pretty-printed string form of the model (alias keys)."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """JSON string form of the model (alias keys)."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Build an ImagesGenerationsPostRequest from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Alias-keyed dict form of the model.

        Unlike a plain `self.model_dump(by_alias=True)`, `None` is only kept
        for nullable fields that were set at model initialization, and the
        nested `model` / `image_loras` values are serialized through their
        own `to_dict()`.
        """
        payload = self.model_dump(
            by_alias=True,
            exclude=set(),
            exclude_none=True,
        )
        if self.model:
            payload["model"] = self.model.to_dict()
        if self.image_loras:
            payload["image_loras"] = [
                lora.to_dict() for lora in self.image_loras if lora
            ]
        return payload

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Build an ImagesGenerationsPostRequest from a plain dict, applying defaults."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        def _or_default(key: str, default: Any) -> Any:
            # Mirror the generator's convention: a missing/None key falls
            # back to the schema default.
            value = obj.get(key)
            return default if value is None else value

        model = obj.get("model")
        loras = obj.get("image_loras")
        return cls.model_validate(
            {
                "prompt": obj.get("prompt"),
                "model": (
                    ImagesGenerationsPostRequestModel.from_dict(model)
                    if model is not None
                    else None
                ),
                "steps": _or_default("steps", 20),
                "image_url": obj.get("image_url"),
                "seed": obj.get("seed"),
                "n": _or_default("n", 1),
                "height": _or_default("height", 1024),
                "width": _or_default("width", 1024),
                "negative_prompt": obj.get("negative_prompt"),
                "response_format": obj.get("response_format"),
                "guidance": _or_default("guidance", 3.5),
                "output_format": _or_default("output_format", "jpeg"),
                "image_loras": (
                    [
                        ImagesGenerationsPostRequestImageLorasInner.from_dict(item)
                        for item in loras
                    ]
                    if loras is not None
                    else None
                ),
            }
        )
class ImagesGenerationsPostRequestImageLorasInner(BaseModel):
    """A LoRA (Low-Rank Adaptation) reference plus its influence strength."""  # noqa: E501

    path: StrictStr = Field(
        description="The URL of the LoRA to apply (e.g. https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA)."
    )
    scale: Union[StrictFloat, StrictInt] = Field(
        description="The strength of the LoRA's influence. Most LoRA's recommend a value of 1."
    )
    __properties: ClassVar[List[str]] = ["path", "scale"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Pretty-printed string form of the model (alias keys)."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """JSON string form of the model (alias keys)."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Build an ImagesGenerationsPostRequestImageLorasInner from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Alias-keyed dict form of the model.

        Unlike a plain `self.model_dump(by_alias=True)`, `None` is only kept
        for nullable fields that were set at model initialization.
        """
        skip: Set[str] = set()
        return self.model_dump(
            by_alias=True,
            exclude=skip,
            exclude_none=True,
        )

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Build an ImagesGenerationsPostRequestImageLorasInner from a plain dict."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return cls.model_validate(obj)
        return cls.model_validate({"path": obj.get("path"), "scale": obj.get("scale")})
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ImagesGenerationsPostRequestImageLorasInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"path": obj.get("path"), "scale": obj.get("scale")}) + return _obj diff --git a/src/together/generated/models/images_generations_post_request_model.py b/src/together/generated/models/images_generations_post_request_model.py new file mode 100644 index 00000000..a61ffba7 --- /dev/null +++ b/src/together/generated/models/images_generations_post_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +IMAGESGENERATIONSPOSTREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class ImagesGenerationsPostRequestModel(BaseModel): + """ + The model to use for image generation.

[See all of Together AI's image models](https://docs.together.ai/docs/serverless-models#image-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = ImagesGenerationsPostRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in ImagesGenerationsPostRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ImagesGenerationsPostRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/linear_lr_scheduler_args.py b/src/together/generated/models/linear_lr_scheduler_args.py new file mode 100644 index 00000000..d84842f1 --- /dev/null +++ b/src/together/generated/models/linear_lr_scheduler_args.py @@ -0,0 +1,94 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt +from typing import Any, ClassVar, Dict, List, Optional, Union +from typing import Optional, Set +from typing_extensions import Self + + +class LinearLRSchedulerArgs(BaseModel): + """ + LinearLRSchedulerArgs + """ # noqa: E501 + + min_lr_ratio: Optional[Union[StrictFloat, StrictInt]] = Field( + default=0.0, + description="The ratio of the final learning rate to the peak learning rate", + ) + __properties: ClassVar[List[str]] = ["min_lr_ratio"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of LinearLRSchedulerArgs from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of LinearLRSchedulerArgs from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "min_lr_ratio": ( + obj.get("min_lr_ratio") + if obj.get("min_lr_ratio") is not None + else 0.0 + ) + } + ) + return _obj diff --git a/src/together/generated/models/list_endpoint.py b/src/together/generated/models/list_endpoint.py new file mode 100644 index 00000000..417949c3 --- /dev/null +++ b/src/together/generated/models/list_endpoint.py @@ -0,0 +1,136 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from datetime import datetime +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ListEndpoint(BaseModel): + """ + Details about an endpoint when listed via the list endpoint + """ # noqa: E501 + + object: StrictStr = Field(description="The type of object") + id: StrictStr = Field(description="Unique identifier for the endpoint") + name: StrictStr = Field(description="System name for the endpoint") + model: StrictStr = Field(description="The model deployed on this endpoint") + type: StrictStr = Field(description="The type of endpoint") + owner: StrictStr = Field(description="The owner of this endpoint") + state: StrictStr = Field(description="Current state of the endpoint") + created_at: datetime = Field(description="Timestamp when the endpoint was created") + __properties: ClassVar[List[str]] = [ + "object", + "id", + "name", + "model", + "type", + "owner", + "state", + "created_at", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["endpoint"]): + raise ValueError("must be one of enum values ('endpoint')") + return value + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["serverless", "dedicated"]): + raise ValueError("must be one of enum values ('serverless', 'dedicated')") + return value + + @field_validator("state") + def state_validate_enum(cls, value): + """Validates the enum""" + if value not in set( + ["PENDING", "STARTING", "STARTED", "STOPPING", "STOPPED", "ERROR"] + ): + raise ValueError( + "must be one of enum values ('PENDING', 'STARTING', 'STARTED', 'STOPPING', 'STOPPED', 'ERROR')" + ) + return value + + model_config = ConfigDict( + 
populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListEndpoint from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListEndpoint from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "id": obj.get("id"), + "name": obj.get("name"), + "model": obj.get("model"), + "type": obj.get("type"), + "owner": obj.get("owner"), + "state": obj.get("state"), + "created_at": obj.get("created_at"), + } + ) + return _obj diff --git a/src/together/generated/models/list_endpoints200_response.py b/src/together/generated/models/list_endpoints200_response.py new file mode 100644 index 00000000..0db32f09 --- /dev/null +++ b/src/together/generated/models/list_endpoints200_response.py @@ -0,0 +1,108 @@ +# coding: utf-8 + +""" + Together 
APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.list_endpoint import ListEndpoint +from typing import Optional, Set +from typing_extensions import Self + + +class ListEndpoints200Response(BaseModel): + """ + ListEndpoints200Response + """ # noqa: E501 + + object: StrictStr + data: List[ListEndpoint] + __properties: ClassVar[List[str]] = ["object", "data"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["list"]): + raise ValueError("must be one of enum values ('list')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListEndpoints200Response from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListEndpoints200Response from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "data": ( + [ListEndpoint.from_dict(_item) for _item in obj["data"]] + if obj.get("data") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/list_hardware200_response.py b/src/together/generated/models/list_hardware200_response.py new file mode 100644 index 00000000..c18ec5f0 --- /dev/null +++ b/src/together/generated/models/list_hardware200_response.py @@ -0,0 +1,185 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.list_hardware200_response_one_of import ( + ListHardware200ResponseOneOf, +) +from together.generated.models.list_hardware200_response_one_of1 import ( + ListHardware200ResponseOneOf1, +) +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +LISTHARDWARE200RESPONSE_ONE_OF_SCHEMAS = [ + "ListHardware200ResponseOneOf", + "ListHardware200ResponseOneOf1", +] + + +class ListHardware200Response(BaseModel): + """ + ListHardware200Response + """ + + # data type: ListHardware200ResponseOneOf + oneof_schema_1_validator: Optional[ListHardware200ResponseOneOf] = None + # data type: ListHardware200ResponseOneOf1 + oneof_schema_2_validator: Optional[ListHardware200ResponseOneOf1] = None + actual_instance: Optional[ + Union[ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1] + ] = None + one_of_schemas: Set[str] = { + "ListHardware200ResponseOneOf", + "ListHardware200ResponseOneOf1", + } + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = ListHardware200Response.model_construct() + error_messages = [] + match = 0 + # validate data type: ListHardware200ResponseOneOf + if not isinstance(v, ListHardware200ResponseOneOf): + error_messages.append( + f"Error! Input type `{type(v)}` is not `ListHardware200ResponseOneOf`" + ) + else: + match += 1 + # validate data type: ListHardware200ResponseOneOf1 + if not isinstance(v, ListHardware200ResponseOneOf1): + error_messages.append( + f"Error! Input type `{type(v)}` is not `ListHardware200ResponseOneOf1`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into ListHardware200ResponseOneOf + try: + instance.actual_instance = ListHardware200ResponseOneOf.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into ListHardware200ResponseOneOf1 + try: + instance.actual_instance = ListHardware200ResponseOneOf1.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[ + Union[ + Dict[str, Any], ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1 + ] + ]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/list_hardware200_response_one_of.py b/src/together/generated/models/list_hardware200_response_one_of.py new file mode 100644 index 00000000..1cf0ec36 --- /dev/null +++ b/src/together/generated/models/list_hardware200_response_one_of.py @@ -0,0 +1,113 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.list_hardware200_response_one_of_data_inner import ( + ListHardware200ResponseOneOfDataInner, +) +from typing import Optional, Set +from typing_extensions import Self + + +class ListHardware200ResponseOneOf(BaseModel): + """ + Response when no model filter is provided + """ # noqa: E501 + + object: StrictStr + data: List[ListHardware200ResponseOneOfDataInner] + __properties: ClassVar[List[str]] = ["object", "data"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["list"]): + raise ValueError("must be one of enum values ('list')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOf from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOf from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "data": ( + [ + ListHardware200ResponseOneOfDataInner.from_dict(_item) + for _item in obj["data"] + ] + if obj.get("data") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/list_hardware200_response_one_of1.py b/src/together/generated/models/list_hardware200_response_one_of1.py new file mode 100644 index 00000000..171532b1 --- /dev/null +++ b/src/together/generated/models/list_hardware200_response_one_of1.py @@ -0,0 +1,113 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.list_hardware200_response_one_of1_data_inner import ( + ListHardware200ResponseOneOf1DataInner, +) +from typing import Optional, Set +from typing_extensions import Self + + +class ListHardware200ResponseOneOf1(BaseModel): + """ + Response when model filter is provided + """ # noqa: E501 + + object: StrictStr + data: List[ListHardware200ResponseOneOf1DataInner] + __properties: ClassVar[List[str]] = ["object", "data"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["list"]): + raise ValueError("must be one of enum values ('list')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOf1 from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOf1 from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "data": ( + [ + ListHardware200ResponseOneOf1DataInner.from_dict(_item) + for _item in obj["data"] + ] + if obj.get("data") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/list_hardware200_response_one_of1_data_inner.py b/src/together/generated/models/list_hardware200_response_one_of1_data_inner.py new file mode 100644 index 00000000..db5c86a2 --- /dev/null +++ b/src/together/generated/models/list_hardware200_response_one_of1_data_inner.py @@ -0,0 +1,140 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from datetime import datetime +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.endpoint_pricing import EndpointPricing +from together.generated.models.hardware_availability import HardwareAvailability +from together.generated.models.hardware_spec import HardwareSpec +from typing import Optional, Set +from typing_extensions import Self + + +class ListHardware200ResponseOneOf1DataInner(BaseModel): + """ + ListHardware200ResponseOneOf1DataInner + """ # noqa: E501 + + object: StrictStr + name: StrictStr = Field( + description="Unique identifier for the hardware configuration" + ) + pricing: EndpointPricing + specs: HardwareSpec + availability: HardwareAvailability + updated_at: datetime = Field( + description="Timestamp of when the hardware status was last updated" + ) + __properties: ClassVar[List[str]] = [ + "object", + "name", + "pricing", + "specs", + "availability", + "updated_at", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["hardware"]): + raise ValueError("must be one of enum values ('hardware')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOf1DataInner from a JSON string""" + return 
cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of pricing + if self.pricing: + _dict["pricing"] = self.pricing.to_dict() + # override the default output from pydantic by calling `to_dict()` of specs + if self.specs: + _dict["specs"] = self.specs.to_dict() + # override the default output from pydantic by calling `to_dict()` of availability + if self.availability: + _dict["availability"] = self.availability.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOf1DataInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "name": obj.get("name"), + "pricing": ( + EndpointPricing.from_dict(obj["pricing"]) + if obj.get("pricing") is not None + else None + ), + "specs": ( + HardwareSpec.from_dict(obj["specs"]) + if obj.get("specs") is not None + else None + ), + "availability": ( + HardwareAvailability.from_dict(obj["availability"]) + if obj.get("availability") is not None + else None + ), + "updated_at": obj.get("updated_at"), + } + ) + return _obj diff --git a/src/together/generated/models/list_hardware200_response_one_of_data_inner.py b/src/together/generated/models/list_hardware200_response_one_of_data_inner.py new file mode 100644 index 00000000..bc1f7a99 --- /dev/null +++ 
b/src/together/generated/models/list_hardware200_response_one_of_data_inner.py @@ -0,0 +1,137 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from datetime import datetime +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.endpoint_pricing import EndpointPricing +from together.generated.models.hardware_spec import HardwareSpec +from typing import Optional, Set +from typing_extensions import Self + + +class ListHardware200ResponseOneOfDataInner(BaseModel): + """ + ListHardware200ResponseOneOfDataInner + """ # noqa: E501 + + object: StrictStr + name: StrictStr = Field( + description="Unique identifier for the hardware configuration" + ) + pricing: EndpointPricing + specs: HardwareSpec + availability: Optional[Any] = None + updated_at: datetime = Field( + description="Timestamp of when the hardware status was last updated" + ) + __properties: ClassVar[List[str]] = [ + "object", + "name", + "pricing", + "specs", + "availability", + "updated_at", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["hardware"]): + raise ValueError("must be one of enum values ('hardware')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: 
pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOfDataInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of pricing + if self.pricing: + _dict["pricing"] = self.pricing.to_dict() + # override the default output from pydantic by calling `to_dict()` of specs + if self.specs: + _dict["specs"] = self.specs.to_dict() + # set to None if availability (nullable) is None + # and model_fields_set contains the field + if self.availability is None and "availability" in self.model_fields_set: + _dict["availability"] = None + + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOfDataInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "name": obj.get("name"), + "pricing": ( + EndpointPricing.from_dict(obj["pricing"]) + if obj.get("pricing") is not None + else None + ), + "specs": ( + HardwareSpec.from_dict(obj["specs"]) + if obj.get("specs") is not None + else None + ), + "availability": obj.get("availability"), + "updated_at": obj.get("updated_at"), + } + ) + return _obj 
diff --git a/src/together/generated/models/lo_ra_training_type.py b/src/together/generated/models/lo_ra_training_type.py new file mode 100644 index 00000000..44d82d70 --- /dev/null +++ b/src/together/generated/models/lo_ra_training_type.py @@ -0,0 +1,123 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + StrictFloat, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Optional, Union +from typing import Optional, Set +from typing_extensions import Self + + +class LoRATrainingType(BaseModel): + """ + LoRATrainingType + """ # noqa: E501 + + type: StrictStr + lora_r: StrictInt + lora_alpha: StrictInt + lora_dropout: Optional[Union[StrictFloat, StrictInt]] = 0.0 + lora_trainable_modules: Optional[StrictStr] = "all-linear" + __properties: ClassVar[List[str]] = [ + "type", + "lora_r", + "lora_alpha", + "lora_dropout", + "lora_trainable_modules", + ] + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["Lora"]): + raise ValueError("must be one of enum values ('Lora')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def 
from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of LoRATrainingType from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of LoRATrainingType from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "type": obj.get("type"), + "lora_r": obj.get("lora_r"), + "lora_alpha": obj.get("lora_alpha"), + "lora_dropout": ( + obj.get("lora_dropout") + if obj.get("lora_dropout") is not None + else 0.0 + ), + "lora_trainable_modules": ( + obj.get("lora_trainable_modules") + if obj.get("lora_trainable_modules") is not None + else "all-linear" + ), + } + ) + return _obj diff --git a/src/together/generated/models/logprobs_part.py b/src/together/generated/models/logprobs_part.py new file mode 100644 index 00000000..dd1de169 --- /dev/null +++ b/src/together/generated/models/logprobs_part.py @@ -0,0 +1,97 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional, Union +from typing import Optional, Set +from typing_extensions import Self + + +class LogprobsPart(BaseModel): + """ + LogprobsPart + """ # noqa: E501 + + token_ids: Optional[List[Union[StrictFloat, StrictInt]]] = Field( + default=None, description="List of token IDs corresponding to the logprobs" + ) + tokens: Optional[List[StrictStr]] = Field( + default=None, description="List of token strings" + ) + token_logprobs: Optional[List[Union[StrictFloat, StrictInt]]] = Field( + default=None, description="List of token log probabilities" + ) + __properties: ClassVar[List[str]] = ["token_ids", "tokens", "token_logprobs"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of LogprobsPart from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of LogprobsPart from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "token_ids": obj.get("token_ids"), + "tokens": obj.get("tokens"), + "token_logprobs": obj.get("token_logprobs"), + } + ) + return _obj diff --git a/src/together/generated/models/lr_scheduler.py b/src/together/generated/models/lr_scheduler.py new file mode 100644 index 00000000..4e327e05 --- /dev/null +++ b/src/together/generated/models/lr_scheduler.py @@ -0,0 +1,96 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class LRScheduler(BaseModel): + """ + LRScheduler + """ # noqa: E501 + + lr_scheduler_type: StrictStr + lr_scheduler_args: Optional[Dict[str, Any]] = None + __properties: ClassVar[List[str]] = ["lr_scheduler_type", "lr_scheduler_args"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of LRScheduler from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of lr_scheduler_args + if self.lr_scheduler_args: + _dict["lr_scheduler_args"] = self.lr_scheduler_args.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of LRScheduler from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "lr_scheduler_type": obj.get("lr_scheduler_type"), + "lr_scheduler_args": ( + LinearLRSchedulerArgs.from_dict(obj["lr_scheduler_args"]) + if obj.get("lr_scheduler_args") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/model_info.py b/src/together/generated/models/model_info.py new file mode 100644 index 00000000..61a48d64 --- /dev/null +++ b/src/together/generated/models/model_info.py @@ -0,0 +1,135 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.pricing import Pricing +from typing import Optional, Set +from typing_extensions import Self + + +class ModelInfo(BaseModel): + """ + ModelInfo + """ # noqa: E501 + + id: StrictStr + object: StrictStr + created: StrictInt + type: StrictStr + display_name: Optional[StrictStr] = None + organization: Optional[StrictStr] = None + link: Optional[StrictStr] = None + license: Optional[StrictStr] = None + context_length: Optional[StrictInt] = None + pricing: Optional[Pricing] = None + __properties: ClassVar[List[str]] = [ + "id", + "object", + "created", + "type", + "display_name", + "organization", + "link", + "license", + "context_length", + "pricing", + ] + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set( + ["chat", "language", "code", "image", "embedding", "moderation", "rerank"] + ): + raise ValueError( + "must be one of enum values ('chat', 'language', 'code', 'image', 'embedding', 'moderation', 'rerank')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ModelInfo from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return 
the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of pricing + if self.pricing: + _dict["pricing"] = self.pricing.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ModelInfo from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "object": obj.get("object"), + "created": obj.get("created"), + "type": obj.get("type"), + "display_name": obj.get("display_name"), + "organization": obj.get("organization"), + "link": obj.get("link"), + "license": obj.get("license"), + "context_length": obj.get("context_length"), + "pricing": ( + Pricing.from_dict(obj["pricing"]) + if obj.get("pricing") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/pricing.py b/src/together/generated/models/pricing.py new file mode 100644 index 00000000..87350c63 --- /dev/null +++ b/src/together/generated/models/pricing.py @@ -0,0 +1,101 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class Pricing(BaseModel): + """ + Pricing + """ # noqa: E501 + + hourly: Union[StrictFloat, StrictInt] + input: Union[StrictFloat, StrictInt] + output: Union[StrictFloat, StrictInt] + base: Union[StrictFloat, StrictInt] + finetune: Union[StrictFloat, StrictInt] + __properties: ClassVar[List[str]] = [ + "hourly", + "input", + "output", + "base", + "finetune", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of Pricing from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of Pricing from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "hourly": obj.get("hourly"), + "input": obj.get("input"), + "output": obj.get("output"), + "base": obj.get("base"), + "finetune": obj.get("finetune"), + } + ) + return _obj diff --git a/src/together/generated/models/prompt_part_inner.py b/src/together/generated/models/prompt_part_inner.py new file mode 100644 index 00000000..a999f700 --- /dev/null +++ b/src/together/generated/models/prompt_part_inner.py @@ -0,0 +1,97 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.logprobs_part import LogprobsPart +from typing import Optional, Set +from typing_extensions import Self + + +class PromptPartInner(BaseModel): + """ + PromptPartInner + """ # noqa: E501 + + text: Optional[StrictStr] = None + logprobs: Optional[LogprobsPart] = None + __properties: ClassVar[List[str]] = ["text", "logprobs"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of PromptPartInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of logprobs + if self.logprobs: + _dict["logprobs"] = self.logprobs.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of PromptPartInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "text": obj.get("text"), + "logprobs": ( + LogprobsPart.from_dict(obj["logprobs"]) + if obj.get("logprobs") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/rerank_request.py b/src/together/generated/models/rerank_request.py new file mode 100644 index 00000000..5a68173e --- /dev/null +++ b/src/together/generated/models/rerank_request.py @@ -0,0 +1,144 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.rerank_request_documents import RerankRequestDocuments +from together.generated.models.rerank_request_model import RerankRequestModel +from typing import Optional, Set +from typing_extensions import Self + + +class RerankRequest(BaseModel): + """ + RerankRequest + """ # noqa: E501 + + model: RerankRequestModel + query: StrictStr = Field(description="The search query to be used for ranking.") + documents: RerankRequestDocuments + top_n: Optional[StrictInt] = Field( + default=None, description="The number of top results to return." + ) + return_documents: Optional[StrictBool] = Field( + default=None, + description="Whether to return supplied documents with the response.", + ) + rank_fields: Optional[List[StrictStr]] = Field( + default=None, + description="List of keys in the JSON Object document to rank by. 
Defaults to use all supplied keys for ranking.", + ) + additional_properties: Dict[str, Any] = {} + __properties: ClassVar[List[str]] = [ + "model", + "query", + "documents", + "top_n", + "return_documents", + "rank_fields", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RerankRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + * Fields in `self.additional_properties` are added to the output dict. 
+ """ + excluded_fields: Set[str] = set( + [ + "additional_properties", + ] + ) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of model + if self.model: + _dict["model"] = self.model.to_dict() + # override the default output from pydantic by calling `to_dict()` of documents + if self.documents: + _dict["documents"] = self.documents.to_dict() + # puts key-value pairs in additional_properties in the top level + if self.additional_properties is not None: + for _key, _value in self.additional_properties.items(): + _dict[_key] = _value + + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RerankRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "model": ( + RerankRequestModel.from_dict(obj["model"]) + if obj.get("model") is not None + else None + ), + "query": obj.get("query"), + "documents": ( + RerankRequestDocuments.from_dict(obj["documents"]) + if obj.get("documents") is not None + else None + ), + "top_n": obj.get("top_n"), + "return_documents": obj.get("return_documents"), + "rank_fields": obj.get("rank_fields"), + } + ) + # store additional fields in additional_properties + for _key in obj.keys(): + if _key not in cls.__properties: + _obj.additional_properties[_key] = obj.get(_key) + + return _obj diff --git a/src/together/generated/models/rerank_request_documents.py b/src/together/generated/models/rerank_request_documents.py new file mode 100644 index 00000000..f1e24d87 --- /dev/null +++ b/src/together/generated/models/rerank_request_documents.py @@ -0,0 +1,171 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, Dict, List, Optional +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +RERANKREQUESTDOCUMENTS_ONE_OF_SCHEMAS = ["List[Dict[str, object]]", "List[str]"] + + +class RerankRequestDocuments(BaseModel): + """ + List of documents, which can be either strings or objects. + """ + + # data type: List[Dict[str, object]] + oneof_schema_1_validator: Optional[List[Dict[str, Any]]] = None + # data type: List[str] + oneof_schema_2_validator: Optional[List[StrictStr]] = None + actual_instance: Optional[Union[List[Dict[str, object]], List[str]]] = None + one_of_schemas: Set[str] = {"List[Dict[str, object]]", "List[str]"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = RerankRequestDocuments.model_construct() + error_messages = [] + match = 0 + # validate data type: List[Dict[str, object]] + try: + instance.oneof_schema_1_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: List[str] + try: + instance.oneof_schema_2_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into List[Dict[str, object]] + try: + # validation + instance.oneof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_1_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into List[str] + try: + # validation + instance.oneof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_2_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[Union[Dict[str, Any], List[Dict[str, object]], List[str]]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/rerank_request_model.py b/src/together/generated/models/rerank_request_model.py new file mode 100644 index 00000000..1f72a3a6 --- /dev/null +++ b/src/together/generated/models/rerank_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +RERANKREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class RerankRequestModel(BaseModel): + """ + The model to be used for the rerank request.

[See all of Together AI's rerank models](https://docs.together.ai/docs/serverless-models#rerank-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = RerankRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in RerankRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into RerankRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/rerank_response.py b/src/together/generated/models/rerank_response.py new file mode 100644 index 00000000..edfb2fd3 --- /dev/null +++ b/src/together/generated/models/rerank_response.py @@ -0,0 +1,127 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.rerank_response_results_inner import ( + RerankResponseResultsInner, +) +from together.generated.models.usage_data import UsageData +from typing import Optional, Set +from typing_extensions import Self + + +class RerankResponse(BaseModel): + """ + RerankResponse + """ # noqa: E501 + + object: StrictStr = Field(description="Object type") + id: Optional[StrictStr] = Field(default=None, description="Request ID") + model: StrictStr = Field(description="The model to be used for the rerank request.") + results: List[RerankResponseResultsInner] + usage: Optional[UsageData] = None + __properties: ClassVar[List[str]] = ["object", "id", "model", "results", "usage"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["rerank"]): + raise ValueError("must be one of enum values ('rerank')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RerankResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in results (list) + _items = [] + if self.results: + for _item_results in self.results: + if _item_results: + _items.append(_item_results.to_dict()) + _dict["results"] = _items + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RerankResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "id": obj.get("id"), + "model": obj.get("model"), + "results": ( + [ + RerankResponseResultsInner.from_dict(_item) + for _item in obj["results"] + ] + if obj.get("results") is not None + else None + ), + "usage": ( + UsageData.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/rerank_response_results_inner.py b/src/together/generated/models/rerank_response_results_inner.py new file mode 100644 index 00000000..51610442 --- /dev/null +++ b/src/together/generated/models/rerank_response_results_inner.py @@ -0,0 +1,101 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt +from typing import Any, ClassVar, Dict, List, Union +from together.generated.models.rerank_response_results_inner_document import ( + RerankResponseResultsInnerDocument, +) +from typing import Optional, Set +from typing_extensions import Self + + +class RerankResponseResultsInner(BaseModel): + """ + RerankResponseResultsInner + """ # noqa: E501 + + index: StrictInt + relevance_score: Union[StrictFloat, StrictInt] + document: RerankResponseResultsInnerDocument + __properties: ClassVar[List[str]] = ["index", "relevance_score", "document"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RerankResponseResultsInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of document + if self.document: + _dict["document"] = self.document.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RerankResponseResultsInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "index": obj.get("index"), + "relevance_score": obj.get("relevance_score"), + "document": ( + RerankResponseResultsInnerDocument.from_dict(obj["document"]) + if obj.get("document") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/rerank_response_results_inner_document.py b/src/together/generated/models/rerank_response_results_inner_document.py new file mode 100644 index 00000000..51258d5a --- /dev/null +++ b/src/together/generated/models/rerank_response_results_inner_document.py @@ -0,0 +1,83 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class RerankResponseResultsInnerDocument(BaseModel): + """ + RerankResponseResultsInnerDocument + """ # noqa: E501 + + text: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["text"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RerankResponseResultsInnerDocument from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RerankResponseResultsInnerDocument from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"text": obj.get("text")}) + return _obj diff --git a/src/together/generated/models/stream_sentinel.py b/src/together/generated/models/stream_sentinel.py new file mode 100644 index 00000000..54c925d4 --- /dev/null +++ b/src/together/generated/models/stream_sentinel.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class StreamSentinel(BaseModel): + """ + StreamSentinel + """ # noqa: E501 + + data: StrictStr + __properties: ClassVar[List[str]] = ["data"] + + @field_validator("data") + def data_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["[DONE]"]): + raise ValueError("must be one of enum values ('[DONE]')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of StreamSentinel from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of StreamSentinel from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"data": obj.get("data")}) + return _obj diff --git a/src/together/generated/models/tool_choice.py b/src/together/generated/models/tool_choice.py new file mode 100644 index 00000000..2571dcf9 --- /dev/null +++ b/src/together/generated/models/tool_choice.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + StrictFloat, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Union +from together.generated.models.tool_choice_function import ToolChoiceFunction +from typing import Optional, Set +from typing_extensions import Self + + +class ToolChoice(BaseModel): + """ + ToolChoice + """ # noqa: E501 + + index: Union[StrictFloat, StrictInt] + id: StrictStr + type: StrictStr + function: ToolChoiceFunction + __properties: ClassVar[List[str]] = ["index", "id", "type", "function"] + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["function"]): + raise ValueError("must be one of enum values ('function')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) 
-> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ToolChoice from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of function + if self.function: + _dict["function"] = self.function.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ToolChoice from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "index": obj.get("index"), + "id": obj.get("id"), + "type": obj.get("type"), + "function": ( + ToolChoiceFunction.from_dict(obj["function"]) + if obj.get("function") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/tool_choice_function.py b/src/together/generated/models/tool_choice_function.py new file mode 100644 index 00000000..308cbe71 --- /dev/null +++ b/src/together/generated/models/tool_choice_function.py @@ -0,0 +1,86 @@ +# coding: utf-8 + +""" + Together APIs + + The 
Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ToolChoiceFunction(BaseModel): + """ + ToolChoiceFunction + """ # noqa: E501 + + name: StrictStr + arguments: StrictStr + __properties: ClassVar[List[str]] = ["name", "arguments"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ToolChoiceFunction from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ToolChoiceFunction from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + {"name": obj.get("name"), "arguments": obj.get("arguments")} + ) + return _obj diff --git a/src/together/generated/models/tools_part.py b/src/together/generated/models/tools_part.py new file mode 100644 index 00000000..e26792c2 --- /dev/null +++ b/src/together/generated/models/tools_part.py @@ -0,0 +1,97 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.tools_part_function import ToolsPartFunction +from typing import Optional, Set +from typing_extensions import Self + + +class ToolsPart(BaseModel): + """ + ToolsPart + """ # noqa: E501 + + type: Optional[StrictStr] = None + function: Optional[ToolsPartFunction] = None + __properties: ClassVar[List[str]] = ["type", "function"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ToolsPart from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of function + if self.function: + _dict["function"] = self.function.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ToolsPart from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "type": obj.get("type"), + "function": ( + ToolsPartFunction.from_dict(obj["function"]) + if obj.get("function") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/tools_part_function.py b/src/together/generated/models/tools_part_function.py new file mode 100644 index 00000000..cbb5d419 --- /dev/null +++ b/src/together/generated/models/tools_part_function.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ToolsPartFunction(BaseModel): + """ + ToolsPartFunction + """ # noqa: E501 + + description: Optional[StrictStr] = None + name: Optional[StrictStr] = None + parameters: Optional[Dict[str, Any]] = Field( + default=None, description="A map of parameter names to their values." 
+ ) + __properties: ClassVar[List[str]] = ["description", "name", "parameters"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ToolsPartFunction from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ToolsPartFunction from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "description": obj.get("description"), + "name": obj.get("name"), + "parameters": obj.get("parameters"), + } + ) + return _obj diff --git a/src/together/generated/models/update_endpoint_request.py b/src/together/generated/models/update_endpoint_request.py new file mode 100644 index 00000000..ee2d2ff0 --- /dev/null +++ b/src/together/generated/models/update_endpoint_request.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.autoscaling import Autoscaling +from typing import Optional, Set +from typing_extensions import Self + + +class UpdateEndpointRequest(BaseModel): + """ + UpdateEndpointRequest + """ # noqa: E501 + + display_name: Optional[StrictStr] = Field( + default=None, description="A human-readable name for the endpoint" + ) + state: Optional[StrictStr] = Field( + default=None, description="The desired state of the endpoint" + ) + autoscaling: Optional[Autoscaling] = Field( + default=None, description="New autoscaling configuration for the endpoint" + ) + __properties: ClassVar[List[str]] = ["display_name", "state", "autoscaling"] + + @field_validator("state") + def state_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["STARTED", "STOPPED"]): + raise ValueError("must be one of enum values ('STARTED', 'STOPPED')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of UpdateEndpointRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of autoscaling + if self.autoscaling: + _dict["autoscaling"] = self.autoscaling.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of UpdateEndpointRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "display_name": obj.get("display_name"), + "state": obj.get("state"), + "autoscaling": ( + Autoscaling.from_dict(obj["autoscaling"]) + if obj.get("autoscaling") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/usage_data.py b/src/together/generated/models/usage_data.py new file mode 100644 index 00000000..82a825c7 --- /dev/null +++ b/src/together/generated/models/usage_data.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class UsageData(BaseModel): + """ + UsageData + """ # noqa: E501 + + prompt_tokens: StrictInt + completion_tokens: StrictInt + total_tokens: StrictInt + __properties: ClassVar[List[str]] = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of UsageData from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of UsageData from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "prompt_tokens": obj.get("prompt_tokens"), + "completion_tokens": obj.get("completion_tokens"), + "total_tokens": obj.get("total_tokens"), + } + ) + return _obj diff --git a/src/together/generated/rest.py b/src/together/generated/rest.py new file mode 100644 index 00000000..0f92a615 --- /dev/null +++ b/src/together/generated/rest.py @@ -0,0 +1,195 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import io +import json +import re +import ssl +from typing import Optional, Union + +import aiohttp +import aiohttp_retry + +from together.generated.exceptions import ApiException, ApiValueError + +RESTResponseType = aiohttp.ClientResponse + +ALLOW_RETRY_METHODS = frozenset({"DELETE", "GET", "HEAD", "OPTIONS", "PUT", "TRACE"}) + + +class RESTResponse(io.IOBase): + + def __init__(self, resp) -> None: + self.response = resp + self.status = resp.status + self.reason = resp.reason + self.data = None + + async def read(self): + if self.data is None: + self.data = await self.response.read() + return self.data + + def getheaders(self): + """Returns a CIMultiDictProxy of the response headers.""" + return self.response.headers + + def getheader(self, name, default=None): + """Returns a given response header.""" + return self.response.headers.get(name, default) + + +class RESTClientObject: + + def __init__(self, configuration) -> None: + + # maxsize is number of requests to host that are allowed in parallel + self.maxsize = configuration.connection_pool_maxsize + + self.ssl_context = ssl.create_default_context(cafile=configuration.ssl_ca_cert) + if configuration.cert_file: + self.ssl_context.load_cert_chain( + configuration.cert_file, keyfile=configuration.key_file + ) + + if not configuration.verify_ssl: + self.ssl_context.check_hostname = False + self.ssl_context.verify_mode = ssl.CERT_NONE + + self.proxy = configuration.proxy + self.proxy_headers = configuration.proxy_headers + + self.retries = configuration.retries + + self.pool_manager: Optional[aiohttp.ClientSession] = None + self.retry_client: Optional[aiohttp_retry.RetryClient] = None + + async def close(self) -> None: + if self.pool_manager: + await self.pool_manager.close() + if self.retry_client is not None: + await self.retry_client.close() + + async def request( + self, + method, + url, + headers=None, + body=None, + post_params=None, + _request_timeout=None, + ): + """Execute request + + 
:param method: http request method + :param url: http request url + :param headers: http request headers + :param body: request json body, for `application/json` + :param post_params: request post parameters, + `application/x-www-form-urlencoded` + and `multipart/form-data` + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + """ + method = method.upper() + assert method in ["GET", "HEAD", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"] + + if post_params and body: + raise ApiValueError( + "body parameter cannot be used with post_params parameter." + ) + + post_params = post_params or {} + headers = headers or {} + # url already contains the URL query string + timeout = _request_timeout or 5 * 60 + + if "Content-Type" not in headers: + headers["Content-Type"] = "application/json" + + args = {"method": method, "url": url, "timeout": timeout, "headers": headers} + + if self.proxy: + args["proxy"] = self.proxy + if self.proxy_headers: + args["proxy_headers"] = self.proxy_headers + + # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE` + if method in ["POST", "PUT", "PATCH", "OPTIONS", "DELETE"]: + if re.search("json", headers["Content-Type"], re.IGNORECASE): + if body is not None: + body = json.dumps(body) + args["data"] = body + elif headers["Content-Type"] == "application/x-www-form-urlencoded": + args["data"] = aiohttp.FormData(post_params) + elif headers["Content-Type"] == "multipart/form-data": + # must del headers['Content-Type'], or the correct + # Content-Type which generated by aiohttp + del headers["Content-Type"] + data = aiohttp.FormData() + for param in post_params: + k, v = param + if isinstance(v, tuple) and len(v) == 3: + data.add_field(k, value=v[1], filename=v[0], content_type=v[2]) + else: + # Ensures that dict objects are serialized + if isinstance(v, dict): + v = json.dumps(v) + elif isinstance(v, int): + v = str(v) + 
data.add_field(k, v) + args["data"] = data + + # Pass a `bytes` or `str` parameter directly in the body to support + # other content types than Json when `body` argument is provided + # in serialized form + elif isinstance(body, str) or isinstance(body, bytes): + args["data"] = body + else: + # Cannot generate the request from given parameters + msg = """Cannot prepare a request message for provided + arguments. Please check that your arguments match + declared content type.""" + raise ApiException(status=0, reason=msg) + + pool_manager: Union[aiohttp.ClientSession, aiohttp_retry.RetryClient] + + # https pool manager + if self.pool_manager is None: + self.pool_manager = aiohttp.ClientSession( + connector=aiohttp.TCPConnector( + limit=self.maxsize, ssl=self.ssl_context + ), + trust_env=True, + ) + pool_manager = self.pool_manager + + if self.retries is not None and method in ALLOW_RETRY_METHODS: + if self.retry_client is None: + self.retry_client = aiohttp_retry.RetryClient( + client_session=self.pool_manager, + retry_options=aiohttp_retry.ExponentialRetry( + attempts=self.retries, + factor=2.0, + start_timeout=0.1, + max_timeout=120.0, + ), + ) + pool_manager = self.retry_client + + r = await pool_manager.request(**args) + + return RESTResponse(r) diff --git a/src/together/generated/test/__init__.py b/src/together/generated/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/together/generated/test/test_audio_api.py b/src/together/generated/test/test_audio_api.py new file mode 100644 index 00000000..deddc486 --- /dev/null +++ b/src/together/generated/test/test_audio_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.audio_api import AudioApi + + +class TestAudioApi(unittest.IsolatedAsyncioTestCase): + """AudioApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = AudioApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_audio_speech(self) -> None: + """Test case for audio_speech + + Create audio generation request + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_request.py b/src/together/generated/test/test_audio_speech_request.py new file mode 100644 index 00000000..43362fcd --- /dev/null +++ b/src/together/generated/test/test_audio_speech_request.py @@ -0,0 +1,63 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_request import AudioSpeechRequest + + +class TestAudioSpeechRequest(unittest.TestCase): + """AudioSpeechRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechRequest: + """Test AudioSpeechRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechRequest` + """ + model = AudioSpeechRequest() + if include_optional: + return AudioSpeechRequest( + model = cartesia/sonic, + input = '', + voice = None, + response_format = 'wav', + language = 'en', + response_encoding = 'pcm_f32le', + sample_rate = 1.337, + stream = True + ) + else: + return AudioSpeechRequest( + model = cartesia/sonic, + input = '', + voice = None, + ) + """ + + def testAudioSpeechRequest(self): + """Test AudioSpeechRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_request_model.py b/src/together/generated/test/test_audio_speech_request_model.py new file mode 100644 index 00000000..beb8ec7c --- /dev/null +++ b/src/together/generated/test/test_audio_speech_request_model.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel + + +class TestAudioSpeechRequestModel(unittest.TestCase): + """AudioSpeechRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechRequestModel: + """Test AudioSpeechRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechRequestModel` + """ + model = AudioSpeechRequestModel() + if include_optional: + return AudioSpeechRequestModel( + ) + else: + return AudioSpeechRequestModel( + ) + """ + + def testAudioSpeechRequestModel(self): + """Test AudioSpeechRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_request_voice.py b/src/together/generated/test/test_audio_speech_request_voice.py new file mode 100644 index 00000000..744d89c5 --- /dev/null +++ b/src/together/generated/test/test_audio_speech_request_voice.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice + + +class TestAudioSpeechRequestVoice(unittest.TestCase): + """AudioSpeechRequestVoice unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechRequestVoice: + """Test AudioSpeechRequestVoice + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechRequestVoice` + """ + model = AudioSpeechRequestVoice() + if include_optional: + return AudioSpeechRequestVoice( + ) + else: + return AudioSpeechRequestVoice( + ) + """ + + def testAudioSpeechRequestVoice(self): + """Test AudioSpeechRequestVoice""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_stream_chunk.py b/src/together/generated/test/test_audio_speech_stream_chunk.py new file mode 100644 index 00000000..1335b885 --- /dev/null +++ b/src/together/generated/test/test_audio_speech_stream_chunk.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk + + +class TestAudioSpeechStreamChunk(unittest.TestCase): + """AudioSpeechStreamChunk unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechStreamChunk: + """Test AudioSpeechStreamChunk + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechStreamChunk` + """ + model = AudioSpeechStreamChunk() + if include_optional: + return AudioSpeechStreamChunk( + object = 'audio.tts.chunk', + model = 'cartesia/sonic', + b64 = '' + ) + else: + return AudioSpeechStreamChunk( + object = 'audio.tts.chunk', + model = 'cartesia/sonic', + b64 = '', + ) + """ + + def testAudioSpeechStreamChunk(self): + """Test AudioSpeechStreamChunk""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_stream_event.py b/src/together/generated/test/test_audio_speech_stream_event.py new file mode 100644 index 00000000..68337a10 --- /dev/null +++ b/src/together/generated/test/test_audio_speech_stream_event.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent + + +class TestAudioSpeechStreamEvent(unittest.TestCase): + """AudioSpeechStreamEvent unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechStreamEvent: + """Test AudioSpeechStreamEvent + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechStreamEvent` + """ + model = AudioSpeechStreamEvent() + if include_optional: + return AudioSpeechStreamEvent( + data = together.generated.models.audio_speech_stream_chunk.AudioSpeechStreamChunk( + object = 'audio.tts.chunk', + model = 'cartesia/sonic', + b64 = '', ) + ) + else: + return AudioSpeechStreamEvent( + data = together.generated.models.audio_speech_stream_chunk.AudioSpeechStreamChunk( + object = 'audio.tts.chunk', + model = 'cartesia/sonic', + b64 = '', ), + ) + """ + + def testAudioSpeechStreamEvent(self): + """Test AudioSpeechStreamEvent""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_stream_response.py b/src/together/generated/test/test_audio_speech_stream_response.py new file mode 100644 index 00000000..e9e245bc --- /dev/null +++ b/src/together/generated/test/test_audio_speech_stream_response.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_stream_response import ( + AudioSpeechStreamResponse, +) + + +class TestAudioSpeechStreamResponse(unittest.TestCase): + """AudioSpeechStreamResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechStreamResponse: + """Test AudioSpeechStreamResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechStreamResponse` + """ + model = AudioSpeechStreamResponse() + if include_optional: + return AudioSpeechStreamResponse( + data = '[DONE]' + ) + else: + return AudioSpeechStreamResponse( + data = '[DONE]', + ) + """ + + def testAudioSpeechStreamResponse(self): + """Test AudioSpeechStreamResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_autoscaling.py b/src/together/generated/test/test_autoscaling.py new file mode 100644 index 00000000..4f8dff60 --- /dev/null +++ b/src/together/generated/test/test_autoscaling.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.autoscaling import Autoscaling + + +class TestAutoscaling(unittest.TestCase): + """Autoscaling unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> Autoscaling: + """Test Autoscaling + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `Autoscaling` + """ + model = Autoscaling() + if include_optional: + return Autoscaling( + min_replicas = 56, + max_replicas = 56 + ) + else: + return Autoscaling( + min_replicas = 56, + max_replicas = 56, + ) + """ + + def testAutoscaling(self): + """Test Autoscaling""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_api.py b/src/together/generated/test/test_chat_api.py new file mode 100644 index 00000000..55bb7cba --- /dev/null +++ b/src/together/generated/test/test_chat_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.chat_api import ChatApi + + +class TestChatApi(unittest.IsolatedAsyncioTestCase): + """ChatApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = ChatApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_chat_completions(self) -> None: + """Test case for chat_completions + + Create chat completion + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_assistant_message_param.py b/src/together/generated/test/test_chat_completion_assistant_message_param.py new file mode 100644 index 00000000..072b5db2 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_assistant_message_param.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_assistant_message_param import ( + ChatCompletionAssistantMessageParam, +) + + +class TestChatCompletionAssistantMessageParam(unittest.TestCase): + """ChatCompletionAssistantMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionAssistantMessageParam: + """Test ChatCompletionAssistantMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionAssistantMessageParam` + """ + model = ChatCompletionAssistantMessageParam() + if include_optional: + return ChatCompletionAssistantMessageParam( + content = '', + role = 'assistant', + name = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ) + ) + else: + return ChatCompletionAssistantMessageParam( + role = 'assistant', + ) + """ + + def testChatCompletionAssistantMessageParam(self): + """Test ChatCompletionAssistantMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choice.py b/src/together/generated/test/test_chat_completion_choice.py new file mode 100644 index 00000000..9618c968 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_choice.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together 
REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_choice import ChatCompletionChoice + + +class TestChatCompletionChoice(unittest.TestCase): + """ChatCompletionChoice unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChoice: + """Test ChatCompletionChoice + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChoice` + """ + model = ChatCompletionChoice() + if include_optional: + return ChatCompletionChoice( + index = 56, + finish_reason = 'stop', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ), + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ) + ) + else: + return ChatCompletionChoice( + index = 56, + finish_reason = 'stop', + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 
'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), + ) + """ + + def testChatCompletionChoice(self): + """Test ChatCompletionChoice""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choice_delta.py b/src/together/generated/test/test_chat_completion_choice_delta.py new file mode 100644 index 00000000..6e430d9e --- /dev/null +++ b/src/together/generated/test/test_chat_completion_choice_delta.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_choice_delta import ( + ChatCompletionChoiceDelta, +) + + +class TestChatCompletionChoiceDelta(unittest.TestCase): + """ChatCompletionChoiceDelta unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChoiceDelta: + """Test ChatCompletionChoiceDelta + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChoiceDelta` + """ + model = ChatCompletionChoiceDelta() + if include_optional: + return ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ) + ) + else: + return ChatCompletionChoiceDelta( + role = 'system', + ) + """ + + def testChatCompletionChoiceDelta(self): + """Test ChatCompletionChoiceDelta""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choice_delta_function_call.py b/src/together/generated/test/test_chat_completion_choice_delta_function_call.py new file mode 100644 index 00000000..0797b639 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_choice_delta_function_call.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_choice_delta_function_call import ( + ChatCompletionChoiceDeltaFunctionCall, +) + + +class TestChatCompletionChoiceDeltaFunctionCall(unittest.TestCase): + """ChatCompletionChoiceDeltaFunctionCall unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChoiceDeltaFunctionCall: + """Test ChatCompletionChoiceDeltaFunctionCall + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChoiceDeltaFunctionCall` + """ + model = ChatCompletionChoiceDeltaFunctionCall() + if include_optional: + return ChatCompletionChoiceDeltaFunctionCall( + arguments = '', + name = '' + ) + else: + return ChatCompletionChoiceDeltaFunctionCall( + arguments = '', + name = '', + ) + """ + + def testChatCompletionChoiceDeltaFunctionCall(self): + """Test ChatCompletionChoiceDeltaFunctionCall""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choices_data_inner.py b/src/together/generated/test/test_chat_completion_choices_data_inner.py new file mode 100644 index 00000000..d61c850b --- /dev/null +++ b/src/together/generated/test/test_chat_completion_choices_data_inner.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_choices_data_inner import ( + ChatCompletionChoicesDataInner, +) + + +class TestChatCompletionChoicesDataInner(unittest.TestCase): + """ChatCompletionChoicesDataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChoicesDataInner: + """Test ChatCompletionChoicesDataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChoicesDataInner` + """ + model = ChatCompletionChoicesDataInner() + if include_optional: + return ChatCompletionChoicesDataInner( + text = '', + index = 56, + seed = 56, + finish_reason = 'stop', + message = together.generated.models.chat_completion_message.ChatCompletionMessage( + content = '', + role = 'assistant', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ), ), + logprobs = None + ) + else: + return ChatCompletionChoicesDataInner( + ) + """ + + def testChatCompletionChoicesDataInner(self): + """Test ChatCompletionChoicesDataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py 
b/src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py new file mode 100644 index 00000000..88ae0977 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py @@ -0,0 +1,63 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_choices_data_inner_logprobs import ( + ChatCompletionChoicesDataInnerLogprobs, +) + + +class TestChatCompletionChoicesDataInnerLogprobs(unittest.TestCase): + """ChatCompletionChoicesDataInnerLogprobs unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChoicesDataInnerLogprobs: + """Test ChatCompletionChoicesDataInnerLogprobs + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChoicesDataInnerLogprobs` + """ + model = ChatCompletionChoicesDataInnerLogprobs() + if include_optional: + return ChatCompletionChoicesDataInnerLogprobs( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ] + ) + else: + return ChatCompletionChoicesDataInnerLogprobs( + ) + """ + + def testChatCompletionChoicesDataInnerLogprobs(self): + """Test ChatCompletionChoicesDataInnerLogprobs""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_chunk.py b/src/together/generated/test/test_chat_completion_chunk.py new file mode 100644 index 
00000000..f935abc8 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_chunk.py @@ -0,0 +1,108 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_chunk import ChatCompletionChunk + + +class TestChatCompletionChunk(unittest.TestCase): + """ChatCompletionChunk unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChunk: + """Test ChatCompletionChunk + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChunk` + """ + model = ChatCompletionChunk() + if include_optional: + return ChatCompletionChunk( + id = '', + object = 'chat.completion.chunk', + created = 56, + system_fingerprint = '', + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + choices = [ + together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( + index = 56, + finish_reason = 'stop', + logprobs = 1.337, + seed = 56, + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), ) + ], + usage = None + ) + else: + return 
ChatCompletionChunk( + id = '', + object = 'chat.completion.chunk', + created = 56, + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + choices = [ + together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( + index = 56, + finish_reason = 'stop', + logprobs = 1.337, + seed = 56, + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), ) + ], + ) + """ + + def testChatCompletionChunk(self): + """Test ChatCompletionChunk""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_chunk_choices_inner.py b/src/together/generated/test/test_chat_completion_chunk_choices_inner.py new file mode 100644 index 00000000..e7317378 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_chunk_choices_inner.py @@ -0,0 +1,92 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_chunk_choices_inner import ( + ChatCompletionChunkChoicesInner, +) + + +class TestChatCompletionChunkChoicesInner(unittest.TestCase): + """ChatCompletionChunkChoicesInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChunkChoicesInner: + """Test ChatCompletionChunkChoicesInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChunkChoicesInner` + """ + model = ChatCompletionChunkChoicesInner() + if include_optional: + return ChatCompletionChunkChoicesInner( + index = 56, + finish_reason = 'stop', + logprobs = 1.337, + seed = 56, + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ) + ) + else: + return ChatCompletionChunkChoicesInner( + index = 56, + finish_reason = 'stop', + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = 
together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), + ) + """ + + def testChatCompletionChunkChoicesInner(self): + """Test ChatCompletionChunkChoicesInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_event.py b/src/together/generated/test/test_chat_completion_event.py new file mode 100644 index 00000000..c219c114 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_event.py @@ -0,0 +1,112 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_event import ChatCompletionEvent + + +class TestChatCompletionEvent(unittest.TestCase): + """ChatCompletionEvent unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionEvent: + """Test ChatCompletionEvent + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionEvent` + """ + model = ChatCompletionEvent() + if include_optional: + return ChatCompletionEvent( + data = together.generated.models.chat_completion_chunk.ChatCompletionChunk( + id = '', + object = 'chat.completion.chunk', + created = 56, + system_fingerprint = '', + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + choices = [ + together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( + index 
= 56, + finish_reason = 'stop', + logprobs = 1.337, + seed = 56, + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), ) + ], + usage = null, ) + ) + else: + return ChatCompletionEvent( + data = together.generated.models.chat_completion_chunk.ChatCompletionChunk( + id = '', + object = 'chat.completion.chunk', + created = 56, + system_fingerprint = '', + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + choices = [ + together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( + index = 56, + finish_reason = 'stop', + logprobs = 1.337, + seed = 56, + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), ) + ], + usage = null, ), + ) + """ + + def testChatCompletionEvent(self): + """Test ChatCompletionEvent""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/src/together/generated/test/test_chat_completion_function_message_param.py b/src/together/generated/test/test_chat_completion_function_message_param.py new file mode 100644 index 00000000..07cfa130 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_function_message_param.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_function_message_param import ( + ChatCompletionFunctionMessageParam, +) + + +class TestChatCompletionFunctionMessageParam(unittest.TestCase): + """ChatCompletionFunctionMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionFunctionMessageParam: + """Test ChatCompletionFunctionMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionFunctionMessageParam` + """ + model = ChatCompletionFunctionMessageParam() + if include_optional: + return ChatCompletionFunctionMessageParam( + role = 'function', + content = '', + name = '' + ) + else: + return ChatCompletionFunctionMessageParam( + role = 'function', + content = '', + name = '', + ) + """ + + def testChatCompletionFunctionMessageParam(self): + """Test ChatCompletionFunctionMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_message.py b/src/together/generated/test/test_chat_completion_message.py new 
file mode 100644 index 00000000..6e60a844 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_message.py @@ -0,0 +1,68 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_message import ChatCompletionMessage + + +class TestChatCompletionMessage(unittest.TestCase): + """ChatCompletionMessage unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionMessage: + """Test ChatCompletionMessage + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionMessage` + """ + model = ChatCompletionMessage() + if include_optional: + return ChatCompletionMessage( + content = '', + role = 'assistant', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ) + ) + else: + return ChatCompletionMessage( + content = '', + role = 'assistant', + ) + """ + + def testChatCompletionMessage(self): + """Test ChatCompletionMessage""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_message_function_call.py 
b/src/together/generated/test/test_chat_completion_message_function_call.py new file mode 100644 index 00000000..90f0dbc7 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_message_function_call.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_message_function_call import ( + ChatCompletionMessageFunctionCall, +) + + +class TestChatCompletionMessageFunctionCall(unittest.TestCase): + """ChatCompletionMessageFunctionCall unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionMessageFunctionCall: + """Test ChatCompletionMessageFunctionCall + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionMessageFunctionCall` + """ + model = ChatCompletionMessageFunctionCall() + if include_optional: + return ChatCompletionMessageFunctionCall( + arguments = '', + name = '' + ) + else: + return ChatCompletionMessageFunctionCall( + arguments = '', + name = '', + ) + """ + + def testChatCompletionMessageFunctionCall(self): + """Test ChatCompletionMessageFunctionCall""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_message_param.py b/src/together/generated/test/test_chat_completion_message_param.py new file mode 100644 index 00000000..d463e95d --- /dev/null +++ 
b/src/together/generated/test/test_chat_completion_message_param.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_message_param import ( + ChatCompletionMessageParam, +) + + +class TestChatCompletionMessageParam(unittest.TestCase): + """ChatCompletionMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionMessageParam: + """Test ChatCompletionMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionMessageParam` + """ + model = ChatCompletionMessageParam() + if include_optional: + return ChatCompletionMessageParam( + content = '', + role = 'function', + name = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ), + tool_call_id = '' + ) + else: + return ChatCompletionMessageParam( + content = '', + role = 'function', + name = '', + tool_call_id = '', + ) + """ + + def testChatCompletionMessageParam(self): + """Test ChatCompletionMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/src/together/generated/test/test_chat_completion_request.py b/src/together/generated/test/test_chat_completion_request.py new file mode 100644 index 00000000..d84e32d6 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request import ChatCompletionRequest + + +class TestChatCompletionRequest(unittest.TestCase): + """ChatCompletionRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequest: + """Test ChatCompletionRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequest` + """ + model = ChatCompletionRequest() + if include_optional: + return ChatCompletionRequest( + messages = [ + together.generated.models.chat_completion_request_messages_inner.ChatCompletionRequest_messages_inner( + role = 'system', + content = '', ) + ], + model = meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo, + max_tokens = 56, + stop = [ + '' + ], + temperature = 1.337, + top_p = 1.337, + top_k = 56, + context_length_exceeded_behavior = 'error', + repetition_penalty = 1.337, + stream = True, + logprobs = 0, + echo = True, + n = 1, + min_p = 1.337, + presence_penalty = 1.337, + frequency_penalty = 1.337, + logit_bias = {1024=-10.5, 105=21.4}, + seed = 42, + function_call = None, + response_format = together.generated.models.chat_completion_request_response_format.ChatCompletionRequest_response_format( + type = 'json', + schema = { + 'key' : '' + 
}, ), + tools = [ + together.generated.models.tools_part.ToolsPart( + type = 'tool_type', + function = together.generated.models.tools_part_function.ToolsPart_function( + description = 'A description of the function.', + name = 'function_name', + parameters = { }, ), ) + ], + tool_choice = None, + safety_model = 'safety_model_name' + ) + else: + return ChatCompletionRequest( + messages = [ + together.generated.models.chat_completion_request_messages_inner.ChatCompletionRequest_messages_inner( + role = 'system', + content = '', ) + ], + model = meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo, + ) + """ + + def testChatCompletionRequest(self): + """Test ChatCompletionRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_function_call.py b/src/together/generated/test/test_chat_completion_request_function_call.py new file mode 100644 index 00000000..55125eb8 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_function_call.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_function_call import ( + ChatCompletionRequestFunctionCall, +) + + +class TestChatCompletionRequestFunctionCall(unittest.TestCase): + """ChatCompletionRequestFunctionCall unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestFunctionCall: + """Test ChatCompletionRequestFunctionCall + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestFunctionCall` + """ + model = ChatCompletionRequestFunctionCall() + if include_optional: + return ChatCompletionRequestFunctionCall( + name = '' + ) + else: + return ChatCompletionRequestFunctionCall( + name = '', + ) + """ + + def testChatCompletionRequestFunctionCall(self): + """Test ChatCompletionRequestFunctionCall""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_function_call_one_of.py b/src/together/generated/test/test_chat_completion_request_function_call_one_of.py new file mode 100644 index 00000000..58ec5841 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_function_call_one_of.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_function_call_one_of import ( + ChatCompletionRequestFunctionCallOneOf, +) + + +class TestChatCompletionRequestFunctionCallOneOf(unittest.TestCase): + """ChatCompletionRequestFunctionCallOneOf unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestFunctionCallOneOf: + """Test ChatCompletionRequestFunctionCallOneOf + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestFunctionCallOneOf` + """ + model = ChatCompletionRequestFunctionCallOneOf() + if include_optional: + return ChatCompletionRequestFunctionCallOneOf( + name = '' + ) + else: + return ChatCompletionRequestFunctionCallOneOf( + name = '', + ) + """ + + def testChatCompletionRequestFunctionCallOneOf(self): + """Test ChatCompletionRequestFunctionCallOneOf""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_messages_inner.py b/src/together/generated/test/test_chat_completion_request_messages_inner.py new file mode 100644 index 00000000..4d799742 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_messages_inner.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_messages_inner import ( + ChatCompletionRequestMessagesInner, +) + + +class TestChatCompletionRequestMessagesInner(unittest.TestCase): + """ChatCompletionRequestMessagesInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestMessagesInner: + """Test ChatCompletionRequestMessagesInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestMessagesInner` + """ + model = ChatCompletionRequestMessagesInner() + if include_optional: + return ChatCompletionRequestMessagesInner( + role = 'system', + content = '' + ) + else: + return ChatCompletionRequestMessagesInner( + role = 'system', + content = '', + ) + """ + + def testChatCompletionRequestMessagesInner(self): + """Test ChatCompletionRequestMessagesInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_model.py b/src/together/generated/test/test_chat_completion_request_model.py new file mode 100644 index 00000000..1f18e0f7 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_model.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_model import ( + ChatCompletionRequestModel, +) + + +class TestChatCompletionRequestModel(unittest.TestCase): + """ChatCompletionRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestModel: + """Test ChatCompletionRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestModel` + """ + model = ChatCompletionRequestModel() + if include_optional: + return ChatCompletionRequestModel( + ) + else: + return ChatCompletionRequestModel( + ) + """ + + def testChatCompletionRequestModel(self): + """Test ChatCompletionRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_response_format.py b/src/together/generated/test/test_chat_completion_request_response_format.py new file mode 100644 index 00000000..e6f5241b --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_response_format.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_response_format import ( + ChatCompletionRequestResponseFormat, +) + + +class TestChatCompletionRequestResponseFormat(unittest.TestCase): + """ChatCompletionRequestResponseFormat unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestResponseFormat: + """Test ChatCompletionRequestResponseFormat + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestResponseFormat` + """ + model = ChatCompletionRequestResponseFormat() + if include_optional: + return ChatCompletionRequestResponseFormat( + type = 'json', + var_schema = { + 'key' : '' + } + ) + else: + return ChatCompletionRequestResponseFormat( + ) + """ + + def testChatCompletionRequestResponseFormat(self): + """Test ChatCompletionRequestResponseFormat""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_tool_choice.py b/src/together/generated/test/test_chat_completion_request_tool_choice.py new file mode 100644 index 00000000..b04e7456 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_tool_choice.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_tool_choice import ( + ChatCompletionRequestToolChoice, +) + + +class TestChatCompletionRequestToolChoice(unittest.TestCase): + """ChatCompletionRequestToolChoice unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestToolChoice: + """Test ChatCompletionRequestToolChoice + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestToolChoice` + """ + model = ChatCompletionRequestToolChoice() + if include_optional: + return ChatCompletionRequestToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ) + ) + else: + return ChatCompletionRequestToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), + ) + """ + + def testChatCompletionRequestToolChoice(self): + """Test ChatCompletionRequestToolChoice""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_response.py b/src/together/generated/test/test_chat_completion_response.py new file mode 100644 index 00000000..e02f8cb3 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_response.py @@ -0,0 +1,110 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_response import ChatCompletionResponse + + +class TestChatCompletionResponse(unittest.TestCase): + """ChatCompletionResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionResponse: + """Test ChatCompletionResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionResponse` + """ + model = ChatCompletionResponse() + if include_optional: + return ChatCompletionResponse( + id = '', + choices = [ + together.generated.models.chat_completion_choices_data_inner.ChatCompletionChoicesData_inner( + text = '', + index = 56, + seed = 56, + finish_reason = 'stop', + message = together.generated.models.chat_completion_message.ChatCompletionMessage( + content = '', + role = 'assistant', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ), ), + logprobs = null, ) + ], + usage = together.generated.models.usage_data.UsageData( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56, ), + created = 56, + model = '', + object = 'chat.completion' + ) + else: + return ChatCompletionResponse( + id = '', + choices = [ + together.generated.models.chat_completion_choices_data_inner.ChatCompletionChoicesData_inner( + text = '', + index = 56, + seed = 56, + 
finish_reason = 'stop', + message = together.generated.models.chat_completion_message.ChatCompletionMessage( + content = '', + role = 'assistant', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ), ), + logprobs = null, ) + ], + created = 56, + model = '', + object = 'chat.completion', + ) + """ + + def testChatCompletionResponse(self): + """Test ChatCompletionResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_stream.py b/src/together/generated/test/test_chat_completion_stream.py new file mode 100644 index 00000000..29643caa --- /dev/null +++ b/src/together/generated/test/test_chat_completion_stream.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_stream import ChatCompletionStream + + +class TestChatCompletionStream(unittest.TestCase): + """ChatCompletionStream unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionStream: + """Test ChatCompletionStream + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionStream` + """ + model = ChatCompletionStream() + if include_optional: + return ChatCompletionStream( + data = '[DONE]' + ) + else: + return ChatCompletionStream( + data = '[DONE]', + ) + """ + + def testChatCompletionStream(self): + """Test ChatCompletionStream""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_system_message_param.py b/src/together/generated/test/test_chat_completion_system_message_param.py new file mode 100644 index 00000000..33b8a0c8 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_system_message_param.py @@ -0,0 +1,59 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_system_message_param import ( + ChatCompletionSystemMessageParam, +) + + +class TestChatCompletionSystemMessageParam(unittest.TestCase): + """ChatCompletionSystemMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionSystemMessageParam: + """Test ChatCompletionSystemMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionSystemMessageParam` + """ + model = ChatCompletionSystemMessageParam() + if include_optional: + return ChatCompletionSystemMessageParam( + content = '', + role = 'system', + name = '' + ) + else: + return ChatCompletionSystemMessageParam( + content = '', + role = 'system', + ) + """ + + def testChatCompletionSystemMessageParam(self): + """Test ChatCompletionSystemMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_token.py b/src/together/generated/test/test_chat_completion_token.py new file mode 100644 index 00000000..131f6c4d --- /dev/null +++ b/src/together/generated/test/test_chat_completion_token.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_token import ChatCompletionToken + + +class TestChatCompletionToken(unittest.TestCase): + """ChatCompletionToken unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionToken: + """Test ChatCompletionToken + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionToken` + """ + model = ChatCompletionToken() + if include_optional: + return ChatCompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True + ) + else: + return ChatCompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, + ) + """ + + def testChatCompletionToken(self): + """Test ChatCompletionToken""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_tool.py b/src/together/generated/test/test_chat_completion_tool.py new file mode 100644 index 00000000..2f795a90 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_tool.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_tool import ChatCompletionTool + + +class TestChatCompletionTool(unittest.TestCase): + """ChatCompletionTool unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionTool: + """Test ChatCompletionTool + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionTool` + """ + model = ChatCompletionTool() + if include_optional: + return ChatCompletionTool( + type = 'function', + function = together.generated.models.chat_completion_tool_function.ChatCompletionTool_function( + description = '', + name = '', + parameters = { + 'key' : null + }, ) + ) + else: + return ChatCompletionTool( + type = 'function', + function = together.generated.models.chat_completion_tool_function.ChatCompletionTool_function( + description = '', + name = '', + parameters = { + 'key' : null + }, ), + ) + """ + + def testChatCompletionTool(self): + """Test ChatCompletionTool""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_tool_function.py b/src/together/generated/test/test_chat_completion_tool_function.py new file mode 100644 index 00000000..0d370610 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_tool_function.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_tool_function import ( + ChatCompletionToolFunction, +) + + +class TestChatCompletionToolFunction(unittest.TestCase): + """ChatCompletionToolFunction unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionToolFunction: + """Test ChatCompletionToolFunction + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionToolFunction` + """ + model = ChatCompletionToolFunction() + if include_optional: + return ChatCompletionToolFunction( + description = '', + name = '', + parameters = { + 'key' : null + } + ) + else: + return ChatCompletionToolFunction( + name = '', + ) + """ + + def testChatCompletionToolFunction(self): + """Test ChatCompletionToolFunction""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_tool_message_param.py b/src/together/generated/test/test_chat_completion_tool_message_param.py new file mode 100644 index 00000000..90aece7d --- /dev/null +++ b/src/together/generated/test/test_chat_completion_tool_message_param.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_tool_message_param import ( + ChatCompletionToolMessageParam, +) + + +class TestChatCompletionToolMessageParam(unittest.TestCase): + """ChatCompletionToolMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionToolMessageParam: + """Test ChatCompletionToolMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionToolMessageParam` + """ + model = ChatCompletionToolMessageParam() + if include_optional: + return ChatCompletionToolMessageParam( + role = 'tool', + content = '', + tool_call_id = '' + ) + else: + return ChatCompletionToolMessageParam( + role = 'tool', + content = '', + tool_call_id = '', + ) + """ + + def testChatCompletionToolMessageParam(self): + """Test ChatCompletionToolMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_user_message_param.py b/src/together/generated/test/test_chat_completion_user_message_param.py new file mode 100644 index 00000000..7571b70e --- /dev/null +++ b/src/together/generated/test/test_chat_completion_user_message_param.py @@ -0,0 +1,59 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_user_message_param import ( + ChatCompletionUserMessageParam, +) + + +class TestChatCompletionUserMessageParam(unittest.TestCase): + """ChatCompletionUserMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionUserMessageParam: + """Test ChatCompletionUserMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionUserMessageParam` + """ + model = ChatCompletionUserMessageParam() + if include_optional: + return ChatCompletionUserMessageParam( + content = '', + role = 'user', + name = '' + ) + else: + return ChatCompletionUserMessageParam( + content = '', + role = 'user', + ) + """ + + def testChatCompletionUserMessageParam(self): + """Test ChatCompletionUserMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_api.py b/src/together/generated/test/test_completion_api.py new file mode 100644 index 00000000..7a0eaeae --- /dev/null +++ b/src/together/generated/test/test_completion_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.completion_api import CompletionApi + + +class TestCompletionApi(unittest.IsolatedAsyncioTestCase): + """CompletionApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = CompletionApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_completions(self) -> None: + """Test case for completions + + Create completion + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_choice.py b/src/together/generated/test/test_completion_choice.py new file mode 100644 index 00000000..a3ce4b5c --- /dev/null +++ b/src/together/generated/test/test_completion_choice.py @@ -0,0 +1,53 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_choice import CompletionChoice + + +class TestCompletionChoice(unittest.TestCase): + """CompletionChoice unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionChoice: + """Test CompletionChoice + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionChoice` + """ + model = CompletionChoice() + if include_optional: + return CompletionChoice( + text = '' + ) + else: + return CompletionChoice( + ) + """ + + def testCompletionChoice(self): + """Test CompletionChoice""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_choices_data_inner.py b/src/together/generated/test/test_completion_choices_data_inner.py new file mode 100644 index 00000000..d71d81c7 --- /dev/null +++ b/src/together/generated/test/test_completion_choices_data_inner.py @@ -0,0 +1,67 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_choices_data_inner import ( + CompletionChoicesDataInner, +) + + +class TestCompletionChoicesDataInner(unittest.TestCase): + """CompletionChoicesDataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionChoicesDataInner: + """Test CompletionChoicesDataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionChoicesDataInner` + """ + model = CompletionChoicesDataInner() + if include_optional: + return CompletionChoicesDataInner( + text = 'The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture.', + seed = 56, + finish_reason = 'stop', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ) + ) + else: + return CompletionChoicesDataInner( + ) + """ + + def testCompletionChoicesDataInner(self): + """Test CompletionChoicesDataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_chunk.py b/src/together/generated/test/test_completion_chunk.py new file mode 100644 index 00000000..448b44ff --- /dev/null +++ b/src/together/generated/test/test_completion_chunk.py @@ -0,0 +1,77 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_chunk import CompletionChunk + + +class TestCompletionChunk(unittest.TestCase): + """CompletionChunk unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionChunk: + """Test CompletionChunk + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionChunk` + """ + model = CompletionChunk() + if include_optional: + return CompletionChunk( + id = '', + token = together.generated.models.completion_token.CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, ), + choices = [ + together.generated.models.completion_choice.CompletionChoice( + text = '', ) + ], + usage = None, + seed = 56, + finish_reason = None + ) + else: + return CompletionChunk( + id = '', + token = together.generated.models.completion_token.CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, ), + choices = [ + together.generated.models.completion_choice.CompletionChoice( + text = '', ) + ], + usage = None, + finish_reason = None, + ) + """ + + def testCompletionChunk(self): + """Test CompletionChunk""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_chunk_usage.py b/src/together/generated/test/test_completion_chunk_usage.py new file mode 100644 index 00000000..09f1a850 --- /dev/null +++ b/src/together/generated/test/test_completion_chunk_usage.py @@ -0,0 +1,58 @@ +# 
coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_chunk_usage import CompletionChunkUsage + + +class TestCompletionChunkUsage(unittest.TestCase): + """CompletionChunkUsage unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionChunkUsage: + """Test CompletionChunkUsage + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionChunkUsage` + """ + model = CompletionChunkUsage() + if include_optional: + return CompletionChunkUsage( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56 + ) + else: + return CompletionChunkUsage( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56, + ) + """ + + def testCompletionChunkUsage(self): + """Test CompletionChunkUsage""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_event.py b/src/together/generated/test/test_completion_event.py new file mode 100644 index 00000000..26181ffc --- /dev/null +++ b/src/together/generated/test/test_completion_event.py @@ -0,0 +1,80 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_event import CompletionEvent + + +class TestCompletionEvent(unittest.TestCase): + """CompletionEvent unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionEvent: + """Test CompletionEvent + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionEvent` + """ + model = CompletionEvent() + if include_optional: + return CompletionEvent( + data = together.generated.models.completion_chunk.CompletionChunk( + id = '', + token = together.generated.models.completion_token.CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, ), + choices = [ + together.generated.models.completion_choice.CompletionChoice( + text = '', ) + ], + usage = null, + seed = 56, + finish_reason = null, ) + ) + else: + return CompletionEvent( + data = together.generated.models.completion_chunk.CompletionChunk( + id = '', + token = together.generated.models.completion_token.CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, ), + choices = [ + together.generated.models.completion_choice.CompletionChoice( + text = '', ) + ], + usage = null, + seed = 56, + finish_reason = null, ), + ) + """ + + def testCompletionEvent(self): + """Test CompletionEvent""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_request.py b/src/together/generated/test/test_completion_request.py new file mode 100644 index 00000000..3a823073 --- /dev/null +++ b/src/together/generated/test/test_completion_request.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST 
API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_request import CompletionRequest + + +class TestCompletionRequest(unittest.TestCase): + """CompletionRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionRequest: + """Test CompletionRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionRequest` + """ + model = CompletionRequest() + if include_optional: + return CompletionRequest( + prompt = '[INST] What is the capital of France? [/INST]', + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + max_tokens = 56, + stop = [ + '' + ], + temperature = 1.337, + top_p = 1.337, + top_k = 56, + repetition_penalty = 1.337, + stream = True, + logprobs = 0, + echo = True, + n = 1, + safety_model = 'safety_model_name', + min_p = 1.337, + presence_penalty = 1.337, + frequency_penalty = 1.337, + logit_bias = {1024=-10.5, 105=21.4}, + seed = 42 + ) + else: + return CompletionRequest( + prompt = '[INST] What is the capital of France? 
[/INST]', + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + ) + """ + + def testCompletionRequest(self): + """Test CompletionRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_request_model.py b/src/together/generated/test/test_completion_request_model.py new file mode 100644 index 00000000..d3cfa734 --- /dev/null +++ b/src/together/generated/test/test_completion_request_model.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_request_model import CompletionRequestModel + + +class TestCompletionRequestModel(unittest.TestCase): + """CompletionRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionRequestModel: + """Test CompletionRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionRequestModel` + """ + model = CompletionRequestModel() + if include_optional: + return CompletionRequestModel( + ) + else: + return CompletionRequestModel( + ) + """ + + def testCompletionRequestModel(self): + """Test CompletionRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_request_safety_model.py 
b/src/together/generated/test/test_completion_request_safety_model.py new file mode 100644 index 00000000..fb7228a9 --- /dev/null +++ b/src/together/generated/test/test_completion_request_safety_model.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_request_safety_model import ( + CompletionRequestSafetyModel, +) + + +class TestCompletionRequestSafetyModel(unittest.TestCase): + """CompletionRequestSafetyModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionRequestSafetyModel: + """Test CompletionRequestSafetyModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionRequestSafetyModel` + """ + model = CompletionRequestSafetyModel() + if include_optional: + return CompletionRequestSafetyModel( + ) + else: + return CompletionRequestSafetyModel( + ) + """ + + def testCompletionRequestSafetyModel(self): + """Test CompletionRequestSafetyModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_response.py b/src/together/generated/test/test_completion_response.py new file mode 100644 index 00000000..f4003cf5 --- /dev/null +++ b/src/together/generated/test/test_completion_response.py @@ -0,0 +1,114 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_response import CompletionResponse + + +class TestCompletionResponse(unittest.TestCase): + """CompletionResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionResponse: + """Test CompletionResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionResponse` + """ + model = CompletionResponse() + if include_optional: + return CompletionResponse( + id = '', + choices = [ + together.generated.models.completion_choices_data_inner.CompletionChoicesData_inner( + text = 'The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture.', + seed = 56, + finish_reason = 'stop', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ), ) + ], + prompt = [ + together.generated.models.prompt_part_inner.PromptPart_inner( + text = '[INST] What is the capital of France? 
[/INST]', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ), ) + ], + usage = together.generated.models.usage_data.UsageData( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56, ), + created = 56, + model = '', + object = 'text_completion' + ) + else: + return CompletionResponse( + id = '', + choices = [ + together.generated.models.completion_choices_data_inner.CompletionChoicesData_inner( + text = 'The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture.', + seed = 56, + finish_reason = 'stop', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ), ) + ], + usage = together.generated.models.usage_data.UsageData( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56, ), + created = 56, + model = '', + object = 'text_completion', + ) + """ + + def testCompletionResponse(self): + """Test CompletionResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_stream.py b/src/together/generated/test/test_completion_stream.py new file mode 100644 index 00000000..9edbd934 --- /dev/null +++ b/src/together/generated/test/test_completion_stream.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_stream import CompletionStream + + +class TestCompletionStream(unittest.TestCase): + """CompletionStream unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionStream: + """Test CompletionStream + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionStream` + """ + model = CompletionStream() + if include_optional: + return CompletionStream( + data = '[DONE]' + ) + else: + return CompletionStream( + data = '[DONE]', + ) + """ + + def testCompletionStream(self): + """Test CompletionStream""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_token.py b/src/together/generated/test/test_completion_token.py new file mode 100644 index 00000000..a15263c6 --- /dev/null +++ b/src/together/generated/test/test_completion_token.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_token import CompletionToken + + +class TestCompletionToken(unittest.TestCase): + """CompletionToken unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionToken: + """Test CompletionToken + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionToken` + """ + model = CompletionToken() + if include_optional: + return CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True + ) + else: + return CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, + ) + """ + + def testCompletionToken(self): + """Test CompletionToken""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_create_endpoint_request.py b/src/together/generated/test/test_create_endpoint_request.py new file mode 100644 index 00000000..b5e35dd0 --- /dev/null +++ b/src/together/generated/test/test_create_endpoint_request.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.create_endpoint_request import CreateEndpointRequest + + +class TestCreateEndpointRequest(unittest.TestCase): + """CreateEndpointRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CreateEndpointRequest: + """Test CreateEndpointRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CreateEndpointRequest` + """ + model = CreateEndpointRequest() + if include_optional: + return CreateEndpointRequest( + display_name = '', + model = '', + hardware = '', + autoscaling = together.generated.models.autoscaling.Autoscaling( + min_replicas = 56, + max_replicas = 56, ), + disable_prompt_cache = True, + disable_speculative_decoding = True, + state = 'STARTED' + ) + else: + return CreateEndpointRequest( + model = '', + hardware = '', + autoscaling = together.generated.models.autoscaling.Autoscaling( + min_replicas = 56, + max_replicas = 56, ), + ) + """ + + def testCreateEndpointRequest(self): + """Test CreateEndpointRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_dedicated_endpoint.py b/src/together/generated/test/test_dedicated_endpoint.py new file mode 100644 index 00000000..61edfee9 --- /dev/null +++ b/src/together/generated/test/test_dedicated_endpoint.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.dedicated_endpoint import DedicatedEndpoint + + +class TestDedicatedEndpoint(unittest.TestCase): + """DedicatedEndpoint unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> DedicatedEndpoint: + """Test DedicatedEndpoint + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `DedicatedEndpoint` + """ + model = DedicatedEndpoint() + if include_optional: + return DedicatedEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'devuser/meta-llama/Llama-3-8b-chat-hf-a32b82a1', + display_name = 'My Llama3 70b endpoint', + model = 'meta-llama/Llama-3-8b-chat-hf', + hardware = '1x_nvidia_a100_80gb_sxm', + type = 'dedicated', + owner = 'devuser', + state = 'STARTED', + autoscaling = together.generated.models.autoscaling.Autoscaling( + min_replicas = 56, + max_replicas = 56, ), + created_at = '2025-02-04T10:43:55.405Z' + ) + else: + return DedicatedEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'devuser/meta-llama/Llama-3-8b-chat-hf-a32b82a1', + display_name = 'My Llama3 70b endpoint', + model = 'meta-llama/Llama-3-8b-chat-hf', + hardware = '1x_nvidia_a100_80gb_sxm', + type = 'dedicated', + owner = 'devuser', + state = 'STARTED', + autoscaling = together.generated.models.autoscaling.Autoscaling( + min_replicas = 56, + max_replicas = 56, ), + created_at = '2025-02-04T10:43:55.405Z', + ) + """ + + def testDedicatedEndpoint(self): + """Test DedicatedEndpoint""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_api.py 
b/src/together/generated/test/test_embeddings_api.py new file mode 100644 index 00000000..c63a7b1e --- /dev/null +++ b/src/together/generated/test/test_embeddings_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.api.embeddings_api import EmbeddingsApi + + +class TestEmbeddingsApi(unittest.IsolatedAsyncioTestCase): + """EmbeddingsApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = EmbeddingsApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_embeddings(self) -> None: + """Test case for embeddings + + Create embedding + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_request.py b/src/together/generated/test/test_embeddings_request.py new file mode 100644 index 00000000..0652a2ff --- /dev/null +++ b/src/together/generated/test/test_embeddings_request.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.embeddings_request import EmbeddingsRequest + + +class TestEmbeddingsRequest(unittest.TestCase): + """EmbeddingsRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EmbeddingsRequest: + """Test EmbeddingsRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EmbeddingsRequest` + """ + model = EmbeddingsRequest() + if include_optional: + return EmbeddingsRequest( + model = 'togethercomputer/m2-bert-80M-8k-retrieval', + input = Our solar system orbits the Milky Way galaxy at about 515,000 mph + ) + else: + return EmbeddingsRequest( + model = 'togethercomputer/m2-bert-80M-8k-retrieval', + input = Our solar system orbits the Milky Way galaxy at about 515,000 mph, + ) + """ + + def testEmbeddingsRequest(self): + """Test EmbeddingsRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_request_input.py b/src/together/generated/test/test_embeddings_request_input.py new file mode 100644 index 00000000..bb51de9a --- /dev/null +++ b/src/together/generated/test/test_embeddings_request_input.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.embeddings_request_input import EmbeddingsRequestInput + + +class TestEmbeddingsRequestInput(unittest.TestCase): + """EmbeddingsRequestInput unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EmbeddingsRequestInput: + """Test EmbeddingsRequestInput + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EmbeddingsRequestInput` + """ + model = EmbeddingsRequestInput() + if include_optional: + return EmbeddingsRequestInput( + ) + else: + return EmbeddingsRequestInput( + ) + """ + + def testEmbeddingsRequestInput(self): + """Test EmbeddingsRequestInput""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_request_model.py b/src/together/generated/test/test_embeddings_request_model.py new file mode 100644 index 00000000..e31f5837 --- /dev/null +++ b/src/together/generated/test/test_embeddings_request_model.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.embeddings_request_model import EmbeddingsRequestModel + + +class TestEmbeddingsRequestModel(unittest.TestCase): + """EmbeddingsRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EmbeddingsRequestModel: + """Test EmbeddingsRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EmbeddingsRequestModel` + """ + model = EmbeddingsRequestModel() + if include_optional: + return EmbeddingsRequestModel( + ) + else: + return EmbeddingsRequestModel( + ) + """ + + def testEmbeddingsRequestModel(self): + """Test EmbeddingsRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_response.py b/src/together/generated/test/test_embeddings_response.py new file mode 100644 index 00000000..0a09847c --- /dev/null +++ b/src/together/generated/test/test_embeddings_response.py @@ -0,0 +1,72 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.embeddings_response import EmbeddingsResponse + + +class TestEmbeddingsResponse(unittest.TestCase): + """EmbeddingsResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EmbeddingsResponse: + """Test EmbeddingsResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EmbeddingsResponse` + """ + model = EmbeddingsResponse() + if include_optional: + return EmbeddingsResponse( + object = 'list', + model = '', + data = [ + together.generated.models.embeddings_response_data_inner.EmbeddingsResponse_data_inner( + object = 'embedding', + embedding = [ + 1.337 + ], + index = 56, ) + ] + ) + else: + return EmbeddingsResponse( + object = 'list', + model = '', + data = [ + together.generated.models.embeddings_response_data_inner.EmbeddingsResponse_data_inner( + object = 'embedding', + embedding = [ + 1.337 + ], + index = 56, ) + ], + ) + """ + + def testEmbeddingsResponse(self): + """Test EmbeddingsResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_response_data_inner.py b/src/together/generated/test/test_embeddings_response_data_inner.py new file mode 100644 index 00000000..88a95c8b --- /dev/null +++ b/src/together/generated/test/test_embeddings_response_data_inner.py @@ -0,0 +1,64 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.embeddings_response_data_inner import ( + EmbeddingsResponseDataInner, +) + + +class TestEmbeddingsResponseDataInner(unittest.TestCase): + """EmbeddingsResponseDataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EmbeddingsResponseDataInner: + """Test EmbeddingsResponseDataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EmbeddingsResponseDataInner` + """ + model = EmbeddingsResponseDataInner() + if include_optional: + return EmbeddingsResponseDataInner( + object = 'embedding', + embedding = [ + 1.337 + ], + index = 56 + ) + else: + return EmbeddingsResponseDataInner( + object = 'embedding', + embedding = [ + 1.337 + ], + index = 56, + ) + """ + + def testEmbeddingsResponseDataInner(self): + """Test EmbeddingsResponseDataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_endpoint_pricing.py b/src/together/generated/test/test_endpoint_pricing.py new file mode 100644 index 00000000..e99001c4 --- /dev/null +++ b/src/together/generated/test/test_endpoint_pricing.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.endpoint_pricing import EndpointPricing + + +class TestEndpointPricing(unittest.TestCase): + """EndpointPricing unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EndpointPricing: + """Test EndpointPricing + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EndpointPricing` + """ + model = EndpointPricing() + if include_optional: + return EndpointPricing( + cents_per_minute = 1.337 + ) + else: + return EndpointPricing( + cents_per_minute = 1.337, + ) + """ + + def testEndpointPricing(self): + """Test EndpointPricing""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_endpoints_api.py b/src/together/generated/test/test_endpoints_api.py new file mode 100644 index 00000000..9d384219 --- /dev/null +++ b/src/together/generated/test/test_endpoints_api.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.endpoints_api import EndpointsApi + + +class TestEndpointsApi(unittest.IsolatedAsyncioTestCase): + """EndpointsApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = EndpointsApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_create_endpoint(self) -> None: + """Test case for create_endpoint + + Create a dedicated endpoint, it will start automatically + """ + pass + + async def test_delete_endpoint(self) -> None: + """Test case for delete_endpoint + + Delete endpoint + """ + pass + + async def test_get_endpoint(self) -> None: + """Test case for get_endpoint + + Get endpoint by ID + """ + pass + + async def test_list_endpoints(self) -> None: + """Test case for list_endpoints + + List all endpoints, can be filtered by type + """ + pass + + async def test_update_endpoint(self) -> None: + """Test case for update_endpoint + + Update endpoint, this can also be used to start or stop a dedicated endpoint + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_error_data.py b/src/together/generated/test/test_error_data.py new file mode 100644 index 00000000..0f91ac2e --- /dev/null +++ b/src/together/generated/test/test_error_data.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.error_data import ErrorData + + +class TestErrorData(unittest.TestCase): + """ErrorData unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ErrorData: + """Test ErrorData + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ErrorData` + """ + model = ErrorData() + if include_optional: + return ErrorData( + error = together.generated.models.error_data_error.ErrorData_error( + message = '', + type = '', + param = '', + code = '', ) + ) + else: + return ErrorData( + error = together.generated.models.error_data_error.ErrorData_error( + message = '', + type = '', + param = '', + code = '', ), + ) + """ + + def testErrorData(self): + """Test ErrorData""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_error_data_error.py b/src/together/generated/test/test_error_data_error.py new file mode 100644 index 00000000..a6952f0a --- /dev/null +++ b/src/together/generated/test/test_error_data_error.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.error_data_error import ErrorDataError + + +class TestErrorDataError(unittest.TestCase): + """ErrorDataError unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ErrorDataError: + """Test ErrorDataError + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ErrorDataError` + """ + model = ErrorDataError() + if include_optional: + return ErrorDataError( + message = '', + type = '', + param = '', + code = '' + ) + else: + return ErrorDataError( + message = '', + type = '', + ) + """ + + def testErrorDataError(self): + """Test ErrorDataError""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_file_delete_response.py b/src/together/generated/test/test_file_delete_response.py new file mode 100644 index 00000000..5e0cf618 --- /dev/null +++ b/src/together/generated/test/test_file_delete_response.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.file_delete_response import FileDeleteResponse + + +class TestFileDeleteResponse(unittest.TestCase): + """FileDeleteResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FileDeleteResponse: + """Test FileDeleteResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FileDeleteResponse` + """ + model = FileDeleteResponse() + if include_optional: + return FileDeleteResponse( + id = '', + deleted = True + ) + else: + return FileDeleteResponse( + ) + """ + + def testFileDeleteResponse(self): + """Test FileDeleteResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_file_list.py b/src/together/generated/test/test_file_list.py new file mode 100644 index 00000000..e3984f30 --- /dev/null +++ b/src/together/generated/test/test_file_list.py @@ -0,0 +1,76 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.file_list import FileList + + +class TestFileList(unittest.TestCase): + """FileList unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FileList: + """Test FileList + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FileList` + """ + model = FileList() + if include_optional: + return FileList( + data = [ + together.generated.models.file_response.FileResponse( + id = '', + object = 'file', + created_at = 1715021438, + filename = 'my_file.jsonl', + bytes = 2664, + purpose = 'fine-tune', + processed = True, + file_type = 'jsonl', + line_count = 56, ) + ] + ) + else: + return FileList( + data = [ + together.generated.models.file_response.FileResponse( + id = '', + object = 'file', + created_at = 1715021438, + filename = 'my_file.jsonl', + bytes = 2664, + purpose = 'fine-tune', + processed = True, + file_type = 'jsonl', + line_count = 56, ) + ], + ) + """ + + def testFileList(self): + """Test FileList""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_file_object.py b/src/together/generated/test/test_file_object.py new file mode 100644 index 00000000..a242cc02 --- /dev/null +++ b/src/together/generated/test/test_file_object.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.file_object import FileObject + + +class TestFileObject(unittest.TestCase): + """FileObject unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FileObject: + """Test FileObject + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FileObject` + """ + model = FileObject() + if include_optional: + return FileObject( + object = '', + id = '', + filename = '', + size = 56 + ) + else: + return FileObject( + ) + """ + + def testFileObject(self): + """Test FileObject""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_file_response.py b/src/together/generated/test/test_file_response.py new file mode 100644 index 00000000..06164546 --- /dev/null +++ b/src/together/generated/test/test_file_response.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.file_response import FileResponse + + +class TestFileResponse(unittest.TestCase): + """FileResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FileResponse: + """Test FileResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FileResponse` + """ + model = FileResponse() + if include_optional: + return FileResponse( + id = '', + object = 'file', + created_at = 1715021438, + filename = 'my_file.jsonl', + bytes = 2664, + purpose = 'fine-tune', + processed = True, + file_type = 'jsonl', + line_count = 56 + ) + else: + return FileResponse( + id = '', + object = 'file', + created_at = 1715021438, + filename = 'my_file.jsonl', + bytes = 2664, + purpose = 'fine-tune', + processed = True, + file_type = 'jsonl', + line_count = 56, + ) + """ + + def testFileResponse(self): + """Test FileResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_files_api.py b/src/together/generated/test/test_files_api.py new file mode 100644 index 00000000..0e5269ff --- /dev/null +++ b/src/together/generated/test/test_files_api.py @@ -0,0 +1,59 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.files_api import FilesApi + + +class TestFilesApi(unittest.IsolatedAsyncioTestCase): + """FilesApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = FilesApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_files_get(self) -> None: + """Test case for files_get + + List all files + """ + pass + + async def test_files_id_content_get(self) -> None: + """Test case for files_id_content_get + + Get file contents + """ + pass + + async def test_files_id_delete(self) -> None: + """Test case for files_id_delete + + Delete a file + """ + pass + + async def test_files_id_get(self) -> None: + """Test case for files_id_get + + List file + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_fine_tune_event.py b/src/together/generated/test/test_fine_tune_event.py new file mode 100644 index 00000000..f61a7b5f --- /dev/null +++ b/src/together/generated/test/test_fine_tune_event.py @@ -0,0 +1,79 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.fine_tune_event import FineTuneEvent + + +class TestFineTuneEvent(unittest.TestCase): + """FineTuneEvent unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FineTuneEvent: + """Test FineTuneEvent + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FineTuneEvent` + """ + model = FineTuneEvent() + if include_optional: + return FineTuneEvent( + object = 'fine-tune-event', + created_at = '', + level = ERROR_TO_EXAMPLE_VALUE, + message = '', + type = 'job_pending', + param_count = 56, + token_count = 56, + total_steps = 56, + wandb_url = '', + step = 56, + checkpoint_path = '', + model_path = '', + training_offset = 56, + hash = '' + ) + else: + return FineTuneEvent( + object = 'fine-tune-event', + created_at = '', + message = '', + type = 'job_pending', + param_count = 56, + token_count = 56, + total_steps = 56, + wandb_url = '', + step = 56, + checkpoint_path = '', + model_path = '', + training_offset = 56, + hash = '', + ) + """ + + def testFineTuneEvent(self): + """Test FineTuneEvent""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_fine_tunes_post_request.py b/src/together/generated/test/test_fine_tunes_post_request.py new file mode 100644 index 00000000..6e8a1471 --- /dev/null +++ b/src/together/generated/test/test_fine_tunes_post_request.py @@ -0,0 +1,76 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest + + +class TestFineTunesPostRequest(unittest.TestCase): + """FineTunesPostRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FineTunesPostRequest: + """Test FineTunesPostRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FineTunesPostRequest` + """ + model = FineTunesPostRequest() + if include_optional: + return FineTunesPostRequest( + training_file = '', + validation_file = '', + model = '', + n_epochs = 56, + n_checkpoints = 56, + n_evals = 56, + batch_size = 56, + learning_rate = 1.337, + lr_scheduler = together.generated.models.lr_scheduler.LRScheduler( + lr_scheduler_type = '', + lr_scheduler_args = together.generated.models.linear_lr_scheduler_args.LinearLRSchedulerArgs( + min_lr_ratio = 1.337, ), ), + warmup_ratio = 1.337, + max_grad_norm = 1.337, + weight_decay = 1.337, + suffix = '', + wandb_api_key = '', + wandb_base_url = '', + wandb_project_name = '', + wandb_name = '', + train_on_inputs = True, + training_type = together.generated.models._fine_tunes_post_request_training_type._fine_tunes_post_request_training_type() + ) + else: + return FineTunesPostRequest( + training_file = '', + model = '', + ) + """ + + def testFineTunesPostRequest(self): + """Test FineTunesPostRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py 
b/src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py new file mode 100644 index 00000000..06d1d703 --- /dev/null +++ b/src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.fine_tunes_post_request_train_on_inputs import ( + FineTunesPostRequestTrainOnInputs, +) + + +class TestFineTunesPostRequestTrainOnInputs(unittest.TestCase): + """FineTunesPostRequestTrainOnInputs unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FineTunesPostRequestTrainOnInputs: + """Test FineTunesPostRequestTrainOnInputs + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FineTunesPostRequestTrainOnInputs` + """ + model = FineTunesPostRequestTrainOnInputs() + if include_optional: + return FineTunesPostRequestTrainOnInputs( + ) + else: + return FineTunesPostRequestTrainOnInputs( + ) + """ + + def testFineTunesPostRequestTrainOnInputs(self): + """Test FineTunesPostRequestTrainOnInputs""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_fine_tunes_post_request_training_type.py b/src/together/generated/test/test_fine_tunes_post_request_training_type.py new file mode 100644 index 00000000..b92881ff --- /dev/null +++ b/src/together/generated/test/test_fine_tunes_post_request_training_type.py @@ -0,0 +1,62 @@ +# 
coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.fine_tunes_post_request_training_type import ( + FineTunesPostRequestTrainingType, +) + + +class TestFineTunesPostRequestTrainingType(unittest.TestCase): + """FineTunesPostRequestTrainingType unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FineTunesPostRequestTrainingType: + """Test FineTunesPostRequestTrainingType + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FineTunesPostRequestTrainingType` + """ + model = FineTunesPostRequestTrainingType() + if include_optional: + return FineTunesPostRequestTrainingType( + type = 'Full', + lora_r = 56, + lora_alpha = 56, + lora_dropout = 1.337, + lora_trainable_modules = 'all-linear' + ) + else: + return FineTunesPostRequestTrainingType( + type = 'Full', + lora_r = 56, + lora_alpha = 56, + ) + """ + + def testFineTunesPostRequestTrainingType(self): + """Test FineTunesPostRequestTrainingType""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_fine_tuning_api.py b/src/together/generated/test/test_fine_tuning_api.py new file mode 100644 index 00000000..dab43fa7 --- /dev/null +++ b/src/together/generated/test/test_fine_tuning_api.py @@ -0,0 +1,73 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.api.fine_tuning_api import FineTuningApi + + +class TestFineTuningApi(unittest.IsolatedAsyncioTestCase): + """FineTuningApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = FineTuningApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_fine_tunes_get(self) -> None: + """Test case for fine_tunes_get + + List all jobs + """ + pass + + async def test_fine_tunes_id_cancel_post(self) -> None: + """Test case for fine_tunes_id_cancel_post + + Cancel job + """ + pass + + async def test_fine_tunes_id_events_get(self) -> None: + """Test case for fine_tunes_id_events_get + + List job events + """ + pass + + async def test_fine_tunes_id_get(self) -> None: + """Test case for fine_tunes_id_get + + List job + """ + pass + + async def test_fine_tunes_post(self) -> None: + """Test case for fine_tunes_post + + Create job + """ + pass + + async def test_finetune_download_get(self) -> None: + """Test case for finetune_download_get + + Download model + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_download_result.py b/src/together/generated/test/test_finetune_download_result.py new file mode 100644 index 00000000..a0136246 --- /dev/null +++ b/src/together/generated/test/test_finetune_download_result.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_download_result import FinetuneDownloadResult + + +class TestFinetuneDownloadResult(unittest.TestCase): + """FinetuneDownloadResult unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FinetuneDownloadResult: + """Test FinetuneDownloadResult + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FinetuneDownloadResult` + """ + model = FinetuneDownloadResult() + if include_optional: + return FinetuneDownloadResult( + object = ERROR_TO_EXAMPLE_VALUE, + id = '', + checkpoint_step = 56, + filename = '', + size = 56 + ) + else: + return FinetuneDownloadResult( + ) + """ + + def testFinetuneDownloadResult(self): + """Test FinetuneDownloadResult""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_event_levels.py b/src/together/generated/test/test_finetune_event_levels.py new file mode 100644 index 00000000..c82e8354 --- /dev/null +++ b/src/together/generated/test/test_finetune_event_levels.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_event_levels import FinetuneEventLevels + + +class TestFinetuneEventLevels(unittest.TestCase): + """FinetuneEventLevels unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testFinetuneEventLevels(self): + """Test FinetuneEventLevels""" + # inst = FinetuneEventLevels() + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_event_type.py b/src/together/generated/test/test_finetune_event_type.py new file mode 100644 index 00000000..6340f74d --- /dev/null +++ b/src/together/generated/test/test_finetune_event_type.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_event_type import FinetuneEventType + + +class TestFinetuneEventType(unittest.TestCase): + """FinetuneEventType unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testFinetuneEventType(self): + """Test FinetuneEventType""" + # inst = FinetuneEventType() + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_job_status.py b/src/together/generated/test/test_finetune_job_status.py new file mode 100644 index 00000000..2bbee5ee --- /dev/null +++ b/src/together/generated/test/test_finetune_job_status.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_job_status import FinetuneJobStatus + + +class TestFinetuneJobStatus(unittest.TestCase): + """FinetuneJobStatus unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testFinetuneJobStatus(self): + """Test FinetuneJobStatus""" + # inst = FinetuneJobStatus() + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_list.py b/src/together/generated/test/test_finetune_list.py new file mode 100644 index 00000000..40d16304 --- /dev/null +++ b/src/together/generated/test/test_finetune_list.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_list import FinetuneList + + +class TestFinetuneList(unittest.TestCase): + """FinetuneList unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FinetuneList: + """Test FinetuneList + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FinetuneList` + """ + model = FinetuneList() + if include_optional: + return FinetuneList( + data = ERROR_TO_EXAMPLE_VALUE + ) + else: + return FinetuneList( + data = ERROR_TO_EXAMPLE_VALUE, + ) + """ + + def testFinetuneList(self): + """Test FinetuneList""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_list_events.py 
b/src/together/generated/test/test_finetune_list_events.py new file mode 100644 index 00000000..5170de82 --- /dev/null +++ b/src/together/generated/test/test_finetune_list_events.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_list_events import FinetuneListEvents + + +class TestFinetuneListEvents(unittest.TestCase): + """FinetuneListEvents unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FinetuneListEvents: + """Test FinetuneListEvents + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FinetuneListEvents` + """ + model = FinetuneListEvents() + if include_optional: + return FinetuneListEvents( + data = ERROR_TO_EXAMPLE_VALUE + ) + else: + return FinetuneListEvents( + data = ERROR_TO_EXAMPLE_VALUE, + ) + """ + + def testFinetuneListEvents(self): + """Test FinetuneListEvents""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_response.py b/src/together/generated/test/test_finetune_response.py new file mode 100644 index 00000000..288a4a9b --- /dev/null +++ b/src/together/generated/test/test_finetune_response.py @@ -0,0 +1,89 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_response import FinetuneResponse + + +class TestFinetuneResponse(unittest.TestCase): + """FinetuneResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FinetuneResponse: + """Test FinetuneResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FinetuneResponse` + """ + model = FinetuneResponse() + if include_optional: + return FinetuneResponse( + id = '', + training_file = '', + validation_file = '', + model = '', + model_output_name = '', + model_output_path = '', + trainingfile_numlines = 56, + trainingfile_size = 56, + created_at = '', + updated_at = '', + n_epochs = 56, + n_checkpoints = 56, + n_evals = 56, + batch_size = 56, + learning_rate = 1.337, + lr_scheduler = together.generated.models.lr_scheduler.LRScheduler( + lr_scheduler_type = '', + lr_scheduler_args = together.generated.models.linear_lr_scheduler_args.LinearLRSchedulerArgs( + min_lr_ratio = 1.337, ), ), + warmup_ratio = 1.337, + max_grad_norm = 1.337, + weight_decay = 1.337, + eval_steps = 56, + train_on_inputs = None, + training_type = together.generated.models._fine_tunes_post_request_training_type._fine_tunes_post_request_training_type(), + status = 'pending', + job_id = '', + events = ERROR_TO_EXAMPLE_VALUE, + token_count = 56, + param_count = 56, + total_price = 56, + epochs_completed = 56, + queue_depth = 56, + wandb_project_name = '', + wandb_url = '' + ) + else: + return FinetuneResponse( + id = '', + status = 'pending', + ) + """ + + def testFinetuneResponse(self): + """Test FinetuneResponse""" + # inst_req_only = 
self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_response_train_on_inputs.py b/src/together/generated/test/test_finetune_response_train_on_inputs.py new file mode 100644 index 00000000..f6133122 --- /dev/null +++ b/src/together/generated/test/test_finetune_response_train_on_inputs.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_response_train_on_inputs import ( + FinetuneResponseTrainOnInputs, +) + + +class TestFinetuneResponseTrainOnInputs(unittest.TestCase): + """FinetuneResponseTrainOnInputs unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FinetuneResponseTrainOnInputs: + """Test FinetuneResponseTrainOnInputs + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FinetuneResponseTrainOnInputs` + """ + model = FinetuneResponseTrainOnInputs() + if include_optional: + return FinetuneResponseTrainOnInputs( + ) + else: + return FinetuneResponseTrainOnInputs( + ) + """ + + def testFinetuneResponseTrainOnInputs(self): + """Test FinetuneResponseTrainOnInputs""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finish_reason.py b/src/together/generated/test/test_finish_reason.py new file mode 100644 
index 00000000..02204fb6 --- /dev/null +++ b/src/together/generated/test/test_finish_reason.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finish_reason import FinishReason + + +class TestFinishReason(unittest.TestCase): + """FinishReason unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testFinishReason(self): + """Test FinishReason""" + # inst = FinishReason() + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_full_training_type.py b/src/together/generated/test/test_full_training_type.py new file mode 100644 index 00000000..303187e3 --- /dev/null +++ b/src/together/generated/test/test_full_training_type.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.full_training_type import FullTrainingType + + +class TestFullTrainingType(unittest.TestCase): + """FullTrainingType unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FullTrainingType: + """Test FullTrainingType + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FullTrainingType` + """ + model = FullTrainingType() + if include_optional: + return FullTrainingType( + type = 'Full' + ) + else: + return FullTrainingType( + type = 'Full', + ) + """ + + def testFullTrainingType(self): + """Test FullTrainingType""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_hardware_api.py b/src/together/generated/test/test_hardware_api.py new file mode 100644 index 00000000..a347ff1c --- /dev/null +++ b/src/together/generated/test/test_hardware_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.hardware_api import HardwareApi + + +class TestHardwareApi(unittest.IsolatedAsyncioTestCase): + """HardwareApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = HardwareApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_list_hardware(self) -> None: + """Test case for list_hardware + + List available hardware configurations + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_hardware_availability.py b/src/together/generated/test/test_hardware_availability.py new file mode 100644 index 00000000..cf7d4016 --- /dev/null +++ b/src/together/generated/test/test_hardware_availability.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.hardware_availability import HardwareAvailability + + +class TestHardwareAvailability(unittest.TestCase): + """HardwareAvailability unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> HardwareAvailability: + """Test HardwareAvailability + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `HardwareAvailability` + """ + model = HardwareAvailability() + if include_optional: + return HardwareAvailability( + status = 'available' + ) + else: + return HardwareAvailability( + status = 'available', + ) + """ + + def testHardwareAvailability(self): + """Test HardwareAvailability""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_hardware_spec.py b/src/together/generated/test/test_hardware_spec.py new file mode 100644 index 00000000..f9888c33 --- /dev/null +++ b/src/together/generated/test/test_hardware_spec.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.hardware_spec import HardwareSpec + + +class TestHardwareSpec(unittest.TestCase): + """HardwareSpec unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> HardwareSpec: + """Test HardwareSpec + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `HardwareSpec` + """ + model = HardwareSpec() + if include_optional: + return HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56 + ) + else: + return HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, + ) + """ + + def testHardwareSpec(self): + """Test HardwareSpec""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_hardware_with_status.py b/src/together/generated/test/test_hardware_with_status.py new file mode 100644 index 00000000..a6ca05f6 --- /dev/null +++ b/src/together/generated/test/test_hardware_with_status.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.hardware_with_status import HardwareWithStatus + + +class TestHardwareWithStatus(unittest.TestCase): + """HardwareWithStatus unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> HardwareWithStatus: + """Test HardwareWithStatus + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `HardwareWithStatus` + """ + model = HardwareWithStatus() + if include_optional: + return HardwareWithStatus( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + availability = together.generated.models.hardware_availability.HardwareAvailability( + status = 'available', ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f') + ) + else: + return HardwareWithStatus( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), + ) + """ + + def testHardwareWithStatus(self): + """Test HardwareWithStatus""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_image_response.py b/src/together/generated/test/test_image_response.py new file mode 100644 index 00000000..fd124ab7 
--- /dev/null +++ b/src/together/generated/test/test_image_response.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.image_response import ImageResponse + + +class TestImageResponse(unittest.TestCase): + """ImageResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ImageResponse: + """Test ImageResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ImageResponse` + """ + model = ImageResponse() + if include_optional: + return ImageResponse( + id = '', + model = '', + object = 'list', + data = [ + together.generated.models.image_response_data_inner.ImageResponse_data_inner( + index = 56, + b64_json = '', + url = '', ) + ] + ) + else: + return ImageResponse( + id = '', + model = '', + object = 'list', + data = [ + together.generated.models.image_response_data_inner.ImageResponse_data_inner( + index = 56, + b64_json = '', + url = '', ) + ], + ) + """ + + def testImageResponse(self): + """Test ImageResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_image_response_data_inner.py b/src/together/generated/test/test_image_response_data_inner.py new file mode 100644 index 00000000..f12a697c --- /dev/null +++ b/src/together/generated/test/test_image_response_data_inner.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.image_response_data_inner import ImageResponseDataInner + + +class TestImageResponseDataInner(unittest.TestCase): + """ImageResponseDataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ImageResponseDataInner: + """Test ImageResponseDataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ImageResponseDataInner` + """ + model = ImageResponseDataInner() + if include_optional: + return ImageResponseDataInner( + index = 56, + b64_json = '', + url = '' + ) + else: + return ImageResponseDataInner( + index = 56, + ) + """ + + def testImageResponseDataInner(self): + """Test ImageResponseDataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_images_api.py b/src/together/generated/test/test_images_api.py new file mode 100644 index 00000000..14888a0f --- /dev/null +++ b/src/together/generated/test/test_images_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.images_api import ImagesApi + + +class TestImagesApi(unittest.IsolatedAsyncioTestCase): + """ImagesApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = ImagesApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_images_generations_post(self) -> None: + """Test case for images_generations_post + + Create image + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_images_generations_post_request.py b/src/together/generated/test/test_images_generations_post_request.py new file mode 100644 index 00000000..4376a118 --- /dev/null +++ b/src/together/generated/test/test_images_generations_post_request.py @@ -0,0 +1,73 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.images_generations_post_request import ( + ImagesGenerationsPostRequest, +) + + +class TestImagesGenerationsPostRequest(unittest.TestCase): + """ImagesGenerationsPostRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ImagesGenerationsPostRequest: + """Test ImagesGenerationsPostRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ImagesGenerationsPostRequest` + """ + model = ImagesGenerationsPostRequest() + if include_optional: + return ImagesGenerationsPostRequest( + prompt = 'cat floating in space, cinematic', + model = 'black-forest-labs/FLUX.1-schnell', + steps = 56, + image_url = '', + seed = 56, + n = 56, + height = 56, + width = 56, + negative_prompt = '', + response_format = 'base64', + guidance = 1.337, + output_format = 'jpeg', + image_loras = [ + together.generated.models._images_generations_post_request_image_loras_inner._images_generations_post_request_image_loras_inner( + path = '', + scale = 1.337, ) + ] + ) + else: + return ImagesGenerationsPostRequest( + prompt = 'cat floating in space, cinematic', + model = 'black-forest-labs/FLUX.1-schnell', + ) + """ + + def testImagesGenerationsPostRequest(self): + """Test ImagesGenerationsPostRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_images_generations_post_request_image_loras_inner.py b/src/together/generated/test/test_images_generations_post_request_image_loras_inner.py new file mode 100644 index 00000000..1fa6c7b0 --- /dev/null +++ b/src/together/generated/test/test_images_generations_post_request_image_loras_inner.py 
@@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.images_generations_post_request_image_loras_inner import ( + ImagesGenerationsPostRequestImageLorasInner, +) + + +class TestImagesGenerationsPostRequestImageLorasInner(unittest.TestCase): + """ImagesGenerationsPostRequestImageLorasInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance( + self, include_optional + ) -> ImagesGenerationsPostRequestImageLorasInner: + """Test ImagesGenerationsPostRequestImageLorasInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ImagesGenerationsPostRequestImageLorasInner` + """ + model = ImagesGenerationsPostRequestImageLorasInner() + if include_optional: + return ImagesGenerationsPostRequestImageLorasInner( + path = '', + scale = 1.337 + ) + else: + return ImagesGenerationsPostRequestImageLorasInner( + path = '', + scale = 1.337, + ) + """ + + def testImagesGenerationsPostRequestImageLorasInner(self): + """Test ImagesGenerationsPostRequestImageLorasInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_images_generations_post_request_model.py b/src/together/generated/test/test_images_generations_post_request_model.py new file mode 100644 index 00000000..3cc3c613 --- /dev/null +++ b/src/together/generated/test/test_images_generations_post_request_model.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs 
+ + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.images_generations_post_request_model import ( + ImagesGenerationsPostRequestModel, +) + + +class TestImagesGenerationsPostRequestModel(unittest.TestCase): + """ImagesGenerationsPostRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ImagesGenerationsPostRequestModel: + """Test ImagesGenerationsPostRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ImagesGenerationsPostRequestModel` + """ + model = ImagesGenerationsPostRequestModel() + if include_optional: + return ImagesGenerationsPostRequestModel( + ) + else: + return ImagesGenerationsPostRequestModel( + ) + """ + + def testImagesGenerationsPostRequestModel(self): + """Test ImagesGenerationsPostRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_linear_lr_scheduler_args.py b/src/together/generated/test/test_linear_lr_scheduler_args.py new file mode 100644 index 00000000..a1181988 --- /dev/null +++ b/src/together/generated/test/test_linear_lr_scheduler_args.py @@ -0,0 +1,53 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs + + +class TestLinearLRSchedulerArgs(unittest.TestCase): + """LinearLRSchedulerArgs unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> LinearLRSchedulerArgs: + """Test LinearLRSchedulerArgs + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `LinearLRSchedulerArgs` + """ + model = LinearLRSchedulerArgs() + if include_optional: + return LinearLRSchedulerArgs( + min_lr_ratio = 1.337 + ) + else: + return LinearLRSchedulerArgs( + ) + """ + + def testLinearLRSchedulerArgs(self): + """Test LinearLRSchedulerArgs""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_endpoint.py b/src/together/generated/test/test_list_endpoint.py new file mode 100644 index 00000000..54ad619a --- /dev/null +++ b/src/together/generated/test/test_list_endpoint.py @@ -0,0 +1,68 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.list_endpoint import ListEndpoint + + +class TestListEndpoint(unittest.TestCase): + """ListEndpoint unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListEndpoint: + """Test ListEndpoint + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListEndpoint` + """ + model = ListEndpoint() + if include_optional: + return ListEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'allenai/OLMo-7B', + model = 'allenai/OLMo-7B', + type = 'serverless', + owner = 'together', + state = 'STARTED', + created_at = '2024-02-28T21:34:35.444Z' + ) + else: + return ListEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'allenai/OLMo-7B', + model = 'allenai/OLMo-7B', + type = 'serverless', + owner = 'together', + state = 'STARTED', + created_at = '2024-02-28T21:34:35.444Z', + ) + """ + + def testListEndpoint(self): + """Test ListEndpoint""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_endpoints200_response.py b/src/together/generated/test/test_list_endpoints200_response.py new file mode 100644 index 00000000..246d4f99 --- /dev/null +++ b/src/together/generated/test/test_list_endpoints200_response.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.list_endpoints200_response import ( + ListEndpoints200Response, +) + + +class TestListEndpoints200Response(unittest.TestCase): + """ListEndpoints200Response unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListEndpoints200Response: + """Test ListEndpoints200Response + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListEndpoints200Response` + """ + model = ListEndpoints200Response() + if include_optional: + return ListEndpoints200Response( + object = 'list', + data = [ + together.generated.models.list_endpoint.ListEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'allenai/OLMo-7B', + model = 'allenai/OLMo-7B', + type = 'serverless', + owner = 'together', + state = 'STARTED', + created_at = '2024-02-28T21:34:35.444Z', ) + ] + ) + else: + return ListEndpoints200Response( + object = 'list', + data = [ + together.generated.models.list_endpoint.ListEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'allenai/OLMo-7B', + model = 'allenai/OLMo-7B', + type = 'serverless', + owner = 'together', + state = 'STARTED', + created_at = '2024-02-28T21:34:35.444Z', ) + ], + ) + """ + + def testListEndpoints200Response(self): + """Test ListEndpoints200Response""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response.py b/src/together/generated/test/test_list_hardware200_response.py new file mode 100644 index 00000000..bcc8dda5 --- /dev/null +++ b/src/together/generated/test/test_list_hardware200_response.py 
@@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.list_hardware200_response import ListHardware200Response + + +class TestListHardware200Response(unittest.TestCase): + """ListHardware200Response unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListHardware200Response: + """Test ListHardware200Response + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListHardware200Response` + """ + model = ListHardware200Response() + if include_optional: + return ListHardware200Response( + object = 'list', + data = [ + null + ] + ) + else: + return ListHardware200Response( + object = 'list', + data = [ + null + ], + ) + """ + + def testListHardware200Response(self): + """Test ListHardware200Response""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of.py b/src/together/generated/test/test_list_hardware200_response_one_of.py new file mode 100644 index 00000000..9b25ae66 --- /dev/null +++ b/src/together/generated/test/test_list_hardware200_response_one_of.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.list_hardware200_response_one_of import ( + ListHardware200ResponseOneOf, +) + + +class TestListHardware200ResponseOneOf(unittest.TestCase): + """ListHardware200ResponseOneOf unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListHardware200ResponseOneOf: + """Test ListHardware200ResponseOneOf + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListHardware200ResponseOneOf` + """ + model = ListHardware200ResponseOneOf() + if include_optional: + return ListHardware200ResponseOneOf( + object = 'list', + data = [ + null + ] + ) + else: + return ListHardware200ResponseOneOf( + object = 'list', + data = [ + null + ], + ) + """ + + def testListHardware200ResponseOneOf(self): + """Test ListHardware200ResponseOneOf""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of1.py b/src/together/generated/test/test_list_hardware200_response_one_of1.py new file mode 100644 index 00000000..2925384c --- /dev/null +++ b/src/together/generated/test/test_list_hardware200_response_one_of1.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.list_hardware200_response_one_of1 import ( + ListHardware200ResponseOneOf1, +) + + +class TestListHardware200ResponseOneOf1(unittest.TestCase): + """ListHardware200ResponseOneOf1 unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListHardware200ResponseOneOf1: + """Test ListHardware200ResponseOneOf1 + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListHardware200ResponseOneOf1` + """ + model = ListHardware200ResponseOneOf1() + if include_optional: + return ListHardware200ResponseOneOf1( + object = 'list', + data = [ + null + ] + ) + else: + return ListHardware200ResponseOneOf1( + object = 'list', + data = [ + null + ], + ) + """ + + def testListHardware200ResponseOneOf1(self): + """Test ListHardware200ResponseOneOf1""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py b/src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py new file mode 100644 index 00000000..a51e7dbe --- /dev/null +++ b/src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.list_hardware200_response_one_of1_data_inner import ( + ListHardware200ResponseOneOf1DataInner, +) + + +class TestListHardware200ResponseOneOf1DataInner(unittest.TestCase): + """ListHardware200ResponseOneOf1DataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListHardware200ResponseOneOf1DataInner: + """Test ListHardware200ResponseOneOf1DataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListHardware200ResponseOneOf1DataInner` + """ + model = ListHardware200ResponseOneOf1DataInner() + if include_optional: + return ListHardware200ResponseOneOf1DataInner( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + availability = together.generated.models.hardware_availability.HardwareAvailability( + status = 'available', ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f') + ) + else: + return ListHardware200ResponseOneOf1DataInner( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + availability = together.generated.models.hardware_availability.HardwareAvailability( + status = 'available', ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), + ) + """ + + def testListHardware200ResponseOneOf1DataInner(self): + """Test 
ListHardware200ResponseOneOf1DataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py b/src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py new file mode 100644 index 00000000..e6193a14 --- /dev/null +++ b/src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py @@ -0,0 +1,75 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.list_hardware200_response_one_of_data_inner import ( + ListHardware200ResponseOneOfDataInner, +) + + +class TestListHardware200ResponseOneOfDataInner(unittest.TestCase): + """ListHardware200ResponseOneOfDataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListHardware200ResponseOneOfDataInner: + """Test ListHardware200ResponseOneOfDataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListHardware200ResponseOneOfDataInner` + """ + model = ListHardware200ResponseOneOfDataInner() + if include_optional: + return ListHardware200ResponseOneOfDataInner( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + availability = None, + 
updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f') + ) + else: + return ListHardware200ResponseOneOfDataInner( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), + ) + """ + + def testListHardware200ResponseOneOfDataInner(self): + """Test ListHardware200ResponseOneOfDataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_lo_ra_training_type.py b/src/together/generated/test/test_lo_ra_training_type.py new file mode 100644 index 00000000..dcd0309c --- /dev/null +++ b/src/together/generated/test/test_lo_ra_training_type.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.lo_ra_training_type import LoRATrainingType + + +class TestLoRATrainingType(unittest.TestCase): + """LoRATrainingType unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> LoRATrainingType: + """Test LoRATrainingType + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `LoRATrainingType` + """ + model = LoRATrainingType() + if include_optional: + return LoRATrainingType( + type = 'Lora', + lora_r = 56, + lora_alpha = 56, + lora_dropout = 1.337, + lora_trainable_modules = 'all-linear' + ) + else: + return LoRATrainingType( + type = 'Lora', + lora_r = 56, + lora_alpha = 56, + ) + """ + + def testLoRATrainingType(self): + """Test LoRATrainingType""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_logprobs_part.py b/src/together/generated/test/test_logprobs_part.py new file mode 100644 index 00000000..b37d38e9 --- /dev/null +++ b/src/together/generated/test/test_logprobs_part.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.logprobs_part import LogprobsPart + + +class TestLogprobsPart(unittest.TestCase): + """LogprobsPart unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> LogprobsPart: + """Test LogprobsPart + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `LogprobsPart` + """ + model = LogprobsPart() + if include_optional: + return LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ] + ) + else: + return LogprobsPart( + ) + """ + + def testLogprobsPart(self): + """Test LogprobsPart""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_lr_scheduler.py b/src/together/generated/test/test_lr_scheduler.py new file mode 100644 index 00000000..281f4102 --- /dev/null +++ b/src/together/generated/test/test_lr_scheduler.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.lr_scheduler import LRScheduler + + +class TestLRScheduler(unittest.TestCase): + """LRScheduler unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> LRScheduler: + """Test LRScheduler + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `LRScheduler` + """ + model = LRScheduler() + if include_optional: + return LRScheduler( + lr_scheduler_type = '', + lr_scheduler_args = together.generated.models.linear_lr_scheduler_args.LinearLRSchedulerArgs( + min_lr_ratio = 1.337, ) + ) + else: + return LRScheduler( + lr_scheduler_type = '', + ) + """ + + def testLRScheduler(self): + """Test LRScheduler""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_model_info.py b/src/together/generated/test/test_model_info.py new file mode 100644 index 00000000..24c4f5ca --- /dev/null +++ b/src/together/generated/test/test_model_info.py @@ -0,0 +1,71 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.model_info import ModelInfo + + +class TestModelInfo(unittest.TestCase): + """ModelInfo unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ModelInfo: + """Test ModelInfo + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ModelInfo` + """ + model = ModelInfo() + if include_optional: + return ModelInfo( + id = 'Austism/chronos-hermes-13b', + object = 'model', + created = 1692896905, + type = 'chat', + display_name = 'Chronos Hermes (13B)', + organization = 'Austism', + link = '', + license = 'other', + context_length = 2048, + pricing = together.generated.models.pricing.Pricing( + hourly = 0, + input = 0.3, + output = 0.3, + base = 0, + finetune = 0, ) + ) + else: + return ModelInfo( + id = 'Austism/chronos-hermes-13b', + object = 'model', + created = 1692896905, + type = 'chat', + ) + """ + + def testModelInfo(self): + """Test ModelInfo""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_models_api.py b/src/together/generated/test/test_models_api.py new file mode 100644 index 00000000..0ba1e2b4 --- /dev/null +++ b/src/together/generated/test/test_models_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.models_api import ModelsApi + + +class TestModelsApi(unittest.IsolatedAsyncioTestCase): + """ModelsApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = ModelsApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_models(self) -> None: + """Test case for models + + List all models + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_pricing.py b/src/together/generated/test/test_pricing.py new file mode 100644 index 00000000..8cf572bd --- /dev/null +++ b/src/together/generated/test/test_pricing.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.pricing import Pricing + + +class TestPricing(unittest.TestCase): + """Pricing unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> Pricing: + """Test Pricing + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `Pricing` + """ + model = Pricing() + if include_optional: + return Pricing( + hourly = 0, + input = 0.3, + output = 0.3, + base = 0, + finetune = 0 + ) + else: + return Pricing( + hourly = 0, + input = 0.3, + output = 0.3, + base = 0, + finetune = 0, + ) + """ + + def testPricing(self): + """Test Pricing""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/src/together/generated/test/test_prompt_part_inner.py b/src/together/generated/test/test_prompt_part_inner.py new file mode 100644 index 00000000..5d588326 --- /dev/null +++ b/src/together/generated/test/test_prompt_part_inner.py @@ -0,0 +1,63 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.prompt_part_inner import PromptPartInner + + +class TestPromptPartInner(unittest.TestCase): + """PromptPartInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> PromptPartInner: + """Test PromptPartInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `PromptPartInner` + """ + model = PromptPartInner() + if include_optional: + return PromptPartInner( + text = '[INST] What is the capital of France? [/INST]', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ) + ) + else: + return PromptPartInner( + ) + """ + + def testPromptPartInner(self): + """Test PromptPartInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_api.py b/src/together/generated/test/test_rerank_api.py new file mode 100644 index 00000000..2acd64cd --- /dev/null +++ b/src/together/generated/test/test_rerank_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.api.rerank_api import RerankApi + + +class TestRerankApi(unittest.IsolatedAsyncioTestCase): + """RerankApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = RerankApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_rerank(self) -> None: + """Test case for rerank + + Create a rerank request + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_request.py b/src/together/generated/test/test_rerank_request.py new file mode 100644 index 00000000..c8489b9f --- /dev/null +++ b/src/together/generated/test/test_rerank_request.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_request import RerankRequest + + +class TestRerankRequest(unittest.TestCase): + """RerankRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankRequest: + """Test RerankRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankRequest` + """ + model = RerankRequest() + if include_optional: + return RerankRequest( + model = 'Salesforce/Llama-Rank-V1', + query = 'What animals can I find near Peru?', + documents = [{title=Llama, text=The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era.}, {title=Panda, text=The giant panda (Ailuropoda melanoleuca), also known as the panda bear or simply panda, is a bear species endemic to China.}, {title=Guanaco, text=The guanaco is a camelid native to South America, closely related to the llama. 
Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations.}, {title=Wild Bactrian camel, text=The wild Bactrian camel (Camelus ferus) is an endangered species of camel endemic to Northwest China and southwestern Mongolia.}], + top_n = 2, + return_documents = True, + rank_fields = [title, text] + ) + else: + return RerankRequest( + model = 'Salesforce/Llama-Rank-V1', + query = 'What animals can I find near Peru?', + documents = [{title=Llama, text=The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era.}, {title=Panda, text=The giant panda (Ailuropoda melanoleuca), also known as the panda bear or simply panda, is a bear species endemic to China.}, {title=Guanaco, text=The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations.}, {title=Wild Bactrian camel, text=The wild Bactrian camel (Camelus ferus) is an endangered species of camel endemic to Northwest China and southwestern Mongolia.}], + ) + """ + + def testRerankRequest(self): + """Test RerankRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_request_documents.py b/src/together/generated/test/test_rerank_request_documents.py new file mode 100644 index 00000000..53fe08af --- /dev/null +++ b/src/together/generated/test/test_rerank_request_documents.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_request_documents import RerankRequestDocuments + + +class TestRerankRequestDocuments(unittest.TestCase): + """RerankRequestDocuments unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankRequestDocuments: + """Test RerankRequestDocuments + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankRequestDocuments` + """ + model = RerankRequestDocuments() + if include_optional: + return RerankRequestDocuments( + ) + else: + return RerankRequestDocuments( + ) + """ + + def testRerankRequestDocuments(self): + """Test RerankRequestDocuments""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_request_model.py b/src/together/generated/test/test_rerank_request_model.py new file mode 100644 index 00000000..285741b2 --- /dev/null +++ b/src/together/generated/test/test_rerank_request_model.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_request_model import RerankRequestModel + + +class TestRerankRequestModel(unittest.TestCase): + """RerankRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankRequestModel: + """Test RerankRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankRequestModel` + """ + model = RerankRequestModel() + if include_optional: + return RerankRequestModel( + ) + else: + return RerankRequestModel( + ) + """ + + def testRerankRequestModel(self): + """Test RerankRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_response.py b/src/together/generated/test/test_rerank_response.py new file mode 100644 index 00000000..187a5ac1 --- /dev/null +++ b/src/together/generated/test/test_rerank_response.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_response import RerankResponse + + +class TestRerankResponse(unittest.TestCase): + """RerankResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankResponse: + """Test RerankResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankResponse` + """ + model = RerankResponse() + if include_optional: + return RerankResponse( + object = 'rerank', + id = '9dfa1a09-5ebc-4a40-970f-586cb8f4ae47', + model = 'salesforce/turboranker-0.8-3778-6328', + results = [{index=0, relevance_score=0.29980177813003117, document={text={"title":"Llama","text":"The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."}}}, {index=2, relevance_score=0.2752447527354349, document={text={"title":"Guanaco","text":"The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations."}}}], + usage = {prompt_tokens=1837, completion_tokens=0, total_tokens=1837} + ) + else: + return RerankResponse( + object = 'rerank', + model = 'salesforce/turboranker-0.8-3778-6328', + results = [{index=0, relevance_score=0.29980177813003117, document={text={"title":"Llama","text":"The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."}}}, {index=2, relevance_score=0.2752447527354349, document={text={"title":"Guanaco","text":"The guanaco is a camelid native to South America, closely related to the llama. 
Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations."}}}], + ) + """ + + def testRerankResponse(self): + """Test RerankResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_response_results_inner.py b/src/together/generated/test/test_rerank_response_results_inner.py new file mode 100644 index 00000000..1ff263c5 --- /dev/null +++ b/src/together/generated/test/test_rerank_response_results_inner.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_response_results_inner import ( + RerankResponseResultsInner, +) + + +class TestRerankResponseResultsInner(unittest.TestCase): + """RerankResponseResultsInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankResponseResultsInner: + """Test RerankResponseResultsInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankResponseResultsInner` + """ + model = RerankResponseResultsInner() + if include_optional: + return RerankResponseResultsInner( + index = 56, + relevance_score = 1.337, + document = together.generated.models.rerank_response_results_inner_document.RerankResponse_results_inner_document( + text = '', ) + ) + else: + return RerankResponseResultsInner( + index = 56, + relevance_score = 1.337, + document = 
together.generated.models.rerank_response_results_inner_document.RerankResponse_results_inner_document( + text = '', ), + ) + """ + + def testRerankResponseResultsInner(self): + """Test RerankResponseResultsInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_response_results_inner_document.py b/src/together/generated/test/test_rerank_response_results_inner_document.py new file mode 100644 index 00000000..02fb87ce --- /dev/null +++ b/src/together/generated/test/test_rerank_response_results_inner_document.py @@ -0,0 +1,55 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_response_results_inner_document import ( + RerankResponseResultsInnerDocument, +) + + +class TestRerankResponseResultsInnerDocument(unittest.TestCase): + """RerankResponseResultsInnerDocument unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankResponseResultsInnerDocument: + """Test RerankResponseResultsInnerDocument + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankResponseResultsInnerDocument` + """ + model = RerankResponseResultsInnerDocument() + if include_optional: + return RerankResponseResultsInnerDocument( + text = '' + ) + else: + return RerankResponseResultsInnerDocument( + ) + """ + + def testRerankResponseResultsInnerDocument(self): + """Test RerankResponseResultsInnerDocument""" 
+ # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_stream_sentinel.py b/src/together/generated/test/test_stream_sentinel.py new file mode 100644 index 00000000..58961b3f --- /dev/null +++ b/src/together/generated/test/test_stream_sentinel.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.stream_sentinel import StreamSentinel + + +class TestStreamSentinel(unittest.TestCase): + """StreamSentinel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> StreamSentinel: + """Test StreamSentinel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `StreamSentinel` + """ + model = StreamSentinel() + if include_optional: + return StreamSentinel( + data = '[DONE]' + ) + else: + return StreamSentinel( + data = '[DONE]', + ) + """ + + def testStreamSentinel(self): + """Test StreamSentinel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_tool_choice.py b/src/together/generated/test/test_tool_choice.py new file mode 100644 index 00000000..b34a312a --- /dev/null +++ b/src/together/generated/test/test_tool_choice.py @@ -0,0 +1,64 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.tool_choice import ToolChoice + + +class TestToolChoice(unittest.TestCase): + """ToolChoice unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ToolChoice: + """Test ToolChoice + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ToolChoice` + """ + model = ToolChoice() + if include_optional: + return ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ) + ) + else: + return ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), + ) + """ + + def testToolChoice(self): + """Test ToolChoice""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_tool_choice_function.py b/src/together/generated/test/test_tool_choice_function.py new file mode 100644 index 00000000..d7a2a8fa --- /dev/null +++ b/src/together/generated/test/test_tool_choice_function.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.tool_choice_function import ToolChoiceFunction + + +class TestToolChoiceFunction(unittest.TestCase): + """ToolChoiceFunction unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ToolChoiceFunction: + """Test ToolChoiceFunction + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ToolChoiceFunction` + """ + model = ToolChoiceFunction() + if include_optional: + return ToolChoiceFunction( + name = 'function_name', + arguments = '' + ) + else: + return ToolChoiceFunction( + name = 'function_name', + arguments = '', + ) + """ + + def testToolChoiceFunction(self): + """Test ToolChoiceFunction""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_tools_part.py b/src/together/generated/test/test_tools_part.py new file mode 100644 index 00000000..6f3aad82 --- /dev/null +++ b/src/together/generated/test/test_tools_part.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.tools_part import ToolsPart + + +class TestToolsPart(unittest.TestCase): + """ToolsPart unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ToolsPart: + """Test ToolsPart + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ToolsPart` + """ + model = ToolsPart() + if include_optional: + return ToolsPart( + type = 'tool_type', + function = together.generated.models.tools_part_function.ToolsPart_function( + description = 'A description of the function.', + name = 'function_name', + parameters = { }, ) + ) + else: + return ToolsPart( + ) + """ + + def testToolsPart(self): + """Test ToolsPart""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_tools_part_function.py b/src/together/generated/test/test_tools_part_function.py new file mode 100644 index 00000000..35de3a5b --- /dev/null +++ b/src/together/generated/test/test_tools_part_function.py @@ -0,0 +1,55 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.tools_part_function import ToolsPartFunction + + +class TestToolsPartFunction(unittest.TestCase): + """ToolsPartFunction unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ToolsPartFunction: + """Test ToolsPartFunction + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ToolsPartFunction` + """ + model = ToolsPartFunction() + if include_optional: + return ToolsPartFunction( + description = 'A description of the function.', + name = 'function_name', + parameters = { } + ) + else: + return ToolsPartFunction( + ) + """ + + def testToolsPartFunction(self): + """Test ToolsPartFunction""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_update_endpoint_request.py b/src/together/generated/test/test_update_endpoint_request.py new file mode 100644 index 00000000..3ce2db6f --- /dev/null +++ b/src/together/generated/test/test_update_endpoint_request.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.update_endpoint_request import UpdateEndpointRequest + + +class TestUpdateEndpointRequest(unittest.TestCase): + """UpdateEndpointRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> UpdateEndpointRequest: + """Test UpdateEndpointRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `UpdateEndpointRequest` + """ + model = UpdateEndpointRequest() + if include_optional: + return UpdateEndpointRequest( + display_name = 'My Llama3 70b endpoint', + state = 'STARTED', + autoscaling = together.generated.models.autoscaling.Autoscaling( + min_replicas = 56, + max_replicas = 56, ) + ) + else: + return UpdateEndpointRequest( + ) + """ + + def testUpdateEndpointRequest(self): + """Test UpdateEndpointRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_usage_data.py b/src/together/generated/test/test_usage_data.py new file mode 100644 index 00000000..e8de8bd8 --- /dev/null +++ b/src/together/generated/test/test_usage_data.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.usage_data import UsageData + + +class TestUsageData(unittest.TestCase): + """UsageData unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> UsageData: + """Test UsageData + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `UsageData` + """ + model = UsageData() + if include_optional: + return UsageData( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56 + ) + else: + return UsageData( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56, + ) + """ + + def testUsageData(self): + """Test UsageData""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() From 33b6c816d3fd46c194574d1cb6b5c0c63af57d9e Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 17:21:09 +0000 Subject: [PATCH 23/29] fix mypy error --- src/together/abstract/api_requestor.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/together/abstract/api_requestor.py b/src/together/abstract/api_requestor.py index e4004f3e..4bc49288 100644 --- a/src/together/abstract/api_requestor.py +++ b/src/together/abstract/api_requestor.py @@ -587,16 +587,14 @@ async def arequest_raw( ) headers["Content-Type"] = content_type - request_kwargs = { - "headers": headers, - "data": data, - "timeout": timeout, - "allow_redirects": options.allow_redirects, - } - try: result = await session.request( - method=options.method, url=abs_url, **request_kwargs + method=options.method, + url=abs_url, + headers=headers, + data=data, + timeout=timeout, + allow_redirects=options.allow_redirects, ) utils.log_debug( "Together API response", From 
a10e18ae10f86b5fe4782f177c0e15f6f00a500b Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 18:21:08 +0000 Subject: [PATCH 24/29] add hardware call and --wait option --- scripts/openapi.yaml | 79 ++---- src/together/cli/api/endpoints.py | 181 +++++++++++--- src/together/generated/__init__.py | 12 - src/together/generated/api/hardware_api.py | 24 +- src/together/generated/docs/HardwareApi.md | 6 +- .../generated/docs/HardwareWithStatus.md | 4 +- .../generated/docs/ListHardware200Response.md | 2 +- .../docs/ListHardware200ResponseOneOf.md | 29 --- .../docs/ListHardware200ResponseOneOf1.md | 29 --- .../ListHardware200ResponseOneOf1DataInner.md | 32 --- .../ListHardware200ResponseOneOfDataInner.md | 32 --- src/together/generated/models/__init__.py | 12 - .../generated/models/hardware_with_status.py | 8 +- .../models/list_hardware200_response.py | 225 ++++++------------ .../list_hardware200_response_one_of.py | 113 --------- .../list_hardware200_response_one_of1.py | 113 --------- ...hardware200_response_one_of1_data_inner.py | 140 ----------- ..._hardware200_response_one_of_data_inner.py | 137 ----------- .../test/test_hardware_with_status.py | 4 +- .../test/test_list_hardware200_response.py | 28 ++- .../test_list_hardware200_response_one_of.py | 62 ----- .../test_list_hardware200_response_one_of1.py | 62 ----- ...hardware200_response_one_of1_data_inner.py | 78 ------ ..._hardware200_response_one_of_data_inner.py | 75 ------ src/together/resources/endpoints.py | 44 +++- 25 files changed, 340 insertions(+), 1191 deletions(-) delete mode 100644 src/together/generated/docs/ListHardware200ResponseOneOf.md delete mode 100644 src/together/generated/docs/ListHardware200ResponseOneOf1.md delete mode 100644 src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md delete mode 100644 src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md delete mode 100644 src/together/generated/models/list_hardware200_response_one_of.py delete mode 
100644 src/together/generated/models/list_hardware200_response_one_of1.py delete mode 100644 src/together/generated/models/list_hardware200_response_one_of1_data_inner.py delete mode 100644 src/together/generated/models/list_hardware200_response_one_of_data_inner.py delete mode 100644 src/together/generated/test/test_list_hardware200_response_one_of.py delete mode 100644 src/together/generated/test/test_list_hardware200_response_one_of1.py delete mode 100644 src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py delete mode 100644 src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py diff --git a/scripts/openapi.yaml b/scripts/openapi.yaml index 040c915d..c34a3fbc 100644 --- a/scripts/openapi.yaml +++ b/scripts/openapi.yaml @@ -934,7 +934,10 @@ paths: get: tags: ["Hardware"] summary: List available hardware configurations - description: Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + description: > + Returns a list of available hardware configurations for deploying models. + When a model parameter is provided, it returns only hardware configurations compatible + with that model, including their current availability status. operationId: listHardware parameters: - name: model @@ -942,7 +945,9 @@ paths: required: false schema: type: string - description: Filter hardware configurations by model compatibility + description: > + Filter hardware configurations by model compatibility. When provided, + the response includes availability status for each compatible configuration. 
example: meta-llama/Llama-3-70b-chat-hf responses: "200": @@ -950,59 +955,19 @@ paths: content: application/json: schema: - oneOf: - - type: object - description: Response when no model filter is provided - required: - - object - - data - properties: - object: - type: string - enum: - - list - data: - type: array - items: - allOf: - - $ref: "#/components/schemas/HardwareWithStatus" - - type: object - properties: - availability: - not: {} - - type: object - description: Response when model filter is provided - required: - - object - - data - properties: - object: - type: string - enum: - - list - data: - type: array - items: - allOf: - - $ref: "#/components/schemas/HardwareWithStatus" - - type: object - required: - - availability - example: - object: "list" + type: object + required: + - object + - data + properties: + object: + type: string + enum: + - list data: - - object: "hardware" - name: "2x_nvidia_a100_80gb_sxm" - pricing: - input: 0 - output: 0 - cents_per_minute: 5.42 - specs: - gpu_type: "a100-80gb" - gpu_link: "sxm" - gpu_memory: 80 - gpu_count: 2 - updated_at: "2024-01-01T00:00:00Z" + type: array + items: + $ref: "#/components/schemas/HardwareWithStatus" "403": description: "Unauthorized" content: @@ -2646,10 +2611,10 @@ components: HardwareWithStatus: type: object - description: Hardware configuration details including current availability status + description: Hardware configuration details with optional availability status required: - object - - name + - id - pricing - specs - updated_at @@ -2658,7 +2623,7 @@ components: type: string enum: - hardware - name: + id: type: string description: Unique identifier for the hardware configuration examples: diff --git a/src/together/cli/api/endpoints.py b/src/together/cli/api/endpoints.py index 3cf74944..19fc99d1 100644 --- a/src/together/cli/api/endpoints.py +++ b/src/together/cli/api/endpoints.py @@ -1,5 +1,6 @@ from __future__ import annotations +import json import sys from functools import wraps from 
typing import Any, Callable, Dict, List, Literal, TypeVar, Union @@ -8,7 +9,12 @@ from together import Together from together.error import AuthenticationError, InvalidRequestError -from together.generated.exceptions import ForbiddenException, ServiceException +from together.generated.exceptions import ( + BadRequestException, + ForbiddenException, + NotFoundException, + ServiceException, +) from together.types import DedicatedEndpoint, ListEndpoint @@ -67,6 +73,28 @@ def print_endpoint( F = TypeVar("F", bound=Callable[..., Any]) +def print_api_error( + e: Union[ + ForbiddenException, NotFoundException, BadRequestException, ServiceException + ], +) -> None: + error_details = "" + if e.data is not None: + error_details = e.data.to_dict()["error"]["message"] + elif e.body: + error_details = json.loads(e.body)["error"]["message"] + else: + error_details = str(e) + + if ( + "credentials" in error_details.lower() + or "authentication" in error_details.lower() + ): + click.echo("Error: Invalid API key or authentication failed", err=True) + else: + click.echo(f"Error: {error_details}", err=True) + + def handle_api_errors(f: F) -> F: """Decorator to handle common API errors in CLI commands.""" @@ -74,20 +102,14 @@ def handle_api_errors(f: F) -> F: def wrapper(*args: Any, **kwargs: Any) -> Any: try: return f(*args, **kwargs) - except (ForbiddenException, ServiceException) as e: - error_details = "" - if e.data is not None: - error_details = e.data.to_dict()["error"]["message"] - else: - error_details = str(e) - - if ( - "credentials" in error_details.lower() - or "authentication" in error_details.lower() - ): - click.echo("Error: Invalid API key or authentication failed", err=True) - else: - click.echo(f"Error: {error_details}", err=True) + except ( + ForbiddenException, + NotFoundException, + BadRequestException, + ServiceException, + ) as e: + print_api_error(e) + sys.exit(1) except AuthenticationError as e: click.echo(f"Error details: {str(e)}", err=True) @@ -160,6 
+182,12 @@ def endpoints(ctx: click.Context) -> None: is_flag=True, help="Create the endpoint in STOPPED state instead of auto-starting it", ) +@click.option( + "--wait", + is_flag=True, + default=True, + help="Wait for the endpoint to be ready after creation", +) @click.pass_obj @handle_api_errors def create( @@ -173,6 +201,7 @@ def create( no_prompt_cache: bool, no_speculative_decoding: bool, no_auto_start: bool, + wait: bool, ) -> None: """Create a new dedicated inference endpoint.""" # Map GPU types to their full hardware ID names @@ -186,16 +215,26 @@ def create( hardware_id = f"{gpu_count}x_{gpu_map[gpu]}" - response = client.endpoints.create( - model=model, - hardware=hardware_id, - min_replicas=min_replicas, - max_replicas=max_replicas, - display_name=display_name, - disable_prompt_cache=no_prompt_cache, - disable_speculative_decoding=no_speculative_decoding, - state="STOPPED" if no_auto_start else "STARTED", - ) + try: + response = client.endpoints.create( + model=model, + hardware=hardware_id, + min_replicas=min_replicas, + max_replicas=max_replicas, + display_name=display_name, + disable_prompt_cache=no_prompt_cache, + disable_speculative_decoding=no_speculative_decoding, + state="STOPPED" if no_auto_start else "STARTED", + ) + except NotFoundException as e: + if "check the hardware api" in str(e).lower(): + print_api_error(e) + fetch_and_print_hardware_options( + client=client, model=model, print_json=False, available=True + ) + sys.exit(1) + + raise e # Print detailed information to stderr click.echo("Created dedicated endpoint with:", err=True) @@ -212,7 +251,16 @@ def create( if no_auto_start: click.echo(" Auto-start: disabled", err=True) - click.echo("Endpoint created successfully, id: ", err=True) + click.echo("Endpoint created successfully", err=True) + + if wait: + import time + + click.echo("Waiting for endpoint to be ready...", err=True) + while client.endpoints.get(response.id).state != "STARTED": + time.sleep(1) + click.echo("Endpoint ready", 
err=True) + # Print only the endpoint ID to stdout click.echo(response.id) @@ -228,25 +276,98 @@ def get(client: Together, endpoint_id: str, json: bool) -> None: print_endpoint(endpoint, json=json) +@endpoints.command() +@click.option("--model", help="Filter hardware options by model") +@click.option("--json", is_flag=True, help="Print output in JSON format") +@click.option( + "--available", + is_flag=True, + help="Print only available hardware options (can only be used if model is passed in)", +) +@click.pass_obj +@handle_api_errors +def hardware(client: Together, model: str | None, json: bool, available: bool) -> None: + """List all available hardware options, optionally filtered by model.""" + fetch_and_print_hardware_options(client, model, json, available) + + +def fetch_and_print_hardware_options( + client: Together, model: str | None, print_json: bool, available: bool +) -> None: + """Print hardware options for a model.""" + + message = "Available hardware options:" if available else "All hardware options:" + click.echo(message, err=True) + hardware_options = client.endpoints.list_hardware(model) + if available: + hardware_options = [ + hardware + for hardware in hardware_options + if hardware.availability is not None + and hardware.availability.status == "available" + ] + + if print_json: + json_output = [ + { + "id": hardware.id, + "pricing": hardware.pricing.to_dict(), + "specs": hardware.specs.to_dict(), + "availability": ( + hardware.availability.to_dict() if hardware.availability else None + ), + } + for hardware in hardware_options + ] + click.echo(json.dumps(json_output, indent=2)) + else: + for hardware in hardware_options: + click.echo(f" {hardware.id}", err=True) + + @endpoints.command() @click.argument("endpoint-id", required=True) +@click.option( + "--wait", is_flag=True, default=True, help="Wait for the endpoint to stop" +) @click.pass_obj @handle_api_errors -def stop(client: Together, endpoint_id: str) -> None: +def stop(client: Together, 
endpoint_id: str, wait: bool) -> None: """Stop a dedicated inference endpoint.""" client.endpoints.update(endpoint_id, state="STOPPED") - click.echo("Successfully stopped endpoint", err=True) + click.echo("Successfully marked endpoint as stopping", err=True) + + if wait: + import time + + click.echo("Waiting for endpoint to stop...", err=True) + while client.endpoints.get(endpoint_id).state != "STOPPED": + time.sleep(1) + click.echo("Endpoint stopped", err=True) + click.echo(endpoint_id) @endpoints.command() @click.argument("endpoint-id", required=True) +@click.option( + "--wait", is_flag=True, default=True, help="Wait for the endpoint to start" +) @click.pass_obj @handle_api_errors -def start(client: Together, endpoint_id: str) -> None: +def start(client: Together, endpoint_id: str, wait: bool) -> None: """Start a dedicated inference endpoint.""" client.endpoints.update(endpoint_id, state="STARTED") - click.echo("Successfully started endpoint", err=True) + click.echo("Successfully marked endpoint as starting", err=True) + + if wait: + import time + + click.echo("Waiting for endpoint to start...", err=True) + while client.endpoints.get(endpoint_id).state != "STARTED": + time.sleep(1) + click.echo("Endpoint started", err=True) + click.echo(endpoint_id) diff --git a/src/together/generated/__init__.py b/src/together/generated/__init__.py index cf250519..2a7c8446 100644 --- a/src/together/generated/__init__.py +++ b/src/together/generated/__init__.py @@ -188,18 +188,6 @@ ListEndpoints200Response, ) from together.generated.models.list_hardware200_response import ListHardware200Response -from together.generated.models.list_hardware200_response_one_of import ( - ListHardware200ResponseOneOf, -) -from together.generated.models.list_hardware200_response_one_of1 import ( - ListHardware200ResponseOneOf1, -) -from together.generated.models.list_hardware200_response_one_of1_data_inner import ( - ListHardware200ResponseOneOf1DataInner, -) -from 
together.generated.models.list_hardware200_response_one_of_data_inner import ( - ListHardware200ResponseOneOfDataInner, -) from together.generated.models.lo_ra_training_type import LoRATrainingType from together.generated.models.logprobs_part import LogprobsPart from together.generated.models.model_info import ModelInfo diff --git a/src/together/generated/api/hardware_api.py b/src/together/generated/api/hardware_api.py index 0f8e78fd..84b178de 100644 --- a/src/together/generated/api/hardware_api.py +++ b/src/together/generated/api/hardware_api.py @@ -43,7 +43,9 @@ async def list_hardware( self, model: Annotated[ Optional[StrictStr], - Field(description="Filter hardware configurations by model compatibility"), + Field( + description="Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. " + ), ] = None, _request_timeout: Union[ None, @@ -59,9 +61,9 @@ async def list_hardware( ) -> ListHardware200Response: """List available hardware configurations - Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. - :param model: Filter hardware configurations by model compatibility + :param model: Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. :type model: str :param _request_timeout: timeout setting for this request. 
If one number provided, it will be total request @@ -112,7 +114,9 @@ async def list_hardware_with_http_info( self, model: Annotated[ Optional[StrictStr], - Field(description="Filter hardware configurations by model compatibility"), + Field( + description="Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. " + ), ] = None, _request_timeout: Union[ None, @@ -128,9 +132,9 @@ async def list_hardware_with_http_info( ) -> ApiResponse[ListHardware200Response]: """List available hardware configurations - Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. - :param model: Filter hardware configurations by model compatibility + :param model: Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. :type model: str :param _request_timeout: timeout setting for this request. If one number provided, it will be total request @@ -181,7 +185,9 @@ async def list_hardware_without_preload_content( self, model: Annotated[ Optional[StrictStr], - Field(description="Filter hardware configurations by model compatibility"), + Field( + description="Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. 
" + ), ] = None, _request_timeout: Union[ None, @@ -197,9 +203,9 @@ async def list_hardware_without_preload_content( ) -> RESTResponseType: """List available hardware configurations - Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. - :param model: Filter hardware configurations by model compatibility + :param model: Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. :type model: str :param _request_timeout: timeout setting for this request. If one number provided, it will be total request diff --git a/src/together/generated/docs/HardwareApi.md b/src/together/generated/docs/HardwareApi.md index b631d038..6498f346 100644 --- a/src/together/generated/docs/HardwareApi.md +++ b/src/together/generated/docs/HardwareApi.md @@ -12,7 +12,7 @@ Method | HTTP request | Description List available hardware configurations -Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. +Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. 
### Example @@ -44,7 +44,7 @@ configuration = together.generated.Configuration( async with together.generated.ApiClient(configuration) as api_client: # Create an instance of the API class api_instance = together.generated.HardwareApi(api_client) - model = 'meta-llama/Llama-3-70b-chat-hf' # str | Filter hardware configurations by model compatibility (optional) + model = 'meta-llama/Llama-3-70b-chat-hf' # str | Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. (optional) try: # List available hardware configurations @@ -62,7 +62,7 @@ async with together.generated.ApiClient(configuration) as api_client: Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - **model** | **str**| Filter hardware configurations by model compatibility | [optional] + **model** | **str**| Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. 
| [optional] ### Return type diff --git a/src/together/generated/docs/HardwareWithStatus.md b/src/together/generated/docs/HardwareWithStatus.md index 6435273e..3d85fdbd 100644 --- a/src/together/generated/docs/HardwareWithStatus.md +++ b/src/together/generated/docs/HardwareWithStatus.md @@ -1,13 +1,13 @@ # HardwareWithStatus -Hardware configuration details including current availability status +Hardware configuration details with optional availability status ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **object** | **str** | | -**name** | **str** | Unique identifier for the hardware configuration | +**id** | **str** | Unique identifier for the hardware configuration | **pricing** | [**EndpointPricing**](EndpointPricing.md) | | **specs** | [**HardwareSpec**](HardwareSpec.md) | | **availability** | [**HardwareAvailability**](HardwareAvailability.md) | | [optional] diff --git a/src/together/generated/docs/ListHardware200Response.md b/src/together/generated/docs/ListHardware200Response.md index 7621d170..60fe285e 100644 --- a/src/together/generated/docs/ListHardware200Response.md +++ b/src/together/generated/docs/ListHardware200Response.md @@ -6,7 +6,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **object** | **str** | | -**data** | [**List[ListHardware200ResponseOneOf1DataInner]**](ListHardware200ResponseOneOf1DataInner.md) | | +**data** | [**List[HardwareWithStatus]**](HardwareWithStatus.md) | | ## Example diff --git a/src/together/generated/docs/ListHardware200ResponseOneOf.md b/src/together/generated/docs/ListHardware200ResponseOneOf.md deleted file mode 100644 index 4c4472d7..00000000 --- a/src/together/generated/docs/ListHardware200ResponseOneOf.md +++ /dev/null @@ -1,29 +0,0 @@ -# ListHardware200ResponseOneOf - -Response when no model filter is provided - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | 
------------- | ------------- -**object** | **str** | | -**data** | [**List[ListHardware200ResponseOneOfDataInner]**](ListHardware200ResponseOneOfDataInner.md) | | - -## Example - -```python -from together.generated.models.list_hardware200_response_one_of import ListHardware200ResponseOneOf - -# TODO update the JSON string below -json = "{}" -# create an instance of ListHardware200ResponseOneOf from a JSON string -list_hardware200_response_one_of_instance = ListHardware200ResponseOneOf.from_json(json) -# print the JSON string representation of the object -print(ListHardware200ResponseOneOf.to_json()) - -# convert the object into a dict -list_hardware200_response_one_of_dict = list_hardware200_response_one_of_instance.to_dict() -# create an instance of ListHardware200ResponseOneOf from a dict -list_hardware200_response_one_of_from_dict = ListHardware200ResponseOneOf.from_dict(list_hardware200_response_one_of_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOf1.md b/src/together/generated/docs/ListHardware200ResponseOneOf1.md deleted file mode 100644 index e93eeca5..00000000 --- a/src/together/generated/docs/ListHardware200ResponseOneOf1.md +++ /dev/null @@ -1,29 +0,0 @@ -# ListHardware200ResponseOneOf1 - -Response when model filter is provided - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | -**data** | [**List[ListHardware200ResponseOneOf1DataInner]**](ListHardware200ResponseOneOf1DataInner.md) | | - -## Example - -```python -from together.generated.models.list_hardware200_response_one_of1 import ListHardware200ResponseOneOf1 - -# TODO update the JSON string below -json = "{}" -# create an instance of ListHardware200ResponseOneOf1 from a JSON string 
-list_hardware200_response_one_of1_instance = ListHardware200ResponseOneOf1.from_json(json) -# print the JSON string representation of the object -print(ListHardware200ResponseOneOf1.to_json()) - -# convert the object into a dict -list_hardware200_response_one_of1_dict = list_hardware200_response_one_of1_instance.to_dict() -# create an instance of ListHardware200ResponseOneOf1 from a dict -list_hardware200_response_one_of1_from_dict = ListHardware200ResponseOneOf1.from_dict(list_hardware200_response_one_of1_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md b/src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md deleted file mode 100644 index ffe4d491..00000000 --- a/src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md +++ /dev/null @@ -1,32 +0,0 @@ -# ListHardware200ResponseOneOf1DataInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | -**name** | **str** | Unique identifier for the hardware configuration | -**pricing** | [**EndpointPricing**](EndpointPricing.md) | | -**specs** | [**HardwareSpec**](HardwareSpec.md) | | -**availability** | [**HardwareAvailability**](HardwareAvailability.md) | | -**updated_at** | **datetime** | Timestamp of when the hardware status was last updated | - -## Example - -```python -from together.generated.models.list_hardware200_response_one_of1_data_inner import ListHardware200ResponseOneOf1DataInner - -# TODO update the JSON string below -json = "{}" -# create an instance of ListHardware200ResponseOneOf1DataInner from a JSON string -list_hardware200_response_one_of1_data_inner_instance = ListHardware200ResponseOneOf1DataInner.from_json(json) -# print the JSON string representation of the object 
-print(ListHardware200ResponseOneOf1DataInner.to_json()) - -# convert the object into a dict -list_hardware200_response_one_of1_data_inner_dict = list_hardware200_response_one_of1_data_inner_instance.to_dict() -# create an instance of ListHardware200ResponseOneOf1DataInner from a dict -list_hardware200_response_one_of1_data_inner_from_dict = ListHardware200ResponseOneOf1DataInner.from_dict(list_hardware200_response_one_of1_data_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md b/src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md deleted file mode 100644 index 75586f0d..00000000 --- a/src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md +++ /dev/null @@ -1,32 +0,0 @@ -# ListHardware200ResponseOneOfDataInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | -**name** | **str** | Unique identifier for the hardware configuration | -**pricing** | [**EndpointPricing**](EndpointPricing.md) | | -**specs** | [**HardwareSpec**](HardwareSpec.md) | | -**availability** | **object** | | [optional] -**updated_at** | **datetime** | Timestamp of when the hardware status was last updated | - -## Example - -```python -from together.generated.models.list_hardware200_response_one_of_data_inner import ListHardware200ResponseOneOfDataInner - -# TODO update the JSON string below -json = "{}" -# create an instance of ListHardware200ResponseOneOfDataInner from a JSON string -list_hardware200_response_one_of_data_inner_instance = ListHardware200ResponseOneOfDataInner.from_json(json) -# print the JSON string representation of the object -print(ListHardware200ResponseOneOfDataInner.to_json()) - -# convert the object into a dict 
-list_hardware200_response_one_of_data_inner_dict = list_hardware200_response_one_of_data_inner_instance.to_dict() -# create an instance of ListHardware200ResponseOneOfDataInner from a dict -list_hardware200_response_one_of_data_inner_from_dict = ListHardware200ResponseOneOfDataInner.from_dict(list_hardware200_response_one_of_data_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/models/__init__.py b/src/together/generated/models/__init__.py index 7b50c345..2fe07559 100644 --- a/src/together/generated/models/__init__.py +++ b/src/together/generated/models/__init__.py @@ -161,18 +161,6 @@ ListEndpoints200Response, ) from together.generated.models.list_hardware200_response import ListHardware200Response -from together.generated.models.list_hardware200_response_one_of import ( - ListHardware200ResponseOneOf, -) -from together.generated.models.list_hardware200_response_one_of1 import ( - ListHardware200ResponseOneOf1, -) -from together.generated.models.list_hardware200_response_one_of1_data_inner import ( - ListHardware200ResponseOneOf1DataInner, -) -from together.generated.models.list_hardware200_response_one_of_data_inner import ( - ListHardware200ResponseOneOfDataInner, -) from together.generated.models.lo_ra_training_type import LoRATrainingType from together.generated.models.logprobs_part import LogprobsPart from together.generated.models.model_info import ModelInfo diff --git a/src/together/generated/models/hardware_with_status.py b/src/together/generated/models/hardware_with_status.py index 46680485..4ce638a2 100644 --- a/src/together/generated/models/hardware_with_status.py +++ b/src/together/generated/models/hardware_with_status.py @@ -29,11 +29,11 @@ class HardwareWithStatus(BaseModel): """ - Hardware configuration details including current availability status + Hardware configuration details 
with optional availability status """ # noqa: E501 object: StrictStr - name: StrictStr = Field( + id: StrictStr = Field( description="Unique identifier for the hardware configuration" ) pricing: EndpointPricing @@ -44,7 +44,7 @@ class HardwareWithStatus(BaseModel): ) __properties: ClassVar[List[str]] = [ "object", - "name", + "id", "pricing", "specs", "availability", @@ -118,7 +118,7 @@ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: _obj = cls.model_validate( { "object": obj.get("object"), - "name": obj.get("name"), + "id": obj.get("id"), "pricing": ( EndpointPricing.from_dict(obj["pricing"]) if obj.get("pricing") is not None diff --git a/src/together/generated/models/list_hardware200_response.py b/src/together/generated/models/list_hardware200_response.py index c18ec5f0..42b45e76 100644 --- a/src/together/generated/models/list_hardware200_response.py +++ b/src/together/generated/models/list_hardware200_response.py @@ -13,173 +13,96 @@ from __future__ import annotations -import json import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from together.generated.models.list_hardware200_response_one_of import ( - ListHardware200ResponseOneOf, -) -from together.generated.models.list_hardware200_response_one_of1 import ( - ListHardware200ResponseOneOf1, -) -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -LISTHARDWARE200RESPONSE_ONE_OF_SCHEMAS = [ - "ListHardware200ResponseOneOf", - "ListHardware200ResponseOneOf1", -] +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.hardware_with_status import HardwareWithStatus +from typing import Optional, Set +from typing_extensions import Self class 
ListHardware200Response(BaseModel): """ ListHardware200Response - """ + """ # noqa: E501 + + object: StrictStr + data: List[HardwareWithStatus] + __properties: ClassVar[List[str]] = ["object", "data"] - # data type: ListHardware200ResponseOneOf - oneof_schema_1_validator: Optional[ListHardware200ResponseOneOf] = None - # data type: ListHardware200ResponseOneOf1 - oneof_schema_2_validator: Optional[ListHardware200ResponseOneOf1] = None - actual_instance: Optional[ - Union[ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1] - ] = None - one_of_schemas: Set[str] = { - "ListHardware200ResponseOneOf", - "ListHardware200ResponseOneOf1", - } + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["list"]): + raise ValueError("must be one of enum values ('list')") + return value model_config = ConfigDict( + populate_by_name=True, validate_assignment=True, protected_namespaces=(), ) - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = ListHardware200Response.model_construct() - error_messages = [] - match = 0 - # validate data type: ListHardware200ResponseOneOf - if not isinstance(v, ListHardware200ResponseOneOf): - error_messages.append( - f"Error! Input type `{type(v)}` is not `ListHardware200ResponseOneOf`" - ) - else: - match += 1 - # validate data type: ListHardware200ResponseOneOf1 - if not isinstance(v, ListHardware200ResponseOneOf1): - error_messages.append( - f"Error! 
Input type `{type(v)}` is not `ListHardware200ResponseOneOf1`" - ) - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. Details: " - + ", ".join(error_messages) - ) - else: - return v + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListHardware200Response from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into ListHardware200ResponseOneOf - try: - instance.actual_instance = ListHardware200ResponseOneOf.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into ListHardware200ResponseOneOf1 - try: - instance.actual_instance = ListHardware200ResponseOneOf1.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict( - self, - ) -> Optional[ - Union[ - Dict[str, Any], ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1 - ] - ]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListHardware200Response from a dict""" + if obj is None: return None - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "data": ( + [HardwareWithStatus.from_dict(_item) for _item in obj["data"]] + if obj.get("data") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/list_hardware200_response_one_of.py b/src/together/generated/models/list_hardware200_response_one_of.py deleted file mode 100644 index 1cf0ec36..00000000 --- a/src/together/generated/models/list_hardware200_response_one_of.py +++ /dev/null @@ -1,113 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from together.generated.models.list_hardware200_response_one_of_data_inner import ( - ListHardware200ResponseOneOfDataInner, -) -from typing import Optional, Set -from typing_extensions import Self - - -class ListHardware200ResponseOneOf(BaseModel): - """ - Response when no model filter is provided - """ # noqa: E501 - - object: StrictStr - data: List[ListHardware200ResponseOneOfDataInner] - __properties: ClassVar[List[str]] = ["object", "data"] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["list"]): - raise ValueError("must be one of enum values ('list')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListHardware200ResponseOneOf from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in data (list) - _items = [] - if self.data: - for _item_data in self.data: - if _item_data: - _items.append(_item_data.to_dict()) - _dict["data"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListHardware200ResponseOneOf from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "data": ( - [ - ListHardware200ResponseOneOfDataInner.from_dict(_item) - for _item in obj["data"] - ] - if obj.get("data") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/list_hardware200_response_one_of1.py b/src/together/generated/models/list_hardware200_response_one_of1.py deleted file mode 100644 index 171532b1..00000000 --- a/src/together/generated/models/list_hardware200_response_one_of1.py +++ /dev/null @@ -1,113 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from together.generated.models.list_hardware200_response_one_of1_data_inner import ( - ListHardware200ResponseOneOf1DataInner, -) -from typing import Optional, Set -from typing_extensions import Self - - -class ListHardware200ResponseOneOf1(BaseModel): - """ - Response when model filter is provided - """ # noqa: E501 - - object: StrictStr - data: List[ListHardware200ResponseOneOf1DataInner] - __properties: ClassVar[List[str]] = ["object", "data"] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["list"]): - raise ValueError("must be one of enum values ('list')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListHardware200ResponseOneOf1 from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in data (list) - _items = [] - if self.data: - for _item_data in self.data: - if _item_data: - _items.append(_item_data.to_dict()) - _dict["data"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListHardware200ResponseOneOf1 from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "data": ( - [ - ListHardware200ResponseOneOf1DataInner.from_dict(_item) - for _item in obj["data"] - ] - if obj.get("data") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/list_hardware200_response_one_of1_data_inner.py b/src/together/generated/models/list_hardware200_response_one_of1_data_inner.py deleted file mode 100644 index db5c86a2..00000000 --- a/src/together/generated/models/list_hardware200_response_one_of1_data_inner.py +++ /dev/null @@ -1,140 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from datetime import datetime -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from together.generated.models.endpoint_pricing import EndpointPricing -from together.generated.models.hardware_availability import HardwareAvailability -from together.generated.models.hardware_spec import HardwareSpec -from typing import Optional, Set -from typing_extensions import Self - - -class ListHardware200ResponseOneOf1DataInner(BaseModel): - """ - ListHardware200ResponseOneOf1DataInner - """ # noqa: E501 - - object: StrictStr - name: StrictStr = Field( - description="Unique identifier for the hardware configuration" - ) - pricing: EndpointPricing - specs: HardwareSpec - availability: HardwareAvailability - updated_at: datetime = Field( - description="Timestamp of when the hardware status was last updated" - ) - __properties: ClassVar[List[str]] = [ - "object", - "name", - "pricing", - "specs", - "availability", - "updated_at", - ] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["hardware"]): - raise ValueError("must be one of enum values ('hardware')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListHardware200ResponseOneOf1DataInner from a JSON string""" - return 
cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of pricing - if self.pricing: - _dict["pricing"] = self.pricing.to_dict() - # override the default output from pydantic by calling `to_dict()` of specs - if self.specs: - _dict["specs"] = self.specs.to_dict() - # override the default output from pydantic by calling `to_dict()` of availability - if self.availability: - _dict["availability"] = self.availability.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListHardware200ResponseOneOf1DataInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "name": obj.get("name"), - "pricing": ( - EndpointPricing.from_dict(obj["pricing"]) - if obj.get("pricing") is not None - else None - ), - "specs": ( - HardwareSpec.from_dict(obj["specs"]) - if obj.get("specs") is not None - else None - ), - "availability": ( - HardwareAvailability.from_dict(obj["availability"]) - if obj.get("availability") is not None - else None - ), - "updated_at": obj.get("updated_at"), - } - ) - return _obj diff --git a/src/together/generated/models/list_hardware200_response_one_of_data_inner.py b/src/together/generated/models/list_hardware200_response_one_of_data_inner.py deleted file mode 100644 index bc1f7a99..00000000 --- 
a/src/together/generated/models/list_hardware200_response_one_of_data_inner.py +++ /dev/null @@ -1,137 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from datetime import datetime -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.endpoint_pricing import EndpointPricing -from together.generated.models.hardware_spec import HardwareSpec -from typing import Optional, Set -from typing_extensions import Self - - -class ListHardware200ResponseOneOfDataInner(BaseModel): - """ - ListHardware200ResponseOneOfDataInner - """ # noqa: E501 - - object: StrictStr - name: StrictStr = Field( - description="Unique identifier for the hardware configuration" - ) - pricing: EndpointPricing - specs: HardwareSpec - availability: Optional[Any] = None - updated_at: datetime = Field( - description="Timestamp of when the hardware status was last updated" - ) - __properties: ClassVar[List[str]] = [ - "object", - "name", - "pricing", - "specs", - "availability", - "updated_at", - ] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["hardware"]): - raise ValueError("must be one of enum values ('hardware')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using 
alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListHardware200ResponseOneOfDataInner from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of pricing - if self.pricing: - _dict["pricing"] = self.pricing.to_dict() - # override the default output from pydantic by calling `to_dict()` of specs - if self.specs: - _dict["specs"] = self.specs.to_dict() - # set to None if availability (nullable) is None - # and model_fields_set contains the field - if self.availability is None and "availability" in self.model_fields_set: - _dict["availability"] = None - - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListHardware200ResponseOneOfDataInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "name": obj.get("name"), - "pricing": ( - EndpointPricing.from_dict(obj["pricing"]) - if obj.get("pricing") is not None - else None - ), - "specs": ( - HardwareSpec.from_dict(obj["specs"]) - if obj.get("specs") is not None - else None - ), - "availability": obj.get("availability"), - "updated_at": obj.get("updated_at"), - } - 
) - return _obj diff --git a/src/together/generated/test/test_hardware_with_status.py b/src/together/generated/test/test_hardware_with_status.py index a6ca05f6..8727755f 100644 --- a/src/together/generated/test/test_hardware_with_status.py +++ b/src/together/generated/test/test_hardware_with_status.py @@ -37,7 +37,7 @@ def make_instance(self, include_optional) -> HardwareWithStatus: if include_optional: return HardwareWithStatus( object = 'hardware', - name = '', + id = '', pricing = together.generated.models.endpoint_pricing.EndpointPricing( cents_per_minute = 1.337, ), specs = together.generated.models.hardware_spec.HardwareSpec( @@ -52,7 +52,7 @@ def make_instance(self, include_optional) -> HardwareWithStatus: else: return HardwareWithStatus( object = 'hardware', - name = '', + id = '', pricing = together.generated.models.endpoint_pricing.EndpointPricing( cents_per_minute = 1.337, ), specs = together.generated.models.hardware_spec.HardwareSpec( diff --git a/src/together/generated/test/test_list_hardware200_response.py b/src/together/generated/test/test_list_hardware200_response.py index bcc8dda5..90843a2e 100644 --- a/src/together/generated/test/test_list_hardware200_response.py +++ b/src/together/generated/test/test_list_hardware200_response.py @@ -38,14 +38,38 @@ def make_instance(self, include_optional) -> ListHardware200Response: return ListHardware200Response( object = 'list', data = [ - null + together.generated.models.hardware_with_status.HardwareWithStatus( + object = 'hardware', + id = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + availability = together.generated.models.hardware_availability.HardwareAvailability( + status = 'available', ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ) ] ) else: return 
ListHardware200Response( object = 'list', data = [ - null + together.generated.models.hardware_with_status.HardwareWithStatus( + object = 'hardware', + id = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + availability = together.generated.models.hardware_availability.HardwareAvailability( + status = 'available', ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ) ], ) """ diff --git a/src/together/generated/test/test_list_hardware200_response_one_of.py b/src/together/generated/test/test_list_hardware200_response_one_of.py deleted file mode 100644 index 9b25ae66..00000000 --- a/src/together/generated/test/test_list_hardware200_response_one_of.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.list_hardware200_response_one_of import ( - ListHardware200ResponseOneOf, -) - - -class TestListHardware200ResponseOneOf(unittest.TestCase): - """ListHardware200ResponseOneOf unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ListHardware200ResponseOneOf: - """Test ListHardware200ResponseOneOf - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ListHardware200ResponseOneOf` - """ - model = ListHardware200ResponseOneOf() - if include_optional: - return ListHardware200ResponseOneOf( - object = 'list', - data = [ - null - ] - ) - else: - return ListHardware200ResponseOneOf( - object = 'list', - data = [ - null - ], - ) - """ - - def testListHardware200ResponseOneOf(self): - """Test ListHardware200ResponseOneOf""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of1.py b/src/together/generated/test/test_list_hardware200_response_one_of1.py deleted file mode 100644 index 2925384c..00000000 --- a/src/together/generated/test/test_list_hardware200_response_one_of1.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.list_hardware200_response_one_of1 import ( - ListHardware200ResponseOneOf1, -) - - -class TestListHardware200ResponseOneOf1(unittest.TestCase): - """ListHardware200ResponseOneOf1 unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ListHardware200ResponseOneOf1: - """Test ListHardware200ResponseOneOf1 - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ListHardware200ResponseOneOf1` - """ - model = ListHardware200ResponseOneOf1() - if include_optional: - return ListHardware200ResponseOneOf1( - object = 'list', - data = [ - null - ] - ) - else: - return ListHardware200ResponseOneOf1( - object = 'list', - data = [ - null - ], - ) - """ - - def testListHardware200ResponseOneOf1(self): - """Test ListHardware200ResponseOneOf1""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py b/src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py deleted file mode 100644 index a51e7dbe..00000000 --- a/src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py +++ /dev/null @@ -1,78 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.list_hardware200_response_one_of1_data_inner import ( - ListHardware200ResponseOneOf1DataInner, -) - - -class TestListHardware200ResponseOneOf1DataInner(unittest.TestCase): - """ListHardware200ResponseOneOf1DataInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ListHardware200ResponseOneOf1DataInner: - """Test ListHardware200ResponseOneOf1DataInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ListHardware200ResponseOneOf1DataInner` - """ - model = ListHardware200ResponseOneOf1DataInner() - if include_optional: - return ListHardware200ResponseOneOf1DataInner( - object = 'hardware', - name = '', - pricing = together.generated.models.endpoint_pricing.EndpointPricing( - cents_per_minute = 1.337, ), - specs = together.generated.models.hardware_spec.HardwareSpec( - gpu_type = '', - gpu_link = '', - gpu_memory = 1.337, - gpu_count = 56, ), - availability = together.generated.models.hardware_availability.HardwareAvailability( - status = 'available', ), - updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f') - ) - else: - return ListHardware200ResponseOneOf1DataInner( - object = 'hardware', - name = '', - pricing = together.generated.models.endpoint_pricing.EndpointPricing( - cents_per_minute = 1.337, ), - specs = together.generated.models.hardware_spec.HardwareSpec( - gpu_type = '', - gpu_link = '', - gpu_memory = 1.337, - gpu_count = 56, ), - availability = together.generated.models.hardware_availability.HardwareAvailability( - status = 'available', ), - updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), - ) - """ - - def testListHardware200ResponseOneOf1DataInner(self): - """Test 
ListHardware200ResponseOneOf1DataInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py b/src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py deleted file mode 100644 index e6193a14..00000000 --- a/src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py +++ /dev/null @@ -1,75 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.list_hardware200_response_one_of_data_inner import ( - ListHardware200ResponseOneOfDataInner, -) - - -class TestListHardware200ResponseOneOfDataInner(unittest.TestCase): - """ListHardware200ResponseOneOfDataInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ListHardware200ResponseOneOfDataInner: - """Test ListHardware200ResponseOneOfDataInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ListHardware200ResponseOneOfDataInner` - """ - model = ListHardware200ResponseOneOfDataInner() - if include_optional: - return ListHardware200ResponseOneOfDataInner( - object = 'hardware', - name = '', - pricing = together.generated.models.endpoint_pricing.EndpointPricing( - cents_per_minute = 1.337, ), - specs = together.generated.models.hardware_spec.HardwareSpec( - gpu_type = '', - gpu_link = '', - gpu_memory = 1.337, - gpu_count = 56, ), - availability = None, - 
updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f') - ) - else: - return ListHardware200ResponseOneOfDataInner( - object = 'hardware', - name = '', - pricing = together.generated.models.endpoint_pricing.EndpointPricing( - cents_per_minute = 1.337, ), - specs = together.generated.models.hardware_spec.HardwareSpec( - gpu_type = '', - gpu_link = '', - gpu_memory = 1.337, - gpu_count = 56, ), - updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), - ) - """ - - def testListHardware200ResponseOneOfDataInner(self): - """Test ListHardware200ResponseOneOfDataInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/resources/endpoints.py b/src/together/resources/endpoints.py index 06092360..21ed47ed 100644 --- a/src/together/resources/endpoints.py +++ b/src/together/resources/endpoints.py @@ -4,11 +4,13 @@ from typing import Any, Dict, List, Literal, Optional from together.generated.api.endpoints_api import EndpointsApi +from together.generated.api.hardware_api import HardwareApi from together.generated.api_client import ApiClient from together.generated.configuration import Configuration from together.generated.models.autoscaling import Autoscaling from together.generated.models.create_endpoint_request import CreateEndpointRequest from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.hardware_with_status import HardwareWithStatus from together.generated.models.list_endpoint import ListEndpoint from together.generated.models.update_endpoint_request import UpdateEndpointRequest from together.types import TogetherClient @@ -17,7 +19,9 @@ class BaseEndpoints: """Base class containing common endpoint functionality and documentation.""" - def _get_api_client(self, client: TogetherClient) -> 
tuple[ApiClient, EndpointsApi]: + def _get_api_client( + self, client: TogetherClient + ) -> tuple[ApiClient, EndpointsApi, HardwareApi]: api_client = ApiClient( configuration=Configuration( host=client.base_url.rstrip("/") if client.base_url else "", @@ -25,14 +29,14 @@ def _get_api_client(self, client: TogetherClient) -> tuple[ApiClient, EndpointsA header_name="Authorization", header_value=f"Bearer {client.api_key}" if client.api_key else None, ) - return api_client, EndpointsApi(api_client) + return api_client, EndpointsApi(api_client), HardwareApi(api_client) class Endpoints(BaseEndpoints): """Synchronous endpoints client.""" def __init__(self, client: TogetherClient) -> None: - self.api_client, self._api = self._get_api_client(client) + self.api_client, self._api, self._hardware_api = self._get_api_client(client) self._loop = asyncio.new_event_loop() asyncio.set_event_loop(self._loop) @@ -184,12 +188,29 @@ async def _update() -> DedicatedEndpoint: return self._loop.run_until_complete(_update()) + def list_hardware(self, model: Optional[str] = None) -> List[HardwareWithStatus]: + """ + List available hardware configurations. + + Args: + model (str, optional): Filter hardware configurations by model compatibility. Defaults to None. 
+ + Returns: + List[HardwareWithStatus]: List of hardware configurations with their availability status + """ + + async def _list_hardware() -> List[HardwareWithStatus]: + response = await self._hardware_api.list_hardware(model=model) + return response.data + + return self._loop.run_until_complete(_list_hardware()) + class AsyncEndpoints(BaseEndpoints): """Asynchronous endpoints client.""" def __init__(self, client: TogetherClient) -> None: - self.api_client, self._api = self._get_api_client(client) + self.api_client, self._api, self._hardware_api = self._get_api_client(client) async def create( self, @@ -312,3 +333,18 @@ async def update( return await self._api.update_endpoint( endpoint_id=endpoint_id, update_endpoint_request=request ) + + async def list_hardware( + self, model: Optional[str] = None + ) -> List[HardwareWithStatus]: + """ + List available hardware configurations. + + Args: + model (str, optional): Filter hardware configurations by model compatibility. Defaults to None. + + Returns: + List[HardwareWithStatus]: List of hardware configurations with their availability status + """ + response = await self._hardware_api.list_hardware(model=model) + return response.data From e68aa19a84b1c3cd01c2d1d7adbdcb0392dd02d7 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 19:32:40 +0000 Subject: [PATCH 25/29] =?UTF-8?q?remove=20openapi=20client=20=F0=9F=98=AD?= =?UTF-8?q?=F0=9F=98=AD=F0=9F=98=AD=F0=9F=98=AD=F0=9F=98=AD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/upload-to-pypi.yml | 4 - Makefile | 12 +- mypy.ini | 7 - poetry.lock | 16 +- pyproject.toml | 6 - scripts/generate_api_client.py | 109 - scripts/openapi.yaml | 2813 ----------------- src/together/abstract/api_requestor.py | 2 +- src/together/cli/api/endpoints.py | 59 +- src/together/error.py | 3 + src/together/generated/__init__.py | 212 -- src/together/generated/api/__init__.py | 14 - 
src/together/generated/api/audio_api.py | 302 -- src/together/generated/api/chat_api.py | 308 -- src/together/generated/api/completion_api.py | 308 -- src/together/generated/api/embeddings_api.py | 308 -- src/together/generated/api/endpoints_api.py | 1354 -------- src/together/generated/api/files_api.py | 996 ------ src/together/generated/api/fine_tuning_api.py | 1630 ---------- src/together/generated/api/hardware_api.py | 304 -- src/together/generated/api/images_api.py | 291 -- src/together/generated/api/models_api.py | 279 -- src/together/generated/api/rerank_api.py | 308 -- src/together/generated/api_client.py | 758 ----- src/together/generated/api_response.py | 20 - src/together/generated/configuration.py | 583 ---- src/together/generated/docs/AudioApi.md | 88 - .../generated/docs/AudioSpeechRequest.md | 34 - .../generated/docs/AudioSpeechRequestModel.md | 27 - .../generated/docs/AudioSpeechRequestVoice.md | 27 - .../generated/docs/AudioSpeechStreamChunk.md | 29 - .../generated/docs/AudioSpeechStreamEvent.md | 27 - .../docs/AudioSpeechStreamResponse.md | 27 - src/together/generated/docs/Autoscaling.md | 29 - src/together/generated/docs/ChatApi.md | 93 - .../ChatCompletionAssistantMessageParam.md | 31 - .../generated/docs/ChatCompletionChoice.md | 30 - .../docs/ChatCompletionChoiceDelta.md | 31 - .../ChatCompletionChoiceDeltaFunctionCall.md | 28 - .../docs/ChatCompletionChoicesDataInner.md | 32 - .../ChatCompletionChoicesDataInnerLogprobs.md | 29 - .../generated/docs/ChatCompletionChunk.md | 33 - .../docs/ChatCompletionChunkChoicesInner.md | 31 - .../generated/docs/ChatCompletionEvent.md | 27 - .../ChatCompletionFunctionMessageParam.md | 29 - .../generated/docs/ChatCompletionMessage.md | 30 - .../docs/ChatCompletionMessageFunctionCall.md | 28 - .../docs/ChatCompletionMessageParam.md | 32 - .../generated/docs/ChatCompletionRequest.md | 49 - .../docs/ChatCompletionRequestFunctionCall.md | 27 - .../ChatCompletionRequestFunctionCallOneOf.md | 27 - 
.../ChatCompletionRequestMessagesInner.md | 28 - .../docs/ChatCompletionRequestModel.md | 27 - .../ChatCompletionRequestResponseFormat.md | 29 - .../docs/ChatCompletionRequestToolChoice.md | 31 - .../generated/docs/ChatCompletionResponse.md | 32 - .../generated/docs/ChatCompletionStream.md | 27 - .../docs/ChatCompletionSystemMessageParam.md | 29 - .../generated/docs/ChatCompletionToken.md | 30 - .../generated/docs/ChatCompletionTool.md | 28 - .../docs/ChatCompletionToolFunction.md | 29 - .../docs/ChatCompletionToolMessageParam.md | 29 - .../docs/ChatCompletionUserMessageParam.md | 29 - src/together/generated/docs/CompletionApi.md | 93 - .../generated/docs/CompletionChoice.md | 27 - .../docs/CompletionChoicesDataInner.md | 30 - .../generated/docs/CompletionChunk.md | 32 - .../generated/docs/CompletionChunkUsage.md | 29 - .../generated/docs/CompletionEvent.md | 27 - .../generated/docs/CompletionRequest.md | 44 - .../generated/docs/CompletionRequestModel.md | 27 - .../docs/CompletionRequestSafetyModel.md | 27 - .../generated/docs/CompletionResponse.md | 33 - .../generated/docs/CompletionStream.md | 27 - .../generated/docs/CompletionToken.md | 30 - .../generated/docs/CreateEndpointRequest.md | 33 - .../generated/docs/DedicatedEndpoint.md | 38 - src/together/generated/docs/EmbeddingsApi.md | 93 - .../generated/docs/EmbeddingsRequest.md | 28 - .../generated/docs/EmbeddingsRequestInput.md | 26 - .../generated/docs/EmbeddingsRequestModel.md | 27 - .../generated/docs/EmbeddingsResponse.md | 29 - .../docs/EmbeddingsResponseDataInner.md | 29 - .../generated/docs/EndpointPricing.md | 28 - src/together/generated/docs/EndpointsApi.md | 416 --- src/together/generated/docs/ErrorData.md | 27 - src/together/generated/docs/ErrorDataError.md | 30 - .../generated/docs/FileDeleteResponse.md | 28 - src/together/generated/docs/FileList.md | 27 - src/together/generated/docs/FileObject.md | 30 - src/together/generated/docs/FileResponse.md | 35 - src/together/generated/docs/FilesApi.md | 320 
-- src/together/generated/docs/FineTuneEvent.md | 40 - .../generated/docs/FineTunesPostRequest.md | 45 - .../docs/FineTunesPostRequestTrainOnInputs.md | 27 - .../docs/FineTunesPostRequestTrainingType.md | 31 - src/together/generated/docs/FineTuningApi.md | 488 --- .../generated/docs/FinetuneDownloadResult.md | 31 - .../generated/docs/FinetuneEventLevels.md | 18 - .../generated/docs/FinetuneEventType.md | 56 - .../generated/docs/FinetuneJobStatus.md | 24 - src/together/generated/docs/FinetuneList.md | 27 - .../generated/docs/FinetuneListEvents.md | 27 - .../generated/docs/FinetuneResponse.md | 58 - .../docs/FinetuneResponseTrainOnInputs.md | 26 - src/together/generated/docs/FinishReason.md | 16 - .../generated/docs/FullTrainingType.md | 27 - src/together/generated/docs/HardwareApi.md | 88 - .../generated/docs/HardwareAvailability.md | 28 - src/together/generated/docs/HardwareSpec.md | 31 - .../generated/docs/HardwareWithStatus.md | 33 - src/together/generated/docs/ImageResponse.md | 30 - .../generated/docs/ImageResponseDataInner.md | 29 - src/together/generated/docs/ImagesApi.md | 87 - .../docs/ImagesGenerationsPostRequest.md | 39 - ...esGenerationsPostRequestImageLorasInner.md | 28 - .../docs/ImagesGenerationsPostRequestModel.md | 27 - src/together/generated/docs/LRScheduler.md | 28 - .../generated/docs/LinearLRSchedulerArgs.md | 27 - src/together/generated/docs/ListEndpoint.md | 35 - .../docs/ListEndpoints200Response.md | 28 - .../generated/docs/ListHardware200Response.md | 28 - .../generated/docs/LoRATrainingType.md | 31 - src/together/generated/docs/LogprobsPart.md | 29 - src/together/generated/docs/ModelInfo.md | 36 - src/together/generated/docs/ModelsApi.md | 87 - src/together/generated/docs/Pricing.md | 31 - .../generated/docs/PromptPartInner.md | 28 - src/together/generated/docs/RerankApi.md | 93 - src/together/generated/docs/RerankRequest.md | 32 - .../generated/docs/RerankRequestDocuments.md | 27 - .../generated/docs/RerankRequestModel.md | 27 - 
src/together/generated/docs/RerankResponse.md | 31 - .../docs/RerankResponseResultsInner.md | 29 - .../RerankResponseResultsInnerDocument.md | 27 - src/together/generated/docs/StreamSentinel.md | 27 - src/together/generated/docs/ToolChoice.md | 30 - .../generated/docs/ToolChoiceFunction.md | 28 - src/together/generated/docs/ToolsPart.md | 28 - .../generated/docs/ToolsPartFunction.md | 29 - .../generated/docs/UpdateEndpointRequest.md | 29 - src/together/generated/docs/UsageData.md | 29 - src/together/generated/exceptions.py | 220 -- src/together/generated/models/__init__.py | 185 -- .../generated/models/audio_speech_request.py | 212 -- .../models/audio_speech_request_model.py | 158 - .../models/audio_speech_request_voice.py | 158 - .../models/audio_speech_stream_chunk.py | 98 - .../models/audio_speech_stream_event.py | 95 - .../models/audio_speech_stream_response.py | 169 - src/together/generated/models/autoscaling.py | 93 - ...chat_completion_assistant_message_param.py | 130 - .../models/chat_completion_choice.py | 112 - .../models/chat_completion_choice_delta.py | 134 - ...t_completion_choice_delta_function_call.py | 86 - .../chat_completion_choices_data_inner.py | 123 - ..._completion_choices_data_inner_logprobs.py | 97 - .../generated/models/chat_completion_chunk.py | 139 - .../chat_completion_chunk_choices_inner.py | 112 - .../generated/models/chat_completion_event.py | 95 - .../chat_completion_function_message_param.py | 98 - .../models/chat_completion_message.py | 127 - .../chat_completion_message_function_call.py | 86 - .../models/chat_completion_message_param.py | 266 -- .../models/chat_completion_request.py | 304 -- .../chat_completion_request_function_call.py | 177 -- ...completion_request_function_call_one_of.py | 83 - .../chat_completion_request_messages_inner.py | 99 - .../models/chat_completion_request_model.py | 158 - ...chat_completion_request_response_format.py | 90 - .../chat_completion_request_tool_choice.py | 166 - 
.../models/chat_completion_response.py | 136 - .../models/chat_completion_stream.py | 169 - .../chat_completion_system_message_param.py | 98 - .../generated/models/chat_completion_token.py | 100 - .../generated/models/chat_completion_tool.py | 106 - .../models/chat_completion_tool_function.py | 91 - .../chat_completion_tool_message_param.py | 98 - .../chat_completion_user_message_param.py | 98 - .../generated/models/completion_choice.py | 83 - .../models/completion_choices_data_inner.py | 101 - .../generated/models/completion_chunk.py | 139 - .../models/completion_chunk_usage.py | 95 - .../generated/models/completion_event.py | 95 - .../generated/models/completion_request.py | 212 -- .../models/completion_request_model.py | 158 - .../models/completion_request_safety_model.py | 158 - .../generated/models/completion_response.py | 151 - .../generated/models/completion_stream.py | 169 - .../generated/models/completion_token.py | 100 - .../models/create_endpoint_request.py | 156 - .../generated/models/dedicated_endpoint.py | 157 - .../generated/models/embeddings_request.py | 105 - .../models/embeddings_request_input.py | 171 - .../models/embeddings_request_model.py | 158 - .../generated/models/embeddings_response.py | 115 - .../models/embeddings_response_data_inner.py | 105 - .../generated/models/endpoint_pricing.py | 85 - src/together/generated/models/error_data.py | 95 - .../generated/models/error_data_error.py | 93 - .../generated/models/file_delete_response.py | 84 - src/together/generated/models/file_list.py | 99 - src/together/generated/models/file_object.py | 93 - .../generated/models/file_response.py | 135 - .../generated/models/fine_tune_event.py | 137 - .../models/fine_tunes_post_request.py | 233 -- ...fine_tunes_post_request_train_on_inputs.py | 170 - .../fine_tunes_post_request_training_type.py | 172 - .../models/finetune_download_result.py | 116 - .../generated/models/finetune_event_levels.py | 39 - .../generated/models/finetune_event_type.py | 58 - 
.../generated/models/finetune_job_status.py | 42 - .../generated/models/finetune_list.py | 99 - .../generated/models/finetune_list_events.py | 99 - .../generated/models/finetune_response.py | 222 -- .../finetune_response_train_on_inputs.py | 170 - .../generated/models/finish_reason.py | 38 - .../generated/models/full_training_type.py | 90 - .../generated/models/hardware_availability.py | 94 - .../generated/models/hardware_spec.py | 100 - .../generated/models/hardware_with_status.py | 140 - .../generated/models/image_response.py | 112 - .../models/image_response_data_inner.py | 85 - .../models/images_generations_post_request.py | 217 -- ...erations_post_request_image_loras_inner.py | 88 - .../images_generations_post_request_model.py | 158 - .../models/linear_lr_scheduler_args.py | 94 - .../generated/models/list_endpoint.py | 136 - .../models/list_endpoints200_response.py | 108 - .../models/list_hardware200_response.py | 108 - .../generated/models/lo_ra_training_type.py | 123 - .../generated/models/logprobs_part.py | 97 - src/together/generated/models/lr_scheduler.py | 96 - src/together/generated/models/model_info.py | 135 - src/together/generated/models/pricing.py | 101 - .../generated/models/prompt_part_inner.py | 97 - .../generated/models/rerank_request.py | 144 - .../models/rerank_request_documents.py | 171 - .../generated/models/rerank_request_model.py | 158 - .../generated/models/rerank_response.py | 127 - .../models/rerank_response_results_inner.py | 101 - .../rerank_response_results_inner_document.py | 83 - .../generated/models/stream_sentinel.py | 90 - src/together/generated/models/tool_choice.py | 115 - .../generated/models/tool_choice_function.py | 86 - src/together/generated/models/tools_part.py | 97 - .../generated/models/tools_part_function.py | 93 - .../models/update_endpoint_request.py | 115 - src/together/generated/models/usage_data.py | 95 - src/together/generated/rest.py | 195 -- src/together/generated/test/__init__.py | 0 
src/together/generated/test/test_audio_api.py | 38 - .../test/test_audio_speech_request.py | 63 - .../test/test_audio_speech_request_model.py | 52 - .../test/test_audio_speech_request_voice.py | 52 - .../test/test_audio_speech_stream_chunk.py | 58 - .../test/test_audio_speech_stream_event.py | 60 - .../test/test_audio_speech_stream_response.py | 56 - .../generated/test/test_autoscaling.py | 56 - src/together/generated/test/test_chat_api.py | 38 - ...chat_completion_assistant_message_param.py | 70 - .../test/test_chat_completion_choice.py | 98 - .../test/test_chat_completion_choice_delta.py | 70 - ...t_completion_choice_delta_function_call.py | 58 - ...test_chat_completion_choices_data_inner.py | 74 - ..._completion_choices_data_inner_logprobs.py | 63 - .../test/test_chat_completion_chunk.py | 108 - ...est_chat_completion_chunk_choices_inner.py | 92 - .../test/test_chat_completion_event.py | 112 - ..._chat_completion_function_message_param.py | 60 - .../test/test_chat_completion_message.py | 68 - ...t_chat_completion_message_function_call.py | 58 - .../test_chat_completion_message_param.py | 74 - .../test/test_chat_completion_request.py | 98 - ...t_chat_completion_request_function_call.py | 56 - ...completion_request_function_call_one_of.py | 56 - ..._chat_completion_request_messages_inner.py | 58 - .../test_chat_completion_request_model.py | 54 - ...chat_completion_request_response_format.py | 58 - ...est_chat_completion_request_tool_choice.py | 66 - .../test/test_chat_completion_response.py | 110 - .../test/test_chat_completion_stream.py | 54 - ...st_chat_completion_system_message_param.py | 59 - .../test/test_chat_completion_token.py | 60 - .../test/test_chat_completion_tool.py | 66 - .../test_chat_completion_tool_function.py | 60 - ...test_chat_completion_tool_message_param.py | 60 - ...test_chat_completion_user_message_param.py | 59 - .../generated/test/test_completion_api.py | 38 - .../generated/test/test_completion_choice.py | 53 - 
.../test_completion_choices_data_inner.py | 67 - .../generated/test/test_completion_chunk.py | 77 - .../test/test_completion_chunk_usage.py | 58 - .../generated/test/test_completion_event.py | 80 - .../generated/test/test_completion_request.py | 74 - .../test/test_completion_request_model.py | 52 - .../test_completion_request_safety_model.py | 54 - .../test/test_completion_response.py | 114 - .../generated/test/test_completion_stream.py | 54 - .../generated/test/test_completion_token.py | 60 - .../test/test_create_endpoint_request.py | 66 - .../generated/test/test_dedicated_endpoint.py | 78 - .../generated/test/test_embeddings_api.py | 38 - .../generated/test/test_embeddings_request.py | 56 - .../test/test_embeddings_request_input.py | 52 - .../test/test_embeddings_request_model.py | 52 - .../test/test_embeddings_response.py | 72 - .../test_embeddings_response_data_inner.py | 64 - .../generated/test/test_endpoint_pricing.py | 54 - .../generated/test/test_endpoints_api.py | 66 - .../generated/test/test_error_data.py | 62 - .../generated/test/test_error_data_error.py | 58 - .../test/test_file_delete_response.py | 54 - src/together/generated/test/test_file_list.py | 76 - .../generated/test/test_file_object.py | 56 - .../generated/test/test_file_response.py | 70 - src/together/generated/test/test_files_api.py | 59 - .../generated/test/test_fine_tune_event.py | 79 - .../test/test_fine_tunes_post_request.py | 76 - ...fine_tunes_post_request_train_on_inputs.py | 54 - ...t_fine_tunes_post_request_training_type.py | 62 - .../generated/test/test_fine_tuning_api.py | 73 - .../test/test_finetune_download_result.py | 57 - .../test/test_finetune_event_levels.py | 35 - .../test/test_finetune_event_type.py | 35 - .../test/test_finetune_job_status.py | 35 - .../generated/test/test_finetune_list.py | 54 - .../test/test_finetune_list_events.py | 54 - .../generated/test/test_finetune_response.py | 89 - .../test_finetune_response_train_on_inputs.py | 54 - 
.../generated/test/test_finish_reason.py | 35 - .../generated/test/test_full_training_type.py | 54 - .../generated/test/test_hardware_api.py | 38 - .../test/test_hardware_availability.py | 54 - .../generated/test/test_hardware_spec.py | 60 - .../test/test_hardware_with_status.py | 74 - .../generated/test/test_image_response.py | 70 - .../test/test_image_response_data_inner.py | 56 - .../generated/test/test_images_api.py | 38 - .../test_images_generations_post_request.py | 73 - ...erations_post_request_image_loras_inner.py | 60 - ...t_images_generations_post_request_model.py | 54 - .../test/test_linear_lr_scheduler_args.py | 53 - .../generated/test/test_list_endpoint.py | 68 - .../test/test_list_endpoints200_response.py | 78 - .../test/test_list_hardware200_response.py | 84 - .../test/test_lo_ra_training_type.py | 60 - .../generated/test/test_logprobs_part.py | 61 - .../generated/test/test_lr_scheduler.py | 56 - .../generated/test/test_model_info.py | 71 - .../generated/test/test_models_api.py | 38 - src/together/generated/test/test_pricing.py | 62 - .../generated/test/test_prompt_part_inner.py | 63 - .../generated/test/test_rerank_api.py | 38 - .../generated/test/test_rerank_request.py | 61 - .../test/test_rerank_request_documents.py | 52 - .../test/test_rerank_request_model.py | 52 - .../generated/test/test_rerank_response.py | 60 - .../test_rerank_response_results_inner.py | 62 - ..._rerank_response_results_inner_document.py | 55 - .../generated/test/test_stream_sentinel.py | 54 - .../generated/test/test_tool_choice.py | 64 - .../test/test_tool_choice_function.py | 56 - .../generated/test/test_tools_part.py | 57 - .../test/test_tools_part_function.py | 55 - .../test/test_update_endpoint_request.py | 57 - .../generated/test/test_usage_data.py | 58 - src/together/resources/endpoints.py | 440 ++- src/together/types/endpoints.py | 114 +- 369 files changed, 416 insertions(+), 36938 deletions(-) delete mode 100755 scripts/generate_api_client.py delete mode 100644 
scripts/openapi.yaml delete mode 100644 src/together/generated/__init__.py delete mode 100644 src/together/generated/api/__init__.py delete mode 100644 src/together/generated/api/audio_api.py delete mode 100644 src/together/generated/api/chat_api.py delete mode 100644 src/together/generated/api/completion_api.py delete mode 100644 src/together/generated/api/embeddings_api.py delete mode 100644 src/together/generated/api/endpoints_api.py delete mode 100644 src/together/generated/api/files_api.py delete mode 100644 src/together/generated/api/fine_tuning_api.py delete mode 100644 src/together/generated/api/hardware_api.py delete mode 100644 src/together/generated/api/images_api.py delete mode 100644 src/together/generated/api/models_api.py delete mode 100644 src/together/generated/api/rerank_api.py delete mode 100644 src/together/generated/api_client.py delete mode 100644 src/together/generated/api_response.py delete mode 100644 src/together/generated/configuration.py delete mode 100644 src/together/generated/docs/AudioApi.md delete mode 100644 src/together/generated/docs/AudioSpeechRequest.md delete mode 100644 src/together/generated/docs/AudioSpeechRequestModel.md delete mode 100644 src/together/generated/docs/AudioSpeechRequestVoice.md delete mode 100644 src/together/generated/docs/AudioSpeechStreamChunk.md delete mode 100644 src/together/generated/docs/AudioSpeechStreamEvent.md delete mode 100644 src/together/generated/docs/AudioSpeechStreamResponse.md delete mode 100644 src/together/generated/docs/Autoscaling.md delete mode 100644 src/together/generated/docs/ChatApi.md delete mode 100644 src/together/generated/docs/ChatCompletionAssistantMessageParam.md delete mode 100644 src/together/generated/docs/ChatCompletionChoice.md delete mode 100644 src/together/generated/docs/ChatCompletionChoiceDelta.md delete mode 100644 src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md delete mode 100644 src/together/generated/docs/ChatCompletionChoicesDataInner.md 
delete mode 100644 src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md delete mode 100644 src/together/generated/docs/ChatCompletionChunk.md delete mode 100644 src/together/generated/docs/ChatCompletionChunkChoicesInner.md delete mode 100644 src/together/generated/docs/ChatCompletionEvent.md delete mode 100644 src/together/generated/docs/ChatCompletionFunctionMessageParam.md delete mode 100644 src/together/generated/docs/ChatCompletionMessage.md delete mode 100644 src/together/generated/docs/ChatCompletionMessageFunctionCall.md delete mode 100644 src/together/generated/docs/ChatCompletionMessageParam.md delete mode 100644 src/together/generated/docs/ChatCompletionRequest.md delete mode 100644 src/together/generated/docs/ChatCompletionRequestFunctionCall.md delete mode 100644 src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md delete mode 100644 src/together/generated/docs/ChatCompletionRequestMessagesInner.md delete mode 100644 src/together/generated/docs/ChatCompletionRequestModel.md delete mode 100644 src/together/generated/docs/ChatCompletionRequestResponseFormat.md delete mode 100644 src/together/generated/docs/ChatCompletionRequestToolChoice.md delete mode 100644 src/together/generated/docs/ChatCompletionResponse.md delete mode 100644 src/together/generated/docs/ChatCompletionStream.md delete mode 100644 src/together/generated/docs/ChatCompletionSystemMessageParam.md delete mode 100644 src/together/generated/docs/ChatCompletionToken.md delete mode 100644 src/together/generated/docs/ChatCompletionTool.md delete mode 100644 src/together/generated/docs/ChatCompletionToolFunction.md delete mode 100644 src/together/generated/docs/ChatCompletionToolMessageParam.md delete mode 100644 src/together/generated/docs/ChatCompletionUserMessageParam.md delete mode 100644 src/together/generated/docs/CompletionApi.md delete mode 100644 src/together/generated/docs/CompletionChoice.md delete mode 100644 
src/together/generated/docs/CompletionChoicesDataInner.md delete mode 100644 src/together/generated/docs/CompletionChunk.md delete mode 100644 src/together/generated/docs/CompletionChunkUsage.md delete mode 100644 src/together/generated/docs/CompletionEvent.md delete mode 100644 src/together/generated/docs/CompletionRequest.md delete mode 100644 src/together/generated/docs/CompletionRequestModel.md delete mode 100644 src/together/generated/docs/CompletionRequestSafetyModel.md delete mode 100644 src/together/generated/docs/CompletionResponse.md delete mode 100644 src/together/generated/docs/CompletionStream.md delete mode 100644 src/together/generated/docs/CompletionToken.md delete mode 100644 src/together/generated/docs/CreateEndpointRequest.md delete mode 100644 src/together/generated/docs/DedicatedEndpoint.md delete mode 100644 src/together/generated/docs/EmbeddingsApi.md delete mode 100644 src/together/generated/docs/EmbeddingsRequest.md delete mode 100644 src/together/generated/docs/EmbeddingsRequestInput.md delete mode 100644 src/together/generated/docs/EmbeddingsRequestModel.md delete mode 100644 src/together/generated/docs/EmbeddingsResponse.md delete mode 100644 src/together/generated/docs/EmbeddingsResponseDataInner.md delete mode 100644 src/together/generated/docs/EndpointPricing.md delete mode 100644 src/together/generated/docs/EndpointsApi.md delete mode 100644 src/together/generated/docs/ErrorData.md delete mode 100644 src/together/generated/docs/ErrorDataError.md delete mode 100644 src/together/generated/docs/FileDeleteResponse.md delete mode 100644 src/together/generated/docs/FileList.md delete mode 100644 src/together/generated/docs/FileObject.md delete mode 100644 src/together/generated/docs/FileResponse.md delete mode 100644 src/together/generated/docs/FilesApi.md delete mode 100644 src/together/generated/docs/FineTuneEvent.md delete mode 100644 src/together/generated/docs/FineTunesPostRequest.md delete mode 100644 
src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md delete mode 100644 src/together/generated/docs/FineTunesPostRequestTrainingType.md delete mode 100644 src/together/generated/docs/FineTuningApi.md delete mode 100644 src/together/generated/docs/FinetuneDownloadResult.md delete mode 100644 src/together/generated/docs/FinetuneEventLevels.md delete mode 100644 src/together/generated/docs/FinetuneEventType.md delete mode 100644 src/together/generated/docs/FinetuneJobStatus.md delete mode 100644 src/together/generated/docs/FinetuneList.md delete mode 100644 src/together/generated/docs/FinetuneListEvents.md delete mode 100644 src/together/generated/docs/FinetuneResponse.md delete mode 100644 src/together/generated/docs/FinetuneResponseTrainOnInputs.md delete mode 100644 src/together/generated/docs/FinishReason.md delete mode 100644 src/together/generated/docs/FullTrainingType.md delete mode 100644 src/together/generated/docs/HardwareApi.md delete mode 100644 src/together/generated/docs/HardwareAvailability.md delete mode 100644 src/together/generated/docs/HardwareSpec.md delete mode 100644 src/together/generated/docs/HardwareWithStatus.md delete mode 100644 src/together/generated/docs/ImageResponse.md delete mode 100644 src/together/generated/docs/ImageResponseDataInner.md delete mode 100644 src/together/generated/docs/ImagesApi.md delete mode 100644 src/together/generated/docs/ImagesGenerationsPostRequest.md delete mode 100644 src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md delete mode 100644 src/together/generated/docs/ImagesGenerationsPostRequestModel.md delete mode 100644 src/together/generated/docs/LRScheduler.md delete mode 100644 src/together/generated/docs/LinearLRSchedulerArgs.md delete mode 100644 src/together/generated/docs/ListEndpoint.md delete mode 100644 src/together/generated/docs/ListEndpoints200Response.md delete mode 100644 src/together/generated/docs/ListHardware200Response.md delete mode 100644 
src/together/generated/docs/LoRATrainingType.md delete mode 100644 src/together/generated/docs/LogprobsPart.md delete mode 100644 src/together/generated/docs/ModelInfo.md delete mode 100644 src/together/generated/docs/ModelsApi.md delete mode 100644 src/together/generated/docs/Pricing.md delete mode 100644 src/together/generated/docs/PromptPartInner.md delete mode 100644 src/together/generated/docs/RerankApi.md delete mode 100644 src/together/generated/docs/RerankRequest.md delete mode 100644 src/together/generated/docs/RerankRequestDocuments.md delete mode 100644 src/together/generated/docs/RerankRequestModel.md delete mode 100644 src/together/generated/docs/RerankResponse.md delete mode 100644 src/together/generated/docs/RerankResponseResultsInner.md delete mode 100644 src/together/generated/docs/RerankResponseResultsInnerDocument.md delete mode 100644 src/together/generated/docs/StreamSentinel.md delete mode 100644 src/together/generated/docs/ToolChoice.md delete mode 100644 src/together/generated/docs/ToolChoiceFunction.md delete mode 100644 src/together/generated/docs/ToolsPart.md delete mode 100644 src/together/generated/docs/ToolsPartFunction.md delete mode 100644 src/together/generated/docs/UpdateEndpointRequest.md delete mode 100644 src/together/generated/docs/UsageData.md delete mode 100644 src/together/generated/exceptions.py delete mode 100644 src/together/generated/models/__init__.py delete mode 100644 src/together/generated/models/audio_speech_request.py delete mode 100644 src/together/generated/models/audio_speech_request_model.py delete mode 100644 src/together/generated/models/audio_speech_request_voice.py delete mode 100644 src/together/generated/models/audio_speech_stream_chunk.py delete mode 100644 src/together/generated/models/audio_speech_stream_event.py delete mode 100644 src/together/generated/models/audio_speech_stream_response.py delete mode 100644 src/together/generated/models/autoscaling.py delete mode 100644 
src/together/generated/models/chat_completion_assistant_message_param.py delete mode 100644 src/together/generated/models/chat_completion_choice.py delete mode 100644 src/together/generated/models/chat_completion_choice_delta.py delete mode 100644 src/together/generated/models/chat_completion_choice_delta_function_call.py delete mode 100644 src/together/generated/models/chat_completion_choices_data_inner.py delete mode 100644 src/together/generated/models/chat_completion_choices_data_inner_logprobs.py delete mode 100644 src/together/generated/models/chat_completion_chunk.py delete mode 100644 src/together/generated/models/chat_completion_chunk_choices_inner.py delete mode 100644 src/together/generated/models/chat_completion_event.py delete mode 100644 src/together/generated/models/chat_completion_function_message_param.py delete mode 100644 src/together/generated/models/chat_completion_message.py delete mode 100644 src/together/generated/models/chat_completion_message_function_call.py delete mode 100644 src/together/generated/models/chat_completion_message_param.py delete mode 100644 src/together/generated/models/chat_completion_request.py delete mode 100644 src/together/generated/models/chat_completion_request_function_call.py delete mode 100644 src/together/generated/models/chat_completion_request_function_call_one_of.py delete mode 100644 src/together/generated/models/chat_completion_request_messages_inner.py delete mode 100644 src/together/generated/models/chat_completion_request_model.py delete mode 100644 src/together/generated/models/chat_completion_request_response_format.py delete mode 100644 src/together/generated/models/chat_completion_request_tool_choice.py delete mode 100644 src/together/generated/models/chat_completion_response.py delete mode 100644 src/together/generated/models/chat_completion_stream.py delete mode 100644 src/together/generated/models/chat_completion_system_message_param.py delete mode 100644 
src/together/generated/models/chat_completion_token.py delete mode 100644 src/together/generated/models/chat_completion_tool.py delete mode 100644 src/together/generated/models/chat_completion_tool_function.py delete mode 100644 src/together/generated/models/chat_completion_tool_message_param.py delete mode 100644 src/together/generated/models/chat_completion_user_message_param.py delete mode 100644 src/together/generated/models/completion_choice.py delete mode 100644 src/together/generated/models/completion_choices_data_inner.py delete mode 100644 src/together/generated/models/completion_chunk.py delete mode 100644 src/together/generated/models/completion_chunk_usage.py delete mode 100644 src/together/generated/models/completion_event.py delete mode 100644 src/together/generated/models/completion_request.py delete mode 100644 src/together/generated/models/completion_request_model.py delete mode 100644 src/together/generated/models/completion_request_safety_model.py delete mode 100644 src/together/generated/models/completion_response.py delete mode 100644 src/together/generated/models/completion_stream.py delete mode 100644 src/together/generated/models/completion_token.py delete mode 100644 src/together/generated/models/create_endpoint_request.py delete mode 100644 src/together/generated/models/dedicated_endpoint.py delete mode 100644 src/together/generated/models/embeddings_request.py delete mode 100644 src/together/generated/models/embeddings_request_input.py delete mode 100644 src/together/generated/models/embeddings_request_model.py delete mode 100644 src/together/generated/models/embeddings_response.py delete mode 100644 src/together/generated/models/embeddings_response_data_inner.py delete mode 100644 src/together/generated/models/endpoint_pricing.py delete mode 100644 src/together/generated/models/error_data.py delete mode 100644 src/together/generated/models/error_data_error.py delete mode 100644 src/together/generated/models/file_delete_response.py delete 
mode 100644 src/together/generated/models/file_list.py delete mode 100644 src/together/generated/models/file_object.py delete mode 100644 src/together/generated/models/file_response.py delete mode 100644 src/together/generated/models/fine_tune_event.py delete mode 100644 src/together/generated/models/fine_tunes_post_request.py delete mode 100644 src/together/generated/models/fine_tunes_post_request_train_on_inputs.py delete mode 100644 src/together/generated/models/fine_tunes_post_request_training_type.py delete mode 100644 src/together/generated/models/finetune_download_result.py delete mode 100644 src/together/generated/models/finetune_event_levels.py delete mode 100644 src/together/generated/models/finetune_event_type.py delete mode 100644 src/together/generated/models/finetune_job_status.py delete mode 100644 src/together/generated/models/finetune_list.py delete mode 100644 src/together/generated/models/finetune_list_events.py delete mode 100644 src/together/generated/models/finetune_response.py delete mode 100644 src/together/generated/models/finetune_response_train_on_inputs.py delete mode 100644 src/together/generated/models/finish_reason.py delete mode 100644 src/together/generated/models/full_training_type.py delete mode 100644 src/together/generated/models/hardware_availability.py delete mode 100644 src/together/generated/models/hardware_spec.py delete mode 100644 src/together/generated/models/hardware_with_status.py delete mode 100644 src/together/generated/models/image_response.py delete mode 100644 src/together/generated/models/image_response_data_inner.py delete mode 100644 src/together/generated/models/images_generations_post_request.py delete mode 100644 src/together/generated/models/images_generations_post_request_image_loras_inner.py delete mode 100644 src/together/generated/models/images_generations_post_request_model.py delete mode 100644 src/together/generated/models/linear_lr_scheduler_args.py delete mode 100644 
src/together/generated/models/list_endpoint.py delete mode 100644 src/together/generated/models/list_endpoints200_response.py delete mode 100644 src/together/generated/models/list_hardware200_response.py delete mode 100644 src/together/generated/models/lo_ra_training_type.py delete mode 100644 src/together/generated/models/logprobs_part.py delete mode 100644 src/together/generated/models/lr_scheduler.py delete mode 100644 src/together/generated/models/model_info.py delete mode 100644 src/together/generated/models/pricing.py delete mode 100644 src/together/generated/models/prompt_part_inner.py delete mode 100644 src/together/generated/models/rerank_request.py delete mode 100644 src/together/generated/models/rerank_request_documents.py delete mode 100644 src/together/generated/models/rerank_request_model.py delete mode 100644 src/together/generated/models/rerank_response.py delete mode 100644 src/together/generated/models/rerank_response_results_inner.py delete mode 100644 src/together/generated/models/rerank_response_results_inner_document.py delete mode 100644 src/together/generated/models/stream_sentinel.py delete mode 100644 src/together/generated/models/tool_choice.py delete mode 100644 src/together/generated/models/tool_choice_function.py delete mode 100644 src/together/generated/models/tools_part.py delete mode 100644 src/together/generated/models/tools_part_function.py delete mode 100644 src/together/generated/models/update_endpoint_request.py delete mode 100644 src/together/generated/models/usage_data.py delete mode 100644 src/together/generated/rest.py delete mode 100644 src/together/generated/test/__init__.py delete mode 100644 src/together/generated/test/test_audio_api.py delete mode 100644 src/together/generated/test/test_audio_speech_request.py delete mode 100644 src/together/generated/test/test_audio_speech_request_model.py delete mode 100644 src/together/generated/test/test_audio_speech_request_voice.py delete mode 100644 
src/together/generated/test/test_audio_speech_stream_chunk.py delete mode 100644 src/together/generated/test/test_audio_speech_stream_event.py delete mode 100644 src/together/generated/test/test_audio_speech_stream_response.py delete mode 100644 src/together/generated/test/test_autoscaling.py delete mode 100644 src/together/generated/test/test_chat_api.py delete mode 100644 src/together/generated/test/test_chat_completion_assistant_message_param.py delete mode 100644 src/together/generated/test/test_chat_completion_choice.py delete mode 100644 src/together/generated/test/test_chat_completion_choice_delta.py delete mode 100644 src/together/generated/test/test_chat_completion_choice_delta_function_call.py delete mode 100644 src/together/generated/test/test_chat_completion_choices_data_inner.py delete mode 100644 src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py delete mode 100644 src/together/generated/test/test_chat_completion_chunk.py delete mode 100644 src/together/generated/test/test_chat_completion_chunk_choices_inner.py delete mode 100644 src/together/generated/test/test_chat_completion_event.py delete mode 100644 src/together/generated/test/test_chat_completion_function_message_param.py delete mode 100644 src/together/generated/test/test_chat_completion_message.py delete mode 100644 src/together/generated/test/test_chat_completion_message_function_call.py delete mode 100644 src/together/generated/test/test_chat_completion_message_param.py delete mode 100644 src/together/generated/test/test_chat_completion_request.py delete mode 100644 src/together/generated/test/test_chat_completion_request_function_call.py delete mode 100644 src/together/generated/test/test_chat_completion_request_function_call_one_of.py delete mode 100644 src/together/generated/test/test_chat_completion_request_messages_inner.py delete mode 100644 src/together/generated/test/test_chat_completion_request_model.py delete mode 100644 
src/together/generated/test/test_chat_completion_request_response_format.py delete mode 100644 src/together/generated/test/test_chat_completion_request_tool_choice.py delete mode 100644 src/together/generated/test/test_chat_completion_response.py delete mode 100644 src/together/generated/test/test_chat_completion_stream.py delete mode 100644 src/together/generated/test/test_chat_completion_system_message_param.py delete mode 100644 src/together/generated/test/test_chat_completion_token.py delete mode 100644 src/together/generated/test/test_chat_completion_tool.py delete mode 100644 src/together/generated/test/test_chat_completion_tool_function.py delete mode 100644 src/together/generated/test/test_chat_completion_tool_message_param.py delete mode 100644 src/together/generated/test/test_chat_completion_user_message_param.py delete mode 100644 src/together/generated/test/test_completion_api.py delete mode 100644 src/together/generated/test/test_completion_choice.py delete mode 100644 src/together/generated/test/test_completion_choices_data_inner.py delete mode 100644 src/together/generated/test/test_completion_chunk.py delete mode 100644 src/together/generated/test/test_completion_chunk_usage.py delete mode 100644 src/together/generated/test/test_completion_event.py delete mode 100644 src/together/generated/test/test_completion_request.py delete mode 100644 src/together/generated/test/test_completion_request_model.py delete mode 100644 src/together/generated/test/test_completion_request_safety_model.py delete mode 100644 src/together/generated/test/test_completion_response.py delete mode 100644 src/together/generated/test/test_completion_stream.py delete mode 100644 src/together/generated/test/test_completion_token.py delete mode 100644 src/together/generated/test/test_create_endpoint_request.py delete mode 100644 src/together/generated/test/test_dedicated_endpoint.py delete mode 100644 src/together/generated/test/test_embeddings_api.py delete mode 100644 
src/together/generated/test/test_embeddings_request.py delete mode 100644 src/together/generated/test/test_embeddings_request_input.py delete mode 100644 src/together/generated/test/test_embeddings_request_model.py delete mode 100644 src/together/generated/test/test_embeddings_response.py delete mode 100644 src/together/generated/test/test_embeddings_response_data_inner.py delete mode 100644 src/together/generated/test/test_endpoint_pricing.py delete mode 100644 src/together/generated/test/test_endpoints_api.py delete mode 100644 src/together/generated/test/test_error_data.py delete mode 100644 src/together/generated/test/test_error_data_error.py delete mode 100644 src/together/generated/test/test_file_delete_response.py delete mode 100644 src/together/generated/test/test_file_list.py delete mode 100644 src/together/generated/test/test_file_object.py delete mode 100644 src/together/generated/test/test_file_response.py delete mode 100644 src/together/generated/test/test_files_api.py delete mode 100644 src/together/generated/test/test_fine_tune_event.py delete mode 100644 src/together/generated/test/test_fine_tunes_post_request.py delete mode 100644 src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py delete mode 100644 src/together/generated/test/test_fine_tunes_post_request_training_type.py delete mode 100644 src/together/generated/test/test_fine_tuning_api.py delete mode 100644 src/together/generated/test/test_finetune_download_result.py delete mode 100644 src/together/generated/test/test_finetune_event_levels.py delete mode 100644 src/together/generated/test/test_finetune_event_type.py delete mode 100644 src/together/generated/test/test_finetune_job_status.py delete mode 100644 src/together/generated/test/test_finetune_list.py delete mode 100644 src/together/generated/test/test_finetune_list_events.py delete mode 100644 src/together/generated/test/test_finetune_response.py delete mode 100644 
src/together/generated/test/test_finetune_response_train_on_inputs.py delete mode 100644 src/together/generated/test/test_finish_reason.py delete mode 100644 src/together/generated/test/test_full_training_type.py delete mode 100644 src/together/generated/test/test_hardware_api.py delete mode 100644 src/together/generated/test/test_hardware_availability.py delete mode 100644 src/together/generated/test/test_hardware_spec.py delete mode 100644 src/together/generated/test/test_hardware_with_status.py delete mode 100644 src/together/generated/test/test_image_response.py delete mode 100644 src/together/generated/test/test_image_response_data_inner.py delete mode 100644 src/together/generated/test/test_images_api.py delete mode 100644 src/together/generated/test/test_images_generations_post_request.py delete mode 100644 src/together/generated/test/test_images_generations_post_request_image_loras_inner.py delete mode 100644 src/together/generated/test/test_images_generations_post_request_model.py delete mode 100644 src/together/generated/test/test_linear_lr_scheduler_args.py delete mode 100644 src/together/generated/test/test_list_endpoint.py delete mode 100644 src/together/generated/test/test_list_endpoints200_response.py delete mode 100644 src/together/generated/test/test_list_hardware200_response.py delete mode 100644 src/together/generated/test/test_lo_ra_training_type.py delete mode 100644 src/together/generated/test/test_logprobs_part.py delete mode 100644 src/together/generated/test/test_lr_scheduler.py delete mode 100644 src/together/generated/test/test_model_info.py delete mode 100644 src/together/generated/test/test_models_api.py delete mode 100644 src/together/generated/test/test_pricing.py delete mode 100644 src/together/generated/test/test_prompt_part_inner.py delete mode 100644 src/together/generated/test/test_rerank_api.py delete mode 100644 src/together/generated/test/test_rerank_request.py delete mode 100644 
src/together/generated/test/test_rerank_request_documents.py delete mode 100644 src/together/generated/test/test_rerank_request_model.py delete mode 100644 src/together/generated/test/test_rerank_response.py delete mode 100644 src/together/generated/test/test_rerank_response_results_inner.py delete mode 100644 src/together/generated/test/test_rerank_response_results_inner_document.py delete mode 100644 src/together/generated/test/test_stream_sentinel.py delete mode 100644 src/together/generated/test/test_tool_choice.py delete mode 100644 src/together/generated/test/test_tool_choice_function.py delete mode 100644 src/together/generated/test/test_tools_part.py delete mode 100644 src/together/generated/test/test_tools_part_function.py delete mode 100644 src/together/generated/test/test_update_endpoint_request.py delete mode 100644 src/together/generated/test/test_usage_data.py diff --git a/.github/workflows/upload-to-pypi.yml b/.github/workflows/upload-to-pypi.yml index 066f9b9e..45485ce7 100644 --- a/.github/workflows/upload-to-pypi.yml +++ b/.github/workflows/upload-to-pypi.yml @@ -27,10 +27,6 @@ jobs: path: .venv key: venv-${{ matrix.python-version }}-${{ hashFiles('pyproject.toml') }}-${{ hashFiles('poetry.lock') }} - - name: Generate OpenAPI client - run: | - make generate-client-from-existing-spec - - name: Install dependencies run: | poetry config virtualenvs.in-project true diff --git a/Makefile b/Makefile index 3aa71b97..a9390125 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: all format lint test tests test_watch integration_tests docker_tests help extended_tests generate-client +.PHONY: all format lint test tests test_watch integration_tests docker_tests help extended_tests # Default target executed when no arguments are given to make. 
all: help @@ -27,19 +27,10 @@ integration_tests: install: poetry install --with quality,tests poetry run pre-commit install - $(MAKE) generate-client format: poetry run pre-commit run --all-files -# OpenAPI Client Generation - -generate-client: - python scripts/generate_api_client.py - -generate-client-from-existing-spec: - python scripts/generate_api_client.py --skip-spec-download - # Documentation html: @@ -57,4 +48,3 @@ help: @echo 'test_watch - run unit tests in watch mode' @echo 'extended_tests - run extended tests' @echo 'integration_tests - run integration tests' - @echo 'generate-client - generate the OpenAPI client' diff --git a/mypy.ini b/mypy.ini index 88b2d7ba..ff370642 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,9 +1,2 @@ [mypy] plugins = pydantic.mypy -disallow_untyped_defs = true - -[mypy-together.generated.*] -ignore_errors = true - -[mypy.tests.*] -ignore_errors = true diff --git a/poetry.lock b/poetry.lock index 5de741d1..e66ce00c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -114,20 +114,6 @@ yarl = ">=1.17.0,<2.0" [package.extras] speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] -[[package]] -name = "aiohttp-retry" -version = "2.9.1" -description = "Simple retry client for aiohttp" -optional = false -python-versions = ">=3.7" -files = [ - {file = "aiohttp_retry-2.9.1-py3-none-any.whl", hash = "sha256:66d2759d1921838256a05a3f80ad7e724936f083e35be5abb5e16eed6be6dc54"}, - {file = "aiohttp_retry-2.9.1.tar.gz", hash = "sha256:8eb75e904ed4ee5c2ec242fefe85bf04240f685391c4879d8f541d6028ff01f1"}, -] - -[package.dependencies] -aiohttp = "*" - [[package]] name = "aiosignal" version = "1.3.2" @@ -2608,4 +2594,4 @@ propcache = ">=0.2.0" [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "5578b1ebfec18261ec47ebf32e3ee89424ff3b630810704bfefb62905d652c4a" +content-hash = "802bf67e3b3e7180ba85cd152ddfa8c9f048cf393472d4051c8970152259dd39" diff --git a/pyproject.toml b/pyproject.toml index 6301660b..0a4534cd 100644 --- a/pyproject.toml 
+++ b/pyproject.toml @@ -26,9 +26,6 @@ classifiers = [ ] repository = "https://github.com/togethercomputer/together-python" homepage = "https://github.com/togethercomputer/together-python" -packages = [ - { include = "together", from = "src" }, -] [tool.poetry.dependencies] python = "^3.9" @@ -43,12 +40,10 @@ filelock = "^3.13.1" eval-type-backport = ">=0.1.3,<0.3.0" click = "^8.1.7" pyarrow = ">=10.0.1" -python-dateutil = "^2.8.2" numpy = [ { version = ">=1.23.5", python = "<3.12" }, { version = ">=1.26.0", python = ">=3.12" }, ] -aiohttp-retry = "^2.9.1" pillow = "^11.1.0" [tool.poetry.group.quality] @@ -80,7 +75,6 @@ datasets = ">=2.18,<4.0" transformers = "^4.39.3" - [tool.poetry.urls] "Homepage" = "https://github.com/togethercomputer/together-python" "Bug Tracker" = "https://github.com/togethercomputer/together-python/issues" diff --git a/scripts/generate_api_client.py b/scripts/generate_api_client.py deleted file mode 100755 index 2f18888a..00000000 --- a/scripts/generate_api_client.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import annotations - -import argparse -import shutil -import subprocess -import sys -import tempfile -from pathlib import Path - - -OPENAPI_SPEC_URL = ( - "https://raw.githubusercontent.com/togethercomputer/openapi/main/openapi.yaml" -) -# We no longer set OUTPUT_DIR to the src folder for generation. -# Instead, we'll copy the generated client to the target directory. 
-TARGET_DIR = Path(__file__).parent.parent / "src" / "together" / "generated" -GENERATOR_JAR_URL = "https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/7.11.0/openapi-generator-cli-7.11.0.jar" -GENERATOR_JAR = Path(__file__).parent / "openapi-generator-cli.jar" - - -def run_command(cmd: list[str], check: bool = True) -> subprocess.CompletedProcess[str]: - """Run a command and optionally check its return code.""" - print(f"Running: {' '.join(cmd)}") - return subprocess.run(cmd, check=check, capture_output=True, text=True) - - -def download_file(url: str, target: Path) -> None: - """Download a file.""" - print(f"Downloading {url} to {target}") - run_command(["wget", "-O", str(target), url]) - - -def parse_args() -> argparse.Namespace: - """Parse command line arguments.""" - parser = argparse.ArgumentParser(description="Generate Together API client") - parser.add_argument( - "--skip-spec-download", - action="store_true", - help="Skip downloading the OpenAPI spec file", - ) - return parser.parse_args() - - -def main() -> None: - args = parse_args() - spec_file = Path(__file__).parent / "openapi.yaml" - - # Download OpenAPI spec if not skipped. - if not args.skip_spec_download: - download_file(OPENAPI_SPEC_URL, spec_file) - # Format the spec for better merge conflict handling. - run_command(["npx", "-y", "prettier", "--write", str(spec_file)]) - elif not spec_file.exists(): - print( - "Error: OpenAPI spec file not found and download was skipped", - file=sys.stderr, - ) - sys.exit(1) - - # Download generator if needed. - download_file(GENERATOR_JAR_URL, GENERATOR_JAR) - - # Create a temporary directory for generation. - with tempfile.TemporaryDirectory() as tmp_dir: - tmp_path = Path(tmp_dir) - # Build the generation command. 
- cmd = [ - "java", - "-jar", - str(GENERATOR_JAR), - "generate", - "-i", - str(spec_file), - "-g", - "python", - "-o", - str(tmp_path), - "--package-name=together.generated", - "--git-repo-id=together-python", - "--git-user-id=togethercomputer", - "--additional-properties=packageUrl=https://github.com/togethercomputer/together-python", - "--additional-properties=library=asyncio", - "--additional-properties=generateSourceCodeOnly=true", - ] - - print("Generating client code into temporary directory...") - result = run_command(cmd, check=False) - if result.returncode != 0: - print("Error generating client code:", file=sys.stderr) - print(result.stderr, file=sys.stderr) - sys.exit(1) - - # The generator will create a directory structure like: tmp_dir/together/generated - generated_dir = tmp_path / "together" / "generated" - if not generated_dir.exists(): - print("Error: Expected generated directory not found", file=sys.stderr) - sys.exit(1) - - # Remove any existing generated client code. - shutil.rmtree(TARGET_DIR, ignore_errors=True) - TARGET_DIR.parent.mkdir(parents=True, exist_ok=True) - # Copy the generated code from the temporary directory to the target directory. - shutil.copytree(generated_dir, TARGET_DIR) - print("Successfully generated and copied client code to", TARGET_DIR) - - -if __name__ == "__main__": - main() diff --git a/scripts/openapi.yaml b/scripts/openapi.yaml deleted file mode 100644 index c34a3fbc..00000000 --- a/scripts/openapi.yaml +++ /dev/null @@ -1,2813 +0,0 @@ -openapi: 3.1.0 -info: - title: Together APIs - description: The Together REST API. Please see https://docs.together.ai for more details. 
- version: "2.0.0" - termsOfService: https://www.together.ai/terms-of-service - contact: - name: Together Support - url: https://www.together.ai/contact - license: - name: MIT - url: https://github.com/togethercomputer/openapi/blob/main/LICENSE -servers: - - url: https://api.together.xyz/v1 -security: - - bearerAuth: [] -paths: - /chat/completions: - post: - tags: ["Chat"] - summary: Create chat completion - description: Query a chat model. - operationId: chat-completions - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/ChatCompletionRequest" - responses: - "200": - description: "200" - content: - application/json: - schema: - $ref: "#/components/schemas/ChatCompletionResponse" - text/event-stream: - schema: - $ref: "#/components/schemas/ChatCompletionStream" - "400": - description: "BadRequest" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "401": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "404": - description: "NotFound" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "429": - description: "RateLimit" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "503": - description: "Overloaded" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "504": - description: "Timeout" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - deprecated: false - /completions: - post: - tags: ["Completion"] - summary: Create completion - description: Query a language, code, or image model. 
- operationId: completions - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/CompletionRequest" - responses: - "200": - description: "200" - content: - application/json: - schema: - $ref: "#/components/schemas/CompletionResponse" - text/event-stream: - schema: - $ref: "#/components/schemas/CompletionStream" - "400": - description: "BadRequest" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "401": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "404": - description: "NotFound" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "429": - description: "RateLimit" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "503": - description: "Overloaded" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "504": - description: "Timeout" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - deprecated: false - /embeddings: - post: - tags: ["Embeddings"] - summary: Create embedding - description: Query an embedding model for a given string of text. 
- operationId: embeddings - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/EmbeddingsRequest" - responses: - "200": - description: "200" - content: - application/json: - schema: - $ref: "#/components/schemas/EmbeddingsResponse" - "400": - description: "BadRequest" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "401": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "404": - description: "NotFound" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "429": - description: "RateLimit" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "503": - description: "Overloaded" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "504": - description: "Timeout" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - deprecated: false - /models: - get: - tags: ["Models"] - summary: List all models - description: Lists all of Together's open-source models - operationId: models - responses: - "200": - description: "200" - content: - application/json: - schema: - $ref: "#/components/schemas/ModelInfoList" - "400": - description: "BadRequest" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "401": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "404": - description: "NotFound" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "429": - description: "RateLimit" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "504": - description: "Timeout" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - deprecated: false - /images/generations: - post: - tags: ["Images"] - summary: Create image - description: Use an image model to generate 
an image for a given prompt. - requestBody: - required: true - content: - application/json: - schema: - type: object - required: - - prompt - - model - properties: - prompt: - type: string - description: A description of the desired images. Maximum length varies by model. - example: cat floating in space, cinematic - model: - type: string - description: > - The model to use for image generation.
-
- [See all of Together AI's image models](https://docs.together.ai/docs/serverless-models#image-models) - example: black-forest-labs/FLUX.1-schnell - anyOf: - - type: string - enum: - - black-forest-labs/FLUX.1-schnell-Free - - black-forest-labs/FLUX.1-schnell - - black-forest-labs/FLUX.1.1-pro - - type: string - steps: - type: integer - default: 20 - description: Number of generation steps. - image_url: - type: string - description: URL of an image to use for image models that support it. - seed: - type: integer - description: Seed used for generation. Can be used to reproduce image generations. - n: - type: integer - default: 1 - description: Number of image results to generate. - height: - type: integer - default: 1024 - description: Height of the image to generate in number of pixels. - width: - type: integer - default: 1024 - description: Width of the image to generate in number of pixels. - negative_prompt: - type: string - description: The prompt or prompts not to guide the image generation. - response_format: - type: string - description: Format of the image response. Can be either a base64 string or a URL. - enum: - - base64 - - url - guidance: - type: number - description: Adjusts the alignment of the generated image with the input prompt. Higher values (e.g., 8-10) make the output more faithful to the prompt, while lower values (e.g., 1-5) encourage more creative freedom. - default: 3.5 - output_format: - type: string - description: The format of the image response. Can be either be `jpeg` or `png`. Defaults to `jpeg`. - default: jpeg - enum: - - jpeg - - png - image_loras: - description: An array of objects that define LoRAs (Low-Rank Adaptations) to influence the generated image. - type: array - items: - type: object - required: [path, scale] - properties: - path: - type: string - description: The URL of the LoRA to apply (e.g. https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA). 
- scale: - type: number - description: The strength of the LoRA's influence. Most LoRA's recommend a value of 1. - responses: - "200": - description: Image generated successfully - content: - application/json: - schema: - $ref: "#/components/schemas/ImageResponse" - /files: - get: - tags: ["Files"] - summary: List all files - description: List the metadata for all uploaded data files. - responses: - "200": - description: List of files - content: - application/json: - schema: - $ref: "#/components/schemas/FileList" - /files/{id}: - get: - tags: ["Files"] - summary: List file - description: List the metadata for a single uploaded data file. - parameters: - - name: id - in: path - required: true - schema: - type: string - responses: - "200": - description: File retrieved successfully - content: - application/json: - schema: - $ref: "#/components/schemas/FileResponse" - delete: - tags: ["Files"] - summary: Delete a file - description: Delete a previously uploaded data file. - parameters: - - name: id - in: path - required: true - schema: - type: string - responses: - "200": - description: File deleted successfully - content: - application/json: - schema: - $ref: "#/components/schemas/FileDeleteResponse" - /files/{id}/content: - get: - tags: ["Files"] - summary: Get file contents - description: Get the contents of a single uploaded data file. - parameters: - - name: id - in: path - required: true - schema: - type: string - responses: - "200": - description: File content retrieved successfully - content: - application/json: - schema: - $ref: "#/components/schemas/FileObject" - "500": - description: Internal Server Error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - /fine-tunes: - post: - tags: ["Fine-tuning"] - summary: Create job - description: Use a model to create a fine-tuning job. 
- requestBody: - required: true - content: - application/json: - schema: - type: object - required: - - training_file - - model - properties: - training_file: - type: string - description: File-ID of a training file uploaded to the Together API - validation_file: - type: string - description: File-ID of a validation file uploaded to the Together API - model: - type: string - description: Name of the base model to run fine-tune job on - n_epochs: - type: integer - default: 1 - description: Number of epochs for fine-tuning - n_checkpoints: - type: integer - default: 1 - description: Number of checkpoints to save during fine-tuning - n_evals: - type: integer - default: 0 - description: Number of evaluations to be run on a given validation set during training - batch_size: - type: integer - default: 32 - description: Batch size for fine-tuning - learning_rate: - type: number - format: float - default: 0.00001 - description: Learning rate multiplier to use for training - lr_scheduler: - type: object - default: none - $ref: "#/components/schemas/LRScheduler" - warmup_ratio: - type: number - format: float - default: 0.0 - description: The percent of steps at the start of training to linearly increase the learning rate. - max_grad_norm: - type: number - format: float - default: 1.0 - description: Max gradient norm to be used for gradient clipping. Set to 0 to disable. - weight_decay: - type: number - format: float - default: 0.0 - description: Weight decay - suffix: - type: string - description: Suffix that will be added to your fine-tuned model name - wandb_api_key: - type: string - description: API key for Weights & Biases integration - wandb_base_url: - type: string - description: The base URL of a dedicated Weights & Biases instance. - wandb_project_name: - type: string - description: The Weights & Biases project for your run. If not specified, will use `together` as the project name. - wandb_name: - type: string - description: The Weights & Biases name for your run. 
- train_on_inputs: - oneOf: - - type: boolean - - type: string - enum: - - auto - type: boolean - default: auto - description: Whether to mask the user messages in conversational data or prompts in instruction data. - training_type: - type: object - oneOf: - - $ref: "#/components/schemas/FullTrainingType" - - $ref: "#/components/schemas/LoRATrainingType" - responses: - "200": - description: Fine-tuning job initiated successfully - content: - application/json: - schema: - $ref: "#/components/schemas/FinetuneResponse" - get: - tags: ["Fine-tuning"] - summary: List all jobs - description: List the metadata for all fine-tuning jobs. - responses: - "200": - description: List of fine-tune jobs - content: - application/json: - schema: - $ref: "#/components/schemas/FinetuneList" - /fine-tunes/{id}: - get: - tags: ["Fine-tuning"] - summary: List job - description: List the metadata for a single fine-tuning job. - parameters: - - name: id - in: path - required: true - schema: - type: string - responses: - "200": - description: Fine-tune job details retrieved successfully - content: - application/json: - schema: - $ref: "#/components/schemas/FinetuneResponse" - /fine-tunes/{id}/events: - get: - tags: ["Fine-tuning"] - summary: List job events - description: List the events for a single fine-tuning job. - parameters: - - name: id - in: path - required: true - schema: - type: string - responses: - "200": - description: List of fine-tune events - content: - application/json: - schema: - $ref: "#/components/schemas/FinetuneListEvents" - /finetune/download: - get: - tags: ["Fine-tuning"] - summary: Download model - description: Download a compressed fine-tuned model or checkpoint to local disk. - parameters: - - in: query - name: ft_id - schema: - type: string - required: true - description: Fine-tune ID to download. A string that starts with `ft-`. 
- - in: query - name: checkpoint_step - schema: - type: integer - required: false - description: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. - - in: query - name: checkpoint - schema: - type: string - enum: - - merged - - adapter - description: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. - - in: query - name: output - schema: - type: string - required: false - description: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. - responses: - "200": - description: Successfully downloaded the fine-tuned model or checkpoint. - content: - application/json: - schema: - $ref: "#/components/schemas/FinetuneDownloadResult" - "400": - description: Invalid request parameters. - "404": - description: Fine-tune ID not found. - /fine-tunes/{id}/cancel: - post: - tags: ["Fine-tuning"] - summary: Cancel job - description: Cancel a currently running fine-tuning job. - parameters: - - in: path - name: id - schema: - type: string - required: true - description: Fine-tune ID to cancel. A string that starts with `ft-`. - responses: - "200": - description: Successfully cancelled the fine-tuning job. - content: - application/json: - schema: - $ref: "#/components/schemas/FinetuneResponse" - "400": - description: Invalid request parameters. - "404": - description: Fine-tune ID not found. 
- /rerank: - post: - tags: ["Rerank"] - summary: Create a rerank request - description: Query a reranker model - operationId: rerank - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/RerankRequest" - responses: - "200": - description: "200" - content: - application/json: - schema: - $ref: "#/components/schemas/RerankResponse" - "400": - description: "BadRequest" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "401": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "404": - description: "NotFound" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "429": - description: "RateLimit" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "503": - description: "Overloaded" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "504": - description: "Timeout" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - deprecated: false - /audio/speech: - post: - tags: ["Audio"] - summary: Create audio generation request - description: Generate audio from input text - operationId: audio-speech - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/AudioSpeechRequest" - responses: - "200": - description: "OK" - content: - application/octet-stream: - schema: - type: string - format: binary - audio/wav: - schema: - type: string - format: binary - audio/mpeg: - schema: - type: string - format: binary - text/event-stream: - schema: - $ref: "#/components/schemas/AudioSpeechStreamResponse" - "400": - description: "BadRequest" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "429": - description: "RateLimit" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - /endpoints: - get: - tags: ["Endpoints"] - summary: List all endpoints, can be 
filtered by type - description: Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). - operationId: listEndpoints - parameters: - - name: type - in: query - required: false - schema: - type: string - enum: - - dedicated - - serverless - description: Filter endpoints by type - example: dedicated - responses: - "200": - description: "200" - content: - application/json: - schema: - type: object - required: - - object - - data - properties: - object: - type: string - enum: - - list - data: - type: array - items: - $ref: "#/components/schemas/ListEndpoint" - example: - object: "list" - data: - - object: "endpoint" - name: "allenai/OLMo-7B" - model: "allenai/OLMo-7B" - type: "serverless" - owner: "together" - state: "STARTED" - created_at: "2024-02-28T21:34:35.444Z" - "403": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "500": - description: "Internal error" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - post: - tags: ["Endpoints"] - summary: Create a dedicated endpoint, it will start automatically - description: Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. 
- operationId: createEndpoint - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateEndpointRequest" - responses: - "200": - description: "200" - content: - application/json: - schema: - $ref: "#/components/schemas/DedicatedEndpoint" - "403": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "500": - description: "Internal error" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - - /endpoints/{endpointId}: - get: - tags: ["Endpoints"] - summary: Get endpoint by ID - description: Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. - operationId: getEndpoint - parameters: - - name: endpointId - in: path - required: true - schema: - type: string - description: The ID of the endpoint to retrieve - example: endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7 - responses: - "200": - description: "200" - content: - application/json: - schema: - $ref: "#/components/schemas/DedicatedEndpoint" - "403": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "404": - description: "Not Found" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "500": - description: "Internal error" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - - patch: - tags: ["Endpoints"] - summary: Update endpoint, this can also be used to start or stop a dedicated endpoint - description: Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). 
- operationId: updateEndpoint - parameters: - - name: endpointId - in: path - required: true - schema: - type: string - description: The ID of the endpoint to update - example: endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7 - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - display_name: - type: string - description: A human-readable name for the endpoint - example: My Llama3 70b endpoint - state: - type: string - description: The desired state of the endpoint - enum: - - STARTED - - STOPPED - example: STARTED - autoscaling: - $ref: "#/components/schemas/Autoscaling" - description: New autoscaling configuration for the endpoint - responses: - "200": - description: "200" - content: - application/json: - schema: - $ref: "#/components/schemas/DedicatedEndpoint" - "403": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "404": - description: "Not Found" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "500": - description: "Internal error" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - - delete: - tags: ["Endpoints"] - summary: Delete endpoint - description: Permanently deletes an endpoint. This action cannot be undone. 
- operationId: deleteEndpoint - parameters: - - name: endpointId - in: path - required: true - schema: - type: string - description: The ID of the endpoint to delete - example: endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7 - responses: - "204": - description: "No Content - Endpoint successfully deleted" - "403": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "404": - description: "Not Found" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "500": - description: "Internal error" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - - /hardware: - get: - tags: ["Hardware"] - summary: List available hardware configurations - description: > - Returns a list of available hardware configurations for deploying models. - When a model parameter is provided, it returns only hardware configurations compatible - with that model, including their current availability status. - operationId: listHardware - parameters: - - name: model - in: query - required: false - schema: - type: string - description: > - Filter hardware configurations by model compatibility. When provided, - the response includes availability status for each compatible configuration. 
- example: meta-llama/Llama-3-70b-chat-hf - responses: - "200": - description: "List of available hardware configurations" - content: - application/json: - schema: - type: object - required: - - object - - data - properties: - object: - type: string - enum: - - list - data: - type: array - items: - $ref: "#/components/schemas/HardwareWithStatus" - "403": - description: "Unauthorized" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - "500": - description: "Internal error" - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorData" - -components: - securitySchemes: - bearerAuth: - type: http - scheme: bearer - x-bearer-format: bearer - x-default: default - - schemas: - RerankRequest: - type: object - properties: - model: - type: string - description: > - The model to be used for the rerank request.
-
- [See all of Together AI's rerank models](https://docs.together.ai/docs/serverless-models#rerank-models) - example: Salesforce/Llama-Rank-V1 - anyOf: - - type: string - enum: - - Salesforce/Llama-Rank-v1 - - type: string - - query: - type: string - description: The search query to be used for ranking. - example: What animals can I find near Peru? - documents: - description: List of documents, which can be either strings or objects. - oneOf: - - type: array - items: - type: object - additionalProperties: true - - type: array - items: - type: string - example: Our solar system orbits the Milky Way galaxy at about 515,000 mph - example: - - { - "title": "Llama", - "text": "The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era.", - } - - { - "title": "Panda", - "text": "The giant panda (Ailuropoda melanoleuca), also known as the panda bear or simply panda, is a bear species endemic to China.", - } - - { - "title": "Guanaco", - "text": "The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations.", - } - - { - "title": "Wild Bactrian camel", - "text": "The wild Bactrian camel (Camelus ferus) is an endangered species of camel endemic to Northwest China and southwestern Mongolia.", - } - top_n: - type: integer - description: The number of top results to return. - example: 2 - return_documents: - type: boolean - description: Whether to return supplied documents with the response. - example: true - rank_fields: - type: array - items: - type: string - description: List of keys in the JSON Object document to rank by. Defaults to use all supplied keys for ranking. 
- example: ["title", "text"] - required: - - model - - query - - documents - additionalProperties: false - - RerankResponse: - type: object - required: - - object - - model - - results - properties: - object: - type: string - description: Object type - enum: - - rerank - example: rerank - id: - type: string - description: Request ID - example: 9dfa1a09-5ebc-4a40-970f-586cb8f4ae47 - model: - type: string - description: The model to be used for the rerank request. - example: salesforce/turboranker-0.8-3778-6328 - results: - type: array - items: - type: object - required: [index, relevance_score, document] - properties: - index: - type: integer - relevance_score: - type: number - document: - type: object - properties: - text: - type: string - nullable: true - example: - - { - "index": 0, - "relevance_score": 0.29980177813003117, - "document": - { - "text": '{"title":"Llama","text":"The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."}', - }, - } - - { - "index": 2, - "relevance_score": 0.2752447527354349, - "document": - { - "text": '{"title":"Guanaco","text":"The guanaco is a camelid native to South America, closely related to the llama. 
Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations."}', - }, - } - usage: - $ref: "#/components/schemas/UsageData" - example: - { - "prompt_tokens": 1837, - "completion_tokens": 0, - "total_tokens": 1837, - } - - ErrorData: - type: object - required: - - error - properties: - error: - type: object - properties: - message: - type: string - nullable: false - type: - type: string - nullable: false - param: - type: string - nullable: true - default: null - code: - type: string - nullable: true - default: null - required: - - type - - message - - FinishReason: - type: string - enum: - - stop - - eos - - length - - tool_calls - - function_call - - LogprobsPart: - type: object - properties: - token_ids: - type: array - items: - type: number - description: List of token IDs corresponding to the logprobs - tokens: - type: array - items: - type: string - description: List of token strings - token_logprobs: - type: array - items: - type: number - description: List of token log probabilities - - PromptPart: - type: array - items: - type: object - properties: - text: - type: string - example: [INST] What is the capital of France? [/INST] - logprobs: - $ref: "#/components/schemas/LogprobsPart" - - UsageData: - type: object - properties: - prompt_tokens: - type: integer - completion_tokens: - type: integer - total_tokens: - type: integer - required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true - - CompletionChoicesData: - type: array - items: - type: object - properties: - text: - type: string - example: The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture. 
- seed: - type: integer - finish_reason: - $ref: "#/components/schemas/FinishReason" - logprobs: - type: object - $ref: "#/components/schemas/LogprobsPart" - - CompletionRequest: - type: object - required: - - model - - prompt - properties: - prompt: - type: string - description: A string providing context for the model to complete. - example: [INST] What is the capital of France? [/INST] - model: - type: string - description: > - The name of the model to query.
-
- [See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) - example: mistralai/Mixtral-8x7B-Instruct-v0.1 - anyOf: - - type: string - enum: - - meta-llama/Llama-2-70b-hf - - mistralai/Mistral-7B-v0.1 - - mistralai/Mixtral-8x7B-v0.1 - - Meta-Llama/Llama-Guard-7b - - type: string - max_tokens: - type: integer - description: The maximum number of tokens to generate. - stop: - type: array - description: A list of string sequences that will truncate (stop) inference text output. For example, "
" will stop generation as soon as the model generates the given token. - items: - type: string - temperature: - type: number - description: A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output. - format: float - top_p: - type: number - description: A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text. - format: float - top_k: - type: integer - description: An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. - format: int32 - repetition_penalty: - type: number - description: A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. - format: float - stream: - type: boolean - description: "If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results." - logprobs: - type: integer - minimum: 0 - maximum: 1 - description: Determines the number of most likely tokens to return at each token position log probabilities to return. - echo: - type: boolean - description: If true, the response will contain the prompt. 
Can be used with `logprobs` to return prompt logprobs. - n: - type: integer - description: The number of completions to generate for each prompt. - minimum: 1 - maximum: 128 - safety_model: - type: string - description: The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). - example: "safety_model_name" - anyOf: - - type: string - enum: - - Meta-Llama/Llama-Guard-7b - - type: string - min_p: - type: number - description: A number between 0 and 1 that can be used as an alternative to top-p and top-k. - format: float - presence_penalty: - type: number - description: A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics. - format: float - frequency_penalty: - type: number - description: A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned. - format: float - logit_bias: - type: object - additionalProperties: - type: number - - format: float - description: Adjusts the likelihood of specific tokens appearing in the generated output. - example: { "1024": -10.5, "105": 21.4 } - seed: - type: integer - description: Seed value for reproducibility. 
- example: 42 - CompletionResponse: - type: object - properties: - id: - type: string - choices: - $ref: "#/components/schemas/CompletionChoicesData" - prompt: - $ref: "#/components/schemas/PromptPart" - usage: - $ref: "#/components/schemas/UsageData" - created: - type: integer - model: - type: string - object: - type: string - enum: - - text_completion - required: - - id - - choices - - usage - - created - - model - - object - - CompletionStream: - oneOf: - - $ref: "#/components/schemas/CompletionEvent" - - $ref: "#/components/schemas/StreamSentinel" - - CompletionEvent: - type: object - required: [data] - properties: - data: - $ref: "#/components/schemas/CompletionChunk" - - CompletionChunk: - type: object - required: [id, token, choices, usage, finish_reason] - properties: - id: - type: string - token: - $ref: "#/components/schemas/CompletionToken" - choices: - title: CompletionChoices - type: array - items: - $ref: "#/components/schemas/CompletionChoice" - usage: - allOf: - - $ref: "#/components/schemas/UsageData" - - nullable: true - seed: - type: integer - finish_reason: - allOf: - - $ref: "#/components/schemas/FinishReason" - - nullable: true - - CompletionChoice: - type: object - required: [index] - properties: - text: - type: string - - CompletionToken: - type: object - required: [id, text, logprob, special] - properties: - id: - type: integer - text: - type: string - logprob: - type: number - special: - type: boolean - - ChatCompletionChoicesData: - type: array - items: - type: object - properties: - text: - type: string - index: - type: integer - seed: - type: integer - finish_reason: - $ref: "#/components/schemas/FinishReason" - message: - $ref: "#/components/schemas/ChatCompletionMessage" - logprobs: - allOf: - - nullable: true - - $ref: "#/components/schemas/LogprobsPart" - ChatCompletionMessage: - type: object - required: [role, content] - properties: - content: - type: string - nullable: true - role: - type: string - enum: [assistant] - tool_calls: 
- type: array - items: - $ref: "#/components/schemas/ToolChoice" - function_call: - type: object - deprecated: true - required: [arguments, name] - properties: - arguments: - type: string - name: - type: string - ChatCompletionTool: - type: object - required: [type, function] - properties: - type: - type: string - enum: ["function"] - function: - type: object - required: [name] - properties: - description: - type: string - name: - type: string - parameters: - type: object - additionalProperties: true - - ChatCompletionRequest: - type: object - required: - - model - - messages - properties: - messages: - type: array - description: A list of messages comprising the conversation so far. - items: - type: object - properties: - role: - type: string - description: "The role of the messages author. Choice between: system, user, or assistant." - enum: - - system - - user - - assistant - - tool - content: - description: The content of the message, which can either be a simple string or a structured format. - type: string - oneOf: - - type: string - description: A plain text message. - - type: array - description: A structured message with mixed content types. - items: - type: object - oneOf: - - type: object - properties: - type: - type: string - enum: - - text - text: - type: string - required: - - type - - text - - type: object - properties: - type: - type: string - enum: - - image_url - image_url: - type: object - properties: - url: - type: string - description: The URL of the image as a plain string. - required: - - url - required: - - type - - image_url - required: - - role - - content - model: - description: > - The name of the model to query.
-
- [See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) - example: meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo - anyOf: - - type: string - enum: - - Qwen/Qwen2.5-72B-Instruct-Turbo - - Qwen/Qwen2.5-7B-Instruct-Turbo - - meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo - - meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo - - meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo - - type: string - max_tokens: - type: integer - description: The maximum number of tokens to generate. - stop: - type: array - description: A list of string sequences that will truncate (stop) inference text output. For example, "
" will stop generation as soon as the model generates the given token. - items: - type: string - temperature: - type: number - description: A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output. - format: float - top_p: - type: number - description: A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text. - format: float - top_k: - type: integer - description: An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. - format: int32 - context_length_exceeded_behavior: - type: string - enum: ["truncate", "error"] - default: "error" - description: Defined the behavior of the API when max_tokens exceed the maximum context length of the model. When set to 'error', API will return 400 with appropriate error message. When set to 'truncate', override the max_tokens with maximum context length of the model. - repetition_penalty: - type: number - description: A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. - stream: - type: boolean - description: "If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. 
If false, return a single JSON object containing the results." - logprobs: - type: integer - minimum: 0 - maximum: 1 - description: Determines the number of most likely tokens to return at each token position log probabilities to return. - echo: - type: boolean - description: If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs. - n: - type: integer - description: The number of completions to generate for each prompt. - minimum: 1 - maximum: 128 - min_p: - type: number - description: A number between 0 and 1 that can be used as an alternative to top_p and top-k. - format: float - presence_penalty: - type: number - description: A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics. - format: float - frequency_penalty: - type: number - description: A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned. - format: float - logit_bias: - type: object - additionalProperties: - type: number - format: float - description: Adjusts the likelihood of specific tokens appearing in the generated output. - example: { "1024": -10.5, "105": 21.4 } - seed: - type: integer - description: Seed value for reproducibility. - example: 42 - function_call: - oneOf: - - type: string - enum: [none, auto] - - type: object - required: [name] - properties: - name: - type: string - response_format: - type: object - description: An object specifying the format that the model must output. - properties: - type: - type: string - description: The type of the response format. - example: json - schema: - type: object - additionalProperties: - type: string - description: The schema of the response format. - tools: - type: array - description: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. 
- items: - $ref: "#/components/schemas/ToolsPart" - tool_choice: - description: Controls which (if any) function is called by the model. By default uses `auto`, which lets the model pick between generating a message or calling a function. - oneOf: - - type: string - example: "tool_name" - - $ref: "#/components/schemas/ToolChoice" - safety_model: - type: string - description: The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). - example: "safety_model_name" - - ChatCompletionMessageParam: - oneOf: - - $ref: "#/components/schemas/ChatCompletionSystemMessageParam" - - $ref: "#/components/schemas/ChatCompletionUserMessageParam" - - $ref: "#/components/schemas/ChatCompletionAssistantMessageParam" - - $ref: "#/components/schemas/ChatCompletionToolMessageParam" - - $ref: "#/components/schemas/ChatCompletionFunctionMessageParam" - - # Start Message Params - - ChatCompletionSystemMessageParam: - type: object - required: [content, role] - properties: - content: - type: string - role: - type: string - enum: ["system"] - name: - type: string - - ChatCompletionUserMessageParam: - type: object - required: [content, role] - properties: - content: - type: string - # TODO: more comple content? 
- role: - type: string - enum: ["user"] - name: - type: string - - ChatCompletionAssistantMessageParam: - type: object - required: [role] - properties: - content: - type: string - nullable: true - role: - type: string - enum: ["assistant"] - name: - type: string - tool_calls: - type: array - items: - $ref: "#/components/schemas/ToolChoice" - function_call: - type: object - deprecated: true - properties: - arguments: - type: string - name: - type: string - required: [arguments, name] - - ChatCompletionFunctionMessageParam: - type: object - deprecated: true - required: [content, role, name] - properties: - role: - type: string - enum: ["function"] - content: - type: string - name: - type: string - - ChatCompletionToolMessageParam: - type: object - properties: - role: - type: string - enum: ["tool"] - content: - type: string - tool_call_id: - type: string - required: [role, content, tool_call_id] - - # End Message Params - - ChatCompletionResponse: - type: object - properties: - id: - type: string - choices: - $ref: "#/components/schemas/ChatCompletionChoicesData" - usage: - $ref: "#/components/schemas/UsageData" - created: - type: integer - model: - type: string - object: - type: string - enum: - - chat.completion - required: [choices, id, created, model, object] - - ChatCompletionStream: - oneOf: - - $ref: "#/components/schemas/ChatCompletionEvent" - - $ref: "#/components/schemas/StreamSentinel" - - ChatCompletionEvent: - type: object - required: [data] - properties: - data: - $ref: "#/components/schemas/ChatCompletionChunk" - - ChatCompletionChunk: - type: object - required: [id, object, created, choices, model] - properties: - id: - type: string - object: - type: string - enum: - - chat.completion.chunk - created: - type: integer - system_fingerprint: - type: string - model: - type: string - example: mistralai/Mixtral-8x7B-Instruct-v0.1 - choices: - title: ChatCompletionChoices - type: array - items: - type: object - required: [index, delta, finish_reason] - 
properties: - index: - type: integer - finish_reason: - $ref: "#/components/schemas/FinishReason" - nullable: true - logprobs: - type: number - nullable: true - seed: - type: integer - nullable: true - delta: - title: ChatCompletionChoiceDelta - type: object - required: [role] - properties: - token_id: - type: integer - role: - type: string - enum: ["system", "user", "assistant", "function", "tool"] - content: - type: string - nullable: true - tool_calls: - type: array - items: - $ref: "#/components/schemas/ToolChoice" - function_call: - type: object - deprecated: true - nullable: true - properties: - arguments: - type: string - name: - type: string - required: - - arguments - - name - usage: - allOf: - - $ref: "#/components/schemas/UsageData" - - nullable: true - - AudioSpeechRequest: - type: object - required: - - model - - input - - voice - properties: - model: - description: > - The name of the model to query.
-
- [See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#audio-models) - example: cartesia/sonic - anyOf: - - type: string - enum: - - cartesia/sonic - - type: string - input: - type: string - description: Input text to generate the audio for - voice: - description: The voice to use for generating the audio. [View all supported voices here](https://docs.together.ai/docs/text-to-speech#voices-available). - anyOf: - - type: string - enum: - - laidback woman - - polite man - - storyteller lady - - friendly sidekick - - type: string - response_format: - type: string - description: The format of audio output - default: wav - enum: - - mp3 - - wav - - raw - language: - type: string - description: Language of input text - default: en - enum: - - en - - de - - fr - - es - - hi - - it - - ja - - ko - - nl - - pl - - pt - - ru - - sv - - tr - - zh - response_encoding: - type: string - description: Audio encoding of response - default: pcm_f32le - enum: - - pcm_f32le - - pcm_s16le - - pcm_mulaw - - pcm_alaw - sample_rate: - type: number - default: 44100 - description: Sampling rate to use for the output audio - stream: - type: boolean - default: false - description: "If true, output is streamed for several characters at a time instead of waiting for the full response. The stream terminates with `data: [DONE]`. 
If false, return the encoded audio as octet stream" - - AudioSpeechStreamResponse: - oneOf: - - $ref: "#/components/schemas/AudioSpeechStreamEvent" - - $ref: "#/components/schemas/StreamSentinel" - - AudioSpeechStreamEvent: - type: object - required: [data] - properties: - data: - $ref: "#/components/schemas/AudioSpeechStreamChunk" - - AudioSpeechStreamChunk: - type: object - required: [object, model, b64] - properties: - object: - type: string - enum: - - audio.tts.chunk - model: - type: string - example: cartesia/sonic - b64: - type: string - description: base64 encoded audio stream - StreamSentinel: - type: object - required: [data] - properties: - data: - title: stream_signal - type: string - enum: - - "[DONE]" - - ChatCompletionToken: - type: object - required: [id, text, logprob, special] - properties: - id: - type: integer - text: - type: string - logprob: - type: number - special: - type: boolean - - ChatCompletionChoice: - type: object - required: [index, delta, finish_reason] - properties: - index: - type: integer - finish_reason: - $ref: "#/components/schemas/FinishReason" - logprobs: - $ref: "#/components/schemas/LogprobsPart" - delta: - title: ChatCompletionChoiceDelta - type: object - required: [role] - properties: - token_id: - type: integer - role: - type: string - enum: ["system", "user", "assistant", "function", "tool"] - content: - type: string - nullable: true - tool_calls: - type: array - items: - $ref: "#/components/schemas/ToolChoice" - function_call: - type: object - deprecated: true - nullable: true - properties: - arguments: - type: string - name: - type: string - required: - - arguments - - name - - EmbeddingsRequest: - type: object - required: - - model - - input - properties: - model: - type: string - description: > - The name of the embedding model to use.
-
- [See all of Together AI's embedding models](https://docs.together.ai/docs/serverless-models#embedding-models) - example: togethercomputer/m2-bert-80M-8k-retrieval - anyOf: - - type: string - enum: - - WhereIsAI/UAE-Large-V1 - - BAAI/bge-large-en-v1.5 - - BAAI/bge-base-en-v1.5 - - togethercomputer/m2-bert-80M-8k-retrieval - - type: string - input: - oneOf: - - type: string - description: A string providing the text for the model to embed. - example: Our solar system orbits the Milky Way galaxy at about 515,000 mph - - type: array - items: - type: string - description: A string providing the text for the model to embed. - example: Our solar system orbits the Milky Way galaxy at about 515,000 mph - example: Our solar system orbits the Milky Way galaxy at about 515,000 mph - - EmbeddingsResponse: - type: object - required: - - object - - model - - data - properties: - object: - type: string - enum: - - list - model: - type: string - data: - type: array - items: - type: object - required: [index, object, embedding] - properties: - object: - type: string - enum: - - embedding - embedding: - type: array - items: - type: number - index: - type: integer - - ModelInfoList: - type: array - items: - $ref: "#/components/schemas/ModelInfo" - ModelInfo: - type: object - required: [id, object, created, type] - properties: - id: - type: string - example: "Austism/chronos-hermes-13b" - object: - type: string - example: "model" - created: - type: integer - example: 1692896905 - type: - enum: - - chat - - language - - code - - image - - embedding - - moderation - - rerank - example: "chat" - display_name: - type: string - example: "Chronos Hermes (13B)" - organization: - type: string - example: "Austism" - link: - type: string - license: - type: string - example: "other" - context_length: - type: integer - example: 2048 - pricing: - $ref: "#/components/schemas/Pricing" - ImageResponse: - type: object - properties: - id: - type: string - model: - type: string - object: - enum: - - 
list - example: "list" - data: - type: array - items: - type: object - properties: - index: - type: integer - b64_json: - type: string - url: - type: string - required: - - index - oneOf: - - required: - - b64_json - - required: - - url - required: - - id - - model - - object - - data - Pricing: - type: object - required: [hourly, input, output, base, finetune] - properties: - hourly: - type: number - example: 0 - input: - type: number - example: 0.3 - output: - type: number - example: 0.3 - base: - type: number - example: 0 - finetune: - type: number - example: 0 - - ToolsPart: - type: object - properties: - type: - type: string - example: "tool_type" - function: - type: object - properties: - description: - type: string - example: "A description of the function." - name: - type: string - example: "function_name" - parameters: - type: object - additionalProperties: true - description: "A map of parameter names to their values." - ToolChoice: - type: object - required: [id, type, function, index] - properties: - # TODO: is this the right place for index? 
- index: - type: number - id: - type: string - type: - type: string - enum: ["function"] - function: - type: object - required: [name, arguments] - properties: - name: - type: string - example: "function_name" - arguments: - type: string - - FileResponse: - type: object - required: - - id - - object - - created_at - - filename - - bytes - - purpose - - FileType - - Processed - - LineCount - properties: - id: - type: string - object: - type: string - example: "file" - created_at: - type: integer - example: 1715021438 - filename: - type: string - example: "my_file.jsonl" - bytes: - type: integer - example: 2664 - purpose: - enum: - - fine-tune - example: "fine-tune" - Processed: - type: boolean - FileType: - enum: - - jsonl - - parquet - example: "jsonl" - LineCount: - type: integer - FileList: - required: - - data - type: object - properties: - data: - type: array - items: - $ref: "#/components/schemas/FileResponse" - FileObject: - type: object - properties: - object: - type: string - id: - type: string - filename: - type: string - size: - type: integer - FileDeleteResponse: - type: object - properties: - id: - type: string - deleted: - type: boolean - FinetuneResponse: - type: object - required: - - id - - status - properties: - id: - type: string - format: uuid - training_file: - type: string - validation_file: - type: string - model: - type: string - model_output_name: - type: string - model_output_path: - type: string - trainingfile_numlines: - type: integer - trainingfile_size: - type: integer - created_at: - type: string - updated_at: - type: string - n_epochs: - type: integer - n_checkpoints: - type: integer - n_evals: - type: integer - batch_size: - type: integer - learning_rate: - type: number - lr_scheduler: - type: object - $ref: "#/components/schemas/LRScheduler" - warmup_ratio: - type: number - max_grad_norm: - type: number - format: float - weight_decay: - type: number - format: float - eval_steps: - type: integer - train_on_inputs: - oneOf: - - type: 
boolean - - type: string - enum: - - auto - default: auto - training_type: - type: object - oneOf: - - $ref: "#/components/schemas/FullTrainingType" - - $ref: "#/components/schemas/LoRATrainingType" - status: - $ref: "#/components/schemas/FinetuneJobStatus" - job_id: - type: string - events: - type: array - items: - $ref: "#/components/schemas/FineTuneEvent" - token_count: - type: integer - param_count: - type: integer - total_price: - type: integer - epochs_completed: - type: integer - queue_depth: - type: integer - wandb_project_name: - type: string - wandb_url: - type: string - - FinetuneJobStatus: - type: string - enum: - - pending - - queued - - running - - compressing - - uploading - - cancel_requested - - cancelled - - error - - completed - - FinetuneEventLevels: - type: string - enum: - - null - - info - - warning - - error - - legacy_info - - legacy_iwarning - - legacy_ierror - FinetuneEventType: - type: string - enum: - - job_pending - - job_start - - job_stopped - - model_downloading - - model_download_complete - - training_data_downloading - - training_data_download_complete - - validation_data_downloading - - validation_data_download_complete - - wandb_init - - training_start - - checkpoint_save - - billing_limit - - epoch_complete - - training_complete - - model_compressing - - model_compression_complete - - model_uploading - - model_upload_complete - - job_complete - - job_error - - cancel_requested - - job_restarted - - refund - - warning - - FinetuneList: - type: object - required: - - data - properties: - data: - type: array - items: - $ref: "#/components/schemas/FinetuneResponse" - FinetuneListEvents: - type: object - required: - - data - properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuneEvent" - FineTuneEvent: - type: object - required: - - object - - created_at - - message - - type - - param_count - - token_count - - total_steps - - wandb_url - - step - - checkpoint_path - - model_path - - training_offset - - hash 
- properties: - object: - type: string - enum: [fine-tune-event] - created_at: - type: string - level: - anyOf: - - $ref: "#/components/schemas/FinetuneEventLevels" - message: - type: string - type: - $ref: "#/components/schemas/FinetuneEventType" - param_count: - type: integer - token_count: - type: integer - total_steps: - type: integer - wandb_url: - type: string - step: - type: integer - checkpoint_path: - type: string - model_path: - type: string - training_offset: - type: integer - hash: - type: string - - FinetuneDownloadResult: - type: object - properties: - object: - enum: - - null - - local - id: - type: string - checkpoint_step: - type: integer - filename: - type: string - size: - type: integer - - FullTrainingType: - type: object - properties: - type: - type: string - enum: ["Full"] - required: - - type - LoRATrainingType: - type: object - properties: - type: - type: string - enum: ["Lora"] - lora_r: - type: integer - lora_alpha: - type: integer - lora_dropout: - type: number - format: float - default: 0.0 - lora_trainable_modules: - type: string - default: "all-linear" - required: - - type - - lora_r - - lora_alpha - LRScheduler: - type: object - properties: - lr_scheduler_type: - type: string - lr_scheduler_args: - type: object - $ref: "#/components/schemas/LinearLRSchedulerArgs" - required: - - lr_scheduler_type - LinearLRSchedulerArgs: - type: object - properties: - min_lr_ratio: - type: number - format: float - default: 0.0 - description: The ratio of the final learning rate to the peak learning rate - - Autoscaling: - type: object - description: Configuration for automatic scaling of replicas based on demand. 
- required: - - min_replicas - - max_replicas - properties: - min_replicas: - type: integer - format: int32 - description: The minimum number of replicas to maintain, even when there is no load - examples: - - 2 - max_replicas: - type: integer - format: int32 - description: The maximum number of replicas to scale up to under load - examples: - - 5 - - HardwareSpec: - type: object - description: Detailed specifications of a hardware configuration - required: - - gpu_type - - gpu_link - - gpu_memory - - gpu_count - properties: - gpu_type: - type: string - description: The type/model of GPU - examples: - - a100-80gb - gpu_link: - type: string - description: The GPU interconnect technology - examples: - - sxm - gpu_memory: - type: number - format: float - description: Amount of GPU memory in GB - examples: - - 80 - gpu_count: - type: integer - format: int32 - description: Number of GPUs in this configuration - examples: - - 2 - - EndpointPricing: - type: object - description: Pricing details for using an endpoint - required: - - cents_per_minute - properties: - cents_per_minute: - type: number - format: float - description: Cost per minute of endpoint uptime in cents - examples: - - 5.42 - - HardwareAvailability: - type: object - description: Indicates the current availability status of a hardware configuration - required: - - status - properties: - status: - type: string - description: The availability status of the hardware configuration - enum: - - available - - unavailable - - insufficient - - HardwareWithStatus: - type: object - description: Hardware configuration details with optional availability status - required: - - object - - id - - pricing - - specs - - updated_at - properties: - object: - type: string - enum: - - hardware - id: - type: string - description: Unique identifier for the hardware configuration - examples: - - 2x_nvidia_a100_80gb_sxm - pricing: - $ref: "#/components/schemas/EndpointPricing" - specs: - $ref: "#/components/schemas/HardwareSpec" - 
availability: - $ref: "#/components/schemas/HardwareAvailability" - updated_at: - type: string - format: date-time - description: Timestamp of when the hardware status was last updated - - CreateEndpointRequest: - type: object - required: - - model - - hardware - - autoscaling - properties: - display_name: - type: string - description: A human-readable name for the endpoint - examples: - - My Llama3 70b endpoint - model: - type: string - description: The model to deploy on this endpoint - examples: - - meta-llama/Llama-3-8b-chat-hf - hardware: - type: string - description: The hardware configuration to use for this endpoint - examples: - - 1x_nvidia_a100_80gb_sxm - autoscaling: - $ref: "#/components/schemas/Autoscaling" - description: Configuration for automatic scaling of the endpoint - disable_prompt_cache: - type: boolean - description: Whether to disable the prompt cache for this endpoint - default: false - disable_speculative_decoding: - type: boolean - description: Whether to disable speculative decoding for this endpoint - default: false - state: - type: string - description: The desired state of the endpoint - enum: - - STARTED - - STOPPED - default: STARTED - example: STARTED - - DedicatedEndpoint: - type: object - description: Details about a dedicated endpoint deployment - required: - - object - - id - - name - - display_name - - model - - hardware - - type - - owner - - state - - autoscaling - - created_at - properties: - object: - type: string - enum: - - endpoint - description: The type of object - example: endpoint - id: - type: string - description: Unique identifier for the endpoint - example: endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7 - name: - type: string - description: System name for the endpoint - example: devuser/meta-llama/Llama-3-8b-chat-hf-a32b82a1 - display_name: - type: string - description: Human-readable name for the endpoint - example: My Llama3 70b endpoint - model: - type: string - description: The model deployed on this 
endpoint - example: meta-llama/Llama-3-8b-chat-hf - hardware: - type: string - description: The hardware configuration used for this endpoint - example: 1x_nvidia_a100_80gb_sxm - type: - type: string - enum: - - dedicated - description: The type of endpoint - example: dedicated - owner: - type: string - description: The owner of this endpoint - example: devuser - state: - type: string - enum: - - PENDING - - STARTING - - STARTED - - STOPPING - - STOPPED - - ERROR - description: Current state of the endpoint - example: STARTED - autoscaling: - $ref: "#/components/schemas/Autoscaling" - description: Configuration for automatic scaling of the endpoint - created_at: - type: string - format: date-time - description: Timestamp when the endpoint was created - example: 2025-02-04T10:43:55.405Z - - ListEndpoint: - type: object - description: Details about an endpoint when listed via the list endpoint - required: - - id - - object - - name - - model - - type - - owner - - state - - created_at - properties: - object: - type: string - enum: - - endpoint - description: The type of object - example: endpoint - id: - type: string - description: Unique identifier for the endpoint - example: endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7 - name: - type: string - description: System name for the endpoint - example: allenai/OLMo-7B - model: - type: string - description: The model deployed on this endpoint - example: allenai/OLMo-7B - type: - type: string - enum: - - serverless - - dedicated - description: The type of endpoint - example: serverless - owner: - type: string - description: The owner of this endpoint - example: together - state: - type: string - enum: - - PENDING - - STARTING - - STARTED - - STOPPING - - STOPPED - - ERROR - description: Current state of the endpoint - example: STARTED - created_at: - type: string - format: date-time - description: Timestamp when the endpoint was created - example: 2024-02-28T21:34:35.444Z diff --git a/src/together/abstract/api_requestor.py 
b/src/together/abstract/api_requestor.py index 4bc49288..7e37eaf8 100644 --- a/src/together/abstract/api_requestor.py +++ b/src/together/abstract/api_requestor.py @@ -437,7 +437,7 @@ def _prepare_request_raw( [(k, v) for k, v in options.params.items() if v is not None] ) abs_url = _build_api_url(abs_url, encoded_params) - elif options.method.lower() in {"post", "put"}: + elif options.method.lower() in {"post", "put", "patch"}: if options.params and (options.files or options.override_headers): data = options.params elif options.params and not options.files: diff --git a/src/together/cli/api/endpoints.py b/src/together/cli/api/endpoints.py index 19fc99d1..5380d112 100644 --- a/src/together/cli/api/endpoints.py +++ b/src/together/cli/api/endpoints.py @@ -8,13 +8,7 @@ import click from together import Together -from together.error import AuthenticationError, InvalidRequestError -from together.generated.exceptions import ( - BadRequestException, - ForbiddenException, - NotFoundException, - ServiceException, -) +from together.error import InvalidRequestError from together.types import DedicatedEndpoint, ListEndpoint @@ -74,19 +68,11 @@ def print_endpoint( def print_api_error( - e: Union[ - ForbiddenException, NotFoundException, BadRequestException, ServiceException - ], + e: InvalidRequestError, ) -> None: - error_details = "" - if e.data is not None: - error_details = e.data.to_dict()["error"]["message"] - elif e.body: - error_details = json.loads(e.body)["error"]["message"] - else: - error_details = str(e) + error_details = e.api_response.message - if ( + if error_details and ( "credentials" in error_details.lower() or "authentication" in error_details.lower() ): @@ -102,22 +88,8 @@ def handle_api_errors(f: F) -> F: def wrapper(*args: Any, **kwargs: Any) -> Any: try: return f(*args, **kwargs) - except ( - ForbiddenException, - NotFoundException, - BadRequestException, - ServiceException, - ) as e: - print_api_error(e) - - sys.exit(1) - except AuthenticationError as e: 
- click.echo(f"Error details: {str(e)}", err=True) - click.echo("Error: Invalid API key or authentication failed", err=True) - sys.exit(1) except InvalidRequestError as e: - click.echo(f"Error details: {str(e)}", err=True) - click.echo("Error: Invalid request", err=True) + print_api_error(e) sys.exit(1) except Exception as e: click.echo(f"Error: An unexpected error occurred - {str(e)}", err=True) @@ -226,15 +198,14 @@ def create( disable_speculative_decoding=no_speculative_decoding, state="STOPPED" if no_auto_start else "STARTED", ) - except NotFoundException as e: + except InvalidRequestError as e: + print_api_error(e) if "check the hardware api" in str(e).lower(): - print_api_error(e) fetch_and_print_hardware_options( client=client, model=model, print_json=False, available=True ) - sys.exit(1) - raise e + sys.exit(1) # Print detailed information to stderr click.echo("Created dedicated endpoint with:", err=True) @@ -251,7 +222,7 @@ def create( if no_auto_start: click.echo(" Auto-start: disabled", err=True) - click.echo("Endpoint created successfully", err=True) + click.echo(f"Endpoint created successfully, id: {response.id}", err=True) if wait: import time @@ -308,17 +279,7 @@ def fetch_and_print_hardware_options( ] if print_json: - json_output = [ - { - "id": hardware.id, - "pricing": hardware.pricing.to_dict(), - "specs": hardware.specs.to_dict(), - "availability": ( - hardware.availability.to_dict() if hardware.availability else None - ), - } - for hardware in hardware_options - ] + json_output = [hardware.model_dump() for hardware in hardware_options] click.echo(json.dumps(json_output, indent=2)) else: for hardware in hardware_options: diff --git a/src/together/error.py b/src/together/error.py index b5bdfd40..e2883a2c 100644 --- a/src/together/error.py +++ b/src/together/error.py @@ -18,6 +18,9 @@ def __init__( request_id: str | None = None, http_status: int | None = None, ) -> None: + if isinstance(message, TogetherErrorResponse): + self.api_response = 
message + _message = ( json.dumps(message.model_dump(exclude_none=True)) if isinstance(message, TogetherErrorResponse) diff --git a/src/together/generated/__init__.py b/src/together/generated/__init__.py deleted file mode 100644 index 2a7c8446..00000000 --- a/src/together/generated/__init__.py +++ /dev/null @@ -1,212 +0,0 @@ -# coding: utf-8 - -# flake8: noqa - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -__version__ = "1.0.0" - -# import apis into sdk package -from together.generated.api.audio_api import AudioApi -from together.generated.api.chat_api import ChatApi -from together.generated.api.completion_api import CompletionApi -from together.generated.api.embeddings_api import EmbeddingsApi -from together.generated.api.endpoints_api import EndpointsApi -from together.generated.api.files_api import FilesApi -from together.generated.api.fine_tuning_api import FineTuningApi -from together.generated.api.hardware_api import HardwareApi -from together.generated.api.images_api import ImagesApi -from together.generated.api.models_api import ModelsApi -from together.generated.api.rerank_api import RerankApi - -# import ApiClient -from together.generated.api_response import ApiResponse -from together.generated.api_client import ApiClient -from together.generated.configuration import Configuration -from together.generated.exceptions import OpenApiException -from together.generated.exceptions import ApiTypeError -from together.generated.exceptions import ApiValueError -from together.generated.exceptions import ApiKeyError -from together.generated.exceptions import ApiAttributeError -from together.generated.exceptions import ApiException - -# import models into sdk package -from together.generated.models.audio_speech_request import AudioSpeechRequest 
-from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel -from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice -from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk -from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent -from together.generated.models.audio_speech_stream_response import ( - AudioSpeechStreamResponse, -) -from together.generated.models.autoscaling import Autoscaling -from together.generated.models.chat_completion_assistant_message_param import ( - ChatCompletionAssistantMessageParam, -) -from together.generated.models.chat_completion_choice import ChatCompletionChoice -from together.generated.models.chat_completion_choice_delta import ( - ChatCompletionChoiceDelta, -) -from together.generated.models.chat_completion_choice_delta_function_call import ( - ChatCompletionChoiceDeltaFunctionCall, -) -from together.generated.models.chat_completion_choices_data_inner import ( - ChatCompletionChoicesDataInner, -) -from together.generated.models.chat_completion_choices_data_inner_logprobs import ( - ChatCompletionChoicesDataInnerLogprobs, -) -from together.generated.models.chat_completion_chunk import ChatCompletionChunk -from together.generated.models.chat_completion_chunk_choices_inner import ( - ChatCompletionChunkChoicesInner, -) -from together.generated.models.chat_completion_event import ChatCompletionEvent -from together.generated.models.chat_completion_function_message_param import ( - ChatCompletionFunctionMessageParam, -) -from together.generated.models.chat_completion_message import ChatCompletionMessage -from together.generated.models.chat_completion_message_function_call import ( - ChatCompletionMessageFunctionCall, -) -from together.generated.models.chat_completion_message_param import ( - ChatCompletionMessageParam, -) -from together.generated.models.chat_completion_request import ChatCompletionRequest -from 
together.generated.models.chat_completion_request_function_call import ( - ChatCompletionRequestFunctionCall, -) -from together.generated.models.chat_completion_request_function_call_one_of import ( - ChatCompletionRequestFunctionCallOneOf, -) -from together.generated.models.chat_completion_request_messages_inner import ( - ChatCompletionRequestMessagesInner, -) -from together.generated.models.chat_completion_request_model import ( - ChatCompletionRequestModel, -) -from together.generated.models.chat_completion_request_response_format import ( - ChatCompletionRequestResponseFormat, -) -from together.generated.models.chat_completion_request_tool_choice import ( - ChatCompletionRequestToolChoice, -) -from together.generated.models.chat_completion_response import ChatCompletionResponse -from together.generated.models.chat_completion_stream import ChatCompletionStream -from together.generated.models.chat_completion_system_message_param import ( - ChatCompletionSystemMessageParam, -) -from together.generated.models.chat_completion_token import ChatCompletionToken -from together.generated.models.chat_completion_tool import ChatCompletionTool -from together.generated.models.chat_completion_tool_function import ( - ChatCompletionToolFunction, -) -from together.generated.models.chat_completion_tool_message_param import ( - ChatCompletionToolMessageParam, -) -from together.generated.models.chat_completion_user_message_param import ( - ChatCompletionUserMessageParam, -) -from together.generated.models.completion_choice import CompletionChoice -from together.generated.models.completion_choices_data_inner import ( - CompletionChoicesDataInner, -) -from together.generated.models.completion_chunk import CompletionChunk -from together.generated.models.completion_chunk_usage import CompletionChunkUsage -from together.generated.models.completion_event import CompletionEvent -from together.generated.models.completion_request import CompletionRequest -from 
together.generated.models.completion_request_model import CompletionRequestModel -from together.generated.models.completion_request_safety_model import ( - CompletionRequestSafetyModel, -) -from together.generated.models.completion_response import CompletionResponse -from together.generated.models.completion_stream import CompletionStream -from together.generated.models.completion_token import CompletionToken -from together.generated.models.create_endpoint_request import CreateEndpointRequest -from together.generated.models.dedicated_endpoint import DedicatedEndpoint -from together.generated.models.embeddings_request import EmbeddingsRequest -from together.generated.models.embeddings_request_input import EmbeddingsRequestInput -from together.generated.models.embeddings_request_model import EmbeddingsRequestModel -from together.generated.models.embeddings_response import EmbeddingsResponse -from together.generated.models.embeddings_response_data_inner import ( - EmbeddingsResponseDataInner, -) -from together.generated.models.endpoint_pricing import EndpointPricing -from together.generated.models.error_data import ErrorData -from together.generated.models.error_data_error import ErrorDataError -from together.generated.models.file_delete_response import FileDeleteResponse -from together.generated.models.file_list import FileList -from together.generated.models.file_object import FileObject -from together.generated.models.file_response import FileResponse -from together.generated.models.fine_tune_event import FineTuneEvent -from together.generated.models.fine_tunes_post_request import FineTunesPostRequest -from together.generated.models.fine_tunes_post_request_train_on_inputs import ( - FineTunesPostRequestTrainOnInputs, -) -from together.generated.models.fine_tunes_post_request_training_type import ( - FineTunesPostRequestTrainingType, -) -from together.generated.models.finetune_download_result import FinetuneDownloadResult -from 
together.generated.models.finetune_event_levels import FinetuneEventLevels -from together.generated.models.finetune_event_type import FinetuneEventType -from together.generated.models.finetune_job_status import FinetuneJobStatus -from together.generated.models.finetune_list import FinetuneList -from together.generated.models.finetune_list_events import FinetuneListEvents -from together.generated.models.finetune_response import FinetuneResponse -from together.generated.models.finetune_response_train_on_inputs import ( - FinetuneResponseTrainOnInputs, -) -from together.generated.models.finish_reason import FinishReason -from together.generated.models.full_training_type import FullTrainingType -from together.generated.models.hardware_availability import HardwareAvailability -from together.generated.models.hardware_spec import HardwareSpec -from together.generated.models.hardware_with_status import HardwareWithStatus -from together.generated.models.image_response import ImageResponse -from together.generated.models.image_response_data_inner import ImageResponseDataInner -from together.generated.models.images_generations_post_request import ( - ImagesGenerationsPostRequest, -) -from together.generated.models.images_generations_post_request_image_loras_inner import ( - ImagesGenerationsPostRequestImageLorasInner, -) -from together.generated.models.images_generations_post_request_model import ( - ImagesGenerationsPostRequestModel, -) -from together.generated.models.lr_scheduler import LRScheduler -from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs -from together.generated.models.list_endpoint import ListEndpoint -from together.generated.models.list_endpoints200_response import ( - ListEndpoints200Response, -) -from together.generated.models.list_hardware200_response import ListHardware200Response -from together.generated.models.lo_ra_training_type import LoRATrainingType -from together.generated.models.logprobs_part import LogprobsPart 
-from together.generated.models.model_info import ModelInfo -from together.generated.models.pricing import Pricing -from together.generated.models.prompt_part_inner import PromptPartInner -from together.generated.models.rerank_request import RerankRequest -from together.generated.models.rerank_request_documents import RerankRequestDocuments -from together.generated.models.rerank_request_model import RerankRequestModel -from together.generated.models.rerank_response import RerankResponse -from together.generated.models.rerank_response_results_inner import ( - RerankResponseResultsInner, -) -from together.generated.models.rerank_response_results_inner_document import ( - RerankResponseResultsInnerDocument, -) -from together.generated.models.stream_sentinel import StreamSentinel -from together.generated.models.tool_choice import ToolChoice -from together.generated.models.tool_choice_function import ToolChoiceFunction -from together.generated.models.tools_part import ToolsPart -from together.generated.models.tools_part_function import ToolsPartFunction -from together.generated.models.update_endpoint_request import UpdateEndpointRequest -from together.generated.models.usage_data import UsageData diff --git a/src/together/generated/api/__init__.py b/src/together/generated/api/__init__.py deleted file mode 100644 index 50f8b438..00000000 --- a/src/together/generated/api/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# flake8: noqa - -# import apis into api package -from together.generated.api.audio_api import AudioApi -from together.generated.api.chat_api import ChatApi -from together.generated.api.completion_api import CompletionApi -from together.generated.api.embeddings_api import EmbeddingsApi -from together.generated.api.endpoints_api import EndpointsApi -from together.generated.api.files_api import FilesApi -from together.generated.api.fine_tuning_api import FineTuningApi -from together.generated.api.hardware_api import HardwareApi -from 
together.generated.api.images_api import ImagesApi -from together.generated.api.models_api import ModelsApi -from together.generated.api.rerank_api import RerankApi diff --git a/src/together/generated/api/audio_api.py b/src/together/generated/api/audio_api.py deleted file mode 100644 index f242af24..00000000 --- a/src/together/generated/api/audio_api.py +++ /dev/null @@ -1,302 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from pydantic import StrictBytes, StrictStr -from typing import Optional, Tuple, Union -from together.generated.models.audio_speech_request import AudioSpeechRequest - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class AudioApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def audio_speech( - self, - audio_speech_request: Optional[AudioSpeechRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> bytearray: - """Create audio generation request - - Generate audio from input text - - :param audio_speech_request: - :type audio_speech_request: AudioSpeechRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._audio_speech_serialize( - audio_speech_request=audio_speech_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "bytearray", - "400": "ErrorData", - "429": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def audio_speech_with_http_info( - self, - audio_speech_request: Optional[AudioSpeechRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[bytearray]: - """Create audio generation request - - Generate audio from input text - - :param audio_speech_request: - :type audio_speech_request: AudioSpeechRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._audio_speech_serialize( - audio_speech_request=audio_speech_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "bytearray", - "400": "ErrorData", - "429": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def audio_speech_without_preload_content( - self, - audio_speech_request: Optional[AudioSpeechRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create audio generation request - - Generate audio from input text - - :param audio_speech_request: - :type audio_speech_request: AudioSpeechRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._audio_speech_serialize( - audio_speech_request=audio_speech_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "bytearray", - "400": "ErrorData", - "429": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _audio_speech_serialize( - self, - audio_speech_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if audio_speech_request is not None: - _body_params = audio_speech_request - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - [ - "application/octet-stream", - "audio/wav", - "audio/mpeg", - 
"text/event-stream", - "application/json", - ] - ) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type( - ["application/json"] - ) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/audio/speech", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api/chat_api.py b/src/together/generated/api/chat_api.py deleted file mode 100644 index 9c4bb818..00000000 --- a/src/together/generated/api/chat_api.py +++ /dev/null @@ -1,308 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from typing import Optional -from together.generated.models.chat_completion_request import ChatCompletionRequest -from together.generated.models.chat_completion_response import ChatCompletionResponse - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class ChatApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def chat_completions( - self, - chat_completion_request: Optional[ChatCompletionRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ChatCompletionResponse: - """Create chat completion - - Query a chat model. - - :param chat_completion_request: - :type chat_completion_request: ChatCompletionRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._chat_completions_serialize( - chat_completion_request=chat_completion_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ChatCompletionResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def chat_completions_with_http_info( - self, - chat_completion_request: Optional[ChatCompletionRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, 
- ) -> ApiResponse[ChatCompletionResponse]: - """Create chat completion - - Query a chat model. - - :param chat_completion_request: - :type chat_completion_request: ChatCompletionRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._chat_completions_serialize( - chat_completion_request=chat_completion_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ChatCompletionResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def chat_completions_without_preload_content( - self, - chat_completion_request: Optional[ChatCompletionRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create chat completion - - Query a chat model. - - :param chat_completion_request: - :type chat_completion_request: ChatCompletionRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._chat_completions_serialize( - chat_completion_request=chat_completion_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ChatCompletionResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _chat_completions_serialize( - self, - chat_completion_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if chat_completion_request is not None: - _body_params = chat_completion_request - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json", 
"text/event-stream"] - ) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type( - ["application/json"] - ) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/chat/completions", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api/completion_api.py b/src/together/generated/api/completion_api.py deleted file mode 100644 index 73f5e7fb..00000000 --- a/src/together/generated/api/completion_api.py +++ /dev/null @@ -1,308 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from typing import Optional -from together.generated.models.completion_request import CompletionRequest -from together.generated.models.completion_response import CompletionResponse - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class CompletionApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def completions( - self, - completion_request: Optional[CompletionRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> CompletionResponse: - """Create completion - - Query a language, code, or image model. - - :param completion_request: - :type completion_request: CompletionRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._completions_serialize( - completion_request=completion_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CompletionResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def completions_with_http_info( - self, - completion_request: Optional[CompletionRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[CompletionResponse]: - """Create completion - - Query a language, code, or image model. - - :param completion_request: - :type completion_request: CompletionRequest - :param _request_timeout: timeout setting for this request. 
If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._completions_serialize( - completion_request=completion_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CompletionResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def completions_without_preload_content( - self, - completion_request: Optional[CompletionRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: 
Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create completion - - Query a language, code, or image model. - - :param completion_request: - :type completion_request: CompletionRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._completions_serialize( - completion_request=completion_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "CompletionResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _completions_serialize( - self, - completion_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if completion_request is not None: - _body_params = completion_request - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json", "text/event-stream"] - ) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type( - ["application/json"] - ) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="POST", - 
resource_path="/completions", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api/embeddings_api.py b/src/together/generated/api/embeddings_api.py deleted file mode 100644 index e2dea123..00000000 --- a/src/together/generated/api/embeddings_api.py +++ /dev/null @@ -1,308 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from typing import Optional -from together.generated.models.embeddings_request import EmbeddingsRequest -from together.generated.models.embeddings_response import EmbeddingsResponse - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class EmbeddingsApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def embeddings( - self, - embeddings_request: Optional[EmbeddingsRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> EmbeddingsResponse: - """Create embedding - - Query an embedding model for a given string of text. - - :param embeddings_request: - :type embeddings_request: EmbeddingsRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._embeddings_serialize( - embeddings_request=embeddings_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "EmbeddingsResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def embeddings_with_http_info( - self, - embeddings_request: Optional[EmbeddingsRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[EmbeddingsResponse]: - """Create embedding - - Query an embedding model for a given string of text. - - :param embeddings_request: - :type embeddings_request: EmbeddingsRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._embeddings_serialize( - embeddings_request=embeddings_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "EmbeddingsResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def embeddings_without_preload_content( - self, - embeddings_request: Optional[EmbeddingsRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create embedding - - Query an embedding model for a given string of text. - - :param embeddings_request: - :type embeddings_request: EmbeddingsRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._embeddings_serialize( - embeddings_request=embeddings_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "EmbeddingsResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _embeddings_serialize( - self, - embeddings_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # 
process the header parameters - # process the form parameters - # process the body parameter - if embeddings_request is not None: - _body_params = embeddings_request - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type( - ["application/json"] - ) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/embeddings", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api/endpoints_api.py b/src/together/generated/api/endpoints_api.py deleted file mode 100644 index 70c8824a..00000000 --- a/src/together/generated/api/endpoints_api.py +++ /dev/null @@ -1,1354 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from pydantic import Field, StrictStr, field_validator -from typing import Optional -from typing_extensions import Annotated -from together.generated.models.create_endpoint_request import CreateEndpointRequest -from together.generated.models.dedicated_endpoint import DedicatedEndpoint -from together.generated.models.list_endpoints200_response import ( - ListEndpoints200Response, -) -from together.generated.models.update_endpoint_request import UpdateEndpointRequest - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class EndpointsApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def create_endpoint( - self, - create_endpoint_request: CreateEndpointRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> DedicatedEndpoint: - """Create a dedicated endpoint, it will start automatically - - Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. 
- - :param create_endpoint_request: (required) - :type create_endpoint_request: CreateEndpointRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_endpoint_serialize( - create_endpoint_request=create_endpoint_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DedicatedEndpoint", - "403": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def create_endpoint_with_http_info( - self, - create_endpoint_request: CreateEndpointRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[DedicatedEndpoint]: - """Create a dedicated endpoint, it will start automatically - - Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. - - :param create_endpoint_request: (required) - :type create_endpoint_request: CreateEndpointRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._create_endpoint_serialize( - create_endpoint_request=create_endpoint_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DedicatedEndpoint", - "403": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def create_endpoint_without_preload_content( - self, - create_endpoint_request: CreateEndpointRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create a dedicated endpoint, it will start automatically - - Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. 
- - :param create_endpoint_request: (required) - :type create_endpoint_request: CreateEndpointRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._create_endpoint_serialize( - create_endpoint_request=create_endpoint_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DedicatedEndpoint", - "403": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _create_endpoint_serialize( - self, - create_endpoint_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if create_endpoint_request is not None: - _body_params = create_endpoint_request - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type( - ["application/json"] - ) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/endpoints", - path_params=_path_params, - query_params=_query_params, - 
header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def delete_endpoint( - self, - endpoint_id: Annotated[ - StrictStr, Field(description="The ID of the endpoint to delete") - ], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> None: - """Delete endpoint - - Permanently deletes an endpoint. This action cannot be undone. - - :param endpoint_id: The ID of the endpoint to delete (required) - :type endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._delete_endpoint_serialize( - endpoint_id=endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "204": None, - "403": "ErrorData", - "404": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def delete_endpoint_with_http_info( - self, - endpoint_id: Annotated[ - StrictStr, Field(description="The ID of the endpoint to delete") - ], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[None]: - """Delete endpoint - - Permanently deletes an endpoint. This action cannot be undone. - - :param endpoint_id: The ID of the endpoint to delete (required) - :type endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_endpoint_serialize( - endpoint_id=endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "204": None, - "403": "ErrorData", - "404": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def delete_endpoint_without_preload_content( - self, - endpoint_id: Annotated[ - StrictStr, Field(description="The ID of the endpoint to delete") - ], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Delete endpoint - - Permanently deletes an endpoint. This action cannot be undone. - - :param endpoint_id: The ID of the endpoint to delete (required) - :type endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._delete_endpoint_serialize( - endpoint_id=endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "204": None, - "403": "ErrorData", - "404": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _delete_endpoint_serialize( - self, - endpoint_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if endpoint_id is not None: - _path_params["endpointId"] = endpoint_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body 
parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="DELETE", - resource_path="/endpoints/{endpointId}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def get_endpoint( - self, - endpoint_id: Annotated[ - StrictStr, Field(description="The ID of the endpoint to retrieve") - ], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> DedicatedEndpoint: - """Get endpoint by ID - - Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. - - :param endpoint_id: The ID of the endpoint to retrieve (required) - :type endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_endpoint_serialize( - endpoint_id=endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DedicatedEndpoint", - "403": "ErrorData", - "404": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def get_endpoint_with_http_info( - self, - endpoint_id: Annotated[ - StrictStr, Field(description="The ID of the endpoint to retrieve") - ], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[DedicatedEndpoint]: - """Get endpoint by ID - - Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. - - :param endpoint_id: The ID of the endpoint to retrieve (required) - :type endpoint_id: str - :param _request_timeout: timeout setting for this request. 
If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_endpoint_serialize( - endpoint_id=endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DedicatedEndpoint", - "403": "ErrorData", - "404": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def get_endpoint_without_preload_content( - self, - endpoint_id: Annotated[ - StrictStr, Field(description="The ID of the endpoint to retrieve") - ], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, 
- _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get endpoint by ID - - Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. - - :param endpoint_id: The ID of the endpoint to retrieve (required) - :type endpoint_id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_endpoint_serialize( - endpoint_id=endpoint_id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DedicatedEndpoint", - "403": "ErrorData", - "404": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _get_endpoint_serialize( - self, - endpoint_id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if endpoint_id is not None: - _path_params["endpointId"] = endpoint_id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/endpoints/{endpointId}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def list_endpoints( - self, - type: Annotated[ - Optional[StrictStr], Field(description="Filter 
endpoints by type") - ] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListEndpoints200Response: - """List all endpoints, can be filtered by type - - Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). - - :param type: Filter endpoints by type - :type type: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_endpoints_serialize( - type=type, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListEndpoints200Response", - "403": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def list_endpoints_with_http_info( - self, - type: Annotated[ - Optional[StrictStr], Field(description="Filter endpoints by type") - ] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListEndpoints200Response]: - """List all endpoints, can be filtered by type - - Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). - - :param type: Filter endpoints by type - :type type: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_endpoints_serialize( - type=type, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListEndpoints200Response", - "403": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def list_endpoints_without_preload_content( - self, - type: Annotated[ - Optional[StrictStr], Field(description="Filter endpoints by type") - ] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List all endpoints, can be filtered by type - - Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). - - :param type: Filter endpoints by type - :type type: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_endpoints_serialize( - type=type, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListEndpoints200Response", - "403": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _list_endpoints_serialize( - self, - type, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if type is not None: - - _query_params.append(("type", type)) - - # process the header parameters - # process the form 
parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/endpoints", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def update_endpoint( - self, - endpoint_id: Annotated[ - StrictStr, Field(description="The ID of the endpoint to update") - ], - update_endpoint_request: UpdateEndpointRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> DedicatedEndpoint: - """Update endpoint, this can also be used to start or stop a dedicated endpoint - - Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). - - :param endpoint_id: The ID of the endpoint to update (required) - :type endpoint_id: str - :param update_endpoint_request: (required) - :type update_endpoint_request: UpdateEndpointRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._update_endpoint_serialize( - endpoint_id=endpoint_id, - update_endpoint_request=update_endpoint_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DedicatedEndpoint", - "403": "ErrorData", - "404": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def update_endpoint_with_http_info( - self, - endpoint_id: Annotated[ - StrictStr, Field(description="The ID of the endpoint to update") - ], - update_endpoint_request: UpdateEndpointRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: 
Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[DedicatedEndpoint]: - """Update endpoint, this can also be used to start or stop a dedicated endpoint - - Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). - - :param endpoint_id: The ID of the endpoint to update (required) - :type endpoint_id: str - :param update_endpoint_request: (required) - :type update_endpoint_request: UpdateEndpointRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._update_endpoint_serialize( - endpoint_id=endpoint_id, - update_endpoint_request=update_endpoint_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DedicatedEndpoint", - "403": "ErrorData", - "404": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def update_endpoint_without_preload_content( - self, - endpoint_id: Annotated[ - StrictStr, Field(description="The ID of the endpoint to update") - ], - update_endpoint_request: UpdateEndpointRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Update endpoint, this can also be used to start or stop a dedicated endpoint - - Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). - - :param endpoint_id: The ID of the endpoint to update (required) - :type endpoint_id: str - :param update_endpoint_request: (required) - :type update_endpoint_request: UpdateEndpointRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._update_endpoint_serialize( - endpoint_id=endpoint_id, - update_endpoint_request=update_endpoint_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "DedicatedEndpoint", - "403": "ErrorData", - "404": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _update_endpoint_serialize( - self, - endpoint_id, - update_endpoint_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if endpoint_id is not None: - _path_params["endpointId"] = endpoint_id - # process the query parameters 
- # process the header parameters - # process the form parameters - # process the body parameter - if update_endpoint_request is not None: - _body_params = update_endpoint_request - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type( - ["application/json"] - ) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="PATCH", - resource_path="/endpoints/{endpointId}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api/files_api.py b/src/together/generated/api/files_api.py deleted file mode 100644 index 1981fc9e..00000000 --- a/src/together/generated/api/files_api.py +++ /dev/null @@ -1,996 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from pydantic import StrictStr -from together.generated.models.file_delete_response import FileDeleteResponse -from together.generated.models.file_list import FileList -from together.generated.models.file_object import FileObject -from together.generated.models.file_response import FileResponse - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class FilesApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def files_get( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> FileList: - """List all files - - List the metadata for all uploaded data files. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._files_get_serialize( - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileList", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def files_get_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[FileList]: - """List all files - - List the metadata for all uploaded data files. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._files_get_serialize( - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileList", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def files_get_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List all files - - List the metadata for all uploaded data files. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. 
It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._files_get_serialize( - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileList", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _files_get_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = 
self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/files", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def files_id_content_get( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> FileObject: - """Get file contents - - Get the contents of a single uploaded data file. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._files_id_content_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileObject", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def files_id_content_get_with_http_info( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[FileObject]: - """Get file contents - - Get the contents of a single uploaded data file. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._files_id_content_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileObject", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def files_id_content_get_without_preload_content( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Get file contents - - Get the contents of a single uploaded data file. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._files_id_content_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileObject", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _files_id_content_get_serialize( - self, - id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if id is not None: - _path_params["id"] = id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/files/{id}/content", - path_params=_path_params, - 
query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def files_id_delete( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> FileDeleteResponse: - """Delete a file - - Delete a previously uploaded data file. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._files_id_delete_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileDeleteResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def files_id_delete_with_http_info( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[FileDeleteResponse]: - """Delete a file - - Delete a previously uploaded data file. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._files_id_delete_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileDeleteResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def files_id_delete_without_preload_content( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Delete a file - - Delete a previously uploaded data file. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._files_id_delete_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileDeleteResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _files_id_delete_serialize( - self, - id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if id is not None: - _path_params["id"] = id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="DELETE", - resource_path="/files/{id}", - path_params=_path_params, - query_params=_query_params, - 
header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def files_id_get( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> FileResponse: - """List file - - List the metadata for a single uploaded data file. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._files_id_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def files_id_get_with_http_info( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[FileResponse]: - """List file - - List the metadata for a single uploaded data file. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._files_id_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def files_id_get_without_preload_content( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List file - - List the metadata for a single uploaded data file. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._files_id_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FileResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _files_id_get_serialize( - self, - id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if id is not None: - _path_params["id"] = id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/files/{id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - 
body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api/fine_tuning_api.py b/src/together/generated/api/fine_tuning_api.py deleted file mode 100644 index d25eb662..00000000 --- a/src/together/generated/api/fine_tuning_api.py +++ /dev/null @@ -1,1630 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from pydantic import Field, StrictInt, StrictStr, field_validator -from typing import Optional -from typing_extensions import Annotated -from together.generated.models.fine_tunes_post_request import FineTunesPostRequest -from together.generated.models.finetune_download_result import FinetuneDownloadResult -from together.generated.models.finetune_list import FinetuneList -from together.generated.models.finetune_list_events import FinetuneListEvents -from together.generated.models.finetune_response import FinetuneResponse - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class FineTuningApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def fine_tunes_get( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> FinetuneList: - """List all jobs - - List the metadata for all fine-tuning jobs. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._fine_tunes_get_serialize( - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneList", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def fine_tunes_get_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[FinetuneList]: - """List all jobs - - List the metadata for all fine-tuning jobs. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. 
- :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_get_serialize( - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneList", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def fine_tunes_get_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List all jobs - - List the metadata for all fine-tuning jobs. - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_get_serialize( - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneList", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _fine_tunes_get_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/fine-tunes", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def fine_tunes_id_cancel_post( - 
self, - id: Annotated[ - StrictStr, - Field( - description="Fine-tune ID to cancel. A string that starts with `ft-`." - ), - ], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> FinetuneResponse: - """Cancel job - - Cancel a currently running fine-tuning job. - - :param id: Fine-tune ID to cancel. A string that starts with `ft-`. (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._fine_tunes_id_cancel_post_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneResponse", - "400": None, - "404": None, - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def fine_tunes_id_cancel_post_with_http_info( - self, - id: Annotated[ - StrictStr, - Field( - description="Fine-tune ID to cancel. A string that starts with `ft-`." - ), - ], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[FinetuneResponse]: - """Cancel job - - Cancel a currently running fine-tuning job. - - :param id: Fine-tune ID to cancel. A string that starts with `ft-`. (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_id_cancel_post_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneResponse", - "400": None, - "404": None, - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def fine_tunes_id_cancel_post_without_preload_content( - self, - id: Annotated[ - StrictStr, - Field( - description="Fine-tune ID to cancel. A string that starts with `ft-`." - ), - ], - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Cancel job - - Cancel a currently running fine-tuning job. - - :param id: Fine-tune ID to cancel. A string that starts with `ft-`. (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_id_cancel_post_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneResponse", - "400": None, - "404": None, - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _fine_tunes_id_cancel_post_serialize( - self, - id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if id is not None: - _path_params["id"] = id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" 
not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/fine-tunes/{id}/cancel", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def fine_tunes_id_events_get( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> FinetuneListEvents: - """List job events - - List the events for a single fine-tuning job. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_id_events_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneListEvents", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def fine_tunes_id_events_get_with_http_info( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[FinetuneListEvents]: - """List job events - - List the events for a single fine-tuning job. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_id_events_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneListEvents", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def fine_tunes_id_events_get_without_preload_content( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List job events - - List the events for a single fine-tuning job. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_id_events_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneListEvents", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _fine_tunes_id_events_get_serialize( - self, - id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if id is not None: - _path_params["id"] = id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - 
_header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/fine-tunes/{id}/events", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def fine_tunes_id_get( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> FinetuneResponse: - """List job - - List the metadata for a single fine-tuning job. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_id_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def fine_tunes_id_get_with_http_info( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[FinetuneResponse]: - """List job - - List the metadata for a single fine-tuning job. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_id_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def fine_tunes_id_get_without_preload_content( - self, - id: StrictStr, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List job - - List the metadata for a single fine-tuning job. - - :param id: (required) - :type id: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_id_get_serialize( - id=id, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _fine_tunes_id_get_serialize( - self, - id, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if id is not None: - _path_params["id"] = id - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - 
resource_path="/fine-tunes/{id}", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def fine_tunes_post( - self, - fine_tunes_post_request: FineTunesPostRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> FinetuneResponse: - """Create job - - Use a model to create a fine-tuning job. - - :param fine_tunes_post_request: (required) - :type fine_tunes_post_request: FineTunesPostRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._fine_tunes_post_serialize( - fine_tunes_post_request=fine_tunes_post_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def fine_tunes_post_with_http_info( - self, - fine_tunes_post_request: FineTunesPostRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[FinetuneResponse]: - """Create job - - Use a model to create a fine-tuning job. - - :param fine_tunes_post_request: (required) - :type fine_tunes_post_request: FineTunesPostRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. 
- :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_post_serialize( - fine_tunes_post_request=fine_tunes_post_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def fine_tunes_post_without_preload_content( - self, - fine_tunes_post_request: FineTunesPostRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create job - - Use a model to create a fine-tuning job. - - :param fine_tunes_post_request: (required) - :type fine_tunes_post_request: FineTunesPostRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. 
- :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._fine_tunes_post_serialize( - fine_tunes_post_request=fine_tunes_post_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _fine_tunes_post_serialize( - self, - fine_tunes_post_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if fine_tunes_post_request is not None: - _body_params = fine_tunes_post_request - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # set the HTTP header `Content-Type` - if 
_content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type( - ["application/json"] - ) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/fine-tunes", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) - - @validate_call - async def finetune_download_get( - self, - ft_id: Annotated[ - StrictStr, - Field( - description="Fine-tune ID to download. A string that starts with `ft-`." - ), - ], - checkpoint_step: Annotated[ - Optional[StrictInt], - Field( - description="Specifies step number for checkpoint to download. Ignores `checkpoint` value if set." - ), - ] = None, - checkpoint: Annotated[ - Optional[StrictStr], - Field( - description="Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set." - ), - ] = None, - output: Annotated[ - Optional[StrictStr], - Field( - description="Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`." - ), - ] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> FinetuneDownloadResult: - """Download model - - Download a compressed fine-tuned model or checkpoint to local disk. 
- - :param ft_id: Fine-tune ID to download. A string that starts with `ft-`. (required) - :type ft_id: str - :param checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. - :type checkpoint_step: int - :param checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. - :type checkpoint: str - :param output: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. - :type output: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._finetune_download_get_serialize( - ft_id=ft_id, - checkpoint_step=checkpoint_step, - checkpoint=checkpoint, - output=output, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneDownloadResult", - "400": None, - "404": None, - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def finetune_download_get_with_http_info( - self, - ft_id: Annotated[ - StrictStr, - Field( - description="Fine-tune ID to download. A string that starts with `ft-`." - ), - ], - checkpoint_step: Annotated[ - Optional[StrictInt], - Field( - description="Specifies step number for checkpoint to download. Ignores `checkpoint` value if set." - ), - ] = None, - checkpoint: Annotated[ - Optional[StrictStr], - Field( - description="Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set." - ), - ] = None, - output: Annotated[ - Optional[StrictStr], - Field( - description="Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`." - ), - ] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[FinetuneDownloadResult]: - """Download model - - Download a compressed fine-tuned model or checkpoint to local disk. - - :param ft_id: Fine-tune ID to download. 
A string that starts with `ft-`. (required) - :type ft_id: str - :param checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. - :type checkpoint_step: int - :param checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. - :type checkpoint: str - :param output: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. - :type output: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._finetune_download_get_serialize( - ft_id=ft_id, - checkpoint_step=checkpoint_step, - checkpoint=checkpoint, - output=output, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneDownloadResult", - "400": None, - "404": None, - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def finetune_download_get_without_preload_content( - self, - ft_id: Annotated[ - StrictStr, - Field( - description="Fine-tune ID to download. A string that starts with `ft-`." - ), - ], - checkpoint_step: Annotated[ - Optional[StrictInt], - Field( - description="Specifies step number for checkpoint to download. Ignores `checkpoint` value if set." - ), - ] = None, - checkpoint: Annotated[ - Optional[StrictStr], - Field( - description="Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set." - ), - ] = None, - output: Annotated[ - Optional[StrictStr], - Field( - description="Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`." - ), - ] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Download model - - Download a compressed fine-tuned model or checkpoint to local disk. - - :param ft_id: Fine-tune ID to download. 
A string that starts with `ft-`. (required) - :type ft_id: str - :param checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. - :type checkpoint_step: int - :param checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. - :type checkpoint: str - :param output: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. - :type output: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._finetune_download_get_serialize( - ft_id=ft_id, - checkpoint_step=checkpoint_step, - checkpoint=checkpoint, - output=output, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "FinetuneDownloadResult", - "400": None, - "404": None, - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _finetune_download_get_serialize( - self, - ft_id, - checkpoint_step, - checkpoint, - output, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if ft_id is not None: - - _query_params.append(("ft_id", ft_id)) - - if checkpoint_step is not None: - - _query_params.append(("checkpoint_step", checkpoint_step)) - - if checkpoint is not None: - - _query_params.append(("checkpoint", checkpoint)) - - if output is not None: - - _query_params.append(("output", output)) - - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/finetune/download", - path_params=_path_params, - query_params=_query_params, - 
header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api/hardware_api.py b/src/together/generated/api/hardware_api.py deleted file mode 100644 index 84b178de..00000000 --- a/src/together/generated/api/hardware_api.py +++ /dev/null @@ -1,304 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from pydantic import Field, StrictStr -from typing import Optional -from typing_extensions import Annotated -from together.generated.models.list_hardware200_response import ListHardware200Response - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class HardwareApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def list_hardware( - self, - model: Annotated[ - Optional[StrictStr], - Field( - description="Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. 
" - ), - ] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ListHardware200Response: - """List available hardware configurations - - Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. - - :param model: Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. - :type model: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_hardware_serialize( - model=model, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListHardware200Response", - "403": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def list_hardware_with_http_info( - self, - model: Annotated[ - Optional[StrictStr], - Field( - description="Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. " - ), - ] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ListHardware200Response]: - """List available hardware configurations - - Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. - - :param model: Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. - :type model: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._list_hardware_serialize( - model=model, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListHardware200Response", - "403": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def list_hardware_without_preload_content( - self, - model: Annotated[ - Optional[StrictStr], - Field( - description="Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. 
" - ), - ] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List available hardware configurations - - Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. - - :param model: Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. - :type model: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._list_hardware_serialize( - model=model, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ListHardware200Response", - "403": "ErrorData", - "500": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _list_hardware_serialize( - self, - model, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - if model is not None: - - _query_params.append(("model", model)) - - # process the header parameters - # process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/hardware", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api/images_api.py b/src/together/generated/api/images_api.py deleted file mode 100644 index a61365e8..00000000 --- 
a/src/together/generated/api/images_api.py +++ /dev/null @@ -1,291 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from together.generated.models.image_response import ImageResponse -from together.generated.models.images_generations_post_request import ( - ImagesGenerationsPostRequest, -) - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class ImagesApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def images_generations_post( - self, - images_generations_post_request: ImagesGenerationsPostRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ImageResponse: - """Create image - - Use an image model to generate an image for a given prompt. 
- - :param images_generations_post_request: (required) - :type images_generations_post_request: ImagesGenerationsPostRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._images_generations_post_serialize( - images_generations_post_request=images_generations_post_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ImageResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def images_generations_post_with_http_info( - self, - images_generations_post_request: ImagesGenerationsPostRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[ImageResponse]: - """Create image - - Use an image model to generate an image for a given prompt. - - :param images_generations_post_request: (required) - :type images_generations_post_request: ImagesGenerationsPostRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._images_generations_post_serialize( - images_generations_post_request=images_generations_post_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ImageResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def images_generations_post_without_preload_content( - self, - images_generations_post_request: ImagesGenerationsPostRequest, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create image - - Use an image model to generate an image for a given prompt. - - :param images_generations_post_request: (required) - :type images_generations_post_request: ImagesGenerationsPostRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._images_generations_post_serialize( - images_generations_post_request=images_generations_post_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "ImageResponse", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _images_generations_post_serialize( - self, - images_generations_post_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter - if images_generations_post_request is not 
None: - _body_params = images_generations_post_request - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type( - ["application/json"] - ) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/images/generations", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api/models_api.py b/src/together/generated/api/models_api.py deleted file mode 100644 index bb2d0e7b..00000000 --- a/src/together/generated/api/models_api.py +++ /dev/null @@ -1,279 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from typing import List -from together.generated.models.model_info import ModelInfo - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class ModelsApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def models( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> List[ModelInfo]: - """List all models - - Lists all of Together's open-source models - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._models_serialize( - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "List[ModelInfo]", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def models_with_http_info( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[List[ModelInfo]]: - """List all models - - Lists all of Together's open-source models - - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._models_serialize( - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "List[ModelInfo]", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def models_without_preload_content( - self, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """List all models - - Lists all of Together's open-source models - - :param _request_timeout: timeout setting for this request. 
If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._models_serialize( - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "List[ModelInfo]", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _models_serialize( - self, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # 
process the form parameters - # process the body parameter - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="GET", - resource_path="/models", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api/rerank_api.py b/src/together/generated/api/rerank_api.py deleted file mode 100644 index 2b4f99ae..00000000 --- a/src/together/generated/api/rerank_api.py +++ /dev/null @@ -1,308 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from typing import Optional -from together.generated.models.rerank_request import RerankRequest -from together.generated.models.rerank_response import RerankResponse - -from together.generated.api_client import ApiClient, RequestSerialized -from together.generated.api_response import ApiResponse -from together.generated.rest import RESTResponseType - - -class RerankApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - @validate_call - async def rerank( - self, - rerank_request: Optional[RerankRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RerankResponse: - """Create a rerank request - - Query a reranker model - - :param rerank_request: - :type rerank_request: RerankRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._rerank_serialize( - rerank_request=rerank_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "RerankResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - @validate_call - async def rerank_with_http_info( - self, - rerank_request: Optional[RerankRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[RerankResponse]: - """Create a rerank request - - Query a reranker model - - :param rerank_request: - :type rerank_request: RerankRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._rerank_serialize( - rerank_request=rerank_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "RerankResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - await response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - @validate_call - async def rerank_without_preload_content( - self, - rerank_request: Optional[RerankRequest] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] - ], - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Create a rerank request - - Query a reranker model - - :param rerank_request: - :type rerank_request: RerankRequest - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. 
- :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._rerank_serialize( - rerank_request=rerank_request, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index, - ) - - _response_types_map: Dict[str, Optional[str]] = { - "200": "RerankResponse", - "400": "ErrorData", - "401": "ErrorData", - "404": "ErrorData", - "429": "ErrorData", - "503": "ErrorData", - "504": "ErrorData", - } - response_data = await self.api_client.call_api( - *_param, _request_timeout=_request_timeout - ) - return response_data.response - - def _rerank_serialize( - self, - rerank_request, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = {} - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - # process the query parameters - # process the header parameters - # process the form parameters - # process the body parameter 
- if rerank_request is not None: - _body_params = rerank_request - - # set the HTTP header `Accept` - if "Accept" not in _header_params: - _header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] - ) - - # set the HTTP header `Content-Type` - if _content_type: - _header_params["Content-Type"] = _content_type - else: - _default_content_type = self.api_client.select_header_content_type( - ["application/json"] - ) - if _default_content_type is not None: - _header_params["Content-Type"] = _default_content_type - - # authentication setting - _auth_settings: List[str] = ["bearerAuth"] - - return self.api_client.param_serialize( - method="POST", - resource_path="/rerank", - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth, - ) diff --git a/src/together/generated/api_client.py b/src/together/generated/api_client.py deleted file mode 100644 index bfdac8dc..00000000 --- a/src/together/generated/api_client.py +++ /dev/null @@ -1,758 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import datetime -from dateutil.parser import parse -from enum import Enum -import decimal -import json -import mimetypes -import os -import re -import tempfile - -from urllib.parse import quote -from typing import Tuple, Optional, List, Dict, Union -from pydantic import SecretStr - -from together.generated.configuration import Configuration -from together.generated.api_response import ApiResponse, T as ApiResponseT -import together.generated.models -from together.generated import rest -from together.generated.exceptions import ( - ApiValueError, - ApiException, - BadRequestException, - UnauthorizedException, - ForbiddenException, - NotFoundException, - ServiceException, -) - -RequestSerialized = Tuple[str, str, Dict[str, str], Optional[str], List[str]] - - -class ApiClient: - """Generic API client for OpenAPI client library builds. - - OpenAPI generic API client. This client handles the client- - server communication, and is invariant across implementations. Specifics of - the methods and models for each application are generated from the OpenAPI - templates. - - :param configuration: .Configuration object for this client - :param header_name: a header to pass when making calls to the API. - :param header_value: a header value to pass when making calls to - the API. - :param cookie: a cookie to include in the header when making calls - to the API - """ - - PRIMITIVE_TYPES = (float, bool, bytes, str, int) - NATIVE_TYPES_MAPPING = { - "int": int, - "long": int, # TODO remove as only py3 is supported? 
- "float": float, - "str": str, - "bool": bool, - "date": datetime.date, - "datetime": datetime.datetime, - "decimal": decimal.Decimal, - "object": object, - } - _pool = None - - def __init__( - self, configuration=None, header_name=None, header_value=None, cookie=None - ) -> None: - # use default configuration if none is provided - if configuration is None: - configuration = Configuration.get_default() - self.configuration = configuration - - self.rest_client = rest.RESTClientObject(configuration) - self.default_headers = {} - if header_name is not None: - self.default_headers[header_name] = header_value - self.cookie = cookie - # Set default User-Agent. - self.user_agent = "OpenAPI-Generator/1.0.0/python" - self.client_side_validation = configuration.client_side_validation - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_value, traceback): - await self.close() - - async def close(self): - await self.rest_client.close() - - @property - def user_agent(self): - """User agent for this API client""" - return self.default_headers["User-Agent"] - - @user_agent.setter - def user_agent(self, value): - self.default_headers["User-Agent"] = value - - def set_default_header(self, header_name, header_value): - self.default_headers[header_name] = header_value - - _default = None - - @classmethod - def get_default(cls): - """Return new instance of ApiClient. - - This method returns newly created, based on default constructor, - object of ApiClient class or returns a copy of default - ApiClient. - - :return: The ApiClient object. - """ - if cls._default is None: - cls._default = ApiClient() - return cls._default - - @classmethod - def set_default(cls, default): - """Set default instance of ApiClient. - - It stores default ApiClient. - - :param default: object of ApiClient. 
- """ - cls._default = default - - def param_serialize( - self, - method, - resource_path, - path_params=None, - query_params=None, - header_params=None, - body=None, - post_params=None, - files=None, - auth_settings=None, - collection_formats=None, - _host=None, - _request_auth=None, - ) -> RequestSerialized: - """Builds the HTTP request params needed by the request. - :param method: Method to call. - :param resource_path: Path to method endpoint. - :param path_params: Path parameters in the url. - :param query_params: Query parameters in the url. - :param header_params: Header parameters to be - placed in the request header. - :param body: Request body. - :param post_params dict: Request post form parameters, - for `application/x-www-form-urlencoded`, `multipart/form-data`. - :param auth_settings list: Auth Settings names for the request. - :param files dict: key -> filename, value -> filepath, - for `multipart/form-data`. - :param collection_formats: dict of collection formats for path, query, - header, and post parameters. - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the authentication - in the spec for a single request. 
- :return: tuple of form (path, http_method, query_params, header_params, - body, post_params, files) - """ - - config = self.configuration - - # header parameters - header_params = header_params or {} - header_params.update(self.default_headers) - if self.cookie: - header_params["Cookie"] = self.cookie - if header_params: - header_params = self.sanitize_for_serialization(header_params) - header_params = dict( - self.parameters_to_tuples(header_params, collection_formats) - ) - - # path parameters - if path_params: - path_params = self.sanitize_for_serialization(path_params) - path_params = self.parameters_to_tuples(path_params, collection_formats) - for k, v in path_params: - # specified safe chars, encode everything - resource_path = resource_path.replace( - "{%s}" % k, quote(str(v), safe=config.safe_chars_for_path_param) - ) - - # post parameters - if post_params or files: - post_params = post_params if post_params else [] - post_params = self.sanitize_for_serialization(post_params) - post_params = self.parameters_to_tuples(post_params, collection_formats) - if files: - post_params.extend(self.files_parameters(files)) - - # auth setting - self.update_params_for_auth( - header_params, - query_params, - auth_settings, - resource_path, - method, - body, - request_auth=_request_auth, - ) - - # body - if body: - body = self.sanitize_for_serialization(body) - - # request url - if _host is None or self.configuration.ignore_operation_servers: - url = self.configuration.host + resource_path - else: - # use server/host defined in path or operation instead - url = _host + resource_path - - # query parameters - if query_params: - query_params = self.sanitize_for_serialization(query_params) - url_query = self.parameters_to_url_query(query_params, collection_formats) - url += "?" 
+ url_query - - return method, url, header_params, body, post_params - - async def call_api( - self, - method, - url, - header_params=None, - body=None, - post_params=None, - _request_timeout=None, - ) -> rest.RESTResponse: - """Makes the HTTP request (synchronous) - :param method: Method to call. - :param url: Path to method endpoint. - :param header_params: Header parameters to be - placed in the request header. - :param body: Request body. - :param post_params dict: Request post form parameters, - for `application/x-www-form-urlencoded`, `multipart/form-data`. - :param _request_timeout: timeout setting for this request. - :return: RESTResponse - """ - - try: - # perform request and return response - response_data = await self.rest_client.request( - method, - url, - headers=header_params, - body=body, - post_params=post_params, - _request_timeout=_request_timeout, - ) - - except ApiException as e: - raise e - - return response_data - - def response_deserialize( - self, - response_data: rest.RESTResponse, - response_types_map: Optional[Dict[str, ApiResponseT]] = None, - ) -> ApiResponse[ApiResponseT]: - """Deserializes response into an object. - :param response_data: RESTResponse object to be deserialized. - :param response_types_map: dict of response types. - :return: ApiResponse - """ - - msg = "RESTResponse.read() must be called before passing it to response_deserialize()" - assert response_data.data is not None, msg - - response_type = response_types_map.get(str(response_data.status), None) - if ( - not response_type - and isinstance(response_data.status, int) - and 100 <= response_data.status <= 599 - ): - # if not found, look for '1XX', '2XX', etc. 
- response_type = response_types_map.get( - str(response_data.status)[0] + "XX", None - ) - - # deserialize response data - response_text = None - return_data = None - try: - if response_type == "bytearray": - return_data = response_data.data - elif response_type == "file": - return_data = self.__deserialize_file(response_data) - elif response_type is not None: - match = None - content_type = response_data.getheader("content-type") - if content_type is not None: - match = re.search(r"charset=([a-zA-Z\-\d]+)[\s;]?", content_type) - encoding = match.group(1) if match else "utf-8" - response_text = response_data.data.decode(encoding) - return_data = self.deserialize( - response_text, response_type, content_type - ) - finally: - if not 200 <= response_data.status <= 299: - raise ApiException.from_response( - http_resp=response_data, - body=response_text, - data=return_data, - ) - - return ApiResponse( - status_code=response_data.status, - data=return_data, - headers=response_data.getheaders(), - raw_data=response_data.data, - ) - - def sanitize_for_serialization(self, obj): - """Builds a JSON POST object. - - If obj is None, return None. - If obj is SecretStr, return obj.get_secret_value() - If obj is str, int, long, float, bool, return directly. - If obj is datetime.datetime, datetime.date - convert to string in iso8601 format. - If obj is decimal.Decimal return string representation. - If obj is list, sanitize each element in the list. - If obj is dict, return the dict. - If obj is OpenAPI model, return the properties dict. - - :param obj: The data to serialize. - :return: The serialized form of data. 
- """ - if obj is None: - return None - elif isinstance(obj, Enum): - return obj.value - elif isinstance(obj, SecretStr): - return obj.get_secret_value() - elif isinstance(obj, self.PRIMITIVE_TYPES): - return obj - elif isinstance(obj, list): - return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] - elif isinstance(obj, tuple): - return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj) - elif isinstance(obj, (datetime.datetime, datetime.date)): - return obj.isoformat() - elif isinstance(obj, decimal.Decimal): - return str(obj) - - elif isinstance(obj, dict): - obj_dict = obj - else: - # Convert model obj to dict except - # attributes `openapi_types`, `attribute_map` - # and attributes which value is not None. - # Convert attribute name to json key in - # model definition for request. - if hasattr(obj, "to_dict") and callable(getattr(obj, "to_dict")): - obj_dict = obj.to_dict() - else: - obj_dict = obj.__dict__ - - return { - key: self.sanitize_for_serialization(val) for key, val in obj_dict.items() - } - - def deserialize( - self, response_text: str, response_type: str, content_type: Optional[str] - ): - """Deserializes response into an object. - - :param response: RESTResponse object to be deserialized. - :param response_type: class literal for - deserialized object, or string of class name. - :param content_type: content type of response. - - :return: deserialized object. 
- """ - - # fetch data from response object - if content_type is None: - try: - data = json.loads(response_text) - except ValueError: - data = response_text - elif re.match( - r"^application/(json|[\w!#$&.+-^_]+\+json)\s*(;|$)", - content_type, - re.IGNORECASE, - ): - if response_text == "": - data = "" - else: - data = json.loads(response_text) - elif re.match(r"^text\/[a-z.+-]+\s*(;|$)", content_type, re.IGNORECASE): - data = response_text - else: - raise ApiException( - status=0, reason="Unsupported content type: {0}".format(content_type) - ) - - return self.__deserialize(data, response_type) - - def __deserialize(self, data, klass): - """Deserializes dict, list, str into an object. - - :param data: dict, list or str. - :param klass: class literal, or string of class name. - - :return: object. - """ - if data is None: - return None - - if isinstance(klass, str): - if klass.startswith("List["): - m = re.match(r"List\[(.*)]", klass) - assert m is not None, "Malformed List type definition" - sub_kls = m.group(1) - return [self.__deserialize(sub_data, sub_kls) for sub_data in data] - - if klass.startswith("Dict["): - m = re.match(r"Dict\[([^,]*), (.*)]", klass) - assert m is not None, "Malformed Dict type definition" - sub_kls = m.group(2) - return {k: self.__deserialize(v, sub_kls) for k, v in data.items()} - - # convert str to class - if klass in self.NATIVE_TYPES_MAPPING: - klass = self.NATIVE_TYPES_MAPPING[klass] - else: - klass = getattr(together.generated.models, klass) - - if klass in self.PRIMITIVE_TYPES: - return self.__deserialize_primitive(data, klass) - elif klass == object: - return self.__deserialize_object(data) - elif klass == datetime.date: - return self.__deserialize_date(data) - elif klass == datetime.datetime: - return self.__deserialize_datetime(data) - elif klass == decimal.Decimal: - return decimal.Decimal(data) - elif issubclass(klass, Enum): - return self.__deserialize_enum(data, klass) - else: - return self.__deserialize_model(data, klass) 
- - def parameters_to_tuples(self, params, collection_formats): - """Get parameters as list of tuples, formatting collections. - - :param params: Parameters as dict or list of two-tuples - :param dict collection_formats: Parameter collection formats - :return: Parameters as list of tuples, collections formatted - """ - new_params: List[Tuple[str, str]] = [] - if collection_formats is None: - collection_formats = {} - for k, v in params.items() if isinstance(params, dict) else params: - if k in collection_formats: - collection_format = collection_formats[k] - if collection_format == "multi": - new_params.extend((k, value) for value in v) - else: - if collection_format == "ssv": - delimiter = " " - elif collection_format == "tsv": - delimiter = "\t" - elif collection_format == "pipes": - delimiter = "|" - else: # csv is the default - delimiter = "," - new_params.append((k, delimiter.join(str(value) for value in v))) - else: - new_params.append((k, v)) - return new_params - - def parameters_to_url_query(self, params, collection_formats): - """Get parameters as list of tuples, formatting collections. - - :param params: Parameters as dict or list of two-tuples - :param dict collection_formats: Parameter collection formats - :return: URL query string (e.g. 
a=Hello%20World&b=123) - """ - new_params: List[Tuple[str, str]] = [] - if collection_formats is None: - collection_formats = {} - for k, v in params.items() if isinstance(params, dict) else params: - if isinstance(v, bool): - v = str(v).lower() - if isinstance(v, (int, float)): - v = str(v) - if isinstance(v, dict): - v = json.dumps(v) - - if k in collection_formats: - collection_format = collection_formats[k] - if collection_format == "multi": - new_params.extend((k, quote(str(value))) for value in v) - else: - if collection_format == "ssv": - delimiter = " " - elif collection_format == "tsv": - delimiter = "\t" - elif collection_format == "pipes": - delimiter = "|" - else: # csv is the default - delimiter = "," - new_params.append( - (k, delimiter.join(quote(str(value)) for value in v)) - ) - else: - new_params.append((k, quote(str(v)))) - - return "&".join(["=".join(map(str, item)) for item in new_params]) - - def files_parameters( - self, - files: Dict[str, Union[str, bytes, List[str], List[bytes], Tuple[str, bytes]]], - ): - """Builds form parameters. - - :param files: File parameters. - :return: Form parameters with files. - """ - params = [] - for k, v in files.items(): - if isinstance(v, str): - with open(v, "rb") as f: - filename = os.path.basename(f.name) - filedata = f.read() - elif isinstance(v, bytes): - filename = k - filedata = v - elif isinstance(v, tuple): - filename, filedata = v - elif isinstance(v, list): - for file_param in v: - params.extend(self.files_parameters({k: file_param})) - continue - else: - raise ValueError("Unsupported file value") - mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream" - params.append(tuple([k, tuple([filename, filedata, mimetype])])) - return params - - def select_header_accept(self, accepts: List[str]) -> Optional[str]: - """Returns `Accept` based on an array of accepts provided. - - :param accepts: List of headers. - :return: Accept (e.g. application/json). 
- """ - if not accepts: - return None - - for accept in accepts: - if re.search("json", accept, re.IGNORECASE): - return accept - - return accepts[0] - - def select_header_content_type(self, content_types): - """Returns `Content-Type` based on an array of content_types provided. - - :param content_types: List of content-types. - :return: Content-Type (e.g. application/json). - """ - if not content_types: - return None - - for content_type in content_types: - if re.search("json", content_type, re.IGNORECASE): - return content_type - - return content_types[0] - - def update_params_for_auth( - self, - headers, - queries, - auth_settings, - resource_path, - method, - body, - request_auth=None, - ) -> None: - """Updates header and query params based on authentication setting. - - :param headers: Header parameters dict to be updated. - :param queries: Query parameters tuple list to be updated. - :param auth_settings: Authentication setting identifiers list. - :resource_path: A string representation of the HTTP request resource path. - :method: A string representation of the HTTP request method. - :body: A object representing the body of the HTTP request. - The object type is the return value of sanitize_for_serialization(). - :param request_auth: if set, the provided settings will - override the token in the configuration. - """ - if not auth_settings: - return - - if request_auth: - self._apply_auth_params( - headers, queries, resource_path, method, body, request_auth - ) - else: - for auth in auth_settings: - auth_setting = self.configuration.auth_settings().get(auth) - if auth_setting: - self._apply_auth_params( - headers, queries, resource_path, method, body, auth_setting - ) - - def _apply_auth_params( - self, headers, queries, resource_path, method, body, auth_setting - ) -> None: - """Updates the request parameters based on a single auth_setting - - :param headers: Header parameters dict to be updated. - :param queries: Query parameters tuple list to be updated. 
- :resource_path: A string representation of the HTTP request resource path. - :method: A string representation of the HTTP request method. - :body: A object representing the body of the HTTP request. - The object type is the return value of sanitize_for_serialization(). - :param auth_setting: auth settings for the endpoint - """ - if auth_setting["in"] == "cookie": - headers["Cookie"] = auth_setting["value"] - elif auth_setting["in"] == "header": - if auth_setting["type"] != "http-signature": - headers[auth_setting["key"]] = auth_setting["value"] - elif auth_setting["in"] == "query": - queries.append((auth_setting["key"], auth_setting["value"])) - else: - raise ApiValueError("Authentication token must be in `query` or `header`") - - def __deserialize_file(self, response): - """Deserializes body to file - - Saves response body into a file in a temporary folder, - using the filename from the `Content-Disposition` header if provided. - - handle file downloading - save response body into a tmp file and return the instance - - :param response: RESTResponse. - :return: file path. - """ - fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path) - os.close(fd) - os.remove(path) - - content_disposition = response.getheader("Content-Disposition") - if content_disposition: - m = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition) - assert m is not None, "Unexpected 'content-disposition' header value" - filename = m.group(1) - path = os.path.join(os.path.dirname(path), filename) - - with open(path, "wb") as f: - f.write(response.data) - - return path - - def __deserialize_primitive(self, data, klass): - """Deserializes string to primitive type. - - :param data: str. - :param klass: class literal. - - :return: int, long, float, str, bool. - """ - try: - return klass(data) - except UnicodeEncodeError: - return str(data) - except TypeError: - return data - - def __deserialize_object(self, value): - """Return an original value. - - :return: object. 
- """ - return value - - def __deserialize_date(self, string): - """Deserializes string to date. - - :param string: str. - :return: date. - """ - try: - return parse(string).date() - except ImportError: - return string - except ValueError: - raise rest.ApiException( - status=0, reason="Failed to parse `{0}` as date object".format(string) - ) - - def __deserialize_datetime(self, string): - """Deserializes string to datetime. - - The string should be in iso8601 datetime format. - - :param string: str. - :return: datetime. - """ - try: - return parse(string) - except ImportError: - return string - except ValueError: - raise rest.ApiException( - status=0, - reason=("Failed to parse `{0}` as datetime object".format(string)), - ) - - def __deserialize_enum(self, data, klass): - """Deserializes primitive type to enum. - - :param data: primitive type. - :param klass: class literal. - :return: enum value. - """ - try: - return klass(data) - except ValueError: - raise rest.ApiException( - status=0, reason=("Failed to parse `{0}` as `{1}`".format(data, klass)) - ) - - def __deserialize_model(self, data, klass): - """Deserializes list or dict to model. - - :param data: dict, list. - :param klass: class literal. - :return: model object. 
- """ - - return klass.from_dict(data) diff --git a/src/together/generated/api_response.py b/src/together/generated/api_response.py deleted file mode 100644 index 1ce13729..00000000 --- a/src/together/generated/api_response.py +++ /dev/null @@ -1,20 +0,0 @@ -"""API response object.""" - -from __future__ import annotations -from typing import Optional, Generic, Mapping, TypeVar -from pydantic import Field, StrictInt, StrictBytes, BaseModel - -T = TypeVar("T") - - -class ApiResponse(BaseModel, Generic[T]): - """ - API response object - """ - - status_code: StrictInt = Field(description="HTTP status code") - headers: Optional[Mapping[str, str]] = Field(None, description="HTTP headers") - data: T = Field(description="Deserialized data given the data type") - raw_data: StrictBytes = Field(description="Raw data (HTTP response body)") - - model_config = {"arbitrary_types_allowed": True} diff --git a/src/together/generated/configuration.py b/src/together/generated/configuration.py deleted file mode 100644 index 603014b0..00000000 --- a/src/together/generated/configuration.py +++ /dev/null @@ -1,583 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import copy -import http.client as httplib -import logging -from logging import FileHandler -import sys -from typing import Any, ClassVar, Dict, List, Literal, Optional, TypedDict -from typing_extensions import NotRequired, Self - -import urllib3 - - -JSON_SCHEMA_VALIDATION_KEYWORDS = { - "multipleOf", - "maximum", - "exclusiveMaximum", - "minimum", - "exclusiveMinimum", - "maxLength", - "minLength", - "pattern", - "maxItems", - "minItems", -} - -ServerVariablesT = Dict[str, str] - -GenericAuthSetting = TypedDict( - "GenericAuthSetting", - { - "type": str, - "in": str, - "key": str, - "value": str, - }, -) - - -OAuth2AuthSetting = TypedDict( - "OAuth2AuthSetting", - { - "type": Literal["oauth2"], - "in": Literal["header"], - "key": Literal["Authorization"], - "value": str, - }, -) - - -APIKeyAuthSetting = TypedDict( - "APIKeyAuthSetting", - { - "type": Literal["api_key"], - "in": str, - "key": str, - "value": Optional[str], - }, -) - - -BasicAuthSetting = TypedDict( - "BasicAuthSetting", - { - "type": Literal["basic"], - "in": Literal["header"], - "key": Literal["Authorization"], - "value": Optional[str], - }, -) - - -BearerFormatAuthSetting = TypedDict( - "BearerFormatAuthSetting", - { - "type": Literal["bearer"], - "in": Literal["header"], - "format": Literal["JWT"], - "key": Literal["Authorization"], - "value": str, - }, -) - - -BearerAuthSetting = TypedDict( - "BearerAuthSetting", - { - "type": Literal["bearer"], - "in": Literal["header"], - "key": Literal["Authorization"], - "value": str, - }, -) - - -HTTPSignatureAuthSetting = TypedDict( - "HTTPSignatureAuthSetting", - { - "type": Literal["http-signature"], - "in": Literal["header"], - "key": Literal["Authorization"], - "value": None, - }, -) - - -AuthSettings = TypedDict( - "AuthSettings", - { - "bearerAuth": BearerAuthSetting, - }, - total=False, -) - - -class HostSettingVariable(TypedDict): - description: str - default_value: str - enum_values: List[str] - - -class 
HostSetting(TypedDict): - url: str - description: str - variables: NotRequired[Dict[str, HostSettingVariable]] - - -class Configuration: - """This class contains various settings of the API client. - - :param host: Base url. - :param ignore_operation_servers - Boolean to ignore operation servers for the API client. - Config will use `host` as the base url regardless of the operation servers. - :param api_key: Dict to store API key(s). - Each entry in the dict specifies an API key. - The dict key is the name of the security scheme in the OAS specification. - The dict value is the API key secret. - :param api_key_prefix: Dict to store API prefix (e.g. Bearer). - The dict key is the name of the security scheme in the OAS specification. - The dict value is an API key prefix when generating the auth data. - :param username: Username for HTTP basic authentication. - :param password: Password for HTTP basic authentication. - :param access_token: Access token. - :param server_index: Index to servers configuration. - :param server_variables: Mapping with string values to replace variables in - templated server configuration. The validation of enums is performed for - variables with defined enum values before. - :param server_operation_index: Mapping from operation ID to an index to server - configuration. - :param server_operation_variables: Mapping from operation ID to a mapping with - string values to replace variables in templated server configuration. - The validation of enums is performed for variables with defined enum - values before. - :param ssl_ca_cert: str - the path to a file of concatenated CA certificates - in PEM format. - :param retries: Number of retries for API requests. 
- - :Example: - """ - - _default: ClassVar[Optional[Self]] = None - - def __init__( - self, - host: Optional[str] = None, - api_key: Optional[Dict[str, str]] = None, - api_key_prefix: Optional[Dict[str, str]] = None, - username: Optional[str] = None, - password: Optional[str] = None, - access_token: Optional[str] = None, - server_index: Optional[int] = None, - server_variables: Optional[ServerVariablesT] = None, - server_operation_index: Optional[Dict[int, int]] = None, - server_operation_variables: Optional[Dict[int, ServerVariablesT]] = None, - ignore_operation_servers: bool = False, - ssl_ca_cert: Optional[str] = None, - retries: Optional[int] = None, - *, - debug: Optional[bool] = None, - ) -> None: - """Constructor""" - self._base_path = "https://api.together.xyz/v1" if host is None else host - """Default Base url - """ - self.server_index = 0 if server_index is None and host is None else server_index - self.server_operation_index = server_operation_index or {} - """Default server index - """ - self.server_variables = server_variables or {} - self.server_operation_variables = server_operation_variables or {} - """Default server variables - """ - self.ignore_operation_servers = ignore_operation_servers - """Ignore operation servers - """ - self.temp_folder_path = None - """Temp file folder for downloading files - """ - # Authentication Settings - self.api_key = {} - if api_key: - self.api_key = api_key - """dict to store API key(s) - """ - self.api_key_prefix = {} - if api_key_prefix: - self.api_key_prefix = api_key_prefix - """dict to store API prefix (e.g. 
Bearer) - """ - self.refresh_api_key_hook = None - """function hook to refresh API key if expired - """ - self.username = username - """Username for HTTP basic authentication - """ - self.password = password - """Password for HTTP basic authentication - """ - self.access_token = access_token - """Access token - """ - self.logger = {} - """Logging Settings - """ - self.logger["package_logger"] = logging.getLogger("together.generated") - self.logger["urllib3_logger"] = logging.getLogger("urllib3") - self.logger_format = "%(asctime)s %(levelname)s %(message)s" - """Log format - """ - self.logger_stream_handler = None - """Log stream handler - """ - self.logger_file_handler: Optional[FileHandler] = None - """Log file handler - """ - self.logger_file = None - """Debug file location - """ - if debug is not None: - self.debug = debug - else: - self.__debug = False - """Debug switch - """ - - self.verify_ssl = True - """SSL/TLS verification - Set this to false to skip verifying SSL certificate when calling API - from https server. - """ - self.ssl_ca_cert = ssl_ca_cert - """Set this to customize the certificate file to verify the peer. - """ - self.cert_file = None - """client certificate file - """ - self.key_file = None - """client key file - """ - self.assert_hostname = None - """Set this to True/False to enable/disable SSL hostname verification. - """ - self.tls_server_name = None - """SSL/TLS Server Name Indication (SNI) - Set this to the SNI value expected by the server. - """ - - self.connection_pool_maxsize = 100 - """This value is passed to the aiohttp to limit simultaneous connections. - Default values is 100, None means no-limit. 
- """ - - self.proxy: Optional[str] = None - """Proxy URL - """ - self.proxy_headers = None - """Proxy headers - """ - self.safe_chars_for_path_param = "" - """Safe chars for path_param - """ - self.retries = retries - """Adding retries to override urllib3 default value 3 - """ - # Enable client side validation - self.client_side_validation = True - - self.socket_options = None - """Options to pass down to the underlying urllib3 socket - """ - - self.datetime_format = "%Y-%m-%dT%H:%M:%S.%f%z" - """datetime format - """ - - self.date_format = "%Y-%m-%d" - """date format - """ - - def __deepcopy__(self, memo: Dict[int, Any]) -> Self: - cls = self.__class__ - result = cls.__new__(cls) - memo[id(self)] = result - for k, v in self.__dict__.items(): - if k not in ("logger", "logger_file_handler"): - setattr(result, k, copy.deepcopy(v, memo)) - # shallow copy of loggers - result.logger = copy.copy(self.logger) - # use setters to configure loggers - result.logger_file = self.logger_file - result.debug = self.debug - return result - - def __setattr__(self, name: str, value: Any) -> None: - object.__setattr__(self, name, value) - - @classmethod - def set_default(cls, default: Optional[Self]) -> None: - """Set default instance of configuration. - - It stores default configuration, which can be - returned by get_default_copy method. - - :param default: object of Configuration - """ - cls._default = default - - @classmethod - def get_default_copy(cls) -> Self: - """Deprecated. Please use `get_default` instead. - - Deprecated. Please use `get_default` instead. - - :return: The configuration object. - """ - return cls.get_default() - - @classmethod - def get_default(cls) -> Self: - """Return the default configuration. - - This method returns newly created, based on default constructor, - object of Configuration class or returns a copy of default - configuration. - - :return: The configuration object. 
- """ - if cls._default is None: - cls._default = cls() - return cls._default - - @property - def logger_file(self) -> Optional[str]: - """The logger file. - - If the logger_file is None, then add stream handler and remove file - handler. Otherwise, add file handler and remove stream handler. - - :param value: The logger_file path. - :type: str - """ - return self.__logger_file - - @logger_file.setter - def logger_file(self, value: Optional[str]) -> None: - """The logger file. - - If the logger_file is None, then add stream handler and remove file - handler. Otherwise, add file handler and remove stream handler. - - :param value: The logger_file path. - :type: str - """ - self.__logger_file = value - if self.__logger_file: - # If set logging file, - # then add file handler and remove stream handler. - self.logger_file_handler = logging.FileHandler(self.__logger_file) - self.logger_file_handler.setFormatter(self.logger_formatter) - for _, logger in self.logger.items(): - logger.addHandler(self.logger_file_handler) - - @property - def debug(self) -> bool: - """Debug status - - :param value: The debug status, True or False. - :type: bool - """ - return self.__debug - - @debug.setter - def debug(self, value: bool) -> None: - """Debug status - - :param value: The debug status, True or False. - :type: bool - """ - self.__debug = value - if self.__debug: - # if debug status is True, turn on debug logging - for _, logger in self.logger.items(): - logger.setLevel(logging.DEBUG) - # turn on httplib debug - httplib.HTTPConnection.debuglevel = 1 - else: - # if debug status is False, turn off debug logging, - # setting log level to default `logging.WARNING` - for _, logger in self.logger.items(): - logger.setLevel(logging.WARNING) - # turn off httplib debug - httplib.HTTPConnection.debuglevel = 0 - - @property - def logger_format(self) -> str: - """The logger format. - - The logger_formatter will be updated when sets logger_format. - - :param value: The format string. 
- :type: str - """ - return self.__logger_format - - @logger_format.setter - def logger_format(self, value: str) -> None: - """The logger format. - - The logger_formatter will be updated when sets logger_format. - - :param value: The format string. - :type: str - """ - self.__logger_format = value - self.logger_formatter = logging.Formatter(self.__logger_format) - - def get_api_key_with_prefix( - self, identifier: str, alias: Optional[str] = None - ) -> Optional[str]: - """Gets API key (with prefix if set). - - :param identifier: The identifier of apiKey. - :param alias: The alternative identifier of apiKey. - :return: The token for api key authentication. - """ - if self.refresh_api_key_hook is not None: - self.refresh_api_key_hook(self) - key = self.api_key.get( - identifier, self.api_key.get(alias) if alias is not None else None - ) - if key: - prefix = self.api_key_prefix.get(identifier) - if prefix: - return "%s %s" % (prefix, key) - else: - return key - - return None - - def get_basic_auth_token(self) -> Optional[str]: - """Gets HTTP basic authentication header (string). - - :return: The token for basic HTTP authentication. - """ - username = "" - if self.username is not None: - username = self.username - password = "" - if self.password is not None: - password = self.password - return urllib3.util.make_headers(basic_auth=username + ":" + password).get( - "authorization" - ) - - def auth_settings(self) -> AuthSettings: - """Gets Auth Settings dict for api client. - - :return: The Auth Settings information dict. - """ - auth: AuthSettings = {} - if self.access_token is not None: - auth["bearerAuth"] = { - "type": "bearer", - "in": "header", - "key": "Authorization", - "value": "Bearer " + self.access_token, - } - return auth - - def to_debug_report(self) -> str: - """Gets the essential information for debugging. - - :return: The report for debugging. 
- """ - return ( - "Python SDK Debug Report:\n" - "OS: {env}\n" - "Python Version: {pyversion}\n" - "Version of the API: 2.0.0\n" - "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version) - ) - - def get_host_settings(self) -> List[HostSetting]: - """Gets an array of host settings - - :return: An array of host settings - """ - return [ - { - "url": "https://api.together.xyz/v1", - "description": "No description provided", - } - ] - - def get_host_from_settings( - self, - index: Optional[int], - variables: Optional[ServerVariablesT] = None, - servers: Optional[List[HostSetting]] = None, - ) -> str: - """Gets host URL based on the index and variables - :param index: array index of the host settings - :param variables: hash of variable and the corresponding value - :param servers: an array of host settings or None - :return: URL based on host settings - """ - if index is None: - return self._base_path - - variables = {} if variables is None else variables - servers = self.get_host_settings() if servers is None else servers - - try: - server = servers[index] - except IndexError: - raise ValueError( - "Invalid index {0} when selecting the host settings. " - "Must be less than {1}".format(index, len(servers)) - ) - - url = server["url"] - - # go through variables and replace placeholders - for variable_name, variable in server.get("variables", {}).items(): - used_value = variables.get(variable_name, variable["default_value"]) - - if "enum_values" in variable and used_value not in variable["enum_values"]: - raise ValueError( - "The variable `{0}` in the host URL has invalid value " - "{1}. 
Must be {2}.".format( - variable_name, variables[variable_name], variable["enum_values"] - ) - ) - - url = url.replace("{" + variable_name + "}", used_value) - - return url - - @property - def host(self) -> str: - """Return generated host.""" - return self.get_host_from_settings( - self.server_index, variables=self.server_variables - ) - - @host.setter - def host(self, value: str) -> None: - """Fix base path.""" - self._base_path = value - self.server_index = None diff --git a/src/together/generated/docs/AudioApi.md b/src/together/generated/docs/AudioApi.md deleted file mode 100644 index adf6ebb4..00000000 --- a/src/together/generated/docs/AudioApi.md +++ /dev/null @@ -1,88 +0,0 @@ -# together.generated.AudioApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**audio_speech**](AudioApi.md#audio_speech) | **POST** /audio/speech | Create audio generation request - - -# **audio_speech** -> bytearray audio_speech(audio_speech_request=audio_speech_request) - -Create audio generation request - -Generate audio from input text - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.audio_speech_request import AudioSpeechRequest -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. 
- -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.AudioApi(api_client) - audio_speech_request = together.generated.AudioSpeechRequest() # AudioSpeechRequest | (optional) - - try: - # Create audio generation request - api_response = await api_instance.audio_speech(audio_speech_request=audio_speech_request) - print("The response of AudioApi->audio_speech:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling AudioApi->audio_speech: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **audio_speech_request** | [**AudioSpeechRequest**](AudioSpeechRequest.md)| | [optional] - -### Return type - -**bytearray** - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: application/json - - **Accept**: application/octet-stream, audio/wav, audio/mpeg, text/event-stream, application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | OK | - | -**400** | BadRequest | - | -**429** | RateLimit | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechRequest.md b/src/together/generated/docs/AudioSpeechRequest.md deleted file mode 100644 index 23ef3ade..00000000 --- a/src/together/generated/docs/AudioSpeechRequest.md +++ /dev/null @@ -1,34 +0,0 @@ -# AudioSpeechRequest - - -## Properties - -Name | Type | Description | Notes ------------- | 
------------- | ------------- | ------------- -**model** | [**AudioSpeechRequestModel**](AudioSpeechRequestModel.md) | | -**input** | **str** | Input text to generate the audio for | -**voice** | [**AudioSpeechRequestVoice**](AudioSpeechRequestVoice.md) | | -**response_format** | **str** | The format of audio output | [optional] [default to 'wav'] -**language** | **str** | Language of input text | [optional] [default to 'en'] -**response_encoding** | **str** | Audio encoding of response | [optional] [default to 'pcm_f32le'] -**sample_rate** | **float** | Sampling rate to use for the output audio | [optional] [default to 44100] -**stream** | **bool** | If true, output is streamed for several characters at a time instead of waiting for the full response. The stream terminates with `data: [DONE]`. If false, return the encoded audio as octet stream | [optional] [default to False] - -## Example - -```python -from together.generated.models.audio_speech_request import AudioSpeechRequest - -# TODO update the JSON string below -json = "{}" -# create an instance of AudioSpeechRequest from a JSON string -audio_speech_request_instance = AudioSpeechRequest.from_json(json) -# print the JSON string representation of the object -print(AudioSpeechRequest.to_json()) - -# convert the object into a dict -audio_speech_request_dict = audio_speech_request_instance.to_dict() -# create an instance of AudioSpeechRequest from a dict -audio_speech_request_from_dict = AudioSpeechRequest.from_dict(audio_speech_request_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechRequestModel.md b/src/together/generated/docs/AudioSpeechRequestModel.md deleted file mode 100644 index 41febc9a..00000000 --- a/src/together/generated/docs/AudioSpeechRequestModel.md +++ /dev/null @@ -1,27 +0,0 @@ -# AudioSpeechRequestModel - -The name of 
the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#audio-models) - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel - -# TODO update the JSON string below -json = "{}" -# create an instance of AudioSpeechRequestModel from a JSON string -audio_speech_request_model_instance = AudioSpeechRequestModel.from_json(json) -# print the JSON string representation of the object -print(AudioSpeechRequestModel.to_json()) - -# convert the object into a dict -audio_speech_request_model_dict = audio_speech_request_model_instance.to_dict() -# create an instance of AudioSpeechRequestModel from a dict -audio_speech_request_model_from_dict = AudioSpeechRequestModel.from_dict(audio_speech_request_model_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechRequestVoice.md b/src/together/generated/docs/AudioSpeechRequestVoice.md deleted file mode 100644 index 9ad16586..00000000 --- a/src/together/generated/docs/AudioSpeechRequestVoice.md +++ /dev/null @@ -1,27 +0,0 @@ -# AudioSpeechRequestVoice - -The voice to use for generating the audio. [View all supported voices here](https://docs.together.ai/docs/text-to-speech#voices-available). 
- -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice - -# TODO update the JSON string below -json = "{}" -# create an instance of AudioSpeechRequestVoice from a JSON string -audio_speech_request_voice_instance = AudioSpeechRequestVoice.from_json(json) -# print the JSON string representation of the object -print(AudioSpeechRequestVoice.to_json()) - -# convert the object into a dict -audio_speech_request_voice_dict = audio_speech_request_voice_instance.to_dict() -# create an instance of AudioSpeechRequestVoice from a dict -audio_speech_request_voice_from_dict = AudioSpeechRequestVoice.from_dict(audio_speech_request_voice_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechStreamChunk.md b/src/together/generated/docs/AudioSpeechStreamChunk.md deleted file mode 100644 index 7d9f5558..00000000 --- a/src/together/generated/docs/AudioSpeechStreamChunk.md +++ /dev/null @@ -1,29 +0,0 @@ -# AudioSpeechStreamChunk - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | -**model** | **str** | | -**b64** | **str** | base64 encoded audio stream | - -## Example - -```python -from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk - -# TODO update the JSON string below -json = "{}" -# create an instance of AudioSpeechStreamChunk from a JSON string -audio_speech_stream_chunk_instance = AudioSpeechStreamChunk.from_json(json) -# print the JSON string representation of the object -print(AudioSpeechStreamChunk.to_json()) - -# convert the object into a dict -audio_speech_stream_chunk_dict = 
audio_speech_stream_chunk_instance.to_dict() -# create an instance of AudioSpeechStreamChunk from a dict -audio_speech_stream_chunk_from_dict = AudioSpeechStreamChunk.from_dict(audio_speech_stream_chunk_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechStreamEvent.md b/src/together/generated/docs/AudioSpeechStreamEvent.md deleted file mode 100644 index 9c2d9f7a..00000000 --- a/src/together/generated/docs/AudioSpeechStreamEvent.md +++ /dev/null @@ -1,27 +0,0 @@ -# AudioSpeechStreamEvent - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**data** | [**AudioSpeechStreamChunk**](AudioSpeechStreamChunk.md) | | - -## Example - -```python -from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent - -# TODO update the JSON string below -json = "{}" -# create an instance of AudioSpeechStreamEvent from a JSON string -audio_speech_stream_event_instance = AudioSpeechStreamEvent.from_json(json) -# print the JSON string representation of the object -print(AudioSpeechStreamEvent.to_json()) - -# convert the object into a dict -audio_speech_stream_event_dict = audio_speech_stream_event_instance.to_dict() -# create an instance of AudioSpeechStreamEvent from a dict -audio_speech_stream_event_from_dict = AudioSpeechStreamEvent.from_dict(audio_speech_stream_event_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechStreamResponse.md b/src/together/generated/docs/AudioSpeechStreamResponse.md deleted file mode 100644 index eda7c0b0..00000000 --- a/src/together/generated/docs/AudioSpeechStreamResponse.md +++ /dev/null @@ -1,27 +0,0 @@ -# 
AudioSpeechStreamResponse - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**data** | **str** | | - -## Example - -```python -from together.generated.models.audio_speech_stream_response import AudioSpeechStreamResponse - -# TODO update the JSON string below -json = "{}" -# create an instance of AudioSpeechStreamResponse from a JSON string -audio_speech_stream_response_instance = AudioSpeechStreamResponse.from_json(json) -# print the JSON string representation of the object -print(AudioSpeechStreamResponse.to_json()) - -# convert the object into a dict -audio_speech_stream_response_dict = audio_speech_stream_response_instance.to_dict() -# create an instance of AudioSpeechStreamResponse from a dict -audio_speech_stream_response_from_dict = AudioSpeechStreamResponse.from_dict(audio_speech_stream_response_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/Autoscaling.md b/src/together/generated/docs/Autoscaling.md deleted file mode 100644 index b1ee0b95..00000000 --- a/src/together/generated/docs/Autoscaling.md +++ /dev/null @@ -1,29 +0,0 @@ -# Autoscaling - -Configuration for automatic scaling of replicas based on demand. 
- -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**min_replicas** | **int** | The minimum number of replicas to maintain, even when there is no load | -**max_replicas** | **int** | The maximum number of replicas to scale up to under load | - -## Example - -```python -from together.generated.models.autoscaling import Autoscaling - -# TODO update the JSON string below -json = "{}" -# create an instance of Autoscaling from a JSON string -autoscaling_instance = Autoscaling.from_json(json) -# print the JSON string representation of the object -print(Autoscaling.to_json()) - -# convert the object into a dict -autoscaling_dict = autoscaling_instance.to_dict() -# create an instance of Autoscaling from a dict -autoscaling_from_dict = Autoscaling.from_dict(autoscaling_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatApi.md b/src/together/generated/docs/ChatApi.md deleted file mode 100644 index dd0e179d..00000000 --- a/src/together/generated/docs/ChatApi.md +++ /dev/null @@ -1,93 +0,0 @@ -# together.generated.ChatApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**chat_completions**](ChatApi.md#chat_completions) | **POST** /chat/completions | Create chat completion - - -# **chat_completions** -> ChatCompletionResponse chat_completions(chat_completion_request=chat_completion_request) - -Create chat completion - -Query a chat model. 
- -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.chat_completion_request import ChatCompletionRequest -from together.generated.models.chat_completion_response import ChatCompletionResponse -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.ChatApi(api_client) - chat_completion_request = together.generated.ChatCompletionRequest() # ChatCompletionRequest | (optional) - - try: - # Create chat completion - api_response = await api_instance.chat_completions(chat_completion_request=chat_completion_request) - print("The response of ChatApi->chat_completions:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling ChatApi->chat_completions: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **chat_completion_request** | [**ChatCompletionRequest**](ChatCompletionRequest.md)| | [optional] - -### Return type - -[**ChatCompletionResponse**](ChatCompletionResponse.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### 
HTTP request headers - - - **Content-Type**: application/json - - **Accept**: application/json, text/event-stream - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | 200 | - | -**400** | BadRequest | - | -**401** | Unauthorized | - | -**404** | NotFound | - | -**429** | RateLimit | - | -**503** | Overloaded | - | -**504** | Timeout | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionAssistantMessageParam.md b/src/together/generated/docs/ChatCompletionAssistantMessageParam.md deleted file mode 100644 index 5281fe5c..00000000 --- a/src/together/generated/docs/ChatCompletionAssistantMessageParam.md +++ /dev/null @@ -1,31 +0,0 @@ -# ChatCompletionAssistantMessageParam - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**content** | **str** | | [optional] -**role** | **str** | | -**name** | **str** | | [optional] -**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] -**function_call** | [**ChatCompletionMessageFunctionCall**](ChatCompletionMessageFunctionCall.md) | | [optional] - -## Example - -```python -from together.generated.models.chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionAssistantMessageParam from a JSON string -chat_completion_assistant_message_param_instance = ChatCompletionAssistantMessageParam.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionAssistantMessageParam.to_json()) - -# convert the object into a dict -chat_completion_assistant_message_param_dict = chat_completion_assistant_message_param_instance.to_dict() -# create an 
instance of ChatCompletionAssistantMessageParam from a dict -chat_completion_assistant_message_param_from_dict = ChatCompletionAssistantMessageParam.from_dict(chat_completion_assistant_message_param_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoice.md b/src/together/generated/docs/ChatCompletionChoice.md deleted file mode 100644 index b75becc7..00000000 --- a/src/together/generated/docs/ChatCompletionChoice.md +++ /dev/null @@ -1,30 +0,0 @@ -# ChatCompletionChoice - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**index** | **int** | | -**finish_reason** | [**FinishReason**](FinishReason.md) | | -**logprobs** | [**LogprobsPart**](LogprobsPart.md) | | [optional] -**delta** | [**ChatCompletionChoiceDelta**](ChatCompletionChoiceDelta.md) | | - -## Example - -```python -from together.generated.models.chat_completion_choice import ChatCompletionChoice - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionChoice from a JSON string -chat_completion_choice_instance = ChatCompletionChoice.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionChoice.to_json()) - -# convert the object into a dict -chat_completion_choice_dict = chat_completion_choice_instance.to_dict() -# create an instance of ChatCompletionChoice from a dict -chat_completion_choice_from_dict = ChatCompletionChoice.from_dict(chat_completion_choice_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoiceDelta.md b/src/together/generated/docs/ChatCompletionChoiceDelta.md deleted file mode 100644 index 
865b8090..00000000 --- a/src/together/generated/docs/ChatCompletionChoiceDelta.md +++ /dev/null @@ -1,31 +0,0 @@ -# ChatCompletionChoiceDelta - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**token_id** | **int** | | [optional] -**role** | **str** | | -**content** | **str** | | [optional] -**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] -**function_call** | [**ChatCompletionChoiceDeltaFunctionCall**](ChatCompletionChoiceDeltaFunctionCall.md) | | [optional] - -## Example - -```python -from together.generated.models.chat_completion_choice_delta import ChatCompletionChoiceDelta - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionChoiceDelta from a JSON string -chat_completion_choice_delta_instance = ChatCompletionChoiceDelta.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionChoiceDelta.to_json()) - -# convert the object into a dict -chat_completion_choice_delta_dict = chat_completion_choice_delta_instance.to_dict() -# create an instance of ChatCompletionChoiceDelta from a dict -chat_completion_choice_delta_from_dict = ChatCompletionChoiceDelta.from_dict(chat_completion_choice_delta_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md b/src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md deleted file mode 100644 index e6e861f0..00000000 --- a/src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md +++ /dev/null @@ -1,28 +0,0 @@ -# ChatCompletionChoiceDeltaFunctionCall - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**arguments** | **str** | | -**name** | **str** | | - -## Example - -```python -from 
together.generated.models.chat_completion_choice_delta_function_call import ChatCompletionChoiceDeltaFunctionCall - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionChoiceDeltaFunctionCall from a JSON string -chat_completion_choice_delta_function_call_instance = ChatCompletionChoiceDeltaFunctionCall.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionChoiceDeltaFunctionCall.to_json()) - -# convert the object into a dict -chat_completion_choice_delta_function_call_dict = chat_completion_choice_delta_function_call_instance.to_dict() -# create an instance of ChatCompletionChoiceDeltaFunctionCall from a dict -chat_completion_choice_delta_function_call_from_dict = ChatCompletionChoiceDeltaFunctionCall.from_dict(chat_completion_choice_delta_function_call_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoicesDataInner.md b/src/together/generated/docs/ChatCompletionChoicesDataInner.md deleted file mode 100644 index 56fe92b1..00000000 --- a/src/together/generated/docs/ChatCompletionChoicesDataInner.md +++ /dev/null @@ -1,32 +0,0 @@ -# ChatCompletionChoicesDataInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**text** | **str** | | [optional] -**index** | **int** | | [optional] -**seed** | **int** | | [optional] -**finish_reason** | [**FinishReason**](FinishReason.md) | | [optional] -**message** | [**ChatCompletionMessage**](ChatCompletionMessage.md) | | [optional] -**logprobs** | [**ChatCompletionChoicesDataInnerLogprobs**](ChatCompletionChoicesDataInnerLogprobs.md) | | [optional] - -## Example - -```python -from together.generated.models.chat_completion_choices_data_inner import ChatCompletionChoicesDataInner - -# TODO update the JSON string 
below -json = "{}" -# create an instance of ChatCompletionChoicesDataInner from a JSON string -chat_completion_choices_data_inner_instance = ChatCompletionChoicesDataInner.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionChoicesDataInner.to_json()) - -# convert the object into a dict -chat_completion_choices_data_inner_dict = chat_completion_choices_data_inner_instance.to_dict() -# create an instance of ChatCompletionChoicesDataInner from a dict -chat_completion_choices_data_inner_from_dict = ChatCompletionChoicesDataInner.from_dict(chat_completion_choices_data_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md b/src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md deleted file mode 100644 index 72320aab..00000000 --- a/src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md +++ /dev/null @@ -1,29 +0,0 @@ -# ChatCompletionChoicesDataInnerLogprobs - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**token_ids** | **List[float]** | List of token IDs corresponding to the logprobs | [optional] -**tokens** | **List[str]** | List of token strings | [optional] -**token_logprobs** | **List[float]** | List of token log probabilities | [optional] - -## Example - -```python -from together.generated.models.chat_completion_choices_data_inner_logprobs import ChatCompletionChoicesDataInnerLogprobs - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionChoicesDataInnerLogprobs from a JSON string -chat_completion_choices_data_inner_logprobs_instance = ChatCompletionChoicesDataInnerLogprobs.from_json(json) -# print the JSON string representation of the object 
-print(ChatCompletionChoicesDataInnerLogprobs.to_json()) - -# convert the object into a dict -chat_completion_choices_data_inner_logprobs_dict = chat_completion_choices_data_inner_logprobs_instance.to_dict() -# create an instance of ChatCompletionChoicesDataInnerLogprobs from a dict -chat_completion_choices_data_inner_logprobs_from_dict = ChatCompletionChoicesDataInnerLogprobs.from_dict(chat_completion_choices_data_inner_logprobs_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChunk.md b/src/together/generated/docs/ChatCompletionChunk.md deleted file mode 100644 index d42484b9..00000000 --- a/src/together/generated/docs/ChatCompletionChunk.md +++ /dev/null @@ -1,33 +0,0 @@ -# ChatCompletionChunk - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **str** | | -**object** | **str** | | -**created** | **int** | | -**system_fingerprint** | **str** | | [optional] -**model** | **str** | | -**choices** | [**List[ChatCompletionChunkChoicesInner]**](ChatCompletionChunkChoicesInner.md) | | -**usage** | [**CompletionChunkUsage**](CompletionChunkUsage.md) | | [optional] - -## Example - -```python -from together.generated.models.chat_completion_chunk import ChatCompletionChunk - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionChunk from a JSON string -chat_completion_chunk_instance = ChatCompletionChunk.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionChunk.to_json()) - -# convert the object into a dict -chat_completion_chunk_dict = chat_completion_chunk_instance.to_dict() -# create an instance of ChatCompletionChunk from a dict -chat_completion_chunk_from_dict = ChatCompletionChunk.from_dict(chat_completion_chunk_dict) -``` -[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChunkChoicesInner.md b/src/together/generated/docs/ChatCompletionChunkChoicesInner.md deleted file mode 100644 index b33bff02..00000000 --- a/src/together/generated/docs/ChatCompletionChunkChoicesInner.md +++ /dev/null @@ -1,31 +0,0 @@ -# ChatCompletionChunkChoicesInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**index** | **int** | | -**finish_reason** | [**FinishReason**](FinishReason.md) | | -**logprobs** | **float** | | [optional] -**seed** | **int** | | [optional] -**delta** | [**ChatCompletionChoiceDelta**](ChatCompletionChoiceDelta.md) | | - -## Example - -```python -from together.generated.models.chat_completion_chunk_choices_inner import ChatCompletionChunkChoicesInner - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionChunkChoicesInner from a JSON string -chat_completion_chunk_choices_inner_instance = ChatCompletionChunkChoicesInner.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionChunkChoicesInner.to_json()) - -# convert the object into a dict -chat_completion_chunk_choices_inner_dict = chat_completion_chunk_choices_inner_instance.to_dict() -# create an instance of ChatCompletionChunkChoicesInner from a dict -chat_completion_chunk_choices_inner_from_dict = ChatCompletionChunkChoicesInner.from_dict(chat_completion_chunk_choices_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionEvent.md b/src/together/generated/docs/ChatCompletionEvent.md deleted file mode 100644 index 49c1046a..00000000 --- 
a/src/together/generated/docs/ChatCompletionEvent.md +++ /dev/null @@ -1,27 +0,0 @@ -# ChatCompletionEvent - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**data** | [**ChatCompletionChunk**](ChatCompletionChunk.md) | | - -## Example - -```python -from together.generated.models.chat_completion_event import ChatCompletionEvent - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionEvent from a JSON string -chat_completion_event_instance = ChatCompletionEvent.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionEvent.to_json()) - -# convert the object into a dict -chat_completion_event_dict = chat_completion_event_instance.to_dict() -# create an instance of ChatCompletionEvent from a dict -chat_completion_event_from_dict = ChatCompletionEvent.from_dict(chat_completion_event_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionFunctionMessageParam.md b/src/together/generated/docs/ChatCompletionFunctionMessageParam.md deleted file mode 100644 index 1f89e299..00000000 --- a/src/together/generated/docs/ChatCompletionFunctionMessageParam.md +++ /dev/null @@ -1,29 +0,0 @@ -# ChatCompletionFunctionMessageParam - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**role** | **str** | | -**content** | **str** | | -**name** | **str** | | - -## Example - -```python -from together.generated.models.chat_completion_function_message_param import ChatCompletionFunctionMessageParam - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionFunctionMessageParam from a JSON string -chat_completion_function_message_param_instance = 
ChatCompletionFunctionMessageParam.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionFunctionMessageParam.to_json()) - -# convert the object into a dict -chat_completion_function_message_param_dict = chat_completion_function_message_param_instance.to_dict() -# create an instance of ChatCompletionFunctionMessageParam from a dict -chat_completion_function_message_param_from_dict = ChatCompletionFunctionMessageParam.from_dict(chat_completion_function_message_param_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionMessage.md b/src/together/generated/docs/ChatCompletionMessage.md deleted file mode 100644 index d3d814a1..00000000 --- a/src/together/generated/docs/ChatCompletionMessage.md +++ /dev/null @@ -1,30 +0,0 @@ -# ChatCompletionMessage - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**content** | **str** | | -**role** | **str** | | -**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] -**function_call** | [**ChatCompletionMessageFunctionCall**](ChatCompletionMessageFunctionCall.md) | | [optional] - -## Example - -```python -from together.generated.models.chat_completion_message import ChatCompletionMessage - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionMessage from a JSON string -chat_completion_message_instance = ChatCompletionMessage.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionMessage.to_json()) - -# convert the object into a dict -chat_completion_message_dict = chat_completion_message_instance.to_dict() -# create an instance of ChatCompletionMessage from a dict -chat_completion_message_from_dict = ChatCompletionMessage.from_dict(chat_completion_message_dict) -``` 
-[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionMessageFunctionCall.md b/src/together/generated/docs/ChatCompletionMessageFunctionCall.md deleted file mode 100644 index 177d9d1d..00000000 --- a/src/together/generated/docs/ChatCompletionMessageFunctionCall.md +++ /dev/null @@ -1,28 +0,0 @@ -# ChatCompletionMessageFunctionCall - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**arguments** | **str** | | -**name** | **str** | | - -## Example - -```python -from together.generated.models.chat_completion_message_function_call import ChatCompletionMessageFunctionCall - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionMessageFunctionCall from a JSON string -chat_completion_message_function_call_instance = ChatCompletionMessageFunctionCall.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionMessageFunctionCall.to_json()) - -# convert the object into a dict -chat_completion_message_function_call_dict = chat_completion_message_function_call_instance.to_dict() -# create an instance of ChatCompletionMessageFunctionCall from a dict -chat_completion_message_function_call_from_dict = ChatCompletionMessageFunctionCall.from_dict(chat_completion_message_function_call_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionMessageParam.md b/src/together/generated/docs/ChatCompletionMessageParam.md deleted file mode 100644 index c8e6136d..00000000 --- a/src/together/generated/docs/ChatCompletionMessageParam.md +++ /dev/null @@ -1,32 +0,0 @@ -# ChatCompletionMessageParam - - -## Properties - -Name | Type 
| Description | Notes ------------- | ------------- | ------------- | ------------- -**content** | **str** | | -**role** | **str** | | -**name** | **str** | | -**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] -**function_call** | [**ChatCompletionMessageFunctionCall**](ChatCompletionMessageFunctionCall.md) | | [optional] -**tool_call_id** | **str** | | - -## Example - -```python -from together.generated.models.chat_completion_message_param import ChatCompletionMessageParam - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionMessageParam from a JSON string -chat_completion_message_param_instance = ChatCompletionMessageParam.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionMessageParam.to_json()) - -# convert the object into a dict -chat_completion_message_param_dict = chat_completion_message_param_instance.to_dict() -# create an instance of ChatCompletionMessageParam from a dict -chat_completion_message_param_from_dict = ChatCompletionMessageParam.from_dict(chat_completion_message_param_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequest.md b/src/together/generated/docs/ChatCompletionRequest.md deleted file mode 100644 index f0200b3f..00000000 --- a/src/together/generated/docs/ChatCompletionRequest.md +++ /dev/null @@ -1,49 +0,0 @@ -# ChatCompletionRequest - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**messages** | [**List[ChatCompletionRequestMessagesInner]**](ChatCompletionRequestMessagesInner.md) | A list of messages comprising the conversation so far. | -**model** | [**ChatCompletionRequestModel**](ChatCompletionRequestModel.md) | | -**max_tokens** | **int** | The maximum number of tokens to generate. 
| [optional] -**stop** | **List[str]** | A list of string sequences that will truncate (stop) inference text output. For example, \"</s>\" will stop generation as soon as the model generates the given token. | [optional] -**temperature** | **float** | A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output. | [optional] -**top_p** | **float** | A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text. | [optional] -**top_k** | **int** | An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. | [optional] -**context_length_exceeded_behavior** | **str** | Defined the behavior of the API when max_tokens exceed the maximum context length of the model. When set to 'error', API will return 400 with appropriate error message. When set to 'truncate', override the max_tokens with maximum context length of the model. | [optional] [default to 'error'] -**repetition_penalty** | **float** | A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. | [optional] -**stream** | **bool** | If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. 
The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results. | [optional] -**logprobs** | **int** | Determines the number of most likely tokens to return at each token position log probabilities to return. | [optional] -**echo** | **bool** | If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs. | [optional] -**n** | **int** | The number of completions to generate for each prompt. | [optional] -**min_p** | **float** | A number between 0 and 1 that can be used as an alternative to top_p and top-k. | [optional] -**presence_penalty** | **float** | A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics. | [optional] -**frequency_penalty** | **float** | A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned. | [optional] -**logit_bias** | **Dict[str, float]** | Adjusts the likelihood of specific tokens appearing in the generated output. | [optional] -**seed** | **int** | Seed value for reproducibility. | [optional] -**function_call** | [**ChatCompletionRequestFunctionCall**](ChatCompletionRequestFunctionCall.md) | | [optional] -**response_format** | [**ChatCompletionRequestResponseFormat**](ChatCompletionRequestResponseFormat.md) | | [optional] -**tools** | [**List[ToolsPart]**](ToolsPart.md) | A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. | [optional] -**tool_choice** | [**ChatCompletionRequestToolChoice**](ChatCompletionRequestToolChoice.md) | | [optional] -**safety_model** | **str** | The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). 
| [optional] - -## Example - -```python -from together.generated.models.chat_completion_request import ChatCompletionRequest - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionRequest from a JSON string -chat_completion_request_instance = ChatCompletionRequest.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionRequest.to_json()) - -# convert the object into a dict -chat_completion_request_dict = chat_completion_request_instance.to_dict() -# create an instance of ChatCompletionRequest from a dict -chat_completion_request_from_dict = ChatCompletionRequest.from_dict(chat_completion_request_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestFunctionCall.md b/src/together/generated/docs/ChatCompletionRequestFunctionCall.md deleted file mode 100644 index dbeddd2a..00000000 --- a/src/together/generated/docs/ChatCompletionRequestFunctionCall.md +++ /dev/null @@ -1,27 +0,0 @@ -# ChatCompletionRequestFunctionCall - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**name** | **str** | | - -## Example - -```python -from together.generated.models.chat_completion_request_function_call import ChatCompletionRequestFunctionCall - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionRequestFunctionCall from a JSON string -chat_completion_request_function_call_instance = ChatCompletionRequestFunctionCall.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionRequestFunctionCall.to_json()) - -# convert the object into a dict -chat_completion_request_function_call_dict = chat_completion_request_function_call_instance.to_dict() -# create an instance of ChatCompletionRequestFunctionCall 
from a dict -chat_completion_request_function_call_from_dict = ChatCompletionRequestFunctionCall.from_dict(chat_completion_request_function_call_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md b/src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md deleted file mode 100644 index bb0a34f7..00000000 --- a/src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md +++ /dev/null @@ -1,27 +0,0 @@ -# ChatCompletionRequestFunctionCallOneOf - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**name** | **str** | | - -## Example - -```python -from together.generated.models.chat_completion_request_function_call_one_of import ChatCompletionRequestFunctionCallOneOf - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionRequestFunctionCallOneOf from a JSON string -chat_completion_request_function_call_one_of_instance = ChatCompletionRequestFunctionCallOneOf.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionRequestFunctionCallOneOf.to_json()) - -# convert the object into a dict -chat_completion_request_function_call_one_of_dict = chat_completion_request_function_call_one_of_instance.to_dict() -# create an instance of ChatCompletionRequestFunctionCallOneOf from a dict -chat_completion_request_function_call_one_of_from_dict = ChatCompletionRequestFunctionCallOneOf.from_dict(chat_completion_request_function_call_one_of_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestMessagesInner.md 
b/src/together/generated/docs/ChatCompletionRequestMessagesInner.md deleted file mode 100644 index 8512ef45..00000000 --- a/src/together/generated/docs/ChatCompletionRequestMessagesInner.md +++ /dev/null @@ -1,28 +0,0 @@ -# ChatCompletionRequestMessagesInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**role** | **str** | The role of the messages author. Choice between: system, user, or assistant. | -**content** | **str** | The content of the message, which can either be a simple string or a structured format. | - -## Example - -```python -from together.generated.models.chat_completion_request_messages_inner import ChatCompletionRequestMessagesInner - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionRequestMessagesInner from a JSON string -chat_completion_request_messages_inner_instance = ChatCompletionRequestMessagesInner.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionRequestMessagesInner.to_json()) - -# convert the object into a dict -chat_completion_request_messages_inner_dict = chat_completion_request_messages_inner_instance.to_dict() -# create an instance of ChatCompletionRequestMessagesInner from a dict -chat_completion_request_messages_inner_from_dict = ChatCompletionRequestMessagesInner.from_dict(chat_completion_request_messages_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestModel.md b/src/together/generated/docs/ChatCompletionRequestModel.md deleted file mode 100644 index c9387ca0..00000000 --- a/src/together/generated/docs/ChatCompletionRequestModel.md +++ /dev/null @@ -1,27 +0,0 @@ -# ChatCompletionRequestModel - -The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.chat_completion_request_model import ChatCompletionRequestModel - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionRequestModel from a JSON string -chat_completion_request_model_instance = ChatCompletionRequestModel.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionRequestModel.to_json()) - -# convert the object into a dict -chat_completion_request_model_dict = chat_completion_request_model_instance.to_dict() -# create an instance of ChatCompletionRequestModel from a dict -chat_completion_request_model_from_dict = ChatCompletionRequestModel.from_dict(chat_completion_request_model_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestResponseFormat.md b/src/together/generated/docs/ChatCompletionRequestResponseFormat.md deleted file mode 100644 index aa94fd0a..00000000 --- a/src/together/generated/docs/ChatCompletionRequestResponseFormat.md +++ /dev/null @@ -1,29 +0,0 @@ -# ChatCompletionRequestResponseFormat - -An object specifying the format that the model must output. - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**type** | **str** | The type of the response format. | [optional] -**var_schema** | **Dict[str, str]** | The schema of the response format. 
| [optional] - -## Example - -```python -from together.generated.models.chat_completion_request_response_format import ChatCompletionRequestResponseFormat - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionRequestResponseFormat from a JSON string -chat_completion_request_response_format_instance = ChatCompletionRequestResponseFormat.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionRequestResponseFormat.to_json()) - -# convert the object into a dict -chat_completion_request_response_format_dict = chat_completion_request_response_format_instance.to_dict() -# create an instance of ChatCompletionRequestResponseFormat from a dict -chat_completion_request_response_format_from_dict = ChatCompletionRequestResponseFormat.from_dict(chat_completion_request_response_format_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestToolChoice.md b/src/together/generated/docs/ChatCompletionRequestToolChoice.md deleted file mode 100644 index c6a50e08..00000000 --- a/src/together/generated/docs/ChatCompletionRequestToolChoice.md +++ /dev/null @@ -1,31 +0,0 @@ -# ChatCompletionRequestToolChoice - -Controls which (if any) function is called by the model. By default uses `auto`, which lets the model pick between generating a message or calling a function. 
- -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**index** | **float** | | -**id** | **str** | | -**type** | **str** | | -**function** | [**ToolChoiceFunction**](ToolChoiceFunction.md) | | - -## Example - -```python -from together.generated.models.chat_completion_request_tool_choice import ChatCompletionRequestToolChoice - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionRequestToolChoice from a JSON string -chat_completion_request_tool_choice_instance = ChatCompletionRequestToolChoice.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionRequestToolChoice.to_json()) - -# convert the object into a dict -chat_completion_request_tool_choice_dict = chat_completion_request_tool_choice_instance.to_dict() -# create an instance of ChatCompletionRequestToolChoice from a dict -chat_completion_request_tool_choice_from_dict = ChatCompletionRequestToolChoice.from_dict(chat_completion_request_tool_choice_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionResponse.md b/src/together/generated/docs/ChatCompletionResponse.md deleted file mode 100644 index bff9c23b..00000000 --- a/src/together/generated/docs/ChatCompletionResponse.md +++ /dev/null @@ -1,32 +0,0 @@ -# ChatCompletionResponse - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **str** | | -**choices** | [**List[ChatCompletionChoicesDataInner]**](ChatCompletionChoicesDataInner.md) | | -**usage** | [**UsageData**](UsageData.md) | | [optional] -**created** | **int** | | -**model** | **str** | | -**object** | **str** | | - -## Example - -```python -from together.generated.models.chat_completion_response import 
ChatCompletionResponse - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionResponse from a JSON string -chat_completion_response_instance = ChatCompletionResponse.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionResponse.to_json()) - -# convert the object into a dict -chat_completion_response_dict = chat_completion_response_instance.to_dict() -# create an instance of ChatCompletionResponse from a dict -chat_completion_response_from_dict = ChatCompletionResponse.from_dict(chat_completion_response_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionStream.md b/src/together/generated/docs/ChatCompletionStream.md deleted file mode 100644 index 0425981a..00000000 --- a/src/together/generated/docs/ChatCompletionStream.md +++ /dev/null @@ -1,27 +0,0 @@ -# ChatCompletionStream - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**data** | **str** | | - -## Example - -```python -from together.generated.models.chat_completion_stream import ChatCompletionStream - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionStream from a JSON string -chat_completion_stream_instance = ChatCompletionStream.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionStream.to_json()) - -# convert the object into a dict -chat_completion_stream_dict = chat_completion_stream_instance.to_dict() -# create an instance of ChatCompletionStream from a dict -chat_completion_stream_from_dict = ChatCompletionStream.from_dict(chat_completion_stream_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to 
README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionSystemMessageParam.md b/src/together/generated/docs/ChatCompletionSystemMessageParam.md deleted file mode 100644 index b1b6ee2e..00000000 --- a/src/together/generated/docs/ChatCompletionSystemMessageParam.md +++ /dev/null @@ -1,29 +0,0 @@ -# ChatCompletionSystemMessageParam - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**content** | **str** | | -**role** | **str** | | -**name** | **str** | | [optional] - -## Example - -```python -from together.generated.models.chat_completion_system_message_param import ChatCompletionSystemMessageParam - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionSystemMessageParam from a JSON string -chat_completion_system_message_param_instance = ChatCompletionSystemMessageParam.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionSystemMessageParam.to_json()) - -# convert the object into a dict -chat_completion_system_message_param_dict = chat_completion_system_message_param_instance.to_dict() -# create an instance of ChatCompletionSystemMessageParam from a dict -chat_completion_system_message_param_from_dict = ChatCompletionSystemMessageParam.from_dict(chat_completion_system_message_param_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionToken.md b/src/together/generated/docs/ChatCompletionToken.md deleted file mode 100644 index 159ba763..00000000 --- a/src/together/generated/docs/ChatCompletionToken.md +++ /dev/null @@ -1,30 +0,0 @@ -# ChatCompletionToken - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **int** | | -**text** | **str** | | -**logprob** | 
**float** | | -**special** | **bool** | | - -## Example - -```python -from together.generated.models.chat_completion_token import ChatCompletionToken - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionToken from a JSON string -chat_completion_token_instance = ChatCompletionToken.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionToken.to_json()) - -# convert the object into a dict -chat_completion_token_dict = chat_completion_token_instance.to_dict() -# create an instance of ChatCompletionToken from a dict -chat_completion_token_from_dict = ChatCompletionToken.from_dict(chat_completion_token_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionTool.md b/src/together/generated/docs/ChatCompletionTool.md deleted file mode 100644 index ff0a341f..00000000 --- a/src/together/generated/docs/ChatCompletionTool.md +++ /dev/null @@ -1,28 +0,0 @@ -# ChatCompletionTool - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**type** | **str** | | -**function** | [**ChatCompletionToolFunction**](ChatCompletionToolFunction.md) | | - -## Example - -```python -from together.generated.models.chat_completion_tool import ChatCompletionTool - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionTool from a JSON string -chat_completion_tool_instance = ChatCompletionTool.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionTool.to_json()) - -# convert the object into a dict -chat_completion_tool_dict = chat_completion_tool_instance.to_dict() -# create an instance of ChatCompletionTool from a dict -chat_completion_tool_from_dict = ChatCompletionTool.from_dict(chat_completion_tool_dict) -``` -[[Back to 
Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionToolFunction.md b/src/together/generated/docs/ChatCompletionToolFunction.md deleted file mode 100644 index a84fb6e5..00000000 --- a/src/together/generated/docs/ChatCompletionToolFunction.md +++ /dev/null @@ -1,29 +0,0 @@ -# ChatCompletionToolFunction - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**description** | **str** | | [optional] -**name** | **str** | | -**parameters** | **Dict[str, object]** | | [optional] - -## Example - -```python -from together.generated.models.chat_completion_tool_function import ChatCompletionToolFunction - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionToolFunction from a JSON string -chat_completion_tool_function_instance = ChatCompletionToolFunction.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionToolFunction.to_json()) - -# convert the object into a dict -chat_completion_tool_function_dict = chat_completion_tool_function_instance.to_dict() -# create an instance of ChatCompletionToolFunction from a dict -chat_completion_tool_function_from_dict = ChatCompletionToolFunction.from_dict(chat_completion_tool_function_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionToolMessageParam.md b/src/together/generated/docs/ChatCompletionToolMessageParam.md deleted file mode 100644 index 6e45af91..00000000 --- a/src/together/generated/docs/ChatCompletionToolMessageParam.md +++ /dev/null @@ -1,29 +0,0 @@ -# ChatCompletionToolMessageParam - - -## Properties - -Name | Type | Description | Notes ------------- | 
------------- | ------------- | ------------- -**role** | **str** | | -**content** | **str** | | -**tool_call_id** | **str** | | - -## Example - -```python -from together.generated.models.chat_completion_tool_message_param import ChatCompletionToolMessageParam - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionToolMessageParam from a JSON string -chat_completion_tool_message_param_instance = ChatCompletionToolMessageParam.from_json(json) -# print the JSON string representation of the object -print(ChatCompletionToolMessageParam.to_json()) - -# convert the object into a dict -chat_completion_tool_message_param_dict = chat_completion_tool_message_param_instance.to_dict() -# create an instance of ChatCompletionToolMessageParam from a dict -chat_completion_tool_message_param_from_dict = ChatCompletionToolMessageParam.from_dict(chat_completion_tool_message_param_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionUserMessageParam.md b/src/together/generated/docs/ChatCompletionUserMessageParam.md deleted file mode 100644 index 1720154b..00000000 --- a/src/together/generated/docs/ChatCompletionUserMessageParam.md +++ /dev/null @@ -1,29 +0,0 @@ -# ChatCompletionUserMessageParam - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**content** | **str** | | -**role** | **str** | | -**name** | **str** | | [optional] - -## Example - -```python -from together.generated.models.chat_completion_user_message_param import ChatCompletionUserMessageParam - -# TODO update the JSON string below -json = "{}" -# create an instance of ChatCompletionUserMessageParam from a JSON string -chat_completion_user_message_param_instance = ChatCompletionUserMessageParam.from_json(json) -# print the JSON string 
representation of the object -print(ChatCompletionUserMessageParam.to_json()) - -# convert the object into a dict -chat_completion_user_message_param_dict = chat_completion_user_message_param_instance.to_dict() -# create an instance of ChatCompletionUserMessageParam from a dict -chat_completion_user_message_param_from_dict = ChatCompletionUserMessageParam.from_dict(chat_completion_user_message_param_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionApi.md b/src/together/generated/docs/CompletionApi.md deleted file mode 100644 index 3e0ef088..00000000 --- a/src/together/generated/docs/CompletionApi.md +++ /dev/null @@ -1,93 +0,0 @@ -# together.generated.CompletionApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**completions**](CompletionApi.md#completions) | **POST** /completions | Create completion - - -# **completions** -> CompletionResponse completions(completion_request=completion_request) - -Create completion - -Query a language, code, or image model. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.completion_request import CompletionRequest -from together.generated.models.completion_response import CompletionResponse -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. 
-# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.CompletionApi(api_client) - completion_request = together.generated.CompletionRequest() # CompletionRequest | (optional) - - try: - # Create completion - api_response = await api_instance.completions(completion_request=completion_request) - print("The response of CompletionApi->completions:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling CompletionApi->completions: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **completion_request** | [**CompletionRequest**](CompletionRequest.md)| | [optional] - -### Return type - -[**CompletionResponse**](CompletionResponse.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: application/json - - **Accept**: application/json, text/event-stream - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | 200 | - | -**400** | BadRequest | - | -**401** | Unauthorized | - | -**404** | NotFound | - | -**429** | RateLimit | - | -**503** | Overloaded | - | -**504** | Timeout | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChoice.md b/src/together/generated/docs/CompletionChoice.md deleted file mode 100644 index 
8c8f978c..00000000 --- a/src/together/generated/docs/CompletionChoice.md +++ /dev/null @@ -1,27 +0,0 @@ -# CompletionChoice - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**text** | **str** | | [optional] - -## Example - -```python -from together.generated.models.completion_choice import CompletionChoice - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionChoice from a JSON string -completion_choice_instance = CompletionChoice.from_json(json) -# print the JSON string representation of the object -print(CompletionChoice.to_json()) - -# convert the object into a dict -completion_choice_dict = completion_choice_instance.to_dict() -# create an instance of CompletionChoice from a dict -completion_choice_from_dict = CompletionChoice.from_dict(completion_choice_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChoicesDataInner.md b/src/together/generated/docs/CompletionChoicesDataInner.md deleted file mode 100644 index 370136e4..00000000 --- a/src/together/generated/docs/CompletionChoicesDataInner.md +++ /dev/null @@ -1,30 +0,0 @@ -# CompletionChoicesDataInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**text** | **str** | | [optional] -**seed** | **int** | | [optional] -**finish_reason** | [**FinishReason**](FinishReason.md) | | [optional] -**logprobs** | [**LogprobsPart**](.md) | | [optional] - -## Example - -```python -from together.generated.models.completion_choices_data_inner import CompletionChoicesDataInner - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionChoicesDataInner from a JSON string -completion_choices_data_inner_instance = CompletionChoicesDataInner.from_json(json) 
-# print the JSON string representation of the object -print(CompletionChoicesDataInner.to_json()) - -# convert the object into a dict -completion_choices_data_inner_dict = completion_choices_data_inner_instance.to_dict() -# create an instance of CompletionChoicesDataInner from a dict -completion_choices_data_inner_from_dict = CompletionChoicesDataInner.from_dict(completion_choices_data_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChunk.md b/src/together/generated/docs/CompletionChunk.md deleted file mode 100644 index 2d9fe0e5..00000000 --- a/src/together/generated/docs/CompletionChunk.md +++ /dev/null @@ -1,32 +0,0 @@ -# CompletionChunk - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **str** | | -**token** | [**CompletionToken**](CompletionToken.md) | | -**choices** | [**List[CompletionChoice]**](CompletionChoice.md) | | -**usage** | [**CompletionChunkUsage**](CompletionChunkUsage.md) | | -**seed** | **int** | | [optional] -**finish_reason** | [**FinishReason**](FinishReason.md) | | - -## Example - -```python -from together.generated.models.completion_chunk import CompletionChunk - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionChunk from a JSON string -completion_chunk_instance = CompletionChunk.from_json(json) -# print the JSON string representation of the object -print(CompletionChunk.to_json()) - -# convert the object into a dict -completion_chunk_dict = completion_chunk_instance.to_dict() -# create an instance of CompletionChunk from a dict -completion_chunk_from_dict = CompletionChunk.from_dict(completion_chunk_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to 
README]](../README.md) diff --git a/src/together/generated/docs/CompletionChunkUsage.md b/src/together/generated/docs/CompletionChunkUsage.md deleted file mode 100644 index 3e74c9f4..00000000 --- a/src/together/generated/docs/CompletionChunkUsage.md +++ /dev/null @@ -1,29 +0,0 @@ -# CompletionChunkUsage - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**prompt_tokens** | **int** | | -**completion_tokens** | **int** | | -**total_tokens** | **int** | | - -## Example - -```python -from together.generated.models.completion_chunk_usage import CompletionChunkUsage - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionChunkUsage from a JSON string -completion_chunk_usage_instance = CompletionChunkUsage.from_json(json) -# print the JSON string representation of the object -print(CompletionChunkUsage.to_json()) - -# convert the object into a dict -completion_chunk_usage_dict = completion_chunk_usage_instance.to_dict() -# create an instance of CompletionChunkUsage from a dict -completion_chunk_usage_from_dict = CompletionChunkUsage.from_dict(completion_chunk_usage_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionEvent.md b/src/together/generated/docs/CompletionEvent.md deleted file mode 100644 index 92acef57..00000000 --- a/src/together/generated/docs/CompletionEvent.md +++ /dev/null @@ -1,27 +0,0 @@ -# CompletionEvent - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**data** | [**CompletionChunk**](CompletionChunk.md) | | - -## Example - -```python -from together.generated.models.completion_event import CompletionEvent - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionEvent from a JSON string 
-completion_event_instance = CompletionEvent.from_json(json) -# print the JSON string representation of the object -print(CompletionEvent.to_json()) - -# convert the object into a dict -completion_event_dict = completion_event_instance.to_dict() -# create an instance of CompletionEvent from a dict -completion_event_from_dict = CompletionEvent.from_dict(completion_event_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionRequest.md b/src/together/generated/docs/CompletionRequest.md deleted file mode 100644 index 0af19ec6..00000000 --- a/src/together/generated/docs/CompletionRequest.md +++ /dev/null @@ -1,44 +0,0 @@ -# CompletionRequest - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**prompt** | **str** | A string providing context for the model to complete. | -**model** | [**CompletionRequestModel**](CompletionRequestModel.md) | | -**max_tokens** | **int** | The maximum number of tokens to generate. | [optional] -**stop** | **List[str]** | A list of string sequences that will truncate (stop) inference text output. For example, \"</s>\" will stop generation as soon as the model generates the given token. | [optional] -**temperature** | **float** | A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output. | [optional] -**top_p** | **float** | A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. 
This technique helps maintain diversity and generate more fluent and natural-sounding text. | [optional] -**top_k** | **int** | An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. | [optional] -**repetition_penalty** | **float** | A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. | [optional] -**stream** | **bool** | If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results. | [optional] -**logprobs** | **int** | Determines the number of most likely tokens to return at each token position log probabilities to return. | [optional] -**echo** | **bool** | If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs. | [optional] -**n** | **int** | The number of completions to generate for each prompt. | [optional] -**safety_model** | [**CompletionRequestSafetyModel**](CompletionRequestSafetyModel.md) | | [optional] -**min_p** | **float** | A number between 0 and 1 that can be used as an alternative to top-p and top-k. | [optional] -**presence_penalty** | **float** | A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics. | [optional] -**frequency_penalty** | **float** | A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned. | [optional] -**logit_bias** | **Dict[str, float]** | Adjusts the likelihood of specific tokens appearing in the generated output. 
| [optional] -**seed** | **int** | Seed value for reproducibility. | [optional] - -## Example - -```python -from together.generated.models.completion_request import CompletionRequest - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionRequest from a JSON string -completion_request_instance = CompletionRequest.from_json(json) -# print the JSON string representation of the object -print(CompletionRequest.to_json()) - -# convert the object into a dict -completion_request_dict = completion_request_instance.to_dict() -# create an instance of CompletionRequest from a dict -completion_request_from_dict = CompletionRequest.from_dict(completion_request_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionRequestModel.md b/src/together/generated/docs/CompletionRequestModel.md deleted file mode 100644 index 15040351..00000000 --- a/src/together/generated/docs/CompletionRequestModel.md +++ /dev/null @@ -1,27 +0,0 @@ -# CompletionRequestModel - -The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.completion_request_model import CompletionRequestModel - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionRequestModel from a JSON string -completion_request_model_instance = CompletionRequestModel.from_json(json) -# print the JSON string representation of the object -print(CompletionRequestModel.to_json()) - -# convert the object into a dict -completion_request_model_dict = completion_request_model_instance.to_dict() -# create an instance of CompletionRequestModel from a dict -completion_request_model_from_dict = CompletionRequestModel.from_dict(completion_request_model_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionRequestSafetyModel.md b/src/together/generated/docs/CompletionRequestSafetyModel.md deleted file mode 100644 index a5b83b73..00000000 --- a/src/together/generated/docs/CompletionRequestSafetyModel.md +++ /dev/null @@ -1,27 +0,0 @@ -# CompletionRequestSafetyModel - -The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). 
- -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.completion_request_safety_model import CompletionRequestSafetyModel - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionRequestSafetyModel from a JSON string -completion_request_safety_model_instance = CompletionRequestSafetyModel.from_json(json) -# print the JSON string representation of the object -print(CompletionRequestSafetyModel.to_json()) - -# convert the object into a dict -completion_request_safety_model_dict = completion_request_safety_model_instance.to_dict() -# create an instance of CompletionRequestSafetyModel from a dict -completion_request_safety_model_from_dict = CompletionRequestSafetyModel.from_dict(completion_request_safety_model_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionResponse.md b/src/together/generated/docs/CompletionResponse.md deleted file mode 100644 index 6170558e..00000000 --- a/src/together/generated/docs/CompletionResponse.md +++ /dev/null @@ -1,33 +0,0 @@ -# CompletionResponse - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **str** | | -**choices** | [**List[CompletionChoicesDataInner]**](CompletionChoicesDataInner.md) | | -**prompt** | [**List[PromptPartInner]**](PromptPartInner.md) | | [optional] -**usage** | [**UsageData**](UsageData.md) | | -**created** | **int** | | -**model** | **str** | | -**object** | **str** | | - -## Example - -```python -from together.generated.models.completion_response import CompletionResponse - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionResponse from a JSON string -completion_response_instance = 
CompletionResponse.from_json(json) -# print the JSON string representation of the object -print(CompletionResponse.to_json()) - -# convert the object into a dict -completion_response_dict = completion_response_instance.to_dict() -# create an instance of CompletionResponse from a dict -completion_response_from_dict = CompletionResponse.from_dict(completion_response_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionStream.md b/src/together/generated/docs/CompletionStream.md deleted file mode 100644 index aa6342a9..00000000 --- a/src/together/generated/docs/CompletionStream.md +++ /dev/null @@ -1,27 +0,0 @@ -# CompletionStream - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**data** | **str** | | - -## Example - -```python -from together.generated.models.completion_stream import CompletionStream - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionStream from a JSON string -completion_stream_instance = CompletionStream.from_json(json) -# print the JSON string representation of the object -print(CompletionStream.to_json()) - -# convert the object into a dict -completion_stream_dict = completion_stream_instance.to_dict() -# create an instance of CompletionStream from a dict -completion_stream_from_dict = CompletionStream.from_dict(completion_stream_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionToken.md b/src/together/generated/docs/CompletionToken.md deleted file mode 100644 index f4d5f6b9..00000000 --- a/src/together/generated/docs/CompletionToken.md +++ /dev/null @@ -1,30 +0,0 @@ -# CompletionToken - - -## Properties 
- -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **int** | | -**text** | **str** | | -**logprob** | **float** | | -**special** | **bool** | | - -## Example - -```python -from together.generated.models.completion_token import CompletionToken - -# TODO update the JSON string below -json = "{}" -# create an instance of CompletionToken from a JSON string -completion_token_instance = CompletionToken.from_json(json) -# print the JSON string representation of the object -print(CompletionToken.to_json()) - -# convert the object into a dict -completion_token_dict = completion_token_instance.to_dict() -# create an instance of CompletionToken from a dict -completion_token_from_dict = CompletionToken.from_dict(completion_token_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CreateEndpointRequest.md b/src/together/generated/docs/CreateEndpointRequest.md deleted file mode 100644 index add9632f..00000000 --- a/src/together/generated/docs/CreateEndpointRequest.md +++ /dev/null @@ -1,33 +0,0 @@ -# CreateEndpointRequest - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**display_name** | **str** | A human-readable name for the endpoint | [optional] -**model** | **str** | The model to deploy on this endpoint | -**hardware** | **str** | The hardware configuration to use for this endpoint | -**autoscaling** | [**Autoscaling**](Autoscaling.md) | Configuration for automatic scaling of the endpoint | -**disable_prompt_cache** | **bool** | Whether to disable the prompt cache for this endpoint | [optional] [default to False] -**disable_speculative_decoding** | **bool** | Whether to disable speculative decoding for this endpoint | [optional] [default to False] -**state** | **str** | The desired state 
of the endpoint | [optional] [default to 'STARTED'] - -## Example - -```python -from together.generated.models.create_endpoint_request import CreateEndpointRequest - -# TODO update the JSON string below -json = "{}" -# create an instance of CreateEndpointRequest from a JSON string -create_endpoint_request_instance = CreateEndpointRequest.from_json(json) -# print the JSON string representation of the object -print(CreateEndpointRequest.to_json()) - -# convert the object into a dict -create_endpoint_request_dict = create_endpoint_request_instance.to_dict() -# create an instance of CreateEndpointRequest from a dict -create_endpoint_request_from_dict = CreateEndpointRequest.from_dict(create_endpoint_request_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/DedicatedEndpoint.md b/src/together/generated/docs/DedicatedEndpoint.md deleted file mode 100644 index eb9079f0..00000000 --- a/src/together/generated/docs/DedicatedEndpoint.md +++ /dev/null @@ -1,38 +0,0 @@ -# DedicatedEndpoint - -Details about a dedicated endpoint deployment - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | The type of object | -**id** | **str** | Unique identifier for the endpoint | -**name** | **str** | System name for the endpoint | -**display_name** | **str** | Human-readable name for the endpoint | -**model** | **str** | The model deployed on this endpoint | -**hardware** | **str** | The hardware configuration used for this endpoint | -**type** | **str** | The type of endpoint | -**owner** | **str** | The owner of this endpoint | -**state** | **str** | Current state of the endpoint | -**autoscaling** | [**Autoscaling**](Autoscaling.md) | Configuration for automatic scaling of the endpoint | -**created_at** | **datetime** | Timestamp when the 
endpoint was created | - -## Example - -```python -from together.generated.models.dedicated_endpoint import DedicatedEndpoint - -# TODO update the JSON string below -json = "{}" -# create an instance of DedicatedEndpoint from a JSON string -dedicated_endpoint_instance = DedicatedEndpoint.from_json(json) -# print the JSON string representation of the object -print(DedicatedEndpoint.to_json()) - -# convert the object into a dict -dedicated_endpoint_dict = dedicated_endpoint_instance.to_dict() -# create an instance of DedicatedEndpoint from a dict -dedicated_endpoint_from_dict = DedicatedEndpoint.from_dict(dedicated_endpoint_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsApi.md b/src/together/generated/docs/EmbeddingsApi.md deleted file mode 100644 index 3035a5ac..00000000 --- a/src/together/generated/docs/EmbeddingsApi.md +++ /dev/null @@ -1,93 +0,0 @@ -# together.generated.EmbeddingsApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**embeddings**](EmbeddingsApi.md#embeddings) | **POST** /embeddings | Create embedding - - -# **embeddings** -> EmbeddingsResponse embeddings(embeddings_request=embeddings_request) - -Create embedding - -Query an embedding model for a given string of text. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.embeddings_request import EmbeddingsRequest -from together.generated.models.embeddings_response import EmbeddingsResponse -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. 
-configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.EmbeddingsApi(api_client) - embeddings_request = together.generated.EmbeddingsRequest() # EmbeddingsRequest | (optional) - - try: - # Create embedding - api_response = await api_instance.embeddings(embeddings_request=embeddings_request) - print("The response of EmbeddingsApi->embeddings:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling EmbeddingsApi->embeddings: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **embeddings_request** | [**EmbeddingsRequest**](EmbeddingsRequest.md)| | [optional] - -### Return type - -[**EmbeddingsResponse**](EmbeddingsResponse.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: application/json - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | 200 | - | -**400** | BadRequest | - | -**401** | Unauthorized | - | -**404** | NotFound | - | -**429** | RateLimit | - | -**503** | Overloaded | - | -**504** | Timeout | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsRequest.md b/src/together/generated/docs/EmbeddingsRequest.md deleted file mode 100644 index f14bc778..00000000 --- a/src/together/generated/docs/EmbeddingsRequest.md +++ /dev/null @@ -1,28 +0,0 @@ -# EmbeddingsRequest - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**model** | [**EmbeddingsRequestModel**](EmbeddingsRequestModel.md) | | -**input** | [**EmbeddingsRequestInput**](EmbeddingsRequestInput.md) | | - -## Example - -```python -from together.generated.models.embeddings_request import EmbeddingsRequest - -# TODO update the JSON string below -json = "{}" -# create an instance of EmbeddingsRequest from a JSON string -embeddings_request_instance = EmbeddingsRequest.from_json(json) -# print the JSON string representation of the object -print(EmbeddingsRequest.to_json()) - -# convert the object into a dict -embeddings_request_dict = embeddings_request_instance.to_dict() -# create an instance of EmbeddingsRequest from a dict -embeddings_request_from_dict = EmbeddingsRequest.from_dict(embeddings_request_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsRequestInput.md b/src/together/generated/docs/EmbeddingsRequestInput.md deleted file mode 100644 index e3b4af93..00000000 --- a/src/together/generated/docs/EmbeddingsRequestInput.md +++ /dev/null @@ -1,26 +0,0 @@ -# EmbeddingsRequestInput - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.embeddings_request_input import EmbeddingsRequestInput - -# TODO update the JSON string below -json = "{}" -# create an instance of 
EmbeddingsRequestInput from a JSON string -embeddings_request_input_instance = EmbeddingsRequestInput.from_json(json) -# print the JSON string representation of the object -print(EmbeddingsRequestInput.to_json()) - -# convert the object into a dict -embeddings_request_input_dict = embeddings_request_input_instance.to_dict() -# create an instance of EmbeddingsRequestInput from a dict -embeddings_request_input_from_dict = EmbeddingsRequestInput.from_dict(embeddings_request_input_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsRequestModel.md b/src/together/generated/docs/EmbeddingsRequestModel.md deleted file mode 100644 index 6376e042..00000000 --- a/src/together/generated/docs/EmbeddingsRequestModel.md +++ /dev/null @@ -1,27 +0,0 @@ -# EmbeddingsRequestModel - -The name of the embedding model to use.

[See all of Together AI's embedding models](https://docs.together.ai/docs/serverless-models#embedding-models) - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.embeddings_request_model import EmbeddingsRequestModel - -# TODO update the JSON string below -json = "{}" -# create an instance of EmbeddingsRequestModel from a JSON string -embeddings_request_model_instance = EmbeddingsRequestModel.from_json(json) -# print the JSON string representation of the object -print(EmbeddingsRequestModel.to_json()) - -# convert the object into a dict -embeddings_request_model_dict = embeddings_request_model_instance.to_dict() -# create an instance of EmbeddingsRequestModel from a dict -embeddings_request_model_from_dict = EmbeddingsRequestModel.from_dict(embeddings_request_model_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsResponse.md b/src/together/generated/docs/EmbeddingsResponse.md deleted file mode 100644 index 0e08c129..00000000 --- a/src/together/generated/docs/EmbeddingsResponse.md +++ /dev/null @@ -1,29 +0,0 @@ -# EmbeddingsResponse - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | -**model** | **str** | | -**data** | [**List[EmbeddingsResponseDataInner]**](EmbeddingsResponseDataInner.md) | | - -## Example - -```python -from together.generated.models.embeddings_response import EmbeddingsResponse - -# TODO update the JSON string below -json = "{}" -# create an instance of EmbeddingsResponse from a JSON string -embeddings_response_instance = EmbeddingsResponse.from_json(json) -# print the JSON string representation of the object -print(EmbeddingsResponse.to_json()) - -# convert the 
object into a dict -embeddings_response_dict = embeddings_response_instance.to_dict() -# create an instance of EmbeddingsResponse from a dict -embeddings_response_from_dict = EmbeddingsResponse.from_dict(embeddings_response_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsResponseDataInner.md b/src/together/generated/docs/EmbeddingsResponseDataInner.md deleted file mode 100644 index cc11de78..00000000 --- a/src/together/generated/docs/EmbeddingsResponseDataInner.md +++ /dev/null @@ -1,29 +0,0 @@ -# EmbeddingsResponseDataInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | -**embedding** | **List[float]** | | -**index** | **int** | | - -## Example - -```python -from together.generated.models.embeddings_response_data_inner import EmbeddingsResponseDataInner - -# TODO update the JSON string below -json = "{}" -# create an instance of EmbeddingsResponseDataInner from a JSON string -embeddings_response_data_inner_instance = EmbeddingsResponseDataInner.from_json(json) -# print the JSON string representation of the object -print(EmbeddingsResponseDataInner.to_json()) - -# convert the object into a dict -embeddings_response_data_inner_dict = embeddings_response_data_inner_instance.to_dict() -# create an instance of EmbeddingsResponseDataInner from a dict -embeddings_response_data_inner_from_dict = EmbeddingsResponseDataInner.from_dict(embeddings_response_data_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EndpointPricing.md b/src/together/generated/docs/EndpointPricing.md deleted file mode 100644 index e557beff..00000000 --- 
a/src/together/generated/docs/EndpointPricing.md +++ /dev/null @@ -1,28 +0,0 @@ -# EndpointPricing - -Pricing details for using an endpoint - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**cents_per_minute** | **float** | Cost per minute of endpoint uptime in cents | - -## Example - -```python -from together.generated.models.endpoint_pricing import EndpointPricing - -# TODO update the JSON string below -json = "{}" -# create an instance of EndpointPricing from a JSON string -endpoint_pricing_instance = EndpointPricing.from_json(json) -# print the JSON string representation of the object -print(EndpointPricing.to_json()) - -# convert the object into a dict -endpoint_pricing_dict = endpoint_pricing_instance.to_dict() -# create an instance of EndpointPricing from a dict -endpoint_pricing_from_dict = EndpointPricing.from_dict(endpoint_pricing_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EndpointsApi.md b/src/together/generated/docs/EndpointsApi.md deleted file mode 100644 index 74fc70f1..00000000 --- a/src/together/generated/docs/EndpointsApi.md +++ /dev/null @@ -1,416 +0,0 @@ -# together.generated.EndpointsApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**create_endpoint**](EndpointsApi.md#create_endpoint) | **POST** /endpoints | Create a dedicated endpoint, it will start automatically -[**delete_endpoint**](EndpointsApi.md#delete_endpoint) | **DELETE** /endpoints/{endpointId} | Delete endpoint -[**get_endpoint**](EndpointsApi.md#get_endpoint) | **GET** /endpoints/{endpointId} | Get endpoint by ID -[**list_endpoints**](EndpointsApi.md#list_endpoints) | **GET** /endpoints | List all endpoints, can be filtered by type 
-[**update_endpoint**](EndpointsApi.md#update_endpoint) | **PATCH** /endpoints/{endpointId} | Update endpoint, this can also be used to start or stop a dedicated endpoint - - -# **create_endpoint** -> DedicatedEndpoint create_endpoint(create_endpoint_request) - -Create a dedicated endpoint, it will start automatically - -Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.create_endpoint_request import CreateEndpointRequest -from together.generated.models.dedicated_endpoint import DedicatedEndpoint -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. 
- -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.EndpointsApi(api_client) - create_endpoint_request = together.generated.CreateEndpointRequest() # CreateEndpointRequest | - - try: - # Create a dedicated endpoint, it will start automatically - api_response = await api_instance.create_endpoint(create_endpoint_request) - print("The response of EndpointsApi->create_endpoint:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling EndpointsApi->create_endpoint: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **create_endpoint_request** | [**CreateEndpointRequest**](CreateEndpointRequest.md)| | - -### Return type - -[**DedicatedEndpoint**](DedicatedEndpoint.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: application/json - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | 200 | - | -**403** | Unauthorized | - | -**500** | Internal error | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **delete_endpoint** -> delete_endpoint(endpoint_id) - -Delete endpoint - -Permanently deletes an endpoint. This action cannot be undone. 
- -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.EndpointsApi(api_client) - endpoint_id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7' # str | The ID of the endpoint to delete - - try: - # Delete endpoint - await api_instance.delete_endpoint(endpoint_id) - except Exception as e: - print("Exception when calling EndpointsApi->delete_endpoint: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **endpoint_id** | **str**| The ID of the endpoint to delete | - -### Return type - -void (empty response body) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**204** | No Content - Endpoint successfully deleted | - | -**403** | Unauthorized | - | -**404** | Not Found | - | -**500** | Internal error | - 
| - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **get_endpoint** -> DedicatedEndpoint get_endpoint(endpoint_id) - -Get endpoint by ID - -Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.dedicated_endpoint import DedicatedEndpoint -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. 
- -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.EndpointsApi(api_client) - endpoint_id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7' # str | The ID of the endpoint to retrieve - - try: - # Get endpoint by ID - api_response = await api_instance.get_endpoint(endpoint_id) - print("The response of EndpointsApi->get_endpoint:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling EndpointsApi->get_endpoint: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **endpoint_id** | **str**| The ID of the endpoint to retrieve | - -### Return type - -[**DedicatedEndpoint**](DedicatedEndpoint.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | 200 | - | -**403** | Unauthorized | - | -**404** | Not Found | - | -**500** | Internal error | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **list_endpoints** -> ListEndpoints200Response list_endpoints(type=type) - -List all endpoints, can be filtered by type - -Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). 
- -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.list_endpoints200_response import ListEndpoints200Response -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.EndpointsApi(api_client) - type = 'dedicated' # str | Filter endpoints by type (optional) - - try: - # List all endpoints, can be filtered by type - api_response = await api_instance.list_endpoints(type=type) - print("The response of EndpointsApi->list_endpoints:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling EndpointsApi->list_endpoints: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **type** | **str**| Filter endpoints by type | [optional] - -### Return type - -[**ListEndpoints200Response**](ListEndpoints200Response.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response 
headers | -|-------------|-------------|------------------| -**200** | 200 | - | -**403** | Unauthorized | - | -**500** | Internal error | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **update_endpoint** -> DedicatedEndpoint update_endpoint(endpoint_id, update_endpoint_request) - -Update endpoint, this can also be used to start or stop a dedicated endpoint - -Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.dedicated_endpoint import DedicatedEndpoint -from together.generated.models.update_endpoint_request import UpdateEndpointRequest -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. 
- -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.EndpointsApi(api_client) - endpoint_id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7' # str | The ID of the endpoint to update - update_endpoint_request = together.generated.UpdateEndpointRequest() # UpdateEndpointRequest | - - try: - # Update endpoint, this can also be used to start or stop a dedicated endpoint - api_response = await api_instance.update_endpoint(endpoint_id, update_endpoint_request) - print("The response of EndpointsApi->update_endpoint:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling EndpointsApi->update_endpoint: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **endpoint_id** | **str**| The ID of the endpoint to update | - **update_endpoint_request** | [**UpdateEndpointRequest**](UpdateEndpointRequest.md)| | - -### Return type - -[**DedicatedEndpoint**](DedicatedEndpoint.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: application/json - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | 200 | - | -**403** | Unauthorized | - | -**404** | Not Found | - | -**500** | Internal error | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ErrorData.md b/src/together/generated/docs/ErrorData.md deleted file mode 100644 
index d29d8ec3..00000000 --- a/src/together/generated/docs/ErrorData.md +++ /dev/null @@ -1,27 +0,0 @@ -# ErrorData - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**error** | [**ErrorDataError**](ErrorDataError.md) | | - -## Example - -```python -from together.generated.models.error_data import ErrorData - -# TODO update the JSON string below -json = "{}" -# create an instance of ErrorData from a JSON string -error_data_instance = ErrorData.from_json(json) -# print the JSON string representation of the object -print(ErrorData.to_json()) - -# convert the object into a dict -error_data_dict = error_data_instance.to_dict() -# create an instance of ErrorData from a dict -error_data_from_dict = ErrorData.from_dict(error_data_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ErrorDataError.md b/src/together/generated/docs/ErrorDataError.md deleted file mode 100644 index d4990950..00000000 --- a/src/together/generated/docs/ErrorDataError.md +++ /dev/null @@ -1,30 +0,0 @@ -# ErrorDataError - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**message** | **str** | | -**type** | **str** | | -**param** | **str** | | [optional] -**code** | **str** | | [optional] - -## Example - -```python -from together.generated.models.error_data_error import ErrorDataError - -# TODO update the JSON string below -json = "{}" -# create an instance of ErrorDataError from a JSON string -error_data_error_instance = ErrorDataError.from_json(json) -# print the JSON string representation of the object -print(ErrorDataError.to_json()) - -# convert the object into a dict -error_data_error_dict = error_data_error_instance.to_dict() -# create an instance of ErrorDataError from a dict 
-error_data_error_from_dict = ErrorDataError.from_dict(error_data_error_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileDeleteResponse.md b/src/together/generated/docs/FileDeleteResponse.md deleted file mode 100644 index e20ba15d..00000000 --- a/src/together/generated/docs/FileDeleteResponse.md +++ /dev/null @@ -1,28 +0,0 @@ -# FileDeleteResponse - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **str** | | [optional] -**deleted** | **bool** | | [optional] - -## Example - -```python -from together.generated.models.file_delete_response import FileDeleteResponse - -# TODO update the JSON string below -json = "{}" -# create an instance of FileDeleteResponse from a JSON string -file_delete_response_instance = FileDeleteResponse.from_json(json) -# print the JSON string representation of the object -print(FileDeleteResponse.to_json()) - -# convert the object into a dict -file_delete_response_dict = file_delete_response_instance.to_dict() -# create an instance of FileDeleteResponse from a dict -file_delete_response_from_dict = FileDeleteResponse.from_dict(file_delete_response_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileList.md b/src/together/generated/docs/FileList.md deleted file mode 100644 index 83bc78a6..00000000 --- a/src/together/generated/docs/FileList.md +++ /dev/null @@ -1,27 +0,0 @@ -# FileList - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**data** | [**List[FileResponse]**](FileResponse.md) | | - -## Example - -```python -from together.generated.models.file_list import 
FileList - -# TODO update the JSON string below -json = "{}" -# create an instance of FileList from a JSON string -file_list_instance = FileList.from_json(json) -# print the JSON string representation of the object -print(FileList.to_json()) - -# convert the object into a dict -file_list_dict = file_list_instance.to_dict() -# create an instance of FileList from a dict -file_list_from_dict = FileList.from_dict(file_list_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileObject.md b/src/together/generated/docs/FileObject.md deleted file mode 100644 index a95e1fe1..00000000 --- a/src/together/generated/docs/FileObject.md +++ /dev/null @@ -1,30 +0,0 @@ -# FileObject - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | [optional] -**id** | **str** | | [optional] -**filename** | **str** | | [optional] -**size** | **int** | | [optional] - -## Example - -```python -from together.generated.models.file_object import FileObject - -# TODO update the JSON string below -json = "{}" -# create an instance of FileObject from a JSON string -file_object_instance = FileObject.from_json(json) -# print the JSON string representation of the object -print(FileObject.to_json()) - -# convert the object into a dict -file_object_dict = file_object_instance.to_dict() -# create an instance of FileObject from a dict -file_object_from_dict = FileObject.from_dict(file_object_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileResponse.md b/src/together/generated/docs/FileResponse.md deleted file mode 100644 index 88317020..00000000 --- 
a/src/together/generated/docs/FileResponse.md +++ /dev/null @@ -1,35 +0,0 @@ -# FileResponse - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **str** | | -**object** | **str** | | -**created_at** | **int** | | -**filename** | **str** | | -**bytes** | **int** | | -**purpose** | **str** | | -**processed** | **bool** | | -**file_type** | **str** | | -**line_count** | **int** | | - -## Example - -```python -from together.generated.models.file_response import FileResponse - -# TODO update the JSON string below -json = "{}" -# create an instance of FileResponse from a JSON string -file_response_instance = FileResponse.from_json(json) -# print the JSON string representation of the object -print(FileResponse.to_json()) - -# convert the object into a dict -file_response_dict = file_response_instance.to_dict() -# create an instance of FileResponse from a dict -file_response_from_dict = FileResponse.from_dict(file_response_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FilesApi.md b/src/together/generated/docs/FilesApi.md deleted file mode 100644 index 5d0e6962..00000000 --- a/src/together/generated/docs/FilesApi.md +++ /dev/null @@ -1,320 +0,0 @@ -# together.generated.FilesApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**files_get**](FilesApi.md#files_get) | **GET** /files | List all files -[**files_id_content_get**](FilesApi.md#files_id_content_get) | **GET** /files/{id}/content | Get file contents -[**files_id_delete**](FilesApi.md#files_id_delete) | **DELETE** /files/{id} | Delete a file -[**files_id_get**](FilesApi.md#files_id_get) | **GET** /files/{id} | List file - - -# **files_get** -> FileList files_get() - -List all files 
- -List the metadata for all uploaded data files. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.file_list import FileList -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.FilesApi(api_client) - - try: - # List all files - api_response = await api_instance.files_get() - print("The response of FilesApi->files_get:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FilesApi->files_get: %s\n" % e) -``` - - - -### Parameters - -This endpoint does not need any parameter. 
- -### Return type - -[**FileList**](FileList.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | List of files | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **files_id_content_get** -> FileObject files_id_content_get(id) - -Get file contents - -Get the contents of a single uploaded data file. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.file_object import FileObject -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. 
- -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.FilesApi(api_client) - id = 'id_example' # str | - - try: - # Get file contents - api_response = await api_instance.files_id_content_get(id) - print("The response of FilesApi->files_id_content_get:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FilesApi->files_id_content_get: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **id** | **str**| | - -### Return type - -[**FileObject**](FileObject.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | File content retrieved successfully | - | -**500** | Internal Server Error | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **files_id_delete** -> FileDeleteResponse files_id_delete(id) - -Delete a file - -Delete a previously uploaded data file. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.file_delete_response import FileDeleteResponse -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. 
-configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.FilesApi(api_client) - id = 'id_example' # str | - - try: - # Delete a file - api_response = await api_instance.files_id_delete(id) - print("The response of FilesApi->files_id_delete:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FilesApi->files_id_delete: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **id** | **str**| | - -### Return type - -[**FileDeleteResponse**](FileDeleteResponse.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | File deleted successfully | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **files_id_get** -> FileResponse files_id_get(id) - -List file - -List the metadata for a single uploaded data file. 
- -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.file_response import FileResponse -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.FilesApi(api_client) - id = 'id_example' # str | - - try: - # List file - api_response = await api_instance.files_id_get(id) - print("The response of FilesApi->files_id_get:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FilesApi->files_id_get: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **id** | **str**| | - -### Return type - -[**FileResponse**](FileResponse.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | File retrieved successfully | - | - -[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTuneEvent.md b/src/together/generated/docs/FineTuneEvent.md deleted file mode 100644 index 23eea549..00000000 --- a/src/together/generated/docs/FineTuneEvent.md +++ /dev/null @@ -1,40 +0,0 @@ -# FineTuneEvent - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | -**created_at** | **str** | | -**level** | [**FinetuneEventLevels**](FinetuneEventLevels.md) | | [optional] -**message** | **str** | | -**type** | [**FinetuneEventType**](FinetuneEventType.md) | | -**param_count** | **int** | | -**token_count** | **int** | | -**total_steps** | **int** | | -**wandb_url** | **str** | | -**step** | **int** | | -**checkpoint_path** | **str** | | -**model_path** | **str** | | -**training_offset** | **int** | | -**hash** | **str** | | - -## Example - -```python -from together.generated.models.fine_tune_event import FineTuneEvent - -# TODO update the JSON string below -json = "{}" -# create an instance of FineTuneEvent from a JSON string -fine_tune_event_instance = FineTuneEvent.from_json(json) -# print the JSON string representation of the object -print(FineTuneEvent.to_json()) - -# convert the object into a dict -fine_tune_event_dict = fine_tune_event_instance.to_dict() -# create an instance of FineTuneEvent from a dict -fine_tune_event_from_dict = FineTuneEvent.from_dict(fine_tune_event_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTunesPostRequest.md b/src/together/generated/docs/FineTunesPostRequest.md deleted file mode 100644 index f75f37cd..00000000 --- a/src/together/generated/docs/FineTunesPostRequest.md +++ /dev/null @@ 
-1,45 +0,0 @@ -# FineTunesPostRequest - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**training_file** | **str** | File-ID of a training file uploaded to the Together API | -**validation_file** | **str** | File-ID of a validation file uploaded to the Together API | [optional] -**model** | **str** | Name of the base model to run fine-tune job on | -**n_epochs** | **int** | Number of epochs for fine-tuning | [optional] [default to 1] -**n_checkpoints** | **int** | Number of checkpoints to save during fine-tuning | [optional] [default to 1] -**n_evals** | **int** | Number of evaluations to be run on a given validation set during training | [optional] [default to 0] -**batch_size** | **int** | Batch size for fine-tuning | [optional] [default to 32] -**learning_rate** | **float** | Learning rate multiplier to use for training | [optional] [default to 0.000010] -**lr_scheduler** | [**LRScheduler**](.md) | | [optional] -**warmup_ratio** | **float** | The percent of steps at the start of training to linearly increase the learning rate. | [optional] [default to 0.0] -**max_grad_norm** | **float** | Max gradient norm to be used for gradient clipping. Set to 0 to disable. | [optional] [default to 1.0] -**weight_decay** | **float** | Weight decay | [optional] [default to 0.0] -**suffix** | **str** | Suffix that will be added to your fine-tuned model name | [optional] -**wandb_api_key** | **str** | API key for Weights & Biases integration | [optional] -**wandb_base_url** | **str** | The base URL of a dedicated Weights & Biases instance. | [optional] -**wandb_project_name** | **str** | The Weights & Biases project for your run. If not specified, will use `together` as the project name. | [optional] -**wandb_name** | **str** | The Weights & Biases name for your run. 
| [optional] -**train_on_inputs** | [**FineTunesPostRequestTrainOnInputs**](FineTunesPostRequestTrainOnInputs.md) | | [optional] [default to False] -**training_type** | [**FineTunesPostRequestTrainingType**](FineTunesPostRequestTrainingType.md) | | [optional] - -## Example - -```python -from together.generated.models.fine_tunes_post_request import FineTunesPostRequest - -# TODO update the JSON string below -json = "{}" -# create an instance of FineTunesPostRequest from a JSON string -fine_tunes_post_request_instance = FineTunesPostRequest.from_json(json) -# print the JSON string representation of the object -print(FineTunesPostRequest.to_json()) - -# convert the object into a dict -fine_tunes_post_request_dict = fine_tunes_post_request_instance.to_dict() -# create an instance of FineTunesPostRequest from a dict -fine_tunes_post_request_from_dict = FineTunesPostRequest.from_dict(fine_tunes_post_request_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md b/src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md deleted file mode 100644 index 554e32a3..00000000 --- a/src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md +++ /dev/null @@ -1,27 +0,0 @@ -# FineTunesPostRequestTrainOnInputs - -Whether to mask the user messages in conversational data or prompts in instruction data. 
- -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.fine_tunes_post_request_train_on_inputs import FineTunesPostRequestTrainOnInputs - -# TODO update the JSON string below -json = "{}" -# create an instance of FineTunesPostRequestTrainOnInputs from a JSON string -fine_tunes_post_request_train_on_inputs_instance = FineTunesPostRequestTrainOnInputs.from_json(json) -# print the JSON string representation of the object -print(FineTunesPostRequestTrainOnInputs.to_json()) - -# convert the object into a dict -fine_tunes_post_request_train_on_inputs_dict = fine_tunes_post_request_train_on_inputs_instance.to_dict() -# create an instance of FineTunesPostRequestTrainOnInputs from a dict -fine_tunes_post_request_train_on_inputs_from_dict = FineTunesPostRequestTrainOnInputs.from_dict(fine_tunes_post_request_train_on_inputs_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTunesPostRequestTrainingType.md b/src/together/generated/docs/FineTunesPostRequestTrainingType.md deleted file mode 100644 index 92af3191..00000000 --- a/src/together/generated/docs/FineTunesPostRequestTrainingType.md +++ /dev/null @@ -1,31 +0,0 @@ -# FineTunesPostRequestTrainingType - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**type** | **str** | | -**lora_r** | **int** | | -**lora_alpha** | **int** | | -**lora_dropout** | **float** | | [optional] [default to 0.0] -**lora_trainable_modules** | **str** | | [optional] [default to 'all-linear'] - -## Example - -```python -from together.generated.models.fine_tunes_post_request_training_type import FineTunesPostRequestTrainingType - -# TODO update the JSON string below -json = "{}" -# create an instance 
of FineTunesPostRequestTrainingType from a JSON string -fine_tunes_post_request_training_type_instance = FineTunesPostRequestTrainingType.from_json(json) -# print the JSON string representation of the object -print(FineTunesPostRequestTrainingType.to_json()) - -# convert the object into a dict -fine_tunes_post_request_training_type_dict = fine_tunes_post_request_training_type_instance.to_dict() -# create an instance of FineTunesPostRequestTrainingType from a dict -fine_tunes_post_request_training_type_from_dict = FineTunesPostRequestTrainingType.from_dict(fine_tunes_post_request_training_type_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTuningApi.md b/src/together/generated/docs/FineTuningApi.md deleted file mode 100644 index 465f9925..00000000 --- a/src/together/generated/docs/FineTuningApi.md +++ /dev/null @@ -1,488 +0,0 @@ -# together.generated.FineTuningApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**fine_tunes_get**](FineTuningApi.md#fine_tunes_get) | **GET** /fine-tunes | List all jobs -[**fine_tunes_id_cancel_post**](FineTuningApi.md#fine_tunes_id_cancel_post) | **POST** /fine-tunes/{id}/cancel | Cancel job -[**fine_tunes_id_events_get**](FineTuningApi.md#fine_tunes_id_events_get) | **GET** /fine-tunes/{id}/events | List job events -[**fine_tunes_id_get**](FineTuningApi.md#fine_tunes_id_get) | **GET** /fine-tunes/{id} | List job -[**fine_tunes_post**](FineTuningApi.md#fine_tunes_post) | **POST** /fine-tunes | Create job -[**finetune_download_get**](FineTuningApi.md#finetune_download_get) | **GET** /finetune/download | Download model - - -# **fine_tunes_get** -> FinetuneList fine_tunes_get() - -List all jobs - -List the metadata for all fine-tuning jobs. 
- -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.finetune_list import FinetuneList -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.FineTuningApi(api_client) - - try: - # List all jobs - api_response = await api_instance.fine_tunes_get() - print("The response of FineTuningApi->fine_tunes_get:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FineTuningApi->fine_tunes_get: %s\n" % e) -``` - - - -### Parameters - -This endpoint does not need any parameter. 
- -### Return type - -[**FinetuneList**](FinetuneList.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | List of fine-tune jobs | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **fine_tunes_id_cancel_post** -> FinetuneResponse fine_tunes_id_cancel_post(id) - -Cancel job - -Cancel a currently running fine-tuning job. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.finetune_response import FinetuneResponse -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.FineTuningApi(api_client) - id = 'id_example' # str | Fine-tune ID to cancel. A string that starts with `ft-`. 
- - try: - # Cancel job - api_response = await api_instance.fine_tunes_id_cancel_post(id) - print("The response of FineTuningApi->fine_tunes_id_cancel_post:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FineTuningApi->fine_tunes_id_cancel_post: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **id** | **str**| Fine-tune ID to cancel. A string that starts with `ft-`. | - -### Return type - -[**FinetuneResponse**](FinetuneResponse.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | Successfully cancelled the fine-tuning job. | - | -**400** | Invalid request parameters. | - | -**404** | Fine-tune ID not found. | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **fine_tunes_id_events_get** -> FinetuneListEvents fine_tunes_id_events_get(id) - -List job events - -List the events for a single fine-tuning job. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.finetune_list_events import FinetuneListEvents -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. 
-# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.FineTuningApi(api_client) - id = 'id_example' # str | - - try: - # List job events - api_response = await api_instance.fine_tunes_id_events_get(id) - print("The response of FineTuningApi->fine_tunes_id_events_get:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FineTuningApi->fine_tunes_id_events_get: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **id** | **str**| | - -### Return type - -[**FinetuneListEvents**](FinetuneListEvents.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | List of fine-tune events | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **fine_tunes_id_get** -> FinetuneResponse fine_tunes_id_get(id) - -List job - -List the metadata for a single fine-tuning job. 
- -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.finetune_response import FinetuneResponse -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.FineTuningApi(api_client) - id = 'id_example' # str | - - try: - # List job - api_response = await api_instance.fine_tunes_id_get(id) - print("The response of FineTuningApi->fine_tunes_id_get:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FineTuningApi->fine_tunes_id_get: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **id** | **str**| | - -### Return type - -[**FinetuneResponse**](FinetuneResponse.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | Fine-tune job details retrieved successfully | - | - -[[Back to top]](#) 
[[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **fine_tunes_post** -> FinetuneResponse fine_tunes_post(fine_tunes_post_request) - -Create job - -Use a model to create a fine-tuning job. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.fine_tunes_post_request import FineTunesPostRequest -from together.generated.models.finetune_response import FinetuneResponse -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. 
- -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.FineTuningApi(api_client) - fine_tunes_post_request = together.generated.FineTunesPostRequest() # FineTunesPostRequest | - - try: - # Create job - api_response = await api_instance.fine_tunes_post(fine_tunes_post_request) - print("The response of FineTuningApi->fine_tunes_post:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FineTuningApi->fine_tunes_post: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **fine_tunes_post_request** | [**FineTunesPostRequest**](FineTunesPostRequest.md)| | - -### Return type - -[**FinetuneResponse**](FinetuneResponse.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: application/json - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | Fine-tuning job initiated successfully | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **finetune_download_get** -> FinetuneDownloadResult finetune_download_get(ft_id, checkpoint_step=checkpoint_step, checkpoint=checkpoint, output=output) - -Download model - -Download a compressed fine-tuned model or checkpoint to local disk. 
- -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.finetune_download_result import FinetuneDownloadResult -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.FineTuningApi(api_client) - ft_id = 'ft_id_example' # str | Fine-tune ID to download. A string that starts with `ft-`. - checkpoint_step = 56 # int | Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. (optional) - checkpoint = 'checkpoint_example' # str | Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. (optional) - output = 'output_example' # str | Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. 
(optional) - - try: - # Download model - api_response = await api_instance.finetune_download_get(ft_id, checkpoint_step=checkpoint_step, checkpoint=checkpoint, output=output) - print("The response of FineTuningApi->finetune_download_get:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FineTuningApi->finetune_download_get: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **ft_id** | **str**| Fine-tune ID to download. A string that starts with `ft-`. | - **checkpoint_step** | **int**| Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. | [optional] - **checkpoint** | **str**| Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. | [optional] - **output** | **str**| Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. | [optional] - -### Return type - -[**FinetuneDownloadResult**](FinetuneDownloadResult.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | Successfully downloaded the fine-tuned model or checkpoint. | - | -**400** | Invalid request parameters. | - | -**404** | Fine-tune ID not found. 
| - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneDownloadResult.md b/src/together/generated/docs/FinetuneDownloadResult.md deleted file mode 100644 index 36bce63b..00000000 --- a/src/together/generated/docs/FinetuneDownloadResult.md +++ /dev/null @@ -1,31 +0,0 @@ -# FinetuneDownloadResult - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | [optional] -**id** | **str** | | [optional] -**checkpoint_step** | **int** | | [optional] -**filename** | **str** | | [optional] -**size** | **int** | | [optional] - -## Example - -```python -from together.generated.models.finetune_download_result import FinetuneDownloadResult - -# TODO update the JSON string below -json = "{}" -# create an instance of FinetuneDownloadResult from a JSON string -finetune_download_result_instance = FinetuneDownloadResult.from_json(json) -# print the JSON string representation of the object -print(FinetuneDownloadResult.to_json()) - -# convert the object into a dict -finetune_download_result_dict = finetune_download_result_instance.to_dict() -# create an instance of FinetuneDownloadResult from a dict -finetune_download_result_from_dict = FinetuneDownloadResult.from_dict(finetune_download_result_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneEventLevels.md b/src/together/generated/docs/FinetuneEventLevels.md deleted file mode 100644 index 0056898d..00000000 --- a/src/together/generated/docs/FinetuneEventLevels.md +++ /dev/null @@ -1,18 +0,0 @@ -# FinetuneEventLevels - - -## Enum - -* `INFO` (value: `'info'`) - -* `WARNING` (value: `'warning'`) - -* 
`ERROR` (value: `'error'`) - -* `LEGACY_INFO` (value: `'legacy_info'`) - -* `LEGACY_IWARNING` (value: `'legacy_iwarning'`) - -* `LEGACY_IERROR` (value: `'legacy_ierror'`) - -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneEventType.md b/src/together/generated/docs/FinetuneEventType.md deleted file mode 100644 index 6f936a60..00000000 --- a/src/together/generated/docs/FinetuneEventType.md +++ /dev/null @@ -1,56 +0,0 @@ -# FinetuneEventType - - -## Enum - -* `JOB_PENDING` (value: `'job_pending'`) - -* `JOB_START` (value: `'job_start'`) - -* `JOB_STOPPED` (value: `'job_stopped'`) - -* `MODEL_DOWNLOADING` (value: `'model_downloading'`) - -* `MODEL_DOWNLOAD_COMPLETE` (value: `'model_download_complete'`) - -* `TRAINING_DATA_DOWNLOADING` (value: `'training_data_downloading'`) - -* `TRAINING_DATA_DOWNLOAD_COMPLETE` (value: `'training_data_download_complete'`) - -* `VALIDATION_DATA_DOWNLOADING` (value: `'validation_data_downloading'`) - -* `VALIDATION_DATA_DOWNLOAD_COMPLETE` (value: `'validation_data_download_complete'`) - -* `WANDB_INIT` (value: `'wandb_init'`) - -* `TRAINING_START` (value: `'training_start'`) - -* `CHECKPOINT_SAVE` (value: `'checkpoint_save'`) - -* `BILLING_LIMIT` (value: `'billing_limit'`) - -* `EPOCH_COMPLETE` (value: `'epoch_complete'`) - -* `TRAINING_COMPLETE` (value: `'training_complete'`) - -* `MODEL_COMPRESSING` (value: `'model_compressing'`) - -* `MODEL_COMPRESSION_COMPLETE` (value: `'model_compression_complete'`) - -* `MODEL_UPLOADING` (value: `'model_uploading'`) - -* `MODEL_UPLOAD_COMPLETE` (value: `'model_upload_complete'`) - -* `JOB_COMPLETE` (value: `'job_complete'`) - -* `JOB_ERROR` (value: `'job_error'`) - -* `CANCEL_REQUESTED` (value: `'cancel_requested'`) - -* `JOB_RESTARTED` (value: `'job_restarted'`) - -* `REFUND` (value: `'refund'`) - -* `WARNING` (value: `'warning'`) - 
-[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneJobStatus.md b/src/together/generated/docs/FinetuneJobStatus.md deleted file mode 100644 index 750038be..00000000 --- a/src/together/generated/docs/FinetuneJobStatus.md +++ /dev/null @@ -1,24 +0,0 @@ -# FinetuneJobStatus - - -## Enum - -* `PENDING` (value: `'pending'`) - -* `QUEUED` (value: `'queued'`) - -* `RUNNING` (value: `'running'`) - -* `COMPRESSING` (value: `'compressing'`) - -* `UPLOADING` (value: `'uploading'`) - -* `CANCEL_REQUESTED` (value: `'cancel_requested'`) - -* `CANCELLED` (value: `'cancelled'`) - -* `ERROR` (value: `'error'`) - -* `COMPLETED` (value: `'completed'`) - -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneList.md b/src/together/generated/docs/FinetuneList.md deleted file mode 100644 index 4785467b..00000000 --- a/src/together/generated/docs/FinetuneList.md +++ /dev/null @@ -1,27 +0,0 @@ -# FinetuneList - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**data** | [**List[FinetuneResponse]**](FinetuneResponse.md) | | - -## Example - -```python -from together.generated.models.finetune_list import FinetuneList - -# TODO update the JSON string below -json = "{}" -# create an instance of FinetuneList from a JSON string -finetune_list_instance = FinetuneList.from_json(json) -# print the JSON string representation of the object -print(FinetuneList.to_json()) - -# convert the object into a dict -finetune_list_dict = finetune_list_instance.to_dict() -# create an instance of FinetuneList from a dict -finetune_list_from_dict = FinetuneList.from_dict(finetune_list_dict) -``` -[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneListEvents.md b/src/together/generated/docs/FinetuneListEvents.md deleted file mode 100644 index 2fa6ed43..00000000 --- a/src/together/generated/docs/FinetuneListEvents.md +++ /dev/null @@ -1,27 +0,0 @@ -# FinetuneListEvents - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**data** | [**List[FineTuneEvent]**](FineTuneEvent.md) | | - -## Example - -```python -from together.generated.models.finetune_list_events import FinetuneListEvents - -# TODO update the JSON string below -json = "{}" -# create an instance of FinetuneListEvents from a JSON string -finetune_list_events_instance = FinetuneListEvents.from_json(json) -# print the JSON string representation of the object -print(FinetuneListEvents.to_json()) - -# convert the object into a dict -finetune_list_events_dict = finetune_list_events_instance.to_dict() -# create an instance of FinetuneListEvents from a dict -finetune_list_events_from_dict = FinetuneListEvents.from_dict(finetune_list_events_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneResponse.md b/src/together/generated/docs/FinetuneResponse.md deleted file mode 100644 index 68dc10c6..00000000 --- a/src/together/generated/docs/FinetuneResponse.md +++ /dev/null @@ -1,58 +0,0 @@ -# FinetuneResponse - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **str** | | -**training_file** | **str** | | [optional] -**validation_file** | **str** | | [optional] -**model** | **str** | | [optional] -**model_output_name** | **str** | | [optional] -**model_output_path** 
| **str** | | [optional] -**trainingfile_numlines** | **int** | | [optional] -**trainingfile_size** | **int** | | [optional] -**created_at** | **str** | | [optional] -**updated_at** | **str** | | [optional] -**n_epochs** | **int** | | [optional] -**n_checkpoints** | **int** | | [optional] -**n_evals** | **int** | | [optional] -**batch_size** | **int** | | [optional] -**learning_rate** | **float** | | [optional] -**lr_scheduler** | [**LRScheduler**](.md) | | [optional] -**warmup_ratio** | **float** | | [optional] -**max_grad_norm** | **float** | | [optional] -**weight_decay** | **float** | | [optional] -**eval_steps** | **int** | | [optional] -**train_on_inputs** | [**FinetuneResponseTrainOnInputs**](FinetuneResponseTrainOnInputs.md) | | [optional] -**training_type** | [**FineTunesPostRequestTrainingType**](FineTunesPostRequestTrainingType.md) | | [optional] -**status** | [**FinetuneJobStatus**](FinetuneJobStatus.md) | | -**job_id** | **str** | | [optional] -**events** | [**List[FineTuneEvent]**](FineTuneEvent.md) | | [optional] -**token_count** | **int** | | [optional] -**param_count** | **int** | | [optional] -**total_price** | **int** | | [optional] -**epochs_completed** | **int** | | [optional] -**queue_depth** | **int** | | [optional] -**wandb_project_name** | **str** | | [optional] -**wandb_url** | **str** | | [optional] - -## Example - -```python -from together.generated.models.finetune_response import FinetuneResponse - -# TODO update the JSON string below -json = "{}" -# create an instance of FinetuneResponse from a JSON string -finetune_response_instance = FinetuneResponse.from_json(json) -# print the JSON string representation of the object -print(FinetuneResponse.to_json()) - -# convert the object into a dict -finetune_response_dict = finetune_response_instance.to_dict() -# create an instance of FinetuneResponse from a dict -finetune_response_from_dict = FinetuneResponse.from_dict(finetune_response_dict) -``` -[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneResponseTrainOnInputs.md b/src/together/generated/docs/FinetuneResponseTrainOnInputs.md deleted file mode 100644 index 0ea8e32c..00000000 --- a/src/together/generated/docs/FinetuneResponseTrainOnInputs.md +++ /dev/null @@ -1,26 +0,0 @@ -# FinetuneResponseTrainOnInputs - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.finetune_response_train_on_inputs import FinetuneResponseTrainOnInputs - -# TODO update the JSON string below -json = "{}" -# create an instance of FinetuneResponseTrainOnInputs from a JSON string -finetune_response_train_on_inputs_instance = FinetuneResponseTrainOnInputs.from_json(json) -# print the JSON string representation of the object -print(FinetuneResponseTrainOnInputs.to_json()) - -# convert the object into a dict -finetune_response_train_on_inputs_dict = finetune_response_train_on_inputs_instance.to_dict() -# create an instance of FinetuneResponseTrainOnInputs from a dict -finetune_response_train_on_inputs_from_dict = FinetuneResponseTrainOnInputs.from_dict(finetune_response_train_on_inputs_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinishReason.md b/src/together/generated/docs/FinishReason.md deleted file mode 100644 index e6907d83..00000000 --- a/src/together/generated/docs/FinishReason.md +++ /dev/null @@ -1,16 +0,0 @@ -# FinishReason - - -## Enum - -* `STOP` (value: `'stop'`) - -* `EOS` (value: `'eos'`) - -* `LENGTH` (value: `'length'`) - -* `TOOL_CALLS` (value: `'tool_calls'`) - -* `FUNCTION_CALL` (value: `'function_call'`) - -[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FullTrainingType.md b/src/together/generated/docs/FullTrainingType.md deleted file mode 100644 index 4b40ee0f..00000000 --- a/src/together/generated/docs/FullTrainingType.md +++ /dev/null @@ -1,27 +0,0 @@ -# FullTrainingType - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**type** | **str** | | - -## Example - -```python -from together.generated.models.full_training_type import FullTrainingType - -# TODO update the JSON string below -json = "{}" -# create an instance of FullTrainingType from a JSON string -full_training_type_instance = FullTrainingType.from_json(json) -# print the JSON string representation of the object -print(FullTrainingType.to_json()) - -# convert the object into a dict -full_training_type_dict = full_training_type_instance.to_dict() -# create an instance of FullTrainingType from a dict -full_training_type_from_dict = FullTrainingType.from_dict(full_training_type_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/HardwareApi.md b/src/together/generated/docs/HardwareApi.md deleted file mode 100644 index 6498f346..00000000 --- a/src/together/generated/docs/HardwareApi.md +++ /dev/null @@ -1,88 +0,0 @@ -# together.generated.HardwareApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**list_hardware**](HardwareApi.md#list_hardware) | **GET** /hardware | List available hardware configurations - - -# **list_hardware** -> ListHardware200Response list_hardware(model=model) - -List available hardware configurations - -Returns a list of 
available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.list_hardware200_response import ListHardware200Response -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.HardwareApi(api_client) - model = 'meta-llama/Llama-3-70b-chat-hf' # str | Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. 
(optional) - - try: - # List available hardware configurations - api_response = await api_instance.list_hardware(model=model) - print("The response of HardwareApi->list_hardware:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling HardwareApi->list_hardware: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **model** | **str**| Filter hardware configurations by model compatibility. When provided, the response includes availability status for each compatible configuration. | [optional] - -### Return type - -[**ListHardware200Response**](ListHardware200Response.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | List of available hardware configurations | - | -**403** | Unauthorized | - | -**500** | Internal error | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/HardwareAvailability.md b/src/together/generated/docs/HardwareAvailability.md deleted file mode 100644 index 6ff309ee..00000000 --- a/src/together/generated/docs/HardwareAvailability.md +++ /dev/null @@ -1,28 +0,0 @@ -# HardwareAvailability - -Indicates the current availability status of a hardware configuration - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**status** | **str** | The availability status of the hardware configuration | - -## Example - -```python -from together.generated.models.hardware_availability import HardwareAvailability - -# TODO update the JSON string below -json = 
"{}" -# create an instance of HardwareAvailability from a JSON string -hardware_availability_instance = HardwareAvailability.from_json(json) -# print the JSON string representation of the object -print(HardwareAvailability.to_json()) - -# convert the object into a dict -hardware_availability_dict = hardware_availability_instance.to_dict() -# create an instance of HardwareAvailability from a dict -hardware_availability_from_dict = HardwareAvailability.from_dict(hardware_availability_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/HardwareSpec.md b/src/together/generated/docs/HardwareSpec.md deleted file mode 100644 index 9967c6f2..00000000 --- a/src/together/generated/docs/HardwareSpec.md +++ /dev/null @@ -1,31 +0,0 @@ -# HardwareSpec - -Detailed specifications of a hardware configuration - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**gpu_type** | **str** | The type/model of GPU | -**gpu_link** | **str** | The GPU interconnect technology | -**gpu_memory** | **float** | Amount of GPU memory in GB | -**gpu_count** | **int** | Number of GPUs in this configuration | - -## Example - -```python -from together.generated.models.hardware_spec import HardwareSpec - -# TODO update the JSON string below -json = "{}" -# create an instance of HardwareSpec from a JSON string -hardware_spec_instance = HardwareSpec.from_json(json) -# print the JSON string representation of the object -print(HardwareSpec.to_json()) - -# convert the object into a dict -hardware_spec_dict = hardware_spec_instance.to_dict() -# create an instance of HardwareSpec from a dict -hardware_spec_from_dict = HardwareSpec.from_dict(hardware_spec_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/HardwareWithStatus.md b/src/together/generated/docs/HardwareWithStatus.md deleted file mode 100644 index 3d85fdbd..00000000 --- a/src/together/generated/docs/HardwareWithStatus.md +++ /dev/null @@ -1,33 +0,0 @@ -# HardwareWithStatus - -Hardware configuration details with optional availability status - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | -**id** | **str** | Unique identifier for the hardware configuration | -**pricing** | [**EndpointPricing**](EndpointPricing.md) | | -**specs** | [**HardwareSpec**](HardwareSpec.md) | | -**availability** | [**HardwareAvailability**](HardwareAvailability.md) | | [optional] -**updated_at** | **datetime** | Timestamp of when the hardware status was last updated | - -## Example - -```python -from together.generated.models.hardware_with_status import HardwareWithStatus - -# TODO update the JSON string below -json = "{}" -# create an instance of HardwareWithStatus from a JSON string -hardware_with_status_instance = HardwareWithStatus.from_json(json) -# print the JSON string representation of the object -print(HardwareWithStatus.to_json()) - -# convert the object into a dict -hardware_with_status_dict = hardware_with_status_instance.to_dict() -# create an instance of HardwareWithStatus from a dict -hardware_with_status_from_dict = HardwareWithStatus.from_dict(hardware_with_status_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImageResponse.md b/src/together/generated/docs/ImageResponse.md deleted file mode 100644 index eca8ec0c..00000000 --- a/src/together/generated/docs/ImageResponse.md +++ /dev/null @@ -1,30 +0,0 @@ -# ImageResponse - - -## 
Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **str** | | -**model** | **str** | | -**object** | **str** | | -**data** | [**List[ImageResponseDataInner]**](ImageResponseDataInner.md) | | - -## Example - -```python -from together.generated.models.image_response import ImageResponse - -# TODO update the JSON string below -json = "{}" -# create an instance of ImageResponse from a JSON string -image_response_instance = ImageResponse.from_json(json) -# print the JSON string representation of the object -print(ImageResponse.to_json()) - -# convert the object into a dict -image_response_dict = image_response_instance.to_dict() -# create an instance of ImageResponse from a dict -image_response_from_dict = ImageResponse.from_dict(image_response_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImageResponseDataInner.md b/src/together/generated/docs/ImageResponseDataInner.md deleted file mode 100644 index f529b63c..00000000 --- a/src/together/generated/docs/ImageResponseDataInner.md +++ /dev/null @@ -1,29 +0,0 @@ -# ImageResponseDataInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**index** | **int** | | -**b64_json** | **str** | | [optional] -**url** | **str** | | [optional] - -## Example - -```python -from together.generated.models.image_response_data_inner import ImageResponseDataInner - -# TODO update the JSON string below -json = "{}" -# create an instance of ImageResponseDataInner from a JSON string -image_response_data_inner_instance = ImageResponseDataInner.from_json(json) -# print the JSON string representation of the object -print(ImageResponseDataInner.to_json()) - -# convert the object into a dict -image_response_data_inner_dict = 
image_response_data_inner_instance.to_dict() -# create an instance of ImageResponseDataInner from a dict -image_response_data_inner_from_dict = ImageResponseDataInner.from_dict(image_response_data_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesApi.md b/src/together/generated/docs/ImagesApi.md deleted file mode 100644 index d7db520e..00000000 --- a/src/together/generated/docs/ImagesApi.md +++ /dev/null @@ -1,87 +0,0 @@ -# together.generated.ImagesApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**images_generations_post**](ImagesApi.md#images_generations_post) | **POST** /images/generations | Create image - - -# **images_generations_post** -> ImageResponse images_generations_post(images_generations_post_request) - -Create image - -Use an image model to generate an image for a given prompt. - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.image_response import ImageResponse -from together.generated.models.images_generations_post_request import ImagesGenerationsPostRequest -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. 
- -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.ImagesApi(api_client) - images_generations_post_request = together.generated.ImagesGenerationsPostRequest() # ImagesGenerationsPostRequest | - - try: - # Create image - api_response = await api_instance.images_generations_post(images_generations_post_request) - print("The response of ImagesApi->images_generations_post:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling ImagesApi->images_generations_post: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **images_generations_post_request** | [**ImagesGenerationsPostRequest**](ImagesGenerationsPostRequest.md)| | - -### Return type - -[**ImageResponse**](ImageResponse.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: application/json - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | Image generated successfully | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesGenerationsPostRequest.md b/src/together/generated/docs/ImagesGenerationsPostRequest.md deleted file mode 100644 index 4263429b..00000000 --- a/src/together/generated/docs/ImagesGenerationsPostRequest.md +++ /dev/null @@ -1,39 +0,0 @@ -# ImagesGenerationsPostRequest - - -## Properties - -Name | Type | Description | Notes 
------------- | ------------- | ------------- | ------------- -**prompt** | **str** | A description of the desired images. Maximum length varies by model. | -**model** | [**ImagesGenerationsPostRequestModel**](ImagesGenerationsPostRequestModel.md) | | -**steps** | **int** | Number of generation steps. | [optional] [default to 20] -**image_url** | **str** | URL of an image to use for image models that support it. | [optional] -**seed** | **int** | Seed used for generation. Can be used to reproduce image generations. | [optional] -**n** | **int** | Number of image results to generate. | [optional] [default to 1] -**height** | **int** | Height of the image to generate in number of pixels. | [optional] [default to 1024] -**width** | **int** | Width of the image to generate in number of pixels. | [optional] [default to 1024] -**negative_prompt** | **str** | The prompt or prompts not to guide the image generation. | [optional] -**response_format** | **str** | Format of the image response. Can be either a base64 string or a URL. | [optional] -**guidance** | **float** | Adjusts the alignment of the generated image with the input prompt. Higher values (e.g., 8-10) make the output more faithful to the prompt, while lower values (e.g., 1-5) encourage more creative freedom. | [optional] [default to 3.5] -**output_format** | **str** | The format of the image response. Can be either be `jpeg` or `png`. Defaults to `jpeg`. | [optional] [default to 'jpeg'] -**image_loras** | [**List[ImagesGenerationsPostRequestImageLorasInner]**](ImagesGenerationsPostRequestImageLorasInner.md) | An array of objects that define LoRAs (Low-Rank Adaptations) to influence the generated image. 
| [optional] - -## Example - -```python -from together.generated.models.images_generations_post_request import ImagesGenerationsPostRequest - -# TODO update the JSON string below -json = "{}" -# create an instance of ImagesGenerationsPostRequest from a JSON string -images_generations_post_request_instance = ImagesGenerationsPostRequest.from_json(json) -# print the JSON string representation of the object -print(ImagesGenerationsPostRequest.to_json()) - -# convert the object into a dict -images_generations_post_request_dict = images_generations_post_request_instance.to_dict() -# create an instance of ImagesGenerationsPostRequest from a dict -images_generations_post_request_from_dict = ImagesGenerationsPostRequest.from_dict(images_generations_post_request_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md b/src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md deleted file mode 100644 index 6bb54e5e..00000000 --- a/src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md +++ /dev/null @@ -1,28 +0,0 @@ -# ImagesGenerationsPostRequestImageLorasInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**path** | **str** | The URL of the LoRA to apply (e.g. https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA). | -**scale** | **float** | The strength of the LoRA's influence. Most LoRA's recommend a value of 1. 
| - -## Example - -```python -from together.generated.models.images_generations_post_request_image_loras_inner import ImagesGenerationsPostRequestImageLorasInner - -# TODO update the JSON string below -json = "{}" -# create an instance of ImagesGenerationsPostRequestImageLorasInner from a JSON string -images_generations_post_request_image_loras_inner_instance = ImagesGenerationsPostRequestImageLorasInner.from_json(json) -# print the JSON string representation of the object -print(ImagesGenerationsPostRequestImageLorasInner.to_json()) - -# convert the object into a dict -images_generations_post_request_image_loras_inner_dict = images_generations_post_request_image_loras_inner_instance.to_dict() -# create an instance of ImagesGenerationsPostRequestImageLorasInner from a dict -images_generations_post_request_image_loras_inner_from_dict = ImagesGenerationsPostRequestImageLorasInner.from_dict(images_generations_post_request_image_loras_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesGenerationsPostRequestModel.md b/src/together/generated/docs/ImagesGenerationsPostRequestModel.md deleted file mode 100644 index 333020df..00000000 --- a/src/together/generated/docs/ImagesGenerationsPostRequestModel.md +++ /dev/null @@ -1,27 +0,0 @@ -# ImagesGenerationsPostRequestModel - -The model to use for image generation.

[See all of Together AI's image models](https://docs.together.ai/docs/serverless-models#image-models) - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.images_generations_post_request_model import ImagesGenerationsPostRequestModel - -# TODO update the JSON string below -json = "{}" -# create an instance of ImagesGenerationsPostRequestModel from a JSON string -images_generations_post_request_model_instance = ImagesGenerationsPostRequestModel.from_json(json) -# print the JSON string representation of the object -print(ImagesGenerationsPostRequestModel.to_json()) - -# convert the object into a dict -images_generations_post_request_model_dict = images_generations_post_request_model_instance.to_dict() -# create an instance of ImagesGenerationsPostRequestModel from a dict -images_generations_post_request_model_from_dict = ImagesGenerationsPostRequestModel.from_dict(images_generations_post_request_model_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LRScheduler.md b/src/together/generated/docs/LRScheduler.md deleted file mode 100644 index 6580bafd..00000000 --- a/src/together/generated/docs/LRScheduler.md +++ /dev/null @@ -1,28 +0,0 @@ -# LRScheduler - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**lr_scheduler_type** | **str** | | -**lr_scheduler_args** | [**LinearLRSchedulerArgs**](.md) | | [optional] - -## Example - -```python -from together.generated.models.lr_scheduler import LRScheduler - -# TODO update the JSON string below -json = "{}" -# create an instance of LRScheduler from a JSON string -lr_scheduler_instance = LRScheduler.from_json(json) -# print the JSON string representation of the object 
-print(LRScheduler.to_json()) - -# convert the object into a dict -lr_scheduler_dict = lr_scheduler_instance.to_dict() -# create an instance of LRScheduler from a dict -lr_scheduler_from_dict = LRScheduler.from_dict(lr_scheduler_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LinearLRSchedulerArgs.md b/src/together/generated/docs/LinearLRSchedulerArgs.md deleted file mode 100644 index 82240c87..00000000 --- a/src/together/generated/docs/LinearLRSchedulerArgs.md +++ /dev/null @@ -1,27 +0,0 @@ -# LinearLRSchedulerArgs - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**min_lr_ratio** | **float** | The ratio of the final learning rate to the peak learning rate | [optional] [default to 0.0] - -## Example - -```python -from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs - -# TODO update the JSON string below -json = "{}" -# create an instance of LinearLRSchedulerArgs from a JSON string -linear_lr_scheduler_args_instance = LinearLRSchedulerArgs.from_json(json) -# print the JSON string representation of the object -print(LinearLRSchedulerArgs.to_json()) - -# convert the object into a dict -linear_lr_scheduler_args_dict = linear_lr_scheduler_args_instance.to_dict() -# create an instance of LinearLRSchedulerArgs from a dict -linear_lr_scheduler_args_from_dict = LinearLRSchedulerArgs.from_dict(linear_lr_scheduler_args_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListEndpoint.md b/src/together/generated/docs/ListEndpoint.md deleted file mode 100644 index c7a226ea..00000000 --- a/src/together/generated/docs/ListEndpoint.md +++ /dev/null @@ -1,35 
+0,0 @@ -# ListEndpoint - -Details about an endpoint when listed via the list endpoint - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | The type of object | -**id** | **str** | Unique identifier for the endpoint | -**name** | **str** | System name for the endpoint | -**model** | **str** | The model deployed on this endpoint | -**type** | **str** | The type of endpoint | -**owner** | **str** | The owner of this endpoint | -**state** | **str** | Current state of the endpoint | -**created_at** | **datetime** | Timestamp when the endpoint was created | - -## Example - -```python -from together.generated.models.list_endpoint import ListEndpoint - -# TODO update the JSON string below -json = "{}" -# create an instance of ListEndpoint from a JSON string -list_endpoint_instance = ListEndpoint.from_json(json) -# print the JSON string representation of the object -print(ListEndpoint.to_json()) - -# convert the object into a dict -list_endpoint_dict = list_endpoint_instance.to_dict() -# create an instance of ListEndpoint from a dict -list_endpoint_from_dict = ListEndpoint.from_dict(list_endpoint_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListEndpoints200Response.md b/src/together/generated/docs/ListEndpoints200Response.md deleted file mode 100644 index 16babfb5..00000000 --- a/src/together/generated/docs/ListEndpoints200Response.md +++ /dev/null @@ -1,28 +0,0 @@ -# ListEndpoints200Response - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | -**data** | [**List[ListEndpoint]**](ListEndpoint.md) | | - -## Example - -```python -from together.generated.models.list_endpoints200_response import ListEndpoints200Response - -# TODO 
update the JSON string below -json = "{}" -# create an instance of ListEndpoints200Response from a JSON string -list_endpoints200_response_instance = ListEndpoints200Response.from_json(json) -# print the JSON string representation of the object -print(ListEndpoints200Response.to_json()) - -# convert the object into a dict -list_endpoints200_response_dict = list_endpoints200_response_instance.to_dict() -# create an instance of ListEndpoints200Response from a dict -list_endpoints200_response_from_dict = ListEndpoints200Response.from_dict(list_endpoints200_response_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200Response.md b/src/together/generated/docs/ListHardware200Response.md deleted file mode 100644 index 60fe285e..00000000 --- a/src/together/generated/docs/ListHardware200Response.md +++ /dev/null @@ -1,28 +0,0 @@ -# ListHardware200Response - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | | -**data** | [**List[HardwareWithStatus]**](HardwareWithStatus.md) | | - -## Example - -```python -from together.generated.models.list_hardware200_response import ListHardware200Response - -# TODO update the JSON string below -json = "{}" -# create an instance of ListHardware200Response from a JSON string -list_hardware200_response_instance = ListHardware200Response.from_json(json) -# print the JSON string representation of the object -print(ListHardware200Response.to_json()) - -# convert the object into a dict -list_hardware200_response_dict = list_hardware200_response_instance.to_dict() -# create an instance of ListHardware200Response from a dict -list_hardware200_response_from_dict = ListHardware200Response.from_dict(list_hardware200_response_dict) -``` -[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LoRATrainingType.md b/src/together/generated/docs/LoRATrainingType.md deleted file mode 100644 index e977d18b..00000000 --- a/src/together/generated/docs/LoRATrainingType.md +++ /dev/null @@ -1,31 +0,0 @@ -# LoRATrainingType - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**type** | **str** | | -**lora_r** | **int** | | -**lora_alpha** | **int** | | -**lora_dropout** | **float** | | [optional] [default to 0.0] -**lora_trainable_modules** | **str** | | [optional] [default to 'all-linear'] - -## Example - -```python -from together.generated.models.lo_ra_training_type import LoRATrainingType - -# TODO update the JSON string below -json = "{}" -# create an instance of LoRATrainingType from a JSON string -lo_ra_training_type_instance = LoRATrainingType.from_json(json) -# print the JSON string representation of the object -print(LoRATrainingType.to_json()) - -# convert the object into a dict -lo_ra_training_type_dict = lo_ra_training_type_instance.to_dict() -# create an instance of LoRATrainingType from a dict -lo_ra_training_type_from_dict = LoRATrainingType.from_dict(lo_ra_training_type_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LogprobsPart.md b/src/together/generated/docs/LogprobsPart.md deleted file mode 100644 index d489ad95..00000000 --- a/src/together/generated/docs/LogprobsPart.md +++ /dev/null @@ -1,29 +0,0 @@ -# LogprobsPart - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**token_ids** | **List[float]** | List of token IDs corresponding to the logprobs | [optional] 
-**tokens** | **List[str]** | List of token strings | [optional] -**token_logprobs** | **List[float]** | List of token log probabilities | [optional] - -## Example - -```python -from together.generated.models.logprobs_part import LogprobsPart - -# TODO update the JSON string below -json = "{}" -# create an instance of LogprobsPart from a JSON string -logprobs_part_instance = LogprobsPart.from_json(json) -# print the JSON string representation of the object -print(LogprobsPart.to_json()) - -# convert the object into a dict -logprobs_part_dict = logprobs_part_instance.to_dict() -# create an instance of LogprobsPart from a dict -logprobs_part_from_dict = LogprobsPart.from_dict(logprobs_part_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ModelInfo.md b/src/together/generated/docs/ModelInfo.md deleted file mode 100644 index 06bac6fe..00000000 --- a/src/together/generated/docs/ModelInfo.md +++ /dev/null @@ -1,36 +0,0 @@ -# ModelInfo - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**id** | **str** | | -**object** | **str** | | -**created** | **int** | | -**type** | **str** | | -**display_name** | **str** | | [optional] -**organization** | **str** | | [optional] -**link** | **str** | | [optional] -**license** | **str** | | [optional] -**context_length** | **int** | | [optional] -**pricing** | [**Pricing**](Pricing.md) | | [optional] - -## Example - -```python -from together.generated.models.model_info import ModelInfo - -# TODO update the JSON string below -json = "{}" -# create an instance of ModelInfo from a JSON string -model_info_instance = ModelInfo.from_json(json) -# print the JSON string representation of the object -print(ModelInfo.to_json()) - -# convert the object into a dict -model_info_dict = model_info_instance.to_dict() -# 
create an instance of ModelInfo from a dict -model_info_from_dict = ModelInfo.from_dict(model_info_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ModelsApi.md b/src/together/generated/docs/ModelsApi.md deleted file mode 100644 index d5584a59..00000000 --- a/src/together/generated/docs/ModelsApi.md +++ /dev/null @@ -1,87 +0,0 @@ -# together.generated.ModelsApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**models**](ModelsApi.md#models) | **GET** /models | List all models - - -# **models** -> List[ModelInfo] models() - -List all models - -Lists all of Together's open-source models - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.model_info import ModelInfo -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. 
- -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.ModelsApi(api_client) - - try: - # List all models - api_response = await api_instance.models() - print("The response of ModelsApi->models:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling ModelsApi->models: %s\n" % e) -``` - - - -### Parameters - -This endpoint does not need any parameter. - -### Return type - -[**List[ModelInfo]**](ModelInfo.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | 200 | - | -**400** | BadRequest | - | -**401** | Unauthorized | - | -**404** | NotFound | - | -**429** | RateLimit | - | -**504** | Timeout | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/Pricing.md b/src/together/generated/docs/Pricing.md deleted file mode 100644 index 24a5d7b8..00000000 --- a/src/together/generated/docs/Pricing.md +++ /dev/null @@ -1,31 +0,0 @@ -# Pricing - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**hourly** | **float** | | -**input** | **float** | | -**output** | **float** | | -**base** | **float** | | -**finetune** | **float** | | - -## Example - -```python -from together.generated.models.pricing import Pricing - -# TODO update the JSON string below -json = "{}" -# create an 
instance of Pricing from a JSON string -pricing_instance = Pricing.from_json(json) -# print the JSON string representation of the object -print(Pricing.to_json()) - -# convert the object into a dict -pricing_dict = pricing_instance.to_dict() -# create an instance of Pricing from a dict -pricing_from_dict = Pricing.from_dict(pricing_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/PromptPartInner.md b/src/together/generated/docs/PromptPartInner.md deleted file mode 100644 index e1270712..00000000 --- a/src/together/generated/docs/PromptPartInner.md +++ /dev/null @@ -1,28 +0,0 @@ -# PromptPartInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**text** | **str** | | [optional] -**logprobs** | [**LogprobsPart**](LogprobsPart.md) | | [optional] - -## Example - -```python -from together.generated.models.prompt_part_inner import PromptPartInner - -# TODO update the JSON string below -json = "{}" -# create an instance of PromptPartInner from a JSON string -prompt_part_inner_instance = PromptPartInner.from_json(json) -# print the JSON string representation of the object -print(PromptPartInner.to_json()) - -# convert the object into a dict -prompt_part_inner_dict = prompt_part_inner_instance.to_dict() -# create an instance of PromptPartInner from a dict -prompt_part_inner_from_dict = PromptPartInner.from_dict(prompt_part_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankApi.md b/src/together/generated/docs/RerankApi.md deleted file mode 100644 index 2990c1f9..00000000 --- a/src/together/generated/docs/RerankApi.md +++ /dev/null @@ -1,93 +0,0 @@ -# 
together.generated.RerankApi - -All URIs are relative to *https://api.together.xyz/v1* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**rerank**](RerankApi.md#rerank) | **POST** /rerank | Create a rerank request - - -# **rerank** -> RerankResponse rerank(rerank_request=rerank_request) - -Create a rerank request - -Query a reranker model - -### Example - -* Bearer Authentication (bearerAuth): - -```python -import together.generated -from together.generated.models.rerank_request import RerankRequest -from together.generated.models.rerank_response import RerankResponse -from together.generated.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.together.xyz/v1 -# See configuration.py for a list of all supported configuration parameters. -configuration = together.generated.Configuration( - host = "https://api.together.xyz/v1" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. 
- -# Configure Bearer authorization: bearerAuth -configuration = together.generated.Configuration( - access_token = os.environ["BEARER_TOKEN"] -) - -# Enter a context with an instance of the API client -async with together.generated.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = together.generated.RerankApi(api_client) - rerank_request = together.generated.RerankRequest() # RerankRequest | (optional) - - try: - # Create a rerank request - api_response = await api_instance.rerank(rerank_request=rerank_request) - print("The response of RerankApi->rerank:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling RerankApi->rerank: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **rerank_request** | [**RerankRequest**](RerankRequest.md)| | [optional] - -### Return type - -[**RerankResponse**](RerankResponse.md) - -### Authorization - -[bearerAuth](../README.md#bearerAuth) - -### HTTP request headers - - - **Content-Type**: application/json - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | 200 | - | -**400** | BadRequest | - | -**401** | Unauthorized | - | -**404** | NotFound | - | -**429** | RateLimit | - | -**503** | Overloaded | - | -**504** | Timeout | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankRequest.md b/src/together/generated/docs/RerankRequest.md deleted file mode 100644 index 4c7ce08a..00000000 --- a/src/together/generated/docs/RerankRequest.md +++ /dev/null @@ -1,32 +0,0 @@ -# RerankRequest - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | 
------------- | ------------- -**model** | [**RerankRequestModel**](RerankRequestModel.md) | | -**query** | **str** | The search query to be used for ranking. | -**documents** | [**RerankRequestDocuments**](RerankRequestDocuments.md) | | -**top_n** | **int** | The number of top results to return. | [optional] -**return_documents** | **bool** | Whether to return supplied documents with the response. | [optional] -**rank_fields** | **List[str]** | List of keys in the JSON Object document to rank by. Defaults to use all supplied keys for ranking. | [optional] - -## Example - -```python -from together.generated.models.rerank_request import RerankRequest - -# TODO update the JSON string below -json = "{}" -# create an instance of RerankRequest from a JSON string -rerank_request_instance = RerankRequest.from_json(json) -# print the JSON string representation of the object -print(RerankRequest.to_json()) - -# convert the object into a dict -rerank_request_dict = rerank_request_instance.to_dict() -# create an instance of RerankRequest from a dict -rerank_request_from_dict = RerankRequest.from_dict(rerank_request_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankRequestDocuments.md b/src/together/generated/docs/RerankRequestDocuments.md deleted file mode 100644 index 51411574..00000000 --- a/src/together/generated/docs/RerankRequestDocuments.md +++ /dev/null @@ -1,27 +0,0 @@ -# RerankRequestDocuments - -List of documents, which can be either strings or objects. 
- -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.rerank_request_documents import RerankRequestDocuments - -# TODO update the JSON string below -json = "{}" -# create an instance of RerankRequestDocuments from a JSON string -rerank_request_documents_instance = RerankRequestDocuments.from_json(json) -# print the JSON string representation of the object -print(RerankRequestDocuments.to_json()) - -# convert the object into a dict -rerank_request_documents_dict = rerank_request_documents_instance.to_dict() -# create an instance of RerankRequestDocuments from a dict -rerank_request_documents_from_dict = RerankRequestDocuments.from_dict(rerank_request_documents_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankRequestModel.md b/src/together/generated/docs/RerankRequestModel.md deleted file mode 100644 index 8d99f7bd..00000000 --- a/src/together/generated/docs/RerankRequestModel.md +++ /dev/null @@ -1,27 +0,0 @@ -# RerankRequestModel - -The model to be used for the rerank request.

[See all of Together AI's rerank models](https://docs.together.ai/docs/serverless-models#rerank-models) - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - -## Example - -```python -from together.generated.models.rerank_request_model import RerankRequestModel - -# TODO update the JSON string below -json = "{}" -# create an instance of RerankRequestModel from a JSON string -rerank_request_model_instance = RerankRequestModel.from_json(json) -# print the JSON string representation of the object -print(RerankRequestModel.to_json()) - -# convert the object into a dict -rerank_request_model_dict = rerank_request_model_instance.to_dict() -# create an instance of RerankRequestModel from a dict -rerank_request_model_from_dict = RerankRequestModel.from_dict(rerank_request_model_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankResponse.md b/src/together/generated/docs/RerankResponse.md deleted file mode 100644 index a40aa152..00000000 --- a/src/together/generated/docs/RerankResponse.md +++ /dev/null @@ -1,31 +0,0 @@ -# RerankResponse - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**object** | **str** | Object type | -**id** | **str** | Request ID | [optional] -**model** | **str** | The model to be used for the rerank request. 
| -**results** | [**List[RerankResponseResultsInner]**](RerankResponseResultsInner.md) | | -**usage** | [**UsageData**](UsageData.md) | | [optional] - -## Example - -```python -from together.generated.models.rerank_response import RerankResponse - -# TODO update the JSON string below -json = "{}" -# create an instance of RerankResponse from a JSON string -rerank_response_instance = RerankResponse.from_json(json) -# print the JSON string representation of the object -print(RerankResponse.to_json()) - -# convert the object into a dict -rerank_response_dict = rerank_response_instance.to_dict() -# create an instance of RerankResponse from a dict -rerank_response_from_dict = RerankResponse.from_dict(rerank_response_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankResponseResultsInner.md b/src/together/generated/docs/RerankResponseResultsInner.md deleted file mode 100644 index 0f245895..00000000 --- a/src/together/generated/docs/RerankResponseResultsInner.md +++ /dev/null @@ -1,29 +0,0 @@ -# RerankResponseResultsInner - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**index** | **int** | | -**relevance_score** | **float** | | -**document** | [**RerankResponseResultsInnerDocument**](RerankResponseResultsInnerDocument.md) | | - -## Example - -```python -from together.generated.models.rerank_response_results_inner import RerankResponseResultsInner - -# TODO update the JSON string below -json = "{}" -# create an instance of RerankResponseResultsInner from a JSON string -rerank_response_results_inner_instance = RerankResponseResultsInner.from_json(json) -# print the JSON string representation of the object -print(RerankResponseResultsInner.to_json()) - -# convert the object into a dict -rerank_response_results_inner_dict = 
rerank_response_results_inner_instance.to_dict() -# create an instance of RerankResponseResultsInner from a dict -rerank_response_results_inner_from_dict = RerankResponseResultsInner.from_dict(rerank_response_results_inner_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankResponseResultsInnerDocument.md b/src/together/generated/docs/RerankResponseResultsInnerDocument.md deleted file mode 100644 index 75ea6439..00000000 --- a/src/together/generated/docs/RerankResponseResultsInnerDocument.md +++ /dev/null @@ -1,27 +0,0 @@ -# RerankResponseResultsInnerDocument - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**text** | **str** | | [optional] - -## Example - -```python -from together.generated.models.rerank_response_results_inner_document import RerankResponseResultsInnerDocument - -# TODO update the JSON string below -json = "{}" -# create an instance of RerankResponseResultsInnerDocument from a JSON string -rerank_response_results_inner_document_instance = RerankResponseResultsInnerDocument.from_json(json) -# print the JSON string representation of the object -print(RerankResponseResultsInnerDocument.to_json()) - -# convert the object into a dict -rerank_response_results_inner_document_dict = rerank_response_results_inner_document_instance.to_dict() -# create an instance of RerankResponseResultsInnerDocument from a dict -rerank_response_results_inner_document_from_dict = RerankResponseResultsInnerDocument.from_dict(rerank_response_results_inner_document_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/StreamSentinel.md b/src/together/generated/docs/StreamSentinel.md 
deleted file mode 100644 index aeb5f6f4..00000000 --- a/src/together/generated/docs/StreamSentinel.md +++ /dev/null @@ -1,27 +0,0 @@ -# StreamSentinel - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**data** | **str** | | - -## Example - -```python -from together.generated.models.stream_sentinel import StreamSentinel - -# TODO update the JSON string below -json = "{}" -# create an instance of StreamSentinel from a JSON string -stream_sentinel_instance = StreamSentinel.from_json(json) -# print the JSON string representation of the object -print(StreamSentinel.to_json()) - -# convert the object into a dict -stream_sentinel_dict = stream_sentinel_instance.to_dict() -# create an instance of StreamSentinel from a dict -stream_sentinel_from_dict = StreamSentinel.from_dict(stream_sentinel_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolChoice.md b/src/together/generated/docs/ToolChoice.md deleted file mode 100644 index e0d6a775..00000000 --- a/src/together/generated/docs/ToolChoice.md +++ /dev/null @@ -1,30 +0,0 @@ -# ToolChoice - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**index** | **float** | | -**id** | **str** | | -**type** | **str** | | -**function** | [**ToolChoiceFunction**](ToolChoiceFunction.md) | | - -## Example - -```python -from together.generated.models.tool_choice import ToolChoice - -# TODO update the JSON string below -json = "{}" -# create an instance of ToolChoice from a JSON string -tool_choice_instance = ToolChoice.from_json(json) -# print the JSON string representation of the object -print(ToolChoice.to_json()) - -# convert the object into a dict -tool_choice_dict = tool_choice_instance.to_dict() -# create an instance of ToolChoice from a 
dict -tool_choice_from_dict = ToolChoice.from_dict(tool_choice_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolChoiceFunction.md b/src/together/generated/docs/ToolChoiceFunction.md deleted file mode 100644 index a740c34e..00000000 --- a/src/together/generated/docs/ToolChoiceFunction.md +++ /dev/null @@ -1,28 +0,0 @@ -# ToolChoiceFunction - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**name** | **str** | | -**arguments** | **str** | | - -## Example - -```python -from together.generated.models.tool_choice_function import ToolChoiceFunction - -# TODO update the JSON string below -json = "{}" -# create an instance of ToolChoiceFunction from a JSON string -tool_choice_function_instance = ToolChoiceFunction.from_json(json) -# print the JSON string representation of the object -print(ToolChoiceFunction.to_json()) - -# convert the object into a dict -tool_choice_function_dict = tool_choice_function_instance.to_dict() -# create an instance of ToolChoiceFunction from a dict -tool_choice_function_from_dict = ToolChoiceFunction.from_dict(tool_choice_function_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolsPart.md b/src/together/generated/docs/ToolsPart.md deleted file mode 100644 index 733e311e..00000000 --- a/src/together/generated/docs/ToolsPart.md +++ /dev/null @@ -1,28 +0,0 @@ -# ToolsPart - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**type** | **str** | | [optional] -**function** | [**ToolsPartFunction**](ToolsPartFunction.md) | | [optional] - -## Example - -```python -from 
together.generated.models.tools_part import ToolsPart - -# TODO update the JSON string below -json = "{}" -# create an instance of ToolsPart from a JSON string -tools_part_instance = ToolsPart.from_json(json) -# print the JSON string representation of the object -print(ToolsPart.to_json()) - -# convert the object into a dict -tools_part_dict = tools_part_instance.to_dict() -# create an instance of ToolsPart from a dict -tools_part_from_dict = ToolsPart.from_dict(tools_part_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolsPartFunction.md b/src/together/generated/docs/ToolsPartFunction.md deleted file mode 100644 index 27d59e43..00000000 --- a/src/together/generated/docs/ToolsPartFunction.md +++ /dev/null @@ -1,29 +0,0 @@ -# ToolsPartFunction - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**description** | **str** | | [optional] -**name** | **str** | | [optional] -**parameters** | **Dict[str, object]** | A map of parameter names to their values. 
| [optional] - -## Example - -```python -from together.generated.models.tools_part_function import ToolsPartFunction - -# TODO update the JSON string below -json = "{}" -# create an instance of ToolsPartFunction from a JSON string -tools_part_function_instance = ToolsPartFunction.from_json(json) -# print the JSON string representation of the object -print(ToolsPartFunction.to_json()) - -# convert the object into a dict -tools_part_function_dict = tools_part_function_instance.to_dict() -# create an instance of ToolsPartFunction from a dict -tools_part_function_from_dict = ToolsPartFunction.from_dict(tools_part_function_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/UpdateEndpointRequest.md b/src/together/generated/docs/UpdateEndpointRequest.md deleted file mode 100644 index 76fb7a8e..00000000 --- a/src/together/generated/docs/UpdateEndpointRequest.md +++ /dev/null @@ -1,29 +0,0 @@ -# UpdateEndpointRequest - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**display_name** | **str** | A human-readable name for the endpoint | [optional] -**state** | **str** | The desired state of the endpoint | [optional] -**autoscaling** | [**Autoscaling**](Autoscaling.md) | New autoscaling configuration for the endpoint | [optional] - -## Example - -```python -from together.generated.models.update_endpoint_request import UpdateEndpointRequest - -# TODO update the JSON string below -json = "{}" -# create an instance of UpdateEndpointRequest from a JSON string -update_endpoint_request_instance = UpdateEndpointRequest.from_json(json) -# print the JSON string representation of the object -print(UpdateEndpointRequest.to_json()) - -# convert the object into a dict -update_endpoint_request_dict = update_endpoint_request_instance.to_dict() -# create an 
instance of UpdateEndpointRequest from a dict -update_endpoint_request_from_dict = UpdateEndpointRequest.from_dict(update_endpoint_request_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/UsageData.md b/src/together/generated/docs/UsageData.md deleted file mode 100644 index 0a0f4692..00000000 --- a/src/together/generated/docs/UsageData.md +++ /dev/null @@ -1,29 +0,0 @@ -# UsageData - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**prompt_tokens** | **int** | | -**completion_tokens** | **int** | | -**total_tokens** | **int** | | - -## Example - -```python -from together.generated.models.usage_data import UsageData - -# TODO update the JSON string below -json = "{}" -# create an instance of UsageData from a JSON string -usage_data_instance = UsageData.from_json(json) -# print the JSON string representation of the object -print(UsageData.to_json()) - -# convert the object into a dict -usage_data_dict = usage_data_instance.to_dict() -# create an instance of UsageData from a dict -usage_data_from_dict = UsageData.from_dict(usage_data_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/exceptions.py b/src/together/generated/exceptions.py deleted file mode 100644 index ade3cc31..00000000 --- a/src/together/generated/exceptions.py +++ /dev/null @@ -1,220 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - -from typing import Any, Optional -from typing_extensions import Self - - -class OpenApiException(Exception): - """The base exception class for all OpenAPIExceptions""" - - -class ApiTypeError(OpenApiException, TypeError): - def __init__( - self, msg, path_to_item=None, valid_classes=None, key_type=None - ) -> None: - """Raises an exception for TypeErrors - - Args: - msg (str): the exception message - - Keyword Args: - path_to_item (list): a list of keys an indices to get to the - current_item - None if unset - valid_classes (tuple): the primitive classes that current item - should be an instance of - None if unset - key_type (bool): False if our value is a value in a dict - True if it is a key in a dict - False if our item is an item in a list - None if unset - """ - self.path_to_item = path_to_item - self.valid_classes = valid_classes - self.key_type = key_type - full_msg = msg - if path_to_item: - full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) - super(ApiTypeError, self).__init__(full_msg) - - -class ApiValueError(OpenApiException, ValueError): - def __init__(self, msg, path_to_item=None) -> None: - """ - Args: - msg (str): the exception message - - Keyword Args: - path_to_item (list) the path to the exception in the - received_data dict. None if unset - """ - - self.path_to_item = path_to_item - full_msg = msg - if path_to_item: - full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) - super(ApiValueError, self).__init__(full_msg) - - -class ApiAttributeError(OpenApiException, AttributeError): - def __init__(self, msg, path_to_item=None) -> None: - """ - Raised when an attribute reference or assignment fails. 
- - Args: - msg (str): the exception message - - Keyword Args: - path_to_item (None/list) the path to the exception in the - received_data dict - """ - self.path_to_item = path_to_item - full_msg = msg - if path_to_item: - full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) - super(ApiAttributeError, self).__init__(full_msg) - - -class ApiKeyError(OpenApiException, KeyError): - def __init__(self, msg, path_to_item=None) -> None: - """ - Args: - msg (str): the exception message - - Keyword Args: - path_to_item (None/list) the path to the exception in the - received_data dict - """ - self.path_to_item = path_to_item - full_msg = msg - if path_to_item: - full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) - super(ApiKeyError, self).__init__(full_msg) - - -class ApiException(OpenApiException): - - def __init__( - self, - status=None, - reason=None, - http_resp=None, - *, - body: Optional[str] = None, - data: Optional[Any] = None, - ) -> None: - self.status = status - self.reason = reason - self.body = body - self.data = data - self.headers = None - - if http_resp: - if self.status is None: - self.status = http_resp.status - if self.reason is None: - self.reason = http_resp.reason - if self.body is None: - try: - self.body = http_resp.data.decode("utf-8") - except Exception: - pass - self.headers = http_resp.getheaders() - - @classmethod - def from_response( - cls, - *, - http_resp, - body: Optional[str], - data: Optional[Any], - ) -> Self: - if http_resp.status == 400: - raise BadRequestException(http_resp=http_resp, body=body, data=data) - - if http_resp.status == 401: - raise UnauthorizedException(http_resp=http_resp, body=body, data=data) - - if http_resp.status == 403: - raise ForbiddenException(http_resp=http_resp, body=body, data=data) - - if http_resp.status == 404: - raise NotFoundException(http_resp=http_resp, body=body, data=data) - - # Added new conditions for 409 and 422 - if http_resp.status == 409: - raise 
ConflictException(http_resp=http_resp, body=body, data=data) - - if http_resp.status == 422: - raise UnprocessableEntityException( - http_resp=http_resp, body=body, data=data - ) - - if 500 <= http_resp.status <= 599: - raise ServiceException(http_resp=http_resp, body=body, data=data) - raise ApiException(http_resp=http_resp, body=body, data=data) - - def __str__(self): - """Custom error messages for exception""" - error_message = "({0})\n" "Reason: {1}\n".format(self.status, self.reason) - if self.headers: - error_message += "HTTP response headers: {0}\n".format(self.headers) - - if self.data or self.body: - error_message += "HTTP response body: {0}\n".format(self.data or self.body) - - return error_message - - -class BadRequestException(ApiException): - pass - - -class NotFoundException(ApiException): - pass - - -class UnauthorizedException(ApiException): - pass - - -class ForbiddenException(ApiException): - pass - - -class ServiceException(ApiException): - pass - - -class ConflictException(ApiException): - """Exception for HTTP 409 Conflict.""" - - pass - - -class UnprocessableEntityException(ApiException): - """Exception for HTTP 422 Unprocessable Entity.""" - - pass - - -def render_path(path_to_item): - """Returns a string representation of a path""" - result = "" - for pth in path_to_item: - if isinstance(pth, int): - result += "[{0}]".format(pth) - else: - result += "['{0}']".format(pth) - return result diff --git a/src/together/generated/models/__init__.py b/src/together/generated/models/__init__.py deleted file mode 100644 index 2fe07559..00000000 --- a/src/together/generated/models/__init__.py +++ /dev/null @@ -1,185 +0,0 @@ -# coding: utf-8 - -# flake8: noqa -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -# import models into model package -from together.generated.models.audio_speech_request import AudioSpeechRequest -from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel -from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice -from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk -from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent -from together.generated.models.audio_speech_stream_response import ( - AudioSpeechStreamResponse, -) -from together.generated.models.autoscaling import Autoscaling -from together.generated.models.chat_completion_assistant_message_param import ( - ChatCompletionAssistantMessageParam, -) -from together.generated.models.chat_completion_choice import ChatCompletionChoice -from together.generated.models.chat_completion_choice_delta import ( - ChatCompletionChoiceDelta, -) -from together.generated.models.chat_completion_choice_delta_function_call import ( - ChatCompletionChoiceDeltaFunctionCall, -) -from together.generated.models.chat_completion_choices_data_inner import ( - ChatCompletionChoicesDataInner, -) -from together.generated.models.chat_completion_choices_data_inner_logprobs import ( - ChatCompletionChoicesDataInnerLogprobs, -) -from together.generated.models.chat_completion_chunk import ChatCompletionChunk -from together.generated.models.chat_completion_chunk_choices_inner import ( - ChatCompletionChunkChoicesInner, -) -from together.generated.models.chat_completion_event import ChatCompletionEvent -from together.generated.models.chat_completion_function_message_param import ( - ChatCompletionFunctionMessageParam, -) -from together.generated.models.chat_completion_message import ChatCompletionMessage -from together.generated.models.chat_completion_message_function_call import ( - ChatCompletionMessageFunctionCall, -) -from 
together.generated.models.chat_completion_message_param import ( - ChatCompletionMessageParam, -) -from together.generated.models.chat_completion_request import ChatCompletionRequest -from together.generated.models.chat_completion_request_function_call import ( - ChatCompletionRequestFunctionCall, -) -from together.generated.models.chat_completion_request_function_call_one_of import ( - ChatCompletionRequestFunctionCallOneOf, -) -from together.generated.models.chat_completion_request_messages_inner import ( - ChatCompletionRequestMessagesInner, -) -from together.generated.models.chat_completion_request_model import ( - ChatCompletionRequestModel, -) -from together.generated.models.chat_completion_request_response_format import ( - ChatCompletionRequestResponseFormat, -) -from together.generated.models.chat_completion_request_tool_choice import ( - ChatCompletionRequestToolChoice, -) -from together.generated.models.chat_completion_response import ChatCompletionResponse -from together.generated.models.chat_completion_stream import ChatCompletionStream -from together.generated.models.chat_completion_system_message_param import ( - ChatCompletionSystemMessageParam, -) -from together.generated.models.chat_completion_token import ChatCompletionToken -from together.generated.models.chat_completion_tool import ChatCompletionTool -from together.generated.models.chat_completion_tool_function import ( - ChatCompletionToolFunction, -) -from together.generated.models.chat_completion_tool_message_param import ( - ChatCompletionToolMessageParam, -) -from together.generated.models.chat_completion_user_message_param import ( - ChatCompletionUserMessageParam, -) -from together.generated.models.completion_choice import CompletionChoice -from together.generated.models.completion_choices_data_inner import ( - CompletionChoicesDataInner, -) -from together.generated.models.completion_chunk import CompletionChunk -from together.generated.models.completion_chunk_usage import 
CompletionChunkUsage -from together.generated.models.completion_event import CompletionEvent -from together.generated.models.completion_request import CompletionRequest -from together.generated.models.completion_request_model import CompletionRequestModel -from together.generated.models.completion_request_safety_model import ( - CompletionRequestSafetyModel, -) -from together.generated.models.completion_response import CompletionResponse -from together.generated.models.completion_stream import CompletionStream -from together.generated.models.completion_token import CompletionToken -from together.generated.models.create_endpoint_request import CreateEndpointRequest -from together.generated.models.dedicated_endpoint import DedicatedEndpoint -from together.generated.models.embeddings_request import EmbeddingsRequest -from together.generated.models.embeddings_request_input import EmbeddingsRequestInput -from together.generated.models.embeddings_request_model import EmbeddingsRequestModel -from together.generated.models.embeddings_response import EmbeddingsResponse -from together.generated.models.embeddings_response_data_inner import ( - EmbeddingsResponseDataInner, -) -from together.generated.models.endpoint_pricing import EndpointPricing -from together.generated.models.error_data import ErrorData -from together.generated.models.error_data_error import ErrorDataError -from together.generated.models.file_delete_response import FileDeleteResponse -from together.generated.models.file_list import FileList -from together.generated.models.file_object import FileObject -from together.generated.models.file_response import FileResponse -from together.generated.models.fine_tune_event import FineTuneEvent -from together.generated.models.fine_tunes_post_request import FineTunesPostRequest -from together.generated.models.fine_tunes_post_request_train_on_inputs import ( - FineTunesPostRequestTrainOnInputs, -) -from together.generated.models.fine_tunes_post_request_training_type 
import ( - FineTunesPostRequestTrainingType, -) -from together.generated.models.finetune_download_result import FinetuneDownloadResult -from together.generated.models.finetune_event_levels import FinetuneEventLevels -from together.generated.models.finetune_event_type import FinetuneEventType -from together.generated.models.finetune_job_status import FinetuneJobStatus -from together.generated.models.finetune_list import FinetuneList -from together.generated.models.finetune_list_events import FinetuneListEvents -from together.generated.models.finetune_response import FinetuneResponse -from together.generated.models.finetune_response_train_on_inputs import ( - FinetuneResponseTrainOnInputs, -) -from together.generated.models.finish_reason import FinishReason -from together.generated.models.full_training_type import FullTrainingType -from together.generated.models.hardware_availability import HardwareAvailability -from together.generated.models.hardware_spec import HardwareSpec -from together.generated.models.hardware_with_status import HardwareWithStatus -from together.generated.models.image_response import ImageResponse -from together.generated.models.image_response_data_inner import ImageResponseDataInner -from together.generated.models.images_generations_post_request import ( - ImagesGenerationsPostRequest, -) -from together.generated.models.images_generations_post_request_image_loras_inner import ( - ImagesGenerationsPostRequestImageLorasInner, -) -from together.generated.models.images_generations_post_request_model import ( - ImagesGenerationsPostRequestModel, -) -from together.generated.models.lr_scheduler import LRScheduler -from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs -from together.generated.models.list_endpoint import ListEndpoint -from together.generated.models.list_endpoints200_response import ( - ListEndpoints200Response, -) -from together.generated.models.list_hardware200_response import ListHardware200Response 
-from together.generated.models.lo_ra_training_type import LoRATrainingType -from together.generated.models.logprobs_part import LogprobsPart -from together.generated.models.model_info import ModelInfo -from together.generated.models.pricing import Pricing -from together.generated.models.prompt_part_inner import PromptPartInner -from together.generated.models.rerank_request import RerankRequest -from together.generated.models.rerank_request_documents import RerankRequestDocuments -from together.generated.models.rerank_request_model import RerankRequestModel -from together.generated.models.rerank_response import RerankResponse -from together.generated.models.rerank_response_results_inner import ( - RerankResponseResultsInner, -) -from together.generated.models.rerank_response_results_inner_document import ( - RerankResponseResultsInnerDocument, -) -from together.generated.models.stream_sentinel import StreamSentinel -from together.generated.models.tool_choice import ToolChoice -from together.generated.models.tool_choice_function import ToolChoiceFunction -from together.generated.models.tools_part import ToolsPart -from together.generated.models.tools_part_function import ToolsPartFunction -from together.generated.models.update_endpoint_request import UpdateEndpointRequest -from together.generated.models.usage_data import UsageData diff --git a/src/together/generated/models/audio_speech_request.py b/src/together/generated/models/audio_speech_request.py deleted file mode 100644 index af1a11ef..00000000 --- a/src/together/generated/models/audio_speech_request.py +++ /dev/null @@ -1,212 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictFloat, - StrictInt, - StrictStr, - field_validator, -) -from typing import Any, ClassVar, Dict, List, Optional, Union -from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel -from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice -from typing import Optional, Set -from typing_extensions import Self - - -class AudioSpeechRequest(BaseModel): - """ - AudioSpeechRequest - """ # noqa: E501 - - model: AudioSpeechRequestModel - input: StrictStr = Field(description="Input text to generate the audio for") - voice: AudioSpeechRequestVoice - response_format: Optional[StrictStr] = Field( - default="wav", description="The format of audio output" - ) - language: Optional[StrictStr] = Field( - default="en", description="Language of input text" - ) - response_encoding: Optional[StrictStr] = Field( - default="pcm_f32le", description="Audio encoding of response" - ) - sample_rate: Optional[Union[StrictFloat, StrictInt]] = Field( - default=44100, description="Sampling rate to use for the output audio" - ) - stream: Optional[StrictBool] = Field( - default=False, - description="If true, output is streamed for several characters at a time instead of waiting for the full response. The stream terminates with `data: [DONE]`. 
If false, return the encoded audio as octet stream", - ) - __properties: ClassVar[List[str]] = [ - "model", - "input", - "voice", - "response_format", - "language", - "response_encoding", - "sample_rate", - "stream", - ] - - @field_validator("response_format") - def response_format_validate_enum(cls, value): - """Validates the enum""" - if value is None: - return value - - if value not in set(["mp3", "wav", "raw"]): - raise ValueError("must be one of enum values ('mp3', 'wav', 'raw')") - return value - - @field_validator("language") - def language_validate_enum(cls, value): - """Validates the enum""" - if value is None: - return value - - if value not in set( - [ - "en", - "de", - "fr", - "es", - "hi", - "it", - "ja", - "ko", - "nl", - "pl", - "pt", - "ru", - "sv", - "tr", - "zh", - ] - ): - raise ValueError( - "must be one of enum values ('en', 'de', 'fr', 'es', 'hi', 'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ru', 'sv', 'tr', 'zh')" - ) - return value - - @field_validator("response_encoding") - def response_encoding_validate_enum(cls, value): - """Validates the enum""" - if value is None: - return value - - if value not in set(["pcm_f32le", "pcm_s16le", "pcm_mulaw", "pcm_alaw"]): - raise ValueError( - "must be one of enum values ('pcm_f32le', 'pcm_s16le', 'pcm_mulaw', 'pcm_alaw')" - ) - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of AudioSpeechRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def 
to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of model - if self.model: - _dict["model"] = self.model.to_dict() - # override the default output from pydantic by calling `to_dict()` of voice - if self.voice: - _dict["voice"] = self.voice.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of AudioSpeechRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "model": ( - AudioSpeechRequestModel.from_dict(obj["model"]) - if obj.get("model") is not None - else None - ), - "input": obj.get("input"), - "voice": ( - AudioSpeechRequestVoice.from_dict(obj["voice"]) - if obj.get("voice") is not None - else None - ), - "response_format": ( - obj.get("response_format") - if obj.get("response_format") is not None - else "wav" - ), - "language": ( - obj.get("language") if obj.get("language") is not None else "en" - ), - "response_encoding": ( - obj.get("response_encoding") - if obj.get("response_encoding") is not None - else "pcm_f32le" - ), - "sample_rate": ( - obj.get("sample_rate") - if obj.get("sample_rate") is not None - else 44100 - ), - "stream": obj.get("stream") if obj.get("stream") is not None else False, - } - ) - return _obj diff --git a/src/together/generated/models/audio_speech_request_model.py b/src/together/generated/models/audio_speech_request_model.py deleted file mode 
100644 index 4ab613b5..00000000 --- a/src/together/generated/models/audio_speech_request_model.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -from inspect import getfullargspec -import json -import pprint -import re # noqa: F401 -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Optional -from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict -from typing_extensions import Literal, Self -from pydantic import Field - -AUDIOSPEECHREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] - - -class AudioSpeechRequestModel(BaseModel): - """ - The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#audio-models) - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: str - anyof_schema_2_validator: Optional[StrictStr] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = AudioSpeechRequestModel.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: str - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in AudioSpeechRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into str - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into AudioSpeechRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/audio_speech_request_voice.py b/src/together/generated/models/audio_speech_request_voice.py deleted file mode 100644 index 81c1f689..00000000 --- a/src/together/generated/models/audio_speech_request_voice.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -from inspect import getfullargspec -import json -import pprint -import re # noqa: F401 -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Optional -from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict -from typing_extensions import Literal, Self -from pydantic import Field - -AUDIOSPEECHREQUESTVOICE_ANY_OF_SCHEMAS = ["str"] - - -class AudioSpeechRequestVoice(BaseModel): - """ - The voice to use for generating the audio. [View all supported voices here](https://docs.together.ai/docs/text-to-speech#voices-available). - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: str - anyof_schema_2_validator: Optional[StrictStr] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." 
- ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = AudioSpeechRequestVoice.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: str - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in AudioSpeechRequestVoice with anyOf schemas: str. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into str - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into AudioSpeechRequestVoice with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/audio_speech_stream_chunk.py b/src/together/generated/models/audio_speech_stream_chunk.py deleted file mode 100644 index 27627dd2..00000000 --- a/src/together/generated/models/audio_speech_stream_chunk.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class AudioSpeechStreamChunk(BaseModel): - """ - AudioSpeechStreamChunk - """ # noqa: E501 - - object: StrictStr - model: StrictStr - b64: StrictStr = Field(description="base64 encoded audio stream") - __properties: ClassVar[List[str]] = ["object", "model", "b64"] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["audio.tts.chunk"]): - raise ValueError("must be one of enum values ('audio.tts.chunk')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of AudioSpeechStreamChunk from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of AudioSpeechStreamChunk from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "model": obj.get("model"), - "b64": obj.get("b64"), - } - ) - return _obj diff --git a/src/together/generated/models/audio_speech_stream_event.py b/src/together/generated/models/audio_speech_stream_event.py deleted file mode 100644 index 9c11b923..00000000 --- a/src/together/generated/models/audio_speech_stream_event.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict -from typing import Any, ClassVar, Dict, List -from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk -from typing import Optional, Set -from typing_extensions import Self - - -class AudioSpeechStreamEvent(BaseModel): - """ - AudioSpeechStreamEvent - """ # noqa: E501 - - data: AudioSpeechStreamChunk - __properties: ClassVar[List[str]] = ["data"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of AudioSpeechStreamEvent from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of data - if self.data: - _dict["data"] = self.data.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of AudioSpeechStreamEvent from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "data": ( - AudioSpeechStreamChunk.from_dict(obj["data"]) - if obj.get("data") is not None - else None - ) - } - ) - return _obj diff --git a/src/together/generated/models/audio_speech_stream_response.py b/src/together/generated/models/audio_speech_stream_response.py deleted file mode 100644 index b573857d..00000000 --- a/src/together/generated/models/audio_speech_stream_response.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent -from together.generated.models.stream_sentinel import StreamSentinel -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -AUDIOSPEECHSTREAMRESPONSE_ONE_OF_SCHEMAS = ["AudioSpeechStreamEvent", "StreamSentinel"] - - -class AudioSpeechStreamResponse(BaseModel): - """ - AudioSpeechStreamResponse - """ - - # data type: AudioSpeechStreamEvent - oneof_schema_1_validator: Optional[AudioSpeechStreamEvent] = None - # data type: StreamSentinel - oneof_schema_2_validator: Optional[StreamSentinel] = None - actual_instance: Optional[Union[AudioSpeechStreamEvent, StreamSentinel]] = None - one_of_schemas: Set[str] = {"AudioSpeechStreamEvent", "StreamSentinel"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = AudioSpeechStreamResponse.model_construct() - error_messages = [] - match = 0 - # validate data type: AudioSpeechStreamEvent - if not isinstance(v, AudioSpeechStreamEvent): - error_messages.append( - f"Error! 
Input type `{type(v)}` is not `AudioSpeechStreamEvent`" - ) - else: - match += 1 - # validate data type: StreamSentinel - if not isinstance(v, StreamSentinel): - error_messages.append( - f"Error! Input type `{type(v)}` is not `StreamSentinel`" - ) - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into AudioSpeechStreamEvent - try: - instance.actual_instance = AudioSpeechStreamEvent.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into StreamSentinel - try: - instance.actual_instance = StreamSentinel.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict( - self, - ) -> Optional[Union[Dict[str, Any], AudioSpeechStreamEvent, StreamSentinel]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/autoscaling.py b/src/together/generated/models/autoscaling.py deleted file mode 100644 index fb79d4f5..00000000 --- a/src/together/generated/models/autoscaling.py +++ /dev/null @@ -1,93 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictInt -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class Autoscaling(BaseModel): - """ - Configuration for automatic scaling of replicas based on demand. 
- """ # noqa: E501 - - min_replicas: StrictInt = Field( - description="The minimum number of replicas to maintain, even when there is no load" - ) - max_replicas: StrictInt = Field( - description="The maximum number of replicas to scale up to under load" - ) - __properties: ClassVar[List[str]] = ["min_replicas", "max_replicas"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of Autoscaling from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of Autoscaling from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "min_replicas": obj.get("min_replicas"), - "max_replicas": obj.get("max_replicas"), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_assistant_message_param.py b/src/together/generated/models/chat_completion_assistant_message_param.py deleted file mode 100644 index dbb10cfe..00000000 --- a/src/together/generated/models/chat_completion_assistant_message_param.py +++ /dev/null @@ -1,130 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.chat_completion_message_function_call import ( - ChatCompletionMessageFunctionCall, -) -from together.generated.models.tool_choice import ToolChoice -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionAssistantMessageParam(BaseModel): - """ - ChatCompletionAssistantMessageParam - """ # noqa: E501 - - content: Optional[StrictStr] = None - role: StrictStr - name: Optional[StrictStr] = None - tool_calls: Optional[List[ToolChoice]] = None - function_call: Optional[ChatCompletionMessageFunctionCall] = None - __properties: ClassVar[List[str]] = [ - "content", - "role", - "name", - "tool_calls", - "function_call", - ] - - @field_validator("role") - def role_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["assistant"]): - raise ValueError("must be one of enum values ('assistant')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionAssistantMessageParam from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in tool_calls (list) - _items = [] - if self.tool_calls: - for _item_tool_calls in self.tool_calls: - if _item_tool_calls: - _items.append(_item_tool_calls.to_dict()) - _dict["tool_calls"] = _items - # override the default output from pydantic by calling `to_dict()` of function_call - if self.function_call: - _dict["function_call"] = self.function_call.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionAssistantMessageParam from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "content": obj.get("content"), - "role": obj.get("role"), - "name": obj.get("name"), - "tool_calls": ( - [ToolChoice.from_dict(_item) for _item in obj["tool_calls"]] - if obj.get("tool_calls") is not None - else None - ), - "function_call": ( - ChatCompletionMessageFunctionCall.from_dict(obj["function_call"]) - if obj.get("function_call") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_choice.py b/src/together/generated/models/chat_completion_choice.py deleted file mode 100644 index 3cd51127..00000000 --- a/src/together/generated/models/chat_completion_choice.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.chat_completion_choice_delta import ( - ChatCompletionChoiceDelta, -) -from together.generated.models.finish_reason import FinishReason -from together.generated.models.logprobs_part import LogprobsPart -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionChoice(BaseModel): - """ - ChatCompletionChoice - """ # noqa: E501 - - index: StrictInt - finish_reason: FinishReason - logprobs: Optional[LogprobsPart] = None - delta: ChatCompletionChoiceDelta - __properties: ClassVar[List[str]] = ["index", "finish_reason", "logprobs", "delta"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionChoice from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. 
Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of logprobs - if self.logprobs: - _dict["logprobs"] = self.logprobs.to_dict() - # override the default output from pydantic by calling `to_dict()` of delta - if self.delta: - _dict["delta"] = self.delta.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionChoice from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "index": obj.get("index"), - "finish_reason": obj.get("finish_reason"), - "logprobs": ( - LogprobsPart.from_dict(obj["logprobs"]) - if obj.get("logprobs") is not None - else None - ), - "delta": ( - ChatCompletionChoiceDelta.from_dict(obj["delta"]) - if obj.get("delta") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_choice_delta.py b/src/together/generated/models/chat_completion_choice_delta.py deleted file mode 100644 index be8bde6b..00000000 --- a/src/together/generated/models/chat_completion_choice_delta.py +++ /dev/null @@ -1,134 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.chat_completion_choice_delta_function_call import ( - ChatCompletionChoiceDeltaFunctionCall, -) -from together.generated.models.tool_choice import ToolChoice -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionChoiceDelta(BaseModel): - """ - ChatCompletionChoiceDelta - """ # noqa: E501 - - token_id: Optional[StrictInt] = None - role: StrictStr - content: Optional[StrictStr] = None - tool_calls: Optional[List[ToolChoice]] = None - function_call: Optional[ChatCompletionChoiceDeltaFunctionCall] = None - __properties: ClassVar[List[str]] = [ - "token_id", - "role", - "content", - "tool_calls", - "function_call", - ] - - @field_validator("role") - def role_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["system", "user", "assistant", "function", "tool"]): - raise ValueError( - "must be one of enum values ('system', 'user', 'assistant', 'function', 'tool')" - ) - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionChoiceDelta from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation 
of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in tool_calls (list) - _items = [] - if self.tool_calls: - for _item_tool_calls in self.tool_calls: - if _item_tool_calls: - _items.append(_item_tool_calls.to_dict()) - _dict["tool_calls"] = _items - # override the default output from pydantic by calling `to_dict()` of function_call - if self.function_call: - _dict["function_call"] = self.function_call.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionChoiceDelta from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "token_id": obj.get("token_id"), - "role": obj.get("role"), - "content": obj.get("content"), - "tool_calls": ( - [ToolChoice.from_dict(_item) for _item in obj["tool_calls"]] - if obj.get("tool_calls") is not None - else None - ), - "function_call": ( - ChatCompletionChoiceDeltaFunctionCall.from_dict( - obj["function_call"] - ) - if obj.get("function_call") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_choice_delta_function_call.py b/src/together/generated/models/chat_completion_choice_delta_function_call.py deleted file mode 100644 index 4d1c4079..00000000 --- a/src/together/generated/models/chat_completion_choice_delta_function_call.py +++ /dev/null @@ -1,86 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. 
Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionChoiceDeltaFunctionCall(BaseModel): - """ - ChatCompletionChoiceDeltaFunctionCall - """ # noqa: E501 - - arguments: StrictStr - name: StrictStr - __properties: ClassVar[List[str]] = ["arguments", "name"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionChoiceDeltaFunctionCall from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionChoiceDeltaFunctionCall from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"arguments": obj.get("arguments"), "name": obj.get("name")} - ) - return _obj diff --git a/src/together/generated/models/chat_completion_choices_data_inner.py b/src/together/generated/models/chat_completion_choices_data_inner.py deleted file mode 100644 index 042cd5f3..00000000 --- a/src/together/generated/models/chat_completion_choices_data_inner.py +++ /dev/null @@ -1,123 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.chat_completion_choices_data_inner_logprobs import ( - ChatCompletionChoicesDataInnerLogprobs, -) -from together.generated.models.chat_completion_message import ChatCompletionMessage -from together.generated.models.finish_reason import FinishReason -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionChoicesDataInner(BaseModel): - """ - ChatCompletionChoicesDataInner - """ # noqa: E501 - - text: Optional[StrictStr] = None - index: Optional[StrictInt] = None - seed: Optional[StrictInt] = None - finish_reason: Optional[FinishReason] = None - message: Optional[ChatCompletionMessage] = None - logprobs: Optional[ChatCompletionChoicesDataInnerLogprobs] = None - __properties: ClassVar[List[str]] = [ - "text", - "index", - "seed", - "finish_reason", - "message", - "logprobs", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionChoicesDataInner from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of message - if self.message: - _dict["message"] = self.message.to_dict() - # override the default output from pydantic by calling `to_dict()` of logprobs - if self.logprobs: - _dict["logprobs"] = self.logprobs.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionChoicesDataInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "text": obj.get("text"), - "index": obj.get("index"), - "seed": obj.get("seed"), - "finish_reason": obj.get("finish_reason"), - "message": ( - ChatCompletionMessage.from_dict(obj["message"]) - if obj.get("message") is not None - else None - ), - "logprobs": ( - ChatCompletionChoicesDataInnerLogprobs.from_dict(obj["logprobs"]) - if obj.get("logprobs") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_choices_data_inner_logprobs.py b/src/together/generated/models/chat_completion_choices_data_inner_logprobs.py deleted file mode 100644 index 608b6c10..00000000 --- a/src/together/generated/models/chat_completion_choices_data_inner_logprobs.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Optional, Union -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionChoicesDataInnerLogprobs(BaseModel): - """ - ChatCompletionChoicesDataInnerLogprobs - """ # noqa: E501 - - token_ids: Optional[List[Union[StrictFloat, StrictInt]]] = Field( - default=None, description="List of token IDs corresponding to the logprobs" - ) - tokens: Optional[List[StrictStr]] = Field( - default=None, description="List of token strings" - ) - token_logprobs: Optional[List[Union[StrictFloat, StrictInt]]] = Field( - default=None, description="List of token log probabilities" - ) - __properties: ClassVar[List[str]] = ["token_ids", "tokens", "token_logprobs"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionChoicesDataInnerLogprobs from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionChoicesDataInnerLogprobs from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "token_ids": obj.get("token_ids"), - "tokens": obj.get("tokens"), - "token_logprobs": obj.get("token_logprobs"), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_chunk.py b/src/together/generated/models/chat_completion_chunk.py deleted file mode 100644 index 5d1dd9de..00000000 --- a/src/together/generated/models/chat_completion_chunk.py +++ /dev/null @@ -1,139 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.chat_completion_chunk_choices_inner import ( - ChatCompletionChunkChoicesInner, -) -from together.generated.models.completion_chunk_usage import CompletionChunkUsage -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionChunk(BaseModel): - """ - ChatCompletionChunk - """ # noqa: E501 - - id: StrictStr - object: StrictStr - created: StrictInt - system_fingerprint: Optional[StrictStr] = None - model: StrictStr - choices: List[ChatCompletionChunkChoicesInner] - usage: Optional[CompletionChunkUsage] = None - __properties: ClassVar[List[str]] = [ - "id", - "object", - "created", - "system_fingerprint", - "model", - "choices", - "usage", - ] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["chat.completion.chunk"]): - raise ValueError("must be one of enum values ('chat.completion.chunk')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionChunk from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in choices (list) - _items = [] - if self.choices: - for _item_choices in self.choices: - if _item_choices: - _items.append(_item_choices.to_dict()) - _dict["choices"] = _items - # override the default output from pydantic by calling `to_dict()` of usage - if self.usage: - _dict["usage"] = self.usage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionChunk from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "id": obj.get("id"), - "object": obj.get("object"), - "created": obj.get("created"), - "system_fingerprint": obj.get("system_fingerprint"), - "model": obj.get("model"), - "choices": ( - [ - ChatCompletionChunkChoicesInner.from_dict(_item) - for _item in obj["choices"] - ] - if obj.get("choices") is not None - else None - ), - "usage": ( - CompletionChunkUsage.from_dict(obj["usage"]) - if obj.get("usage") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_chunk_choices_inner.py b/src/together/generated/models/chat_completion_chunk_choices_inner.py deleted file mode 100644 index fd5a9ef9..00000000 --- a/src/together/generated/models/chat_completion_chunk_choices_inner.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt -from typing import Any, ClassVar, Dict, List, Optional, Union -from together.generated.models.chat_completion_choice_delta import ( - ChatCompletionChoiceDelta, -) -from together.generated.models.finish_reason import FinishReason -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionChunkChoicesInner(BaseModel): - """ - ChatCompletionChunkChoicesInner - """ # noqa: E501 - - index: StrictInt - finish_reason: FinishReason - logprobs: Optional[Union[StrictFloat, StrictInt]] = None - seed: Optional[StrictInt] = None - delta: ChatCompletionChoiceDelta - __properties: ClassVar[List[str]] = [ - "index", - "finish_reason", - "logprobs", - "seed", - "delta", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionChunkChoicesInner from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of delta - if self.delta: - _dict["delta"] = self.delta.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionChunkChoicesInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "index": obj.get("index"), - "finish_reason": obj.get("finish_reason"), - "logprobs": obj.get("logprobs"), - "seed": obj.get("seed"), - "delta": ( - ChatCompletionChoiceDelta.from_dict(obj["delta"]) - if obj.get("delta") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_event.py b/src/together/generated/models/chat_completion_event.py deleted file mode 100644 index 49c84ba7..00000000 --- a/src/together/generated/models/chat_completion_event.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict -from typing import Any, ClassVar, Dict, List -from together.generated.models.chat_completion_chunk import ChatCompletionChunk -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionEvent(BaseModel): - """ - ChatCompletionEvent - """ # noqa: E501 - - data: ChatCompletionChunk - __properties: ClassVar[List[str]] = ["data"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionEvent from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of data - if self.data: - _dict["data"] = self.data.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionEvent from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "data": ( - ChatCompletionChunk.from_dict(obj["data"]) - if obj.get("data") is not None - else None - ) - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_function_message_param.py b/src/together/generated/models/chat_completion_function_message_param.py deleted file mode 100644 index b6679430..00000000 --- a/src/together/generated/models/chat_completion_function_message_param.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionFunctionMessageParam(BaseModel): - """ - ChatCompletionFunctionMessageParam - """ # noqa: E501 - - role: StrictStr - content: StrictStr - name: StrictStr - __properties: ClassVar[List[str]] = ["role", "content", "name"] - - @field_validator("role") - def role_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["function"]): - raise ValueError("must be one of enum values ('function')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionFunctionMessageParam from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionFunctionMessageParam from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "role": obj.get("role"), - "content": obj.get("content"), - "name": obj.get("name"), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_message.py b/src/together/generated/models/chat_completion_message.py deleted file mode 100644 index 08eeca7a..00000000 --- a/src/together/generated/models/chat_completion_message.py +++ /dev/null @@ -1,127 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.chat_completion_message_function_call import ( - ChatCompletionMessageFunctionCall, -) -from together.generated.models.tool_choice import ToolChoice -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionMessage(BaseModel): - """ - ChatCompletionMessage - """ # noqa: E501 - - content: StrictStr - role: StrictStr - tool_calls: Optional[List[ToolChoice]] = None - function_call: Optional[ChatCompletionMessageFunctionCall] = None - __properties: ClassVar[List[str]] = [ - "content", - "role", - "tool_calls", - "function_call", - ] - - @field_validator("role") - def role_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["assistant"]): - raise ValueError("must be one of enum values ('assistant')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionMessage from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in tool_calls (list) - _items = [] - if self.tool_calls: - for _item_tool_calls in self.tool_calls: - if _item_tool_calls: - _items.append(_item_tool_calls.to_dict()) - _dict["tool_calls"] = _items - # override the default output from pydantic by calling `to_dict()` of function_call - if self.function_call: - _dict["function_call"] = self.function_call.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionMessage from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "content": obj.get("content"), - "role": obj.get("role"), - "tool_calls": ( - [ToolChoice.from_dict(_item) for _item in obj["tool_calls"]] - if obj.get("tool_calls") is not None - else None - ), - "function_call": ( - ChatCompletionMessageFunctionCall.from_dict(obj["function_call"]) - if obj.get("function_call") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_message_function_call.py b/src/together/generated/models/chat_completion_message_function_call.py deleted file mode 100644 index 6deaeb4a..00000000 --- a/src/together/generated/models/chat_completion_message_function_call.py +++ /dev/null @@ -1,86 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionMessageFunctionCall(BaseModel): - """ - ChatCompletionMessageFunctionCall - """ # noqa: E501 - - arguments: StrictStr - name: StrictStr - __properties: ClassVar[List[str]] = ["arguments", "name"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionMessageFunctionCall from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionMessageFunctionCall from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"arguments": obj.get("arguments"), "name": obj.get("name")} - ) - return _obj diff --git a/src/together/generated/models/chat_completion_message_param.py b/src/together/generated/models/chat_completion_message_param.py deleted file mode 100644 index 1984218d..00000000 --- a/src/together/generated/models/chat_completion_message_param.py +++ /dev/null @@ -1,266 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from together.generated.models.chat_completion_assistant_message_param import ( - ChatCompletionAssistantMessageParam, -) -from together.generated.models.chat_completion_function_message_param import ( - ChatCompletionFunctionMessageParam, -) -from together.generated.models.chat_completion_system_message_param import ( - ChatCompletionSystemMessageParam, -) -from together.generated.models.chat_completion_tool_message_param import ( - ChatCompletionToolMessageParam, -) -from together.generated.models.chat_completion_user_message_param import ( - ChatCompletionUserMessageParam, -) -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -CHATCOMPLETIONMESSAGEPARAM_ONE_OF_SCHEMAS = [ - "ChatCompletionAssistantMessageParam", - "ChatCompletionFunctionMessageParam", - "ChatCompletionSystemMessageParam", - "ChatCompletionToolMessageParam", - "ChatCompletionUserMessageParam", -] - - -class ChatCompletionMessageParam(BaseModel): - """ - ChatCompletionMessageParam - """ - - # data type: ChatCompletionSystemMessageParam - oneof_schema_1_validator: Optional[ChatCompletionSystemMessageParam] = None - # data type: ChatCompletionUserMessageParam - oneof_schema_2_validator: Optional[ChatCompletionUserMessageParam] = None - # data type: ChatCompletionAssistantMessageParam - oneof_schema_3_validator: Optional[ChatCompletionAssistantMessageParam] = None - # data type: ChatCompletionToolMessageParam - oneof_schema_4_validator: Optional[ChatCompletionToolMessageParam] = None - # data type: ChatCompletionFunctionMessageParam - oneof_schema_5_validator: Optional[ChatCompletionFunctionMessageParam] = None - actual_instance: Optional[ - Union[ - 
ChatCompletionAssistantMessageParam, - ChatCompletionFunctionMessageParam, - ChatCompletionSystemMessageParam, - ChatCompletionToolMessageParam, - ChatCompletionUserMessageParam, - ] - ] = None - one_of_schemas: Set[str] = { - "ChatCompletionAssistantMessageParam", - "ChatCompletionFunctionMessageParam", - "ChatCompletionSystemMessageParam", - "ChatCompletionToolMessageParam", - "ChatCompletionUserMessageParam", - } - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = ChatCompletionMessageParam.model_construct() - error_messages = [] - match = 0 - # validate data type: ChatCompletionSystemMessageParam - if not isinstance(v, ChatCompletionSystemMessageParam): - error_messages.append( - f"Error! Input type `{type(v)}` is not `ChatCompletionSystemMessageParam`" - ) - else: - match += 1 - # validate data type: ChatCompletionUserMessageParam - if not isinstance(v, ChatCompletionUserMessageParam): - error_messages.append( - f"Error! Input type `{type(v)}` is not `ChatCompletionUserMessageParam`" - ) - else: - match += 1 - # validate data type: ChatCompletionAssistantMessageParam - if not isinstance(v, ChatCompletionAssistantMessageParam): - error_messages.append( - f"Error! Input type `{type(v)}` is not `ChatCompletionAssistantMessageParam`" - ) - else: - match += 1 - # validate data type: ChatCompletionToolMessageParam - if not isinstance(v, ChatCompletionToolMessageParam): - error_messages.append( - f"Error! 
Input type `{type(v)}` is not `ChatCompletionToolMessageParam`" - ) - else: - match += 1 - # validate data type: ChatCompletionFunctionMessageParam - if not isinstance(v, ChatCompletionFunctionMessageParam): - error_messages.append( - f"Error! Input type `{type(v)}` is not `ChatCompletionFunctionMessageParam`" - ) - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into ChatCompletionSystemMessageParam - try: - instance.actual_instance = ChatCompletionSystemMessageParam.from_json( - json_str - ) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into ChatCompletionUserMessageParam - try: - instance.actual_instance = ChatCompletionUserMessageParam.from_json( - json_str - ) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into ChatCompletionAssistantMessageParam - try: - instance.actual_instance = ChatCompletionAssistantMessageParam.from_json( - json_str - ) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into ChatCompletionToolMessageParam - try: - instance.actual_instance = ChatCompletionToolMessageParam.from_json( - json_str - ) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into ChatCompletionFunctionMessageParam - try: - instance.actual_instance = ChatCompletionFunctionMessageParam.from_json( - json_str - ) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. 
Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict( - self, - ) -> Optional[ - Union[ - Dict[str, Any], - ChatCompletionAssistantMessageParam, - ChatCompletionFunctionMessageParam, - ChatCompletionSystemMessageParam, - ChatCompletionToolMessageParam, - ChatCompletionUserMessageParam, - ] - ]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_request.py b/src/together/generated/models/chat_completion_request.py deleted file mode 100644 index c779b6c5..00000000 --- a/src/together/generated/models/chat_completion_request.py +++ /dev/null @@ -1,304 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictFloat, - StrictInt, - StrictStr, - field_validator, -) -from typing import Any, ClassVar, Dict, List, Optional, Union -from typing_extensions import Annotated -from together.generated.models.chat_completion_request_function_call import ( - ChatCompletionRequestFunctionCall, -) -from together.generated.models.chat_completion_request_messages_inner import ( - ChatCompletionRequestMessagesInner, -) -from together.generated.models.chat_completion_request_model import ( - ChatCompletionRequestModel, -) -from together.generated.models.chat_completion_request_response_format import ( - ChatCompletionRequestResponseFormat, -) -from together.generated.models.chat_completion_request_tool_choice import ( - ChatCompletionRequestToolChoice, -) -from together.generated.models.tools_part import ToolsPart -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionRequest(BaseModel): - """ - ChatCompletionRequest - """ # noqa: E501 - - messages: List[ChatCompletionRequestMessagesInner] = Field( - description="A list of messages comprising the conversation so far." - ) - model: ChatCompletionRequestModel - max_tokens: Optional[StrictInt] = Field( - default=None, description="The maximum number of tokens to generate." - ) - stop: Optional[List[StrictStr]] = Field( - default=None, - description='A list of string sequences that will truncate (stop) inference text output. For example, "
" will stop generation as soon as the model generates the given token.', - ) - temperature: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output.", - ) - top_p: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text.", - ) - top_k: Optional[StrictInt] = Field( - default=None, - description="An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.", - ) - context_length_exceeded_behavior: Optional[StrictStr] = Field( - default="error", - description="Defined the behavior of the API when max_tokens exceed the maximum context length of the model. When set to 'error', API will return 400 with appropriate error message. When set to 'truncate', override the max_tokens with maximum context length of the model.", - ) - repetition_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. 
Higher values decrease repetition.", - ) - stream: Optional[StrictBool] = Field( - default=None, - description="If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results.", - ) - logprobs: Optional[Annotated[int, Field(le=1, strict=True, ge=0)]] = Field( - default=None, - description="Determines the number of most likely tokens to return at each token position log probabilities to return.", - ) - echo: Optional[StrictBool] = Field( - default=None, - description="If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs.", - ) - n: Optional[Annotated[int, Field(le=128, strict=True, ge=1)]] = Field( - default=None, - description="The number of completions to generate for each prompt.", - ) - min_p: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A number between 0 and 1 that can be used as an alternative to top_p and top-k.", - ) - presence_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics.", - ) - frequency_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned.", - ) - logit_bias: Optional[Dict[str, Union[StrictFloat, StrictInt]]] = Field( - default=None, - description="Adjusts the likelihood of specific tokens appearing in the generated output.", - ) - seed: Optional[StrictInt] = Field( - default=None, description="Seed value for reproducibility." 
- ) - function_call: Optional[ChatCompletionRequestFunctionCall] = None - response_format: Optional[ChatCompletionRequestResponseFormat] = None - tools: Optional[List[ToolsPart]] = Field( - default=None, - description="A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.", - ) - tool_choice: Optional[ChatCompletionRequestToolChoice] = None - safety_model: Optional[StrictStr] = Field( - default=None, - description="The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models).", - ) - __properties: ClassVar[List[str]] = [ - "messages", - "model", - "max_tokens", - "stop", - "temperature", - "top_p", - "top_k", - "context_length_exceeded_behavior", - "repetition_penalty", - "stream", - "logprobs", - "echo", - "n", - "min_p", - "presence_penalty", - "frequency_penalty", - "logit_bias", - "seed", - "function_call", - "response_format", - "tools", - "tool_choice", - "safety_model", - ] - - @field_validator("context_length_exceeded_behavior") - def context_length_exceeded_behavior_validate_enum(cls, value): - """Validates the enum""" - if value is None: - return value - - if value not in set(["truncate", "error"]): - raise ValueError("must be one of enum values ('truncate', 'error')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> 
Optional[Self]: - """Create an instance of ChatCompletionRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in messages (list) - _items = [] - if self.messages: - for _item_messages in self.messages: - if _item_messages: - _items.append(_item_messages.to_dict()) - _dict["messages"] = _items - # override the default output from pydantic by calling `to_dict()` of model - if self.model: - _dict["model"] = self.model.to_dict() - # override the default output from pydantic by calling `to_dict()` of function_call - if self.function_call: - _dict["function_call"] = self.function_call.to_dict() - # override the default output from pydantic by calling `to_dict()` of response_format - if self.response_format: - _dict["response_format"] = self.response_format.to_dict() - # override the default output from pydantic by calling `to_dict()` of each item in tools (list) - _items = [] - if self.tools: - for _item_tools in self.tools: - if _item_tools: - _items.append(_item_tools.to_dict()) - _dict["tools"] = _items - # override the default output from pydantic by calling `to_dict()` of tool_choice - if self.tool_choice: - _dict["tool_choice"] = self.tool_choice.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionRequest from a dict""" - if obj is None: - return None - - if not 
isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "messages": ( - [ - ChatCompletionRequestMessagesInner.from_dict(_item) - for _item in obj["messages"] - ] - if obj.get("messages") is not None - else None - ), - "model": ( - ChatCompletionRequestModel.from_dict(obj["model"]) - if obj.get("model") is not None - else None - ), - "max_tokens": obj.get("max_tokens"), - "stop": obj.get("stop"), - "temperature": obj.get("temperature"), - "top_p": obj.get("top_p"), - "top_k": obj.get("top_k"), - "context_length_exceeded_behavior": ( - obj.get("context_length_exceeded_behavior") - if obj.get("context_length_exceeded_behavior") is not None - else "error" - ), - "repetition_penalty": obj.get("repetition_penalty"), - "stream": obj.get("stream"), - "logprobs": obj.get("logprobs"), - "echo": obj.get("echo"), - "n": obj.get("n"), - "min_p": obj.get("min_p"), - "presence_penalty": obj.get("presence_penalty"), - "frequency_penalty": obj.get("frequency_penalty"), - "logit_bias": obj.get("logit_bias"), - "seed": obj.get("seed"), - "function_call": ( - ChatCompletionRequestFunctionCall.from_dict(obj["function_call"]) - if obj.get("function_call") is not None - else None - ), - "response_format": ( - ChatCompletionRequestResponseFormat.from_dict( - obj["response_format"] - ) - if obj.get("response_format") is not None - else None - ), - "tools": ( - [ToolsPart.from_dict(_item) for _item in obj["tools"]] - if obj.get("tools") is not None - else None - ), - "tool_choice": ( - ChatCompletionRequestToolChoice.from_dict(obj["tool_choice"]) - if obj.get("tool_choice") is not None - else None - ), - "safety_model": obj.get("safety_model"), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_request_function_call.py b/src/together/generated/models/chat_completion_request_function_call.py deleted file mode 100644 index cb8159fa..00000000 --- a/src/together/generated/models/chat_completion_request_function_call.py +++ 
/dev/null @@ -1,177 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from together.generated.models.chat_completion_request_function_call_one_of import ( - ChatCompletionRequestFunctionCallOneOf, -) -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -CHATCOMPLETIONREQUESTFUNCTIONCALL_ONE_OF_SCHEMAS = [ - "ChatCompletionRequestFunctionCallOneOf", - "str", -] - - -class ChatCompletionRequestFunctionCall(BaseModel): - """ - ChatCompletionRequestFunctionCall - """ - - # data type: str - oneof_schema_1_validator: Optional[StrictStr] = None - # data type: ChatCompletionRequestFunctionCallOneOf - oneof_schema_2_validator: Optional[ChatCompletionRequestFunctionCallOneOf] = None - actual_instance: Optional[Union[ChatCompletionRequestFunctionCallOneOf, str]] = None - one_of_schemas: Set[str] = {"ChatCompletionRequestFunctionCallOneOf", "str"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." 
- ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = ChatCompletionRequestFunctionCall.model_construct() - error_messages = [] - match = 0 - # validate data type: str - try: - instance.oneof_schema_1_validator = v - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: ChatCompletionRequestFunctionCallOneOf - if not isinstance(v, ChatCompletionRequestFunctionCallOneOf): - error_messages.append( - f"Error! Input type `{type(v)}` is not `ChatCompletionRequestFunctionCallOneOf`" - ) - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into str - try: - # validation - instance.oneof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.oneof_schema_1_validator - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into ChatCompletionRequestFunctionCallOneOf - try: - instance.actual_instance = ChatCompletionRequestFunctionCallOneOf.from_json( - json_str - ) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict( - self, - ) -> Optional[Union[Dict[str, Any], ChatCompletionRequestFunctionCallOneOf, str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_request_function_call_one_of.py b/src/together/generated/models/chat_completion_request_function_call_one_of.py deleted file mode 100644 index 1dc7940e..00000000 --- a/src/together/generated/models/chat_completion_request_function_call_one_of.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionRequestFunctionCallOneOf(BaseModel): - """ - ChatCompletionRequestFunctionCallOneOf - """ # noqa: E501 - - name: StrictStr - __properties: ClassVar[List[str]] = ["name"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionRequestFunctionCallOneOf from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionRequestFunctionCallOneOf from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"name": obj.get("name")}) - return _obj diff --git a/src/together/generated/models/chat_completion_request_messages_inner.py b/src/together/generated/models/chat_completion_request_messages_inner.py deleted file mode 100644 index 2c9802b9..00000000 --- a/src/together/generated/models/chat_completion_request_messages_inner.py +++ /dev/null @@ -1,99 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionRequestMessagesInner(BaseModel): - """ - ChatCompletionRequestMessagesInner - """ # noqa: E501 - - role: StrictStr = Field( - description="The role of the messages author. Choice between: system, user, or assistant." - ) - content: StrictStr = Field( - description="The content of the message, which can either be a simple string or a structured format." 
- ) - __properties: ClassVar[List[str]] = ["role", "content"] - - @field_validator("role") - def role_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["system", "user", "assistant", "tool"]): - raise ValueError( - "must be one of enum values ('system', 'user', 'assistant', 'tool')" - ) - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionRequestMessagesInner from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionRequestMessagesInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"role": obj.get("role"), "content": obj.get("content")} - ) - return _obj diff --git a/src/together/generated/models/chat_completion_request_model.py b/src/together/generated/models/chat_completion_request_model.py deleted file mode 100644 index f5cd61a8..00000000 --- a/src/together/generated/models/chat_completion_request_model.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -from inspect import getfullargspec -import json -import pprint -import re # noqa: F401 -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Optional -from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict -from typing_extensions import Literal, Self -from pydantic import Field - -CHATCOMPLETIONREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] - - -class ChatCompletionRequestModel(BaseModel): - """ - The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: str - anyof_schema_2_validator: Optional[StrictStr] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = ChatCompletionRequestModel.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: str - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in ChatCompletionRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into str - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into ChatCompletionRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_request_response_format.py b/src/together/generated/models/chat_completion_request_response_format.py deleted file mode 100644 index 69ab762f..00000000 --- a/src/together/generated/models/chat_completion_request_response_format.py +++ /dev/null @@ -1,90 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionRequestResponseFormat(BaseModel): - """ - An object specifying the format that the model must output. 
- """ # noqa: E501 - - type: Optional[StrictStr] = Field( - default=None, description="The type of the response format." - ) - var_schema: Optional[Dict[str, StrictStr]] = Field( - default=None, description="The schema of the response format.", alias="schema" - ) - __properties: ClassVar[List[str]] = ["type", "schema"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionRequestResponseFormat from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionRequestResponseFormat from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"type": obj.get("type"), "schema": obj.get("schema")} - ) - return _obj diff --git a/src/together/generated/models/chat_completion_request_tool_choice.py b/src/together/generated/models/chat_completion_request_tool_choice.py deleted file mode 100644 index d78632af..00000000 --- a/src/together/generated/models/chat_completion_request_tool_choice.py +++ /dev/null @@ -1,166 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from together.generated.models.tool_choice import ToolChoice -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -CHATCOMPLETIONREQUESTTOOLCHOICE_ONE_OF_SCHEMAS = ["ToolChoice", "str"] - - -class ChatCompletionRequestToolChoice(BaseModel): - """ - Controls which (if any) function is called by the model. By default uses `auto`, which lets the model pick between generating a message or calling a function. 
- """ - - # data type: str - oneof_schema_1_validator: Optional[StrictStr] = None - # data type: ToolChoice - oneof_schema_2_validator: Optional[ToolChoice] = None - actual_instance: Optional[Union[ToolChoice, str]] = None - one_of_schemas: Set[str] = {"ToolChoice", "str"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = ChatCompletionRequestToolChoice.model_construct() - error_messages = [] - match = 0 - # validate data type: str - try: - instance.oneof_schema_1_validator = v - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: ToolChoice - if not isinstance(v, ToolChoice): - error_messages.append(f"Error! Input type `{type(v)}` is not `ToolChoice`") - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into str - try: - # validation - instance.oneof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.oneof_schema_1_validator - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into ToolChoice - try: - instance.actual_instance = ToolChoice.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], ToolChoice, str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_response.py b/src/together/generated/models/chat_completion_response.py deleted file mode 100644 index 1a023acb..00000000 --- a/src/together/generated/models/chat_completion_response.py +++ /dev/null @@ -1,136 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.chat_completion_choices_data_inner import ( - ChatCompletionChoicesDataInner, -) -from together.generated.models.usage_data import UsageData -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionResponse(BaseModel): - """ - ChatCompletionResponse - """ # noqa: E501 - - id: StrictStr - choices: List[ChatCompletionChoicesDataInner] - usage: Optional[UsageData] = None - created: StrictInt - model: StrictStr - object: StrictStr - __properties: ClassVar[List[str]] = [ - "id", - "choices", - "usage", - "created", - "model", - "object", - ] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["chat.completion"]): - raise ValueError("must be one of enum values ('chat.completion')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in choices (list) - _items = [] - if self.choices: - for _item_choices in self.choices: - if _item_choices: - _items.append(_item_choices.to_dict()) - _dict["choices"] = _items - # override the default output from pydantic by calling `to_dict()` of usage - if self.usage: - _dict["usage"] = self.usage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "id": obj.get("id"), - "choices": ( - [ - ChatCompletionChoicesDataInner.from_dict(_item) - for _item in obj["choices"] - ] - if obj.get("choices") is not None - else None - ), - "usage": ( - UsageData.from_dict(obj["usage"]) - if obj.get("usage") is not None - else None - ), - "created": obj.get("created"), - "model": obj.get("model"), - "object": obj.get("object"), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_stream.py b/src/together/generated/models/chat_completion_stream.py deleted file mode 100644 index 7f3c93de..00000000 --- a/src/together/generated/models/chat_completion_stream.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from together.generated.models.chat_completion_event import ChatCompletionEvent -from together.generated.models.stream_sentinel import StreamSentinel -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -CHATCOMPLETIONSTREAM_ONE_OF_SCHEMAS = ["ChatCompletionEvent", "StreamSentinel"] - - -class ChatCompletionStream(BaseModel): - """ - ChatCompletionStream - """ - - # data type: ChatCompletionEvent - oneof_schema_1_validator: Optional[ChatCompletionEvent] = None - # data type: StreamSentinel - oneof_schema_2_validator: Optional[StreamSentinel] = None - actual_instance: Optional[Union[ChatCompletionEvent, StreamSentinel]] = None - one_of_schemas: Set[str] = {"ChatCompletionEvent", "StreamSentinel"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = ChatCompletionStream.model_construct() - error_messages = [] - match = 0 - # validate data type: ChatCompletionEvent - if not isinstance(v, ChatCompletionEvent): - error_messages.append( - f"Error! 
Input type `{type(v)}` is not `ChatCompletionEvent`" - ) - else: - match += 1 - # validate data type: StreamSentinel - if not isinstance(v, StreamSentinel): - error_messages.append( - f"Error! Input type `{type(v)}` is not `StreamSentinel`" - ) - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into ChatCompletionEvent - try: - instance.actual_instance = ChatCompletionEvent.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into StreamSentinel - try: - instance.actual_instance = StreamSentinel.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict( - self, - ) -> Optional[Union[Dict[str, Any], ChatCompletionEvent, StreamSentinel]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_system_message_param.py b/src/together/generated/models/chat_completion_system_message_param.py deleted file mode 100644 index 02c80038..00000000 --- a/src/together/generated/models/chat_completion_system_message_param.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionSystemMessageParam(BaseModel): - """ - ChatCompletionSystemMessageParam - """ # noqa: E501 - - content: StrictStr - role: StrictStr - name: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = ["content", "role", "name"] - - @field_validator("role") - def role_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["system"]): - raise ValueError("must be one of enum values ('system')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionSystemMessageParam from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionSystemMessageParam from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "content": obj.get("content"), - "role": obj.get("role"), - "name": obj.get("name"), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_token.py b/src/together/generated/models/chat_completion_token.py deleted file mode 100644 index 60862fec..00000000 --- a/src/together/generated/models/chat_completion_token.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - StrictBool, - StrictFloat, - StrictInt, - StrictStr, -) -from typing import Any, ClassVar, Dict, List, Union -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionToken(BaseModel): - """ - ChatCompletionToken - """ # noqa: E501 - - id: StrictInt - text: StrictStr - logprob: Union[StrictFloat, StrictInt] - special: StrictBool - __properties: ClassVar[List[str]] = ["id", "text", "logprob", "special"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionToken from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionToken from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "id": obj.get("id"), - "text": obj.get("text"), - "logprob": obj.get("logprob"), - "special": obj.get("special"), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_tool.py b/src/together/generated/models/chat_completion_tool.py deleted file mode 100644 index 05d66c4d..00000000 --- a/src/together/generated/models/chat_completion_tool.py +++ /dev/null @@ -1,106 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from together.generated.models.chat_completion_tool_function import ( - ChatCompletionToolFunction, -) -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionTool(BaseModel): - """ - ChatCompletionTool - """ # noqa: E501 - - type: StrictStr - function: ChatCompletionToolFunction - __properties: ClassVar[List[str]] = ["type", "function"] - - @field_validator("type") - def type_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["function"]): - raise ValueError("must be one of enum values ('function')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionTool from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of function - if self.function: - _dict["function"] = self.function.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionTool from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "type": obj.get("type"), - "function": ( - ChatCompletionToolFunction.from_dict(obj["function"]) - if obj.get("function") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_tool_function.py b/src/together/generated/models/chat_completion_tool_function.py deleted file mode 100644 index 3ebaa385..00000000 --- a/src/together/generated/models/chat_completion_tool_function.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionToolFunction(BaseModel): - """ - ChatCompletionToolFunction - """ # noqa: E501 - - description: Optional[StrictStr] = None - name: StrictStr - parameters: Optional[Dict[str, Any]] = None - __properties: ClassVar[List[str]] = ["description", "name", "parameters"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionToolFunction from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionToolFunction from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "description": obj.get("description"), - "name": obj.get("name"), - "parameters": obj.get("parameters"), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_tool_message_param.py b/src/together/generated/models/chat_completion_tool_message_param.py deleted file mode 100644 index 115b718b..00000000 --- a/src/together/generated/models/chat_completion_tool_message_param.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionToolMessageParam(BaseModel): - """ - ChatCompletionToolMessageParam - """ # noqa: E501 - - role: StrictStr - content: StrictStr - tool_call_id: StrictStr - __properties: ClassVar[List[str]] = ["role", "content", "tool_call_id"] - - @field_validator("role") - def role_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["tool"]): - raise ValueError("must be one of enum values ('tool')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionToolMessageParam from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionToolMessageParam from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "role": obj.get("role"), - "content": obj.get("content"), - "tool_call_id": obj.get("tool_call_id"), - } - ) - return _obj diff --git a/src/together/generated/models/chat_completion_user_message_param.py b/src/together/generated/models/chat_completion_user_message_param.py deleted file mode 100644 index e02a998c..00000000 --- a/src/together/generated/models/chat_completion_user_message_param.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class ChatCompletionUserMessageParam(BaseModel): - """ - ChatCompletionUserMessageParam - """ # noqa: E501 - - content: StrictStr - role: StrictStr - name: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = ["content", "role", "name"] - - @field_validator("role") - def role_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["user"]): - raise ValueError("must be one of enum values ('user')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ChatCompletionUserMessageParam from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ChatCompletionUserMessageParam from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "content": obj.get("content"), - "role": obj.get("role"), - "name": obj.get("name"), - } - ) - return _obj diff --git a/src/together/generated/models/completion_choice.py b/src/together/generated/models/completion_choice.py deleted file mode 100644 index 0b1ac0a1..00000000 --- a/src/together/generated/models/completion_choice.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class CompletionChoice(BaseModel): - """ - CompletionChoice - """ # noqa: E501 - - text: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = ["text"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionChoice from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionChoice from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"text": obj.get("text")}) - return _obj diff --git a/src/together/generated/models/completion_choices_data_inner.py b/src/together/generated/models/completion_choices_data_inner.py deleted file mode 100644 index b8cb4e7e..00000000 --- a/src/together/generated/models/completion_choices_data_inner.py +++ /dev/null @@ -1,101 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.finish_reason import FinishReason -from typing import Optional, Set -from typing_extensions import Self - - -class CompletionChoicesDataInner(BaseModel): - """ - CompletionChoicesDataInner - """ # noqa: E501 - - text: Optional[StrictStr] = None - seed: Optional[StrictInt] = None - finish_reason: Optional[FinishReason] = None - logprobs: Optional[Dict[str, Any]] = None - __properties: ClassVar[List[str]] = ["text", "seed", "finish_reason", "logprobs"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionChoicesDataInner from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of logprobs - if self.logprobs: - _dict["logprobs"] = self.logprobs.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionChoicesDataInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "text": obj.get("text"), - "seed": obj.get("seed"), - "finish_reason": obj.get("finish_reason"), - "logprobs": ( - LogprobsPart.from_dict(obj["logprobs"]) - if obj.get("logprobs") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/completion_chunk.py b/src/together/generated/models/completion_chunk.py deleted file mode 100644 index 7a0b16bd..00000000 --- a/src/together/generated/models/completion_chunk.py +++ /dev/null @@ -1,139 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.completion_choice import CompletionChoice -from together.generated.models.completion_chunk_usage import CompletionChunkUsage -from together.generated.models.completion_token import CompletionToken -from typing import Optional, Set -from typing_extensions import Self - - -class CompletionChunk(BaseModel): - """ - CompletionChunk - """ # noqa: E501 - - id: StrictStr - token: CompletionToken - choices: List[CompletionChoice] - usage: CompletionChunkUsage - seed: Optional[StrictInt] = None - finish_reason: Any - __properties: ClassVar[List[str]] = [ - "id", - "token", - "choices", - "usage", - "seed", - "finish_reason", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionChunk from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of token - if self.token: - _dict["token"] = self.token.to_dict() - # override the default output from pydantic by calling `to_dict()` of each item in choices (list) - _items = [] - if self.choices: - for _item_choices in self.choices: - if _item_choices: - _items.append(_item_choices.to_dict()) - _dict["choices"] = _items - # override the default output from pydantic by calling `to_dict()` of usage - if self.usage: - _dict["usage"] = self.usage.to_dict() - # override the default output from pydantic by calling `to_dict()` of finish_reason - if self.finish_reason: - _dict["finish_reason"] = self.finish_reason.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionChunk from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "id": obj.get("id"), - "token": ( - CompletionToken.from_dict(obj["token"]) - if obj.get("token") is not None - else None - ), - "choices": ( - [CompletionChoice.from_dict(_item) for _item in obj["choices"]] - if obj.get("choices") is not None - else None - ), - "usage": ( - CompletionChunkUsage.from_dict(obj["usage"]) - if obj.get("usage") is not None - else None - ), - "seed": obj.get("seed"), - "finish_reason": ( - FinishReason.from_dict(obj["finish_reason"]) - if obj.get("finish_reason") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/completion_chunk_usage.py b/src/together/generated/models/completion_chunk_usage.py deleted file mode 100644 index df7a48f6..00000000 --- a/src/together/generated/models/completion_chunk_usage.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - 
The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class CompletionChunkUsage(BaseModel): - """ - CompletionChunkUsage - """ # noqa: E501 - - prompt_tokens: StrictInt - completion_tokens: StrictInt - total_tokens: StrictInt - __properties: ClassVar[List[str]] = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionChunkUsage from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionChunkUsage from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "prompt_tokens": obj.get("prompt_tokens"), - "completion_tokens": obj.get("completion_tokens"), - "total_tokens": obj.get("total_tokens"), - } - ) - return _obj diff --git a/src/together/generated/models/completion_event.py b/src/together/generated/models/completion_event.py deleted file mode 100644 index 5bd84c47..00000000 --- a/src/together/generated/models/completion_event.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict -from typing import Any, ClassVar, Dict, List -from together.generated.models.completion_chunk import CompletionChunk -from typing import Optional, Set -from typing_extensions import Self - - -class CompletionEvent(BaseModel): - """ - CompletionEvent - """ # noqa: E501 - - data: CompletionChunk - __properties: ClassVar[List[str]] = ["data"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionEvent from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of data - if self.data: - _dict["data"] = self.data.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionEvent from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "data": ( - CompletionChunk.from_dict(obj["data"]) - if obj.get("data") is not None - else None - ) - } - ) - return _obj diff --git a/src/together/generated/models/completion_request.py b/src/together/generated/models/completion_request.py deleted file mode 100644 index 065bef8b..00000000 --- a/src/together/generated/models/completion_request.py +++ /dev/null @@ -1,212 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictFloat, - StrictInt, - StrictStr, -) -from typing import Any, ClassVar, Dict, List, Optional, Union -from typing_extensions import Annotated -from together.generated.models.completion_request_model import CompletionRequestModel -from together.generated.models.completion_request_safety_model import ( - CompletionRequestSafetyModel, -) -from typing import Optional, Set -from typing_extensions import Self - - -class CompletionRequest(BaseModel): - """ - CompletionRequest - """ # noqa: E501 - - prompt: StrictStr = Field( - description="A string providing context for the model to complete." - ) - model: CompletionRequestModel - max_tokens: Optional[StrictInt] = Field( - default=None, description="The maximum number of tokens to generate." - ) - stop: Optional[List[StrictStr]] = Field( - default=None, - description='A list of string sequences that will truncate (stop) inference text output. For example, "
" will stop generation as soon as the model generates the given token.', - ) - temperature: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output.", - ) - top_p: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text.", - ) - top_k: Optional[StrictInt] = Field( - default=None, - description="An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.", - ) - repetition_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition.", - ) - stream: Optional[StrictBool] = Field( - default=None, - description="If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. 
If false, return a single JSON object containing the results.", - ) - logprobs: Optional[Annotated[int, Field(le=1, strict=True, ge=0)]] = Field( - default=None, - description="Determines the number of most likely tokens to return at each token position log probabilities to return.", - ) - echo: Optional[StrictBool] = Field( - default=None, - description="If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs.", - ) - n: Optional[Annotated[int, Field(le=128, strict=True, ge=1)]] = Field( - default=None, - description="The number of completions to generate for each prompt.", - ) - safety_model: Optional[CompletionRequestSafetyModel] = None - min_p: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A number between 0 and 1 that can be used as an alternative to top-p and top-k.", - ) - presence_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics.", - ) - frequency_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( - default=None, - description="A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned.", - ) - logit_bias: Optional[Dict[str, Union[StrictFloat, StrictInt]]] = Field( - default=None, - description="Adjusts the likelihood of specific tokens appearing in the generated output.", - ) - seed: Optional[StrictInt] = Field( - default=None, description="Seed value for reproducibility." 
- ) - __properties: ClassVar[List[str]] = [ - "prompt", - "model", - "max_tokens", - "stop", - "temperature", - "top_p", - "top_k", - "repetition_penalty", - "stream", - "logprobs", - "echo", - "n", - "safety_model", - "min_p", - "presence_penalty", - "frequency_penalty", - "logit_bias", - "seed", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of model - if self.model: - _dict["model"] = self.model.to_dict() - # override the default output from pydantic by calling `to_dict()` of safety_model - if self.safety_model: - _dict["safety_model"] = self.safety_model.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "prompt": obj.get("prompt"), - "model": ( - CompletionRequestModel.from_dict(obj["model"]) - if obj.get("model") is not None - else None - ), - "max_tokens": obj.get("max_tokens"), - "stop": obj.get("stop"), - "temperature": obj.get("temperature"), - "top_p": obj.get("top_p"), - "top_k": obj.get("top_k"), - "repetition_penalty": obj.get("repetition_penalty"), - "stream": obj.get("stream"), - "logprobs": obj.get("logprobs"), - "echo": obj.get("echo"), - "n": obj.get("n"), - "safety_model": ( - CompletionRequestSafetyModel.from_dict(obj["safety_model"]) - if obj.get("safety_model") is not None - else None - ), - "min_p": obj.get("min_p"), - "presence_penalty": obj.get("presence_penalty"), - "frequency_penalty": obj.get("frequency_penalty"), - "logit_bias": obj.get("logit_bias"), - "seed": obj.get("seed"), - } - ) - return _obj diff --git a/src/together/generated/models/completion_request_model.py b/src/together/generated/models/completion_request_model.py deleted file mode 100644 index 38d25705..00000000 --- a/src/together/generated/models/completion_request_model.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -from inspect import getfullargspec -import json -import pprint -import re # noqa: F401 -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Optional -from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict -from typing_extensions import Literal, Self -from pydantic import Field - -COMPLETIONREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] - - -class CompletionRequestModel(BaseModel): - """ - The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: str - anyof_schema_2_validator: Optional[StrictStr] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = CompletionRequestModel.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: str - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in CompletionRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into str - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into CompletionRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/completion_request_safety_model.py b/src/together/generated/models/completion_request_safety_model.py deleted file mode 100644 index 981ee1e4..00000000 --- a/src/together/generated/models/completion_request_safety_model.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -from inspect import getfullargspec -import json -import pprint -import re # noqa: F401 -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Optional -from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict -from typing_extensions import Literal, Self -from pydantic import Field - -COMPLETIONREQUESTSAFETYMODEL_ANY_OF_SCHEMAS = ["str"] - - -class CompletionRequestSafetyModel(BaseModel): - """ - The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: str - anyof_schema_2_validator: Optional[StrictStr] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." 
- ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = CompletionRequestSafetyModel.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: str - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in CompletionRequestSafetyModel with anyOf schemas: str. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into str - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into CompletionRequestSafetyModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/completion_response.py b/src/together/generated/models/completion_response.py deleted file mode 100644 index ac858b6d..00000000 --- a/src/together/generated/models/completion_response.py +++ /dev/null @@ -1,151 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.completion_choices_data_inner import ( - CompletionChoicesDataInner, -) -from together.generated.models.prompt_part_inner import PromptPartInner -from together.generated.models.usage_data import UsageData -from typing import Optional, Set -from typing_extensions import Self - - -class CompletionResponse(BaseModel): - """ - CompletionResponse - """ # noqa: E501 - - id: StrictStr - choices: List[CompletionChoicesDataInner] - prompt: Optional[List[PromptPartInner]] = None - usage: UsageData - created: StrictInt - model: StrictStr - object: StrictStr - __properties: ClassVar[List[str]] = [ - "id", - "choices", - "prompt", - "usage", - "created", - "model", - "object", - ] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["text_completion"]): - raise ValueError("must be one of enum values ('text_completion')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in choices (list) - _items = [] - if self.choices: - for _item_choices in self.choices: - if _item_choices: - _items.append(_item_choices.to_dict()) - _dict["choices"] = _items - # override the default output from pydantic by calling `to_dict()` of each item in prompt (list) - _items = [] - if self.prompt: - for _item_prompt in self.prompt: - if _item_prompt: - _items.append(_item_prompt.to_dict()) - _dict["prompt"] = _items - # override the default output from pydantic by calling `to_dict()` of usage - if self.usage: - _dict["usage"] = self.usage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "id": obj.get("id"), - "choices": ( - [ - CompletionChoicesDataInner.from_dict(_item) - for _item in obj["choices"] - ] - if obj.get("choices") is not None - else None - ), - "prompt": ( - [PromptPartInner.from_dict(_item) for _item in obj["prompt"]] - if obj.get("prompt") is not None - else None - ), - "usage": ( - UsageData.from_dict(obj["usage"]) - if obj.get("usage") is not None - else None - ), - "created": obj.get("created"), - "model": obj.get("model"), - "object": obj.get("object"), - } - ) - return _obj diff --git a/src/together/generated/models/completion_stream.py b/src/together/generated/models/completion_stream.py deleted file mode 100644 
index 12e58c46..00000000 --- a/src/together/generated/models/completion_stream.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from together.generated.models.completion_event import CompletionEvent -from together.generated.models.stream_sentinel import StreamSentinel -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -COMPLETIONSTREAM_ONE_OF_SCHEMAS = ["CompletionEvent", "StreamSentinel"] - - -class CompletionStream(BaseModel): - """ - CompletionStream - """ - - # data type: CompletionEvent - oneof_schema_1_validator: Optional[CompletionEvent] = None - # data type: StreamSentinel - oneof_schema_2_validator: Optional[StreamSentinel] = None - actual_instance: Optional[Union[CompletionEvent, StreamSentinel]] = None - one_of_schemas: Set[str] = {"CompletionEvent", "StreamSentinel"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." 
- ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = CompletionStream.model_construct() - error_messages = [] - match = 0 - # validate data type: CompletionEvent - if not isinstance(v, CompletionEvent): - error_messages.append( - f"Error! Input type `{type(v)}` is not `CompletionEvent`" - ) - else: - match += 1 - # validate data type: StreamSentinel - if not isinstance(v, StreamSentinel): - error_messages.append( - f"Error! Input type `{type(v)}` is not `StreamSentinel`" - ) - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into CompletionEvent - try: - instance.actual_instance = CompletionEvent.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into StreamSentinel - try: - instance.actual_instance = StreamSentinel.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict( - self, - ) -> Optional[Union[Dict[str, Any], CompletionEvent, StreamSentinel]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/completion_token.py b/src/together/generated/models/completion_token.py deleted file mode 100644 index de9a208d..00000000 --- a/src/together/generated/models/completion_token.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - StrictBool, - StrictFloat, - StrictInt, - StrictStr, -) -from typing import Any, ClassVar, Dict, List, Union -from typing import Optional, Set -from typing_extensions import Self - - -class CompletionToken(BaseModel): - """ - CompletionToken - """ # noqa: E501 - - id: StrictInt - text: StrictStr - logprob: Union[StrictFloat, StrictInt] - special: StrictBool - __properties: ClassVar[List[str]] = ["id", "text", "logprob", "special"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CompletionToken from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CompletionToken from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "id": obj.get("id"), - "text": obj.get("text"), - "logprob": obj.get("logprob"), - "special": obj.get("special"), - } - ) - return _obj diff --git a/src/together/generated/models/create_endpoint_request.py b/src/together/generated/models/create_endpoint_request.py deleted file mode 100644 index 70fc97b4..00000000 --- a/src/together/generated/models/create_endpoint_request.py +++ /dev/null @@ -1,156 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictStr, - field_validator, -) -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.autoscaling import Autoscaling -from typing import Optional, Set -from typing_extensions import Self - - -class CreateEndpointRequest(BaseModel): - """ - CreateEndpointRequest - """ # noqa: E501 - - display_name: Optional[StrictStr] = Field( - default=None, description="A human-readable name for the endpoint" - ) - model: StrictStr = Field(description="The model to deploy on this endpoint") - hardware: StrictStr = Field( - description="The hardware configuration to use for this endpoint" - ) - autoscaling: Autoscaling = Field( - description="Configuration for automatic scaling of the endpoint" - ) - disable_prompt_cache: Optional[StrictBool] = Field( - default=False, - description="Whether to disable the prompt cache for this endpoint", - ) - disable_speculative_decoding: Optional[StrictBool] = Field( - default=False, - description="Whether to disable speculative decoding for this endpoint", - ) - state: Optional[StrictStr] = Field( - default="STARTED", description="The desired state of the endpoint" - ) - __properties: ClassVar[List[str]] = [ - "display_name", - "model", - "hardware", - "autoscaling", - "disable_prompt_cache", - "disable_speculative_decoding", - "state", - ] - - @field_validator("state") - def state_validate_enum(cls, value): - """Validates the enum""" - if value is None: - return value - - if value not in set(["STARTED", "STOPPED"]): - raise ValueError("must be one of enum values ('STARTED', 'STOPPED')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return 
pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CreateEndpointRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of autoscaling - if self.autoscaling: - _dict["autoscaling"] = self.autoscaling.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CreateEndpointRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "display_name": obj.get("display_name"), - "model": obj.get("model"), - "hardware": obj.get("hardware"), - "autoscaling": ( - Autoscaling.from_dict(obj["autoscaling"]) - if obj.get("autoscaling") is not None - else None - ), - "disable_prompt_cache": ( - obj.get("disable_prompt_cache") - if obj.get("disable_prompt_cache") is not None - else False - ), - "disable_speculative_decoding": ( - obj.get("disable_speculative_decoding") - if obj.get("disable_speculative_decoding") is not None - else False - ), - "state": ( - obj.get("state") if obj.get("state") is not None else 
"STARTED" - ), - } - ) - return _obj diff --git a/src/together/generated/models/dedicated_endpoint.py b/src/together/generated/models/dedicated_endpoint.py deleted file mode 100644 index 87ef87c5..00000000 --- a/src/together/generated/models/dedicated_endpoint.py +++ /dev/null @@ -1,157 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from datetime import datetime -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from together.generated.models.autoscaling import Autoscaling -from typing import Optional, Set -from typing_extensions import Self - - -class DedicatedEndpoint(BaseModel): - """ - Details about a dedicated endpoint deployment - """ # noqa: E501 - - object: StrictStr = Field(description="The type of object") - id: StrictStr = Field(description="Unique identifier for the endpoint") - name: StrictStr = Field(description="System name for the endpoint") - display_name: StrictStr = Field(description="Human-readable name for the endpoint") - model: StrictStr = Field(description="The model deployed on this endpoint") - hardware: StrictStr = Field( - description="The hardware configuration used for this endpoint" - ) - type: StrictStr = Field(description="The type of endpoint") - owner: StrictStr = Field(description="The owner of this endpoint") - state: StrictStr = Field(description="Current state of the endpoint") - autoscaling: Autoscaling = Field( - description="Configuration for automatic scaling of the endpoint" - ) - created_at: datetime = Field(description="Timestamp when the endpoint was created") - __properties: ClassVar[List[str]] = [ - 
"object", - "id", - "name", - "display_name", - "model", - "hardware", - "type", - "owner", - "state", - "autoscaling", - "created_at", - ] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["endpoint"]): - raise ValueError("must be one of enum values ('endpoint')") - return value - - @field_validator("type") - def type_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["dedicated"]): - raise ValueError("must be one of enum values ('dedicated')") - return value - - @field_validator("state") - def state_validate_enum(cls, value): - """Validates the enum""" - if value not in set( - ["PENDING", "STARTING", "STARTED", "STOPPING", "STOPPED", "ERROR"] - ): - raise ValueError( - "must be one of enum values ('PENDING', 'STARTING', 'STARTED', 'STOPPING', 'STOPPED', 'ERROR')" - ) - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of DedicatedEndpoint from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of autoscaling - if self.autoscaling: - _dict["autoscaling"] = self.autoscaling.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of DedicatedEndpoint from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "id": obj.get("id"), - "name": obj.get("name"), - "display_name": obj.get("display_name"), - "model": obj.get("model"), - "hardware": obj.get("hardware"), - "type": obj.get("type"), - "owner": obj.get("owner"), - "state": obj.get("state"), - "autoscaling": ( - Autoscaling.from_dict(obj["autoscaling"]) - if obj.get("autoscaling") is not None - else None - ), - "created_at": obj.get("created_at"), - } - ) - return _obj diff --git a/src/together/generated/models/embeddings_request.py b/src/together/generated/models/embeddings_request.py deleted file mode 100644 index bad5473c..00000000 --- a/src/together/generated/models/embeddings_request.py +++ /dev/null @@ -1,105 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict -from typing import Any, ClassVar, Dict, List -from together.generated.models.embeddings_request_input import EmbeddingsRequestInput -from together.generated.models.embeddings_request_model import EmbeddingsRequestModel -from typing import Optional, Set -from typing_extensions import Self - - -class EmbeddingsRequest(BaseModel): - """ - EmbeddingsRequest - """ # noqa: E501 - - model: EmbeddingsRequestModel - input: EmbeddingsRequestInput - __properties: ClassVar[List[str]] = ["model", "input"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of EmbeddingsRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of model - if self.model: - _dict["model"] = self.model.to_dict() - # override the default output from pydantic by calling `to_dict()` of input - if self.input: - _dict["input"] = self.input.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of EmbeddingsRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "model": ( - EmbeddingsRequestModel.from_dict(obj["model"]) - if obj.get("model") is not None - else None - ), - "input": ( - EmbeddingsRequestInput.from_dict(obj["input"]) - if obj.get("input") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/embeddings_request_input.py b/src/together/generated/models/embeddings_request_input.py deleted file mode 100644 index 3a4ec919..00000000 --- a/src/together/generated/models/embeddings_request_input.py +++ /dev/null @@ -1,171 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -EMBEDDINGSREQUESTINPUT_ONE_OF_SCHEMAS = ["List[str]", "str"] - - -class EmbeddingsRequestInput(BaseModel): - """ - EmbeddingsRequestInput - """ - - # data type: str - oneof_schema_1_validator: Optional[StrictStr] = Field( - default=None, description="A string providing the text for the model to embed." - ) - # data type: List[str] - oneof_schema_2_validator: Optional[List[StrictStr]] = None - actual_instance: Optional[Union[List[str], str]] = None - one_of_schemas: Set[str] = {"List[str]", "str"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." 
- ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = EmbeddingsRequestInput.model_construct() - error_messages = [] - match = 0 - # validate data type: str - try: - instance.oneof_schema_1_validator = v - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: List[str] - try: - instance.oneof_schema_2_validator = v - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into str - try: - # validation - instance.oneof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.oneof_schema_1_validator - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into List[str] - try: - # validation - instance.oneof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.oneof_schema_2_validator - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 
match - raise ValueError( - "Multiple matches found when deserializing the JSON string into EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], List[str], str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/embeddings_request_model.py b/src/together/generated/models/embeddings_request_model.py deleted file mode 100644 index 5a40eb92..00000000 --- a/src/together/generated/models/embeddings_request_model.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -from inspect import getfullargspec -import json -import pprint -import re # noqa: F401 -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Optional -from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict -from typing_extensions import Literal, Self -from pydantic import Field - -EMBEDDINGSREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] - - -class EmbeddingsRequestModel(BaseModel): - """ - The name of the embedding model to use.

[See all of Together AI's embedding models](https://docs.together.ai/docs/serverless-models#embedding-models) - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: str - anyof_schema_2_validator: Optional[StrictStr] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = EmbeddingsRequestModel.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: str - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in EmbeddingsRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into str - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into EmbeddingsRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/embeddings_response.py b/src/together/generated/models/embeddings_response.py deleted file mode 100644 index cdf15928..00000000 --- a/src/together/generated/models/embeddings_response.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from together.generated.models.embeddings_response_data_inner import ( - EmbeddingsResponseDataInner, -) -from typing import Optional, Set -from typing_extensions import Self - - -class EmbeddingsResponse(BaseModel): - """ - EmbeddingsResponse - """ # noqa: E501 - - object: StrictStr - model: StrictStr - data: List[EmbeddingsResponseDataInner] - __properties: ClassVar[List[str]] = ["object", "model", "data"] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["list"]): - raise ValueError("must be one of enum values ('list')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of EmbeddingsResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in data (list) - _items = [] - if self.data: - for _item_data in self.data: - if _item_data: - _items.append(_item_data.to_dict()) - _dict["data"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of EmbeddingsResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "model": obj.get("model"), - "data": ( - [ - EmbeddingsResponseDataInner.from_dict(_item) - for _item in obj["data"] - ] - if obj.get("data") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/embeddings_response_data_inner.py b/src/together/generated/models/embeddings_response_data_inner.py deleted file mode 100644 index 68e816ac..00000000 --- a/src/together/generated/models/embeddings_response_data_inner.py +++ /dev/null @@ -1,105 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - StrictFloat, - StrictInt, - StrictStr, - field_validator, -) -from typing import Any, ClassVar, Dict, List, Union -from typing import Optional, Set -from typing_extensions import Self - - -class EmbeddingsResponseDataInner(BaseModel): - """ - EmbeddingsResponseDataInner - """ # noqa: E501 - - object: StrictStr - embedding: List[Union[StrictFloat, StrictInt]] - index: StrictInt - __properties: ClassVar[List[str]] = ["object", "embedding", "index"] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["embedding"]): - raise ValueError("must be one of enum values ('embedding')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of EmbeddingsResponseDataInner from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of EmbeddingsResponseDataInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "embedding": obj.get("embedding"), - "index": obj.get("index"), - } - ) - return _obj diff --git a/src/together/generated/models/endpoint_pricing.py b/src/together/generated/models/endpoint_pricing.py deleted file mode 100644 index 847be535..00000000 --- a/src/together/generated/models/endpoint_pricing.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt -from typing import Any, ClassVar, Dict, List, Union -from typing import Optional, Set -from typing_extensions import Self - - -class EndpointPricing(BaseModel): - """ - Pricing details for using an endpoint - """ # noqa: E501 - - cents_per_minute: Union[StrictFloat, StrictInt] = Field( - description="Cost per minute of endpoint uptime in cents" - ) - __properties: ClassVar[List[str]] = ["cents_per_minute"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of EndpointPricing from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of EndpointPricing from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"cents_per_minute": obj.get("cents_per_minute")}) - return _obj diff --git a/src/together/generated/models/error_data.py b/src/together/generated/models/error_data.py deleted file mode 100644 index 0dd9d3ec..00000000 --- a/src/together/generated/models/error_data.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict -from typing import Any, ClassVar, Dict, List -from together.generated.models.error_data_error import ErrorDataError -from typing import Optional, Set -from typing_extensions import Self - - -class ErrorData(BaseModel): - """ - ErrorData - """ # noqa: E501 - - error: ErrorDataError - __properties: ClassVar[List[str]] = ["error"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def 
from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ErrorData from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of error - if self.error: - _dict["error"] = self.error.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ErrorData from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "error": ( - ErrorDataError.from_dict(obj["error"]) - if obj.get("error") is not None - else None - ) - } - ) - return _obj diff --git a/src/together/generated/models/error_data_error.py b/src/together/generated/models/error_data_error.py deleted file mode 100644 index f43533ea..00000000 --- a/src/together/generated/models/error_data_error.py +++ /dev/null @@ -1,93 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class ErrorDataError(BaseModel): - """ - ErrorDataError - """ # noqa: E501 - - message: StrictStr - type: StrictStr - param: Optional[StrictStr] = None - code: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = ["message", "type", "param", "code"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ErrorDataError from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ErrorDataError from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "message": obj.get("message"), - "type": obj.get("type"), - "param": obj.get("param"), - "code": obj.get("code"), - } - ) - return _obj diff --git a/src/together/generated/models/file_delete_response.py b/src/together/generated/models/file_delete_response.py deleted file mode 100644 index 07ad867e..00000000 --- a/src/together/generated/models/file_delete_response.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictBool, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class FileDeleteResponse(BaseModel): - """ - FileDeleteResponse - """ # noqa: E501 - - id: Optional[StrictStr] = None - deleted: Optional[StrictBool] = None - __properties: ClassVar[List[str]] = ["id", "deleted"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FileDeleteResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FileDeleteResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"id": obj.get("id"), "deleted": obj.get("deleted")}) - return _obj diff --git a/src/together/generated/models/file_list.py b/src/together/generated/models/file_list.py deleted file mode 100644 index ca596491..00000000 --- a/src/together/generated/models/file_list.py +++ /dev/null @@ -1,99 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict -from typing import Any, ClassVar, Dict, List -from together.generated.models.file_response import FileResponse -from typing import Optional, Set -from typing_extensions import Self - - -class FileList(BaseModel): - """ - FileList - """ # noqa: E501 - - data: List[FileResponse] - __properties: ClassVar[List[str]] = ["data"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FileList from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in data (list) - _items = [] - if self.data: - for _item_data in self.data: - if _item_data: - _items.append(_item_data.to_dict()) - _dict["data"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FileList from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "data": ( - [FileResponse.from_dict(_item) for _item in obj["data"]] - if obj.get("data") is not None - else None - ) - } - ) - return _obj diff --git a/src/together/generated/models/file_object.py b/src/together/generated/models/file_object.py deleted file mode 100644 index 55a8b422..00000000 --- a/src/together/generated/models/file_object.py +++ /dev/null @@ -1,93 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class FileObject(BaseModel): - """ - FileObject - """ # noqa: E501 - - object: Optional[StrictStr] = None - id: Optional[StrictStr] = None - filename: Optional[StrictStr] = None - size: Optional[StrictInt] = None - __properties: ClassVar[List[str]] = ["object", "id", "filename", "size"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FileObject from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FileObject from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "id": obj.get("id"), - "filename": obj.get("filename"), - "size": obj.get("size"), - } - ) - return _obj diff --git a/src/together/generated/models/file_response.py b/src/together/generated/models/file_response.py deleted file mode 100644 index 7fe105e4..00000000 --- a/src/together/generated/models/file_response.py +++ /dev/null @@ -1,135 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictInt, - StrictStr, - field_validator, -) -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class FileResponse(BaseModel): - """ - FileResponse - """ # noqa: E501 - - id: StrictStr - object: StrictStr - created_at: StrictInt - filename: StrictStr - bytes: StrictInt - purpose: StrictStr - processed: StrictBool = Field(alias="Processed") - file_type: StrictStr = Field(alias="FileType") - line_count: StrictInt = Field(alias="LineCount") - __properties: ClassVar[List[str]] = [ - "id", - "object", - "created_at", - "filename", - "bytes", - "purpose", - "Processed", - "FileType", - "LineCount", - ] - - @field_validator("purpose") - def purpose_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["fine-tune"]): - raise ValueError("must be one of enum values ('fine-tune')") - return value - - @field_validator("file_type") - def file_type_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["jsonl", "parquet"]): - raise ValueError("must be one of enum values ('jsonl', 'parquet')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FileResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) 
-> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FileResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "id": obj.get("id"), - "object": obj.get("object"), - "created_at": obj.get("created_at"), - "filename": obj.get("filename"), - "bytes": obj.get("bytes"), - "purpose": obj.get("purpose"), - "Processed": obj.get("Processed"), - "FileType": obj.get("FileType"), - "LineCount": obj.get("LineCount"), - } - ) - return _obj diff --git a/src/together/generated/models/fine_tune_event.py b/src/together/generated/models/fine_tune_event.py deleted file mode 100644 index 638272ab..00000000 --- a/src/together/generated/models/fine_tune_event.py +++ /dev/null @@ -1,137 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.finetune_event_levels import FinetuneEventLevels -from together.generated.models.finetune_event_type import FinetuneEventType -from typing import Optional, Set -from typing_extensions import Self - - -class FineTuneEvent(BaseModel): - """ - FineTuneEvent - """ # noqa: E501 - - object: StrictStr - created_at: StrictStr - level: Optional[FinetuneEventLevels] = None - message: StrictStr - type: FinetuneEventType - param_count: StrictInt - token_count: StrictInt - total_steps: StrictInt - wandb_url: StrictStr - step: StrictInt - checkpoint_path: StrictStr - model_path: StrictStr - training_offset: StrictInt - hash: StrictStr - __properties: ClassVar[List[str]] = [ - "object", - "created_at", - "level", - "message", - "type", - "param_count", - "token_count", - "total_steps", - "wandb_url", - "step", - "checkpoint_path", - "model_path", - "training_offset", - "hash", - ] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["fine-tune-event"]): - raise ValueError("must be one of enum values ('fine-tune-event')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FineTuneEvent from a JSON 
string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FineTuneEvent from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "created_at": obj.get("created_at"), - "level": obj.get("level"), - "message": obj.get("message"), - "type": obj.get("type"), - "param_count": obj.get("param_count"), - "token_count": obj.get("token_count"), - "total_steps": obj.get("total_steps"), - "wandb_url": obj.get("wandb_url"), - "step": obj.get("step"), - "checkpoint_path": obj.get("checkpoint_path"), - "model_path": obj.get("model_path"), - "training_offset": obj.get("training_offset"), - "hash": obj.get("hash"), - } - ) - return _obj diff --git a/src/together/generated/models/fine_tunes_post_request.py b/src/together/generated/models/fine_tunes_post_request.py deleted file mode 100644 index 215ca080..00000000 --- a/src/together/generated/models/fine_tunes_post_request.py +++ /dev/null @@ -1,233 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Optional, Union -from together.generated.models.fine_tunes_post_request_train_on_inputs import ( - FineTunesPostRequestTrainOnInputs, -) -from together.generated.models.fine_tunes_post_request_training_type import ( - FineTunesPostRequestTrainingType, -) -from typing import Optional, Set -from typing_extensions import Self - - -class FineTunesPostRequest(BaseModel): - """ - FineTunesPostRequest - """ # noqa: E501 - - training_file: StrictStr = Field( - description="File-ID of a training file uploaded to the Together API" - ) - validation_file: Optional[StrictStr] = Field( - default=None, - description="File-ID of a validation file uploaded to the Together API", - ) - model: StrictStr = Field( - description="Name of the base model to run fine-tune job on" - ) - n_epochs: Optional[StrictInt] = Field( - default=1, description="Number of epochs for fine-tuning" - ) - n_checkpoints: Optional[StrictInt] = Field( - default=1, description="Number of checkpoints to save during fine-tuning" - ) - n_evals: Optional[StrictInt] = Field( - default=0, - description="Number of evaluations to be run on a given validation set during training", - ) - batch_size: Optional[StrictInt] = Field( - default=32, description="Batch size for fine-tuning" - ) - learning_rate: Optional[Union[StrictFloat, StrictInt]] = Field( - default=0.000010, description="Learning rate multiplier to use for training" - ) - lr_scheduler: Optional[Dict[str, Any]] = None - warmup_ratio: Optional[Union[StrictFloat, StrictInt]] = Field( - default=0.0, - description="The percent of steps at the start of training to linearly increase the learning rate.", - ) - max_grad_norm: Optional[Union[StrictFloat, StrictInt]] = Field( - default=1.0, - description="Max gradient norm to be 
used for gradient clipping. Set to 0 to disable.", - ) - weight_decay: Optional[Union[StrictFloat, StrictInt]] = Field( - default=0.0, description="Weight decay" - ) - suffix: Optional[StrictStr] = Field( - default=None, - description="Suffix that will be added to your fine-tuned model name", - ) - wandb_api_key: Optional[StrictStr] = Field( - default=None, description="API key for Weights & Biases integration" - ) - wandb_base_url: Optional[StrictStr] = Field( - default=None, - description="The base URL of a dedicated Weights & Biases instance.", - ) - wandb_project_name: Optional[StrictStr] = Field( - default=None, - description="The Weights & Biases project for your run. If not specified, will use `together` as the project name.", - ) - wandb_name: Optional[StrictStr] = Field( - default=None, description="The Weights & Biases name for your run." - ) - train_on_inputs: Optional[FineTunesPostRequestTrainOnInputs] = False - training_type: Optional[FineTunesPostRequestTrainingType] = None - __properties: ClassVar[List[str]] = [ - "training_file", - "validation_file", - "model", - "n_epochs", - "n_checkpoints", - "n_evals", - "batch_size", - "learning_rate", - "lr_scheduler", - "warmup_ratio", - "max_grad_norm", - "weight_decay", - "suffix", - "wandb_api_key", - "wandb_base_url", - "wandb_project_name", - "wandb_name", - "train_on_inputs", - "training_type", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FineTunesPostRequest from a JSON 
string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of lr_scheduler - if self.lr_scheduler: - _dict["lr_scheduler"] = self.lr_scheduler.to_dict() - # override the default output from pydantic by calling `to_dict()` of train_on_inputs - if self.train_on_inputs: - _dict["train_on_inputs"] = self.train_on_inputs.to_dict() - # override the default output from pydantic by calling `to_dict()` of training_type - if self.training_type: - _dict["training_type"] = self.training_type.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FineTunesPostRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "training_file": obj.get("training_file"), - "validation_file": obj.get("validation_file"), - "model": obj.get("model"), - "n_epochs": ( - obj.get("n_epochs") if obj.get("n_epochs") is not None else 1 - ), - "n_checkpoints": ( - obj.get("n_checkpoints") - if obj.get("n_checkpoints") is not None - else 1 - ), - "n_evals": obj.get("n_evals") if obj.get("n_evals") is not None else 0, - "batch_size": ( - obj.get("batch_size") if obj.get("batch_size") is not None else 32 - ), - "learning_rate": ( - obj.get("learning_rate") - if obj.get("learning_rate") is not None - else 0.000010 - ), - "lr_scheduler": ( - 
LRScheduler.from_dict(obj["lr_scheduler"]) - if obj.get("lr_scheduler") is not None - else None - ), - "warmup_ratio": ( - obj.get("warmup_ratio") - if obj.get("warmup_ratio") is not None - else 0.0 - ), - "max_grad_norm": ( - obj.get("max_grad_norm") - if obj.get("max_grad_norm") is not None - else 1.0 - ), - "weight_decay": ( - obj.get("weight_decay") - if obj.get("weight_decay") is not None - else 0.0 - ), - "suffix": obj.get("suffix"), - "wandb_api_key": obj.get("wandb_api_key"), - "wandb_base_url": obj.get("wandb_base_url"), - "wandb_project_name": obj.get("wandb_project_name"), - "wandb_name": obj.get("wandb_name"), - "train_on_inputs": ( - FineTunesPostRequestTrainOnInputs.from_dict(obj["train_on_inputs"]) - if obj.get("train_on_inputs") is not None - else None - ), - "training_type": ( - FineTunesPostRequestTrainingType.from_dict(obj["training_type"]) - if obj.get("training_type") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/fine_tunes_post_request_train_on_inputs.py b/src/together/generated/models/fine_tunes_post_request_train_on_inputs.py deleted file mode 100644 index 4c5e7c3c..00000000 --- a/src/together/generated/models/fine_tunes_post_request_train_on_inputs.py +++ /dev/null @@ -1,170 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -FINETUNESPOSTREQUESTTRAINONINPUTS_ONE_OF_SCHEMAS = ["bool", "str"] - - -class FineTunesPostRequestTrainOnInputs(BaseModel): - """ - Whether to mask the user messages in conversational data or prompts in instruction data. - """ - - # data type: bool - oneof_schema_1_validator: Optional[StrictBool] = None - # data type: str - oneof_schema_2_validator: Optional[StrictStr] = None - actual_instance: Optional[Union[bool, str]] = None - one_of_schemas: Set[str] = {"bool", "str"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." 
- ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = FineTunesPostRequestTrainOnInputs.model_construct() - error_messages = [] - match = 0 - # validate data type: bool - try: - instance.oneof_schema_1_validator = v - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: str - try: - instance.oneof_schema_2_validator = v - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into bool - try: - # validation - instance.oneof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.oneof_schema_1_validator - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into str - try: - # validation - instance.oneof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.oneof_schema_2_validator - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], bool, str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/fine_tunes_post_request_training_type.py b/src/together/generated/models/fine_tunes_post_request_training_type.py deleted file mode 100644 index 8d4b6906..00000000 --- a/src/together/generated/models/fine_tunes_post_request_training_type.py +++ /dev/null @@ -1,172 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from together.generated.models.full_training_type import FullTrainingType -from together.generated.models.lo_ra_training_type import LoRATrainingType -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -FINETUNESPOSTREQUESTTRAININGTYPE_ONE_OF_SCHEMAS = [ - "FullTrainingType", - "LoRATrainingType", -] - - -class FineTunesPostRequestTrainingType(BaseModel): - """ - FineTunesPostRequestTrainingType - """ - - # data type: FullTrainingType - oneof_schema_1_validator: Optional[FullTrainingType] = None - # data type: LoRATrainingType - oneof_schema_2_validator: Optional[LoRATrainingType] = None - actual_instance: Optional[Union[FullTrainingType, LoRATrainingType]] = None - one_of_schemas: Set[str] = {"FullTrainingType", "LoRATrainingType"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = FineTunesPostRequestTrainingType.model_construct() - error_messages = [] - match = 0 - # validate data type: FullTrainingType - if not isinstance(v, FullTrainingType): - error_messages.append( - f"Error! 
Input type `{type(v)}` is not `FullTrainingType`" - ) - else: - match += 1 - # validate data type: LoRATrainingType - if not isinstance(v, LoRATrainingType): - error_messages.append( - f"Error! Input type `{type(v)}` is not `LoRATrainingType`" - ) - else: - match += 1 - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into FullTrainingType - try: - instance.actual_instance = FullTrainingType.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into LoRATrainingType - try: - instance.actual_instance = LoRATrainingType.from_json(json_str) - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict( - self, - ) -> Optional[Union[Dict[str, Any], FullTrainingType, LoRATrainingType]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/finetune_download_result.py b/src/together/generated/models/finetune_download_result.py deleted file mode 100644 index ddebbc70..00000000 --- a/src/together/generated/models/finetune_download_result.py +++ /dev/null @@ -1,116 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class FinetuneDownloadResult(BaseModel): - """ - FinetuneDownloadResult - """ # noqa: E501 - - object: Optional[StrictStr] = None - id: Optional[StrictStr] = None - checkpoint_step: Optional[StrictInt] = None - filename: Optional[StrictStr] = None - size: Optional[StrictInt] = None - __properties: ClassVar[List[str]] = [ - "object", - "id", - "checkpoint_step", - "filename", - "size", - ] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value is None: - return value - - if value not in set(["local"]): - raise ValueError("must be one of enum values ('local')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FinetuneDownloadResult from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # set to None if object (nullable) is None - # and model_fields_set contains the field - if self.object is None and "object" in self.model_fields_set: - _dict["object"] = None - - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FinetuneDownloadResult from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "id": obj.get("id"), - "checkpoint_step": obj.get("checkpoint_step"), - "filename": obj.get("filename"), - "size": obj.get("size"), - } - ) - return _obj diff --git a/src/together/generated/models/finetune_event_levels.py b/src/together/generated/models/finetune_event_levels.py deleted file mode 100644 index ee263088..00000000 --- a/src/together/generated/models/finetune_event_levels.py +++ /dev/null @@ -1,39 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import json -from enum import Enum -from typing_extensions import Self - - -class FinetuneEventLevels(str, Enum): - """ - FinetuneEventLevels - """ - - """ - allowed enum values - """ - INFO = "info" - WARNING = "warning" - ERROR = "error" - LEGACY_INFO = "legacy_info" - LEGACY_IWARNING = "legacy_iwarning" - LEGACY_IERROR = "legacy_ierror" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of FinetuneEventLevels from a JSON string""" - return cls(json.loads(json_str)) diff --git a/src/together/generated/models/finetune_event_type.py b/src/together/generated/models/finetune_event_type.py deleted file mode 100644 index 8f65293b..00000000 --- a/src/together/generated/models/finetune_event_type.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import json -from enum import Enum -from typing_extensions import Self - - -class FinetuneEventType(str, Enum): - """ - FinetuneEventType - """ - - """ - allowed enum values - """ - JOB_PENDING = "job_pending" - JOB_START = "job_start" - JOB_STOPPED = "job_stopped" - MODEL_DOWNLOADING = "model_downloading" - MODEL_DOWNLOAD_COMPLETE = "model_download_complete" - TRAINING_DATA_DOWNLOADING = "training_data_downloading" - TRAINING_DATA_DOWNLOAD_COMPLETE = "training_data_download_complete" - VALIDATION_DATA_DOWNLOADING = "validation_data_downloading" - VALIDATION_DATA_DOWNLOAD_COMPLETE = "validation_data_download_complete" - WANDB_INIT = "wandb_init" - TRAINING_START = "training_start" - CHECKPOINT_SAVE = "checkpoint_save" - BILLING_LIMIT = "billing_limit" - EPOCH_COMPLETE = "epoch_complete" - TRAINING_COMPLETE = "training_complete" - MODEL_COMPRESSING = "model_compressing" - MODEL_COMPRESSION_COMPLETE = "model_compression_complete" - MODEL_UPLOADING = "model_uploading" - MODEL_UPLOAD_COMPLETE = "model_upload_complete" - JOB_COMPLETE = "job_complete" - JOB_ERROR = "job_error" - CANCEL_REQUESTED = "cancel_requested" - JOB_RESTARTED = "job_restarted" - REFUND = "refund" - WARNING = "warning" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of FinetuneEventType from a JSON string""" - return cls(json.loads(json_str)) diff --git a/src/together/generated/models/finetune_job_status.py b/src/together/generated/models/finetune_job_status.py deleted file mode 100644 index 97b5dd1c..00000000 --- a/src/together/generated/models/finetune_job_status.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import json -from enum import Enum -from typing_extensions import Self - - -class FinetuneJobStatus(str, Enum): - """ - FinetuneJobStatus - """ - - """ - allowed enum values - """ - PENDING = "pending" - QUEUED = "queued" - RUNNING = "running" - COMPRESSING = "compressing" - UPLOADING = "uploading" - CANCEL_REQUESTED = "cancel_requested" - CANCELLED = "cancelled" - ERROR = "error" - COMPLETED = "completed" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of FinetuneJobStatus from a JSON string""" - return cls(json.loads(json_str)) diff --git a/src/together/generated/models/finetune_list.py b/src/together/generated/models/finetune_list.py deleted file mode 100644 index 2a20ba4c..00000000 --- a/src/together/generated/models/finetune_list.py +++ /dev/null @@ -1,99 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict -from typing import Any, ClassVar, Dict, List -from together.generated.models.finetune_response import FinetuneResponse -from typing import Optional, Set -from typing_extensions import Self - - -class FinetuneList(BaseModel): - """ - FinetuneList - """ # noqa: E501 - - data: List[FinetuneResponse] - __properties: ClassVar[List[str]] = ["data"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FinetuneList from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in data (list) - _items = [] - if self.data: - for _item_data in self.data: - if _item_data: - _items.append(_item_data.to_dict()) - _dict["data"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FinetuneList from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "data": ( - [FinetuneResponse.from_dict(_item) for _item in obj["data"]] - if obj.get("data") is not None - else None - ) - } - ) - return _obj diff --git a/src/together/generated/models/finetune_list_events.py b/src/together/generated/models/finetune_list_events.py deleted file mode 100644 index c4266c1c..00000000 --- a/src/together/generated/models/finetune_list_events.py +++ /dev/null @@ -1,99 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict -from typing import Any, ClassVar, Dict, List -from together.generated.models.fine_tune_event import FineTuneEvent -from typing import Optional, Set -from typing_extensions import Self - - -class FinetuneListEvents(BaseModel): - """ - FinetuneListEvents - """ # noqa: E501 - - data: List[FineTuneEvent] - __properties: ClassVar[List[str]] = ["data"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FinetuneListEvents from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in data (list) - _items = [] - if self.data: - for _item_data in self.data: - if _item_data: - _items.append(_item_data.to_dict()) - _dict["data"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FinetuneListEvents from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "data": ( - [FineTuneEvent.from_dict(_item) for _item in obj["data"]] - if obj.get("data") is not None - else None - ) - } - ) - return _obj diff --git a/src/together/generated/models/finetune_response.py b/src/together/generated/models/finetune_response.py deleted file mode 100644 index a1055827..00000000 --- a/src/together/generated/models/finetune_response.py +++ /dev/null @@ -1,222 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Optional, Union -from together.generated.models.fine_tune_event import FineTuneEvent -from together.generated.models.fine_tunes_post_request_training_type import ( - FineTunesPostRequestTrainingType, -) -from together.generated.models.finetune_job_status import FinetuneJobStatus -from together.generated.models.finetune_response_train_on_inputs import ( - FinetuneResponseTrainOnInputs, -) -from typing import Optional, Set -from typing_extensions import Self - - -class FinetuneResponse(BaseModel): - """ - FinetuneResponse - """ # noqa: E501 - - id: StrictStr - training_file: Optional[StrictStr] = None - validation_file: Optional[StrictStr] = None - model: Optional[StrictStr] = None - model_output_name: Optional[StrictStr] = None - model_output_path: Optional[StrictStr] = None - trainingfile_numlines: Optional[StrictInt] = None - trainingfile_size: Optional[StrictInt] = None - created_at: Optional[StrictStr] = None - updated_at: Optional[StrictStr] = None - n_epochs: Optional[StrictInt] = None - n_checkpoints: Optional[StrictInt] = None - n_evals: Optional[StrictInt] = None - batch_size: Optional[StrictInt] = None - learning_rate: Optional[Union[StrictFloat, StrictInt]] = None - lr_scheduler: Optional[Dict[str, Any]] = None - warmup_ratio: Optional[Union[StrictFloat, StrictInt]] = None - max_grad_norm: Optional[Union[StrictFloat, StrictInt]] = None - weight_decay: Optional[Union[StrictFloat, StrictInt]] = None - eval_steps: Optional[StrictInt] = None - train_on_inputs: Optional[FinetuneResponseTrainOnInputs] = None - training_type: Optional[FineTunesPostRequestTrainingType] = None - status: FinetuneJobStatus - job_id: Optional[StrictStr] = None - events: Optional[List[FineTuneEvent]] = None - token_count: Optional[StrictInt] = None - 
param_count: Optional[StrictInt] = None - total_price: Optional[StrictInt] = None - epochs_completed: Optional[StrictInt] = None - queue_depth: Optional[StrictInt] = None - wandb_project_name: Optional[StrictStr] = None - wandb_url: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = [ - "id", - "training_file", - "validation_file", - "model", - "model_output_name", - "model_output_path", - "trainingfile_numlines", - "trainingfile_size", - "created_at", - "updated_at", - "n_epochs", - "n_checkpoints", - "n_evals", - "batch_size", - "learning_rate", - "lr_scheduler", - "warmup_ratio", - "max_grad_norm", - "weight_decay", - "eval_steps", - "train_on_inputs", - "training_type", - "status", - "job_id", - "events", - "token_count", - "param_count", - "total_price", - "epochs_completed", - "queue_depth", - "wandb_project_name", - "wandb_url", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FinetuneResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of lr_scheduler - if self.lr_scheduler: - _dict["lr_scheduler"] = self.lr_scheduler.to_dict() - # override the default output from pydantic by calling `to_dict()` of train_on_inputs - if self.train_on_inputs: - _dict["train_on_inputs"] = self.train_on_inputs.to_dict() - # override the default output from pydantic by calling `to_dict()` of training_type - if self.training_type: - _dict["training_type"] = self.training_type.to_dict() - # override the default output from pydantic by calling `to_dict()` of each item in events (list) - _items = [] - if self.events: - for _item_events in self.events: - if _item_events: - _items.append(_item_events.to_dict()) - _dict["events"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FinetuneResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "id": obj.get("id"), - "training_file": obj.get("training_file"), - "validation_file": obj.get("validation_file"), - "model": obj.get("model"), - "model_output_name": obj.get("model_output_name"), - "model_output_path": obj.get("model_output_path"), - "trainingfile_numlines": obj.get("trainingfile_numlines"), - "trainingfile_size": obj.get("trainingfile_size"), - "created_at": obj.get("created_at"), - "updated_at": obj.get("updated_at"), - "n_epochs": obj.get("n_epochs"), - "n_checkpoints": obj.get("n_checkpoints"), - "n_evals": obj.get("n_evals"), - "batch_size": obj.get("batch_size"), - "learning_rate": obj.get("learning_rate"), - "lr_scheduler": ( - LRScheduler.from_dict(obj["lr_scheduler"]) - if obj.get("lr_scheduler") is not None - else None - ), - "warmup_ratio": 
obj.get("warmup_ratio"), - "max_grad_norm": obj.get("max_grad_norm"), - "weight_decay": obj.get("weight_decay"), - "eval_steps": obj.get("eval_steps"), - "train_on_inputs": ( - FinetuneResponseTrainOnInputs.from_dict(obj["train_on_inputs"]) - if obj.get("train_on_inputs") is not None - else None - ), - "training_type": ( - FineTunesPostRequestTrainingType.from_dict(obj["training_type"]) - if obj.get("training_type") is not None - else None - ), - "status": obj.get("status"), - "job_id": obj.get("job_id"), - "events": ( - [FineTuneEvent.from_dict(_item) for _item in obj["events"]] - if obj.get("events") is not None - else None - ), - "token_count": obj.get("token_count"), - "param_count": obj.get("param_count"), - "total_price": obj.get("total_price"), - "epochs_completed": obj.get("epochs_completed"), - "queue_depth": obj.get("queue_depth"), - "wandb_project_name": obj.get("wandb_project_name"), - "wandb_url": obj.get("wandb_url"), - } - ) - return _obj diff --git a/src/together/generated/models/finetune_response_train_on_inputs.py b/src/together/generated/models/finetune_response_train_on_inputs.py deleted file mode 100644 index 44ff7e8a..00000000 --- a/src/together/generated/models/finetune_response_train_on_inputs.py +++ /dev/null @@ -1,170 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, List, Optional -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -FINETUNERESPONSETRAINONINPUTS_ONE_OF_SCHEMAS = ["bool", "str"] - - -class FinetuneResponseTrainOnInputs(BaseModel): - """ - FinetuneResponseTrainOnInputs - """ - - # data type: bool - oneof_schema_1_validator: Optional[StrictBool] = None - # data type: str - oneof_schema_2_validator: Optional[StrictStr] = None - actual_instance: Optional[Union[bool, str]] = None - one_of_schemas: Set[str] = {"bool", "str"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = FinetuneResponseTrainOnInputs.model_construct() - error_messages = [] - match = 0 - # validate data type: bool - try: - instance.oneof_schema_1_validator = v - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: str - try: - instance.oneof_schema_2_validator = v - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. 
Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into bool - try: - # validation - instance.oneof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.oneof_schema_1_validator - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into str - try: - # validation - instance.oneof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.oneof_schema_2_validator - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], bool, str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/finish_reason.py b/src/together/generated/models/finish_reason.py deleted file mode 100644 index 4d88eced..00000000 --- a/src/together/generated/models/finish_reason.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import json -from enum import Enum -from typing_extensions import Self - - -class FinishReason(str, Enum): - """ - FinishReason - """ - - """ - allowed enum values - """ - STOP = "stop" - EOS = "eos" - LENGTH = "length" - TOOL_CALLS = "tool_calls" - FUNCTION_CALL = "function_call" - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Create an instance of FinishReason from a JSON string""" - return cls(json.loads(json_str)) diff --git a/src/together/generated/models/full_training_type.py b/src/together/generated/models/full_training_type.py deleted file mode 100644 index 6999096a..00000000 --- a/src/together/generated/models/full_training_type.py +++ /dev/null @@ -1,90 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class FullTrainingType(BaseModel): - """ - FullTrainingType - """ # noqa: E501 - - type: StrictStr - __properties: ClassVar[List[str]] = ["type"] - - @field_validator("type") - def type_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["Full"]): - raise ValueError("must be one of enum values ('Full')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FullTrainingType from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FullTrainingType from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"type": obj.get("type")}) - return _obj diff --git a/src/together/generated/models/hardware_availability.py b/src/together/generated/models/hardware_availability.py deleted file mode 100644 index 53a166cf..00000000 --- a/src/together/generated/models/hardware_availability.py +++ /dev/null @@ -1,94 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class HardwareAvailability(BaseModel): - """ - Indicates the current availability status of a hardware configuration - """ # noqa: E501 - - status: StrictStr = Field( - description="The availability status of the hardware configuration" - ) - __properties: ClassVar[List[str]] = ["status"] - - @field_validator("status") - def status_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["available", "unavailable", "insufficient"]): - raise ValueError( - "must be one of enum values ('available', 'unavailable', 'insufficient')" - ) - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of HardwareAvailability from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of HardwareAvailability from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"status": obj.get("status")}) - return _obj diff --git a/src/together/generated/models/hardware_spec.py b/src/together/generated/models/hardware_spec.py deleted file mode 100644 index 10d0058a..00000000 --- a/src/together/generated/models/hardware_spec.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Union -from typing import Optional, Set -from typing_extensions import Self - - -class HardwareSpec(BaseModel): - """ - Detailed specifications of a hardware configuration - """ # noqa: E501 - - gpu_type: StrictStr = Field(description="The type/model of GPU") - gpu_link: StrictStr = Field(description="The GPU interconnect technology") - gpu_memory: Union[StrictFloat, StrictInt] = Field( - description="Amount of GPU memory in GB" - ) - gpu_count: StrictInt = Field(description="Number of GPUs in this configuration") - __properties: ClassVar[List[str]] = [ - "gpu_type", - "gpu_link", - "gpu_memory", - "gpu_count", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) 
-> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of HardwareSpec from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of HardwareSpec from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "gpu_type": obj.get("gpu_type"), - "gpu_link": obj.get("gpu_link"), - "gpu_memory": obj.get("gpu_memory"), - "gpu_count": obj.get("gpu_count"), - } - ) - return _obj diff --git a/src/together/generated/models/hardware_with_status.py b/src/together/generated/models/hardware_with_status.py deleted file mode 100644 index 4ce638a2..00000000 --- a/src/together/generated/models/hardware_with_status.py +++ /dev/null @@ -1,140 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from datetime import datetime -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.endpoint_pricing import EndpointPricing -from together.generated.models.hardware_availability import HardwareAvailability -from together.generated.models.hardware_spec import HardwareSpec -from typing import Optional, Set -from typing_extensions import Self - - -class HardwareWithStatus(BaseModel): - """ - Hardware configuration details with optional availability status - """ # noqa: E501 - - object: StrictStr - id: StrictStr = Field( - description="Unique identifier for the hardware configuration" - ) - pricing: EndpointPricing - specs: HardwareSpec - availability: Optional[HardwareAvailability] = None - updated_at: datetime = Field( - description="Timestamp of when the hardware status was last updated" - ) - __properties: ClassVar[List[str]] = [ - "object", - "id", - "pricing", - "specs", - "availability", - "updated_at", - ] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["hardware"]): - raise ValueError("must be one of enum values ('hardware')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - 
@classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of HardwareWithStatus from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of pricing - if self.pricing: - _dict["pricing"] = self.pricing.to_dict() - # override the default output from pydantic by calling `to_dict()` of specs - if self.specs: - _dict["specs"] = self.specs.to_dict() - # override the default output from pydantic by calling `to_dict()` of availability - if self.availability: - _dict["availability"] = self.availability.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of HardwareWithStatus from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "id": obj.get("id"), - "pricing": ( - EndpointPricing.from_dict(obj["pricing"]) - if obj.get("pricing") is not None - else None - ), - "specs": ( - HardwareSpec.from_dict(obj["specs"]) - if obj.get("specs") is not None - else None - ), - "availability": ( - HardwareAvailability.from_dict(obj["availability"]) - if obj.get("availability") is not None - else None - ), - "updated_at": obj.get("updated_at"), - } - ) - return _obj diff --git a/src/together/generated/models/image_response.py b/src/together/generated/models/image_response.py 
deleted file mode 100644 index 35b81ea4..00000000 --- a/src/together/generated/models/image_response.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.image_response_data_inner import ImageResponseDataInner -from typing import Optional, Set -from typing_extensions import Self - - -class ImageResponse(BaseModel): - """ - ImageResponse - """ # noqa: E501 - - id: StrictStr - model: StrictStr - object: StrictStr - data: List[Optional[ImageResponseDataInner]] - __properties: ClassVar[List[str]] = ["id", "model", "object", "data"] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["list"]): - raise ValueError("must be one of enum values ('list')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ImageResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary 
representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in data (list) - _items = [] - if self.data: - for _item_data in self.data: - if _item_data: - _items.append(_item_data.to_dict()) - _dict["data"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ImageResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "id": obj.get("id"), - "model": obj.get("model"), - "object": obj.get("object"), - "data": ( - [ImageResponseDataInner.from_dict(_item) for _item in obj["data"]] - if obj.get("data") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/image_response_data_inner.py b/src/together/generated/models/image_response_data_inner.py deleted file mode 100644 index 4e625f45..00000000 --- a/src/together/generated/models/image_response_data_inner.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class ImageResponseDataInner(BaseModel): - """ - ImageResponseDataInner - """ # noqa: E501 - - index: StrictInt - b64_json: Optional[StrictStr] = None - url: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = [] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ImageResponseDataInner from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ImageResponseDataInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({}) - return _obj diff --git a/src/together/generated/models/images_generations_post_request.py b/src/together/generated/models/images_generations_post_request.py deleted file mode 100644 index 9ccc6fe9..00000000 --- a/src/together/generated/models/images_generations_post_request.py +++ /dev/null @@ -1,217 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictFloat, - StrictInt, - StrictStr, - field_validator, -) -from typing import Any, ClassVar, Dict, List, Optional, Union -from together.generated.models.images_generations_post_request_image_loras_inner import ( - ImagesGenerationsPostRequestImageLorasInner, -) -from together.generated.models.images_generations_post_request_model import ( - ImagesGenerationsPostRequestModel, -) -from typing import Optional, Set -from typing_extensions import Self - - -class ImagesGenerationsPostRequest(BaseModel): - """ - ImagesGenerationsPostRequest - """ # noqa: E501 - - prompt: StrictStr = Field( - description="A description of the desired images. Maximum length varies by model." 
- ) - model: ImagesGenerationsPostRequestModel - steps: Optional[StrictInt] = Field( - default=20, description="Number of generation steps." - ) - image_url: Optional[StrictStr] = Field( - default=None, - description="URL of an image to use for image models that support it.", - ) - seed: Optional[StrictInt] = Field( - default=None, - description="Seed used for generation. Can be used to reproduce image generations.", - ) - n: Optional[StrictInt] = Field( - default=1, description="Number of image results to generate." - ) - height: Optional[StrictInt] = Field( - default=1024, description="Height of the image to generate in number of pixels." - ) - width: Optional[StrictInt] = Field( - default=1024, description="Width of the image to generate in number of pixels." - ) - negative_prompt: Optional[StrictStr] = Field( - default=None, - description="The prompt or prompts not to guide the image generation.", - ) - response_format: Optional[StrictStr] = Field( - default=None, - description="Format of the image response. Can be either a base64 string or a URL.", - ) - guidance: Optional[Union[StrictFloat, StrictInt]] = Field( - default=3.5, - description="Adjusts the alignment of the generated image with the input prompt. Higher values (e.g., 8-10) make the output more faithful to the prompt, while lower values (e.g., 1-5) encourage more creative freedom.", - ) - output_format: Optional[StrictStr] = Field( - default="jpeg", - description="The format of the image response. Can be either be `jpeg` or `png`. 
Defaults to `jpeg`.", - ) - image_loras: Optional[List[ImagesGenerationsPostRequestImageLorasInner]] = Field( - default=None, - description="An array of objects that define LoRAs (Low-Rank Adaptations) to influence the generated image.", - ) - __properties: ClassVar[List[str]] = [ - "prompt", - "model", - "steps", - "image_url", - "seed", - "n", - "height", - "width", - "negative_prompt", - "response_format", - "guidance", - "output_format", - "image_loras", - ] - - @field_validator("response_format") - def response_format_validate_enum(cls, value): - """Validates the enum""" - if value is None: - return value - - if value not in set(["base64", "url"]): - raise ValueError("must be one of enum values ('base64', 'url')") - return value - - @field_validator("output_format") - def output_format_validate_enum(cls, value): - """Validates the enum""" - if value is None: - return value - - if value not in set(["jpeg", "png"]): - raise ValueError("must be one of enum values ('jpeg', 'png')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ImagesGenerationsPostRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of model - if self.model: - _dict["model"] = self.model.to_dict() - # override the default output from pydantic by calling `to_dict()` of each item in image_loras (list) - _items = [] - if self.image_loras: - for _item_image_loras in self.image_loras: - if _item_image_loras: - _items.append(_item_image_loras.to_dict()) - _dict["image_loras"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ImagesGenerationsPostRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "prompt": obj.get("prompt"), - "model": ( - ImagesGenerationsPostRequestModel.from_dict(obj["model"]) - if obj.get("model") is not None - else None - ), - "steps": obj.get("steps") if obj.get("steps") is not None else 20, - "image_url": obj.get("image_url"), - "seed": obj.get("seed"), - "n": obj.get("n") if obj.get("n") is not None else 1, - "height": obj.get("height") if obj.get("height") is not None else 1024, - "width": obj.get("width") if obj.get("width") is not None else 1024, - "negative_prompt": obj.get("negative_prompt"), - "response_format": obj.get("response_format"), - "guidance": ( - obj.get("guidance") if obj.get("guidance") is not None else 3.5 - ), - "output_format": ( - obj.get("output_format") - if obj.get("output_format") is not None - else "jpeg" - ), - "image_loras": ( - [ - ImagesGenerationsPostRequestImageLorasInner.from_dict(_item) - for 
_item in obj["image_loras"] - ] - if obj.get("image_loras") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/images_generations_post_request_image_loras_inner.py b/src/together/generated/models/images_generations_post_request_image_loras_inner.py deleted file mode 100644 index 50f29485..00000000 --- a/src/together/generated/models/images_generations_post_request_image_loras_inner.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Union -from typing import Optional, Set -from typing_extensions import Self - - -class ImagesGenerationsPostRequestImageLorasInner(BaseModel): - """ - ImagesGenerationsPostRequestImageLorasInner - """ # noqa: E501 - - path: StrictStr = Field( - description="The URL of the LoRA to apply (e.g. https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA)." - ) - scale: Union[StrictFloat, StrictInt] = Field( - description="The strength of the LoRA's influence. Most LoRA's recommend a value of 1." 
- ) - __properties: ClassVar[List[str]] = ["path", "scale"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ImagesGenerationsPostRequestImageLorasInner from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ImagesGenerationsPostRequestImageLorasInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"path": obj.get("path"), "scale": obj.get("scale")}) - return _obj diff --git a/src/together/generated/models/images_generations_post_request_model.py b/src/together/generated/models/images_generations_post_request_model.py deleted file mode 100644 index a61ffba7..00000000 --- a/src/together/generated/models/images_generations_post_request_model.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -from inspect import getfullargspec -import json -import pprint -import re # noqa: F401 -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Optional -from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict -from typing_extensions import Literal, Self -from pydantic import Field - -IMAGESGENERATIONSPOSTREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] - - -class ImagesGenerationsPostRequestModel(BaseModel): - """ - The model to use for image generation.

[See all of Together AI's image models](https://docs.together.ai/docs/serverless-models#image-models) - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: str - anyof_schema_2_validator: Optional[StrictStr] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = ImagesGenerationsPostRequestModel.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: str - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in ImagesGenerationsPostRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into str - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into ImagesGenerationsPostRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/linear_lr_scheduler_args.py b/src/together/generated/models/linear_lr_scheduler_args.py deleted file mode 100644 index d84842f1..00000000 --- a/src/together/generated/models/linear_lr_scheduler_args.py +++ /dev/null @@ -1,94 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt -from typing import Any, ClassVar, Dict, List, Optional, Union -from typing import Optional, Set -from typing_extensions import Self - - -class LinearLRSchedulerArgs(BaseModel): - """ - LinearLRSchedulerArgs - """ # noqa: E501 - - min_lr_ratio: Optional[Union[StrictFloat, StrictInt]] = Field( - default=0.0, - description="The ratio of the final learning rate to the peak learning rate", - ) - __properties: ClassVar[List[str]] = ["min_lr_ratio"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of LinearLRSchedulerArgs from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of LinearLRSchedulerArgs from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "min_lr_ratio": ( - obj.get("min_lr_ratio") - if obj.get("min_lr_ratio") is not None - else 0.0 - ) - } - ) - return _obj diff --git a/src/together/generated/models/list_endpoint.py b/src/together/generated/models/list_endpoint.py deleted file mode 100644 index 417949c3..00000000 --- a/src/together/generated/models/list_endpoint.py +++ /dev/null @@ -1,136 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from datetime import datetime -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class ListEndpoint(BaseModel): - """ - Details about an endpoint when listed via the list endpoint - """ # noqa: E501 - - object: StrictStr = Field(description="The type of object") - id: StrictStr = Field(description="Unique identifier for the endpoint") - name: StrictStr = Field(description="System name for the endpoint") - model: StrictStr = Field(description="The model deployed on this endpoint") - type: StrictStr = Field(description="The type of endpoint") - owner: StrictStr = Field(description="The owner of this endpoint") - state: StrictStr = Field(description="Current state of the endpoint") - created_at: datetime = Field(description="Timestamp when the endpoint was created") - __properties: ClassVar[List[str]] = [ - "object", - "id", - "name", - "model", - "type", - "owner", - "state", - "created_at", - ] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["endpoint"]): - raise ValueError("must be one of enum values ('endpoint')") - return value - - @field_validator("type") - def type_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["serverless", "dedicated"]): - raise ValueError("must be one of enum values ('serverless', 'dedicated')") - return value - - @field_validator("state") - def state_validate_enum(cls, value): - """Validates the enum""" - if value not in set( - ["PENDING", "STARTING", "STARTED", "STOPPING", "STOPPED", "ERROR"] - ): - raise ValueError( - "must be one of enum values ('PENDING', 'STARTING', 'STARTED', 'STOPPING', 'STOPPED', 'ERROR')" - ) - return value - - model_config = ConfigDict( - 
populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListEndpoint from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListEndpoint from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "id": obj.get("id"), - "name": obj.get("name"), - "model": obj.get("model"), - "type": obj.get("type"), - "owner": obj.get("owner"), - "state": obj.get("state"), - "created_at": obj.get("created_at"), - } - ) - return _obj diff --git a/src/together/generated/models/list_endpoints200_response.py b/src/together/generated/models/list_endpoints200_response.py deleted file mode 100644 index 0db32f09..00000000 --- a/src/together/generated/models/list_endpoints200_response.py +++ /dev/null @@ -1,108 +0,0 @@ -# coding: utf-8 - -""" - Together 
APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from together.generated.models.list_endpoint import ListEndpoint -from typing import Optional, Set -from typing_extensions import Self - - -class ListEndpoints200Response(BaseModel): - """ - ListEndpoints200Response - """ # noqa: E501 - - object: StrictStr - data: List[ListEndpoint] - __properties: ClassVar[List[str]] = ["object", "data"] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["list"]): - raise ValueError("must be one of enum values ('list')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListEndpoints200Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in data (list) - _items = [] - if self.data: - for _item_data in self.data: - if _item_data: - _items.append(_item_data.to_dict()) - _dict["data"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListEndpoints200Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "data": ( - [ListEndpoint.from_dict(_item) for _item in obj["data"]] - if obj.get("data") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/list_hardware200_response.py b/src/together/generated/models/list_hardware200_response.py deleted file mode 100644 index 42b45e76..00000000 --- a/src/together/generated/models/list_hardware200_response.py +++ /dev/null @@ -1,108 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from together.generated.models.hardware_with_status import HardwareWithStatus -from typing import Optional, Set -from typing_extensions import Self - - -class ListHardware200Response(BaseModel): - """ - ListHardware200Response - """ # noqa: E501 - - object: StrictStr - data: List[HardwareWithStatus] - __properties: ClassVar[List[str]] = ["object", "data"] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["list"]): - raise ValueError("must be one of enum values ('list')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListHardware200Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in data (list) - _items = [] - if self.data: - for _item_data in self.data: - if _item_data: - _items.append(_item_data.to_dict()) - _dict["data"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListHardware200Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "data": ( - [HardwareWithStatus.from_dict(_item) for _item in obj["data"]] - if obj.get("data") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/lo_ra_training_type.py b/src/together/generated/models/lo_ra_training_type.py deleted file mode 100644 index 44d82d70..00000000 --- a/src/together/generated/models/lo_ra_training_type.py +++ /dev/null @@ -1,123 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - StrictFloat, - StrictInt, - StrictStr, - field_validator, -) -from typing import Any, ClassVar, Dict, List, Optional, Union -from typing import Optional, Set -from typing_extensions import Self - - -class LoRATrainingType(BaseModel): - """ - LoRATrainingType - """ # noqa: E501 - - type: StrictStr - lora_r: StrictInt - lora_alpha: StrictInt - lora_dropout: Optional[Union[StrictFloat, StrictInt]] = 0.0 - lora_trainable_modules: Optional[StrictStr] = "all-linear" - __properties: ClassVar[List[str]] = [ - "type", - "lora_r", - "lora_alpha", - "lora_dropout", - "lora_trainable_modules", - ] - - @field_validator("type") - def type_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["Lora"]): - raise ValueError("must be one of enum values ('Lora')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of LoRATrainingType from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of LoRATrainingType from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "type": obj.get("type"), - "lora_r": obj.get("lora_r"), - "lora_alpha": obj.get("lora_alpha"), - "lora_dropout": ( - obj.get("lora_dropout") - if obj.get("lora_dropout") is not None - else 0.0 - ), - "lora_trainable_modules": ( - obj.get("lora_trainable_modules") - if obj.get("lora_trainable_modules") is not None - else "all-linear" - ), - } - ) - return _obj diff --git a/src/together/generated/models/logprobs_part.py b/src/together/generated/models/logprobs_part.py deleted file mode 100644 index dd1de169..00000000 --- a/src/together/generated/models/logprobs_part.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Optional, Union -from typing import Optional, Set -from typing_extensions import Self - - -class LogprobsPart(BaseModel): - """ - LogprobsPart - """ # noqa: E501 - - token_ids: Optional[List[Union[StrictFloat, StrictInt]]] = Field( - default=None, description="List of token IDs corresponding to the logprobs" - ) - tokens: Optional[List[StrictStr]] = Field( - default=None, description="List of token strings" - ) - token_logprobs: Optional[List[Union[StrictFloat, StrictInt]]] = Field( - default=None, description="List of token log probabilities" - ) - __properties: ClassVar[List[str]] = ["token_ids", "tokens", "token_logprobs"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of LogprobsPart from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of LogprobsPart from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "token_ids": obj.get("token_ids"), - "tokens": obj.get("tokens"), - "token_logprobs": obj.get("token_logprobs"), - } - ) - return _obj diff --git a/src/together/generated/models/lr_scheduler.py b/src/together/generated/models/lr_scheduler.py deleted file mode 100644 index 4e327e05..00000000 --- a/src/together/generated/models/lr_scheduler.py +++ /dev/null @@ -1,96 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class LRScheduler(BaseModel): - """ - LRScheduler - """ # noqa: E501 - - lr_scheduler_type: StrictStr - lr_scheduler_args: Optional[Dict[str, Any]] = None - __properties: ClassVar[List[str]] = ["lr_scheduler_type", "lr_scheduler_args"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of LRScheduler from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of lr_scheduler_args - if self.lr_scheduler_args: - _dict["lr_scheduler_args"] = self.lr_scheduler_args.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of LRScheduler from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "lr_scheduler_type": obj.get("lr_scheduler_type"), - "lr_scheduler_args": ( - LinearLRSchedulerArgs.from_dict(obj["lr_scheduler_args"]) - if obj.get("lr_scheduler_args") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/model_info.py b/src/together/generated/models/model_info.py deleted file mode 100644 index 61a48d64..00000000 --- a/src/together/generated/models/model_info.py +++ /dev/null @@ -1,135 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.pricing import Pricing -from typing import Optional, Set -from typing_extensions import Self - - -class ModelInfo(BaseModel): - """ - ModelInfo - """ # noqa: E501 - - id: StrictStr - object: StrictStr - created: StrictInt - type: StrictStr - display_name: Optional[StrictStr] = None - organization: Optional[StrictStr] = None - link: Optional[StrictStr] = None - license: Optional[StrictStr] = None - context_length: Optional[StrictInt] = None - pricing: Optional[Pricing] = None - __properties: ClassVar[List[str]] = [ - "id", - "object", - "created", - "type", - "display_name", - "organization", - "link", - "license", - "context_length", - "pricing", - ] - - @field_validator("type") - def type_validate_enum(cls, value): - """Validates the enum""" - if value not in set( - ["chat", "language", "code", "image", "embedding", "moderation", "rerank"] - ): - raise ValueError( - "must be one of enum values ('chat', 'language', 'code', 'image', 'embedding', 'moderation', 'rerank')" - ) - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ModelInfo from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return 
the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of pricing - if self.pricing: - _dict["pricing"] = self.pricing.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ModelInfo from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "id": obj.get("id"), - "object": obj.get("object"), - "created": obj.get("created"), - "type": obj.get("type"), - "display_name": obj.get("display_name"), - "organization": obj.get("organization"), - "link": obj.get("link"), - "license": obj.get("license"), - "context_length": obj.get("context_length"), - "pricing": ( - Pricing.from_dict(obj["pricing"]) - if obj.get("pricing") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/pricing.py b/src/together/generated/models/pricing.py deleted file mode 100644 index 87350c63..00000000 --- a/src/together/generated/models/pricing.py +++ /dev/null @@ -1,101 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt -from typing import Any, ClassVar, Dict, List, Union -from typing import Optional, Set -from typing_extensions import Self - - -class Pricing(BaseModel): - """ - Pricing - """ # noqa: E501 - - hourly: Union[StrictFloat, StrictInt] - input: Union[StrictFloat, StrictInt] - output: Union[StrictFloat, StrictInt] - base: Union[StrictFloat, StrictInt] - finetune: Union[StrictFloat, StrictInt] - __properties: ClassVar[List[str]] = [ - "hourly", - "input", - "output", - "base", - "finetune", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of Pricing from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of Pricing from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "hourly": obj.get("hourly"), - "input": obj.get("input"), - "output": obj.get("output"), - "base": obj.get("base"), - "finetune": obj.get("finetune"), - } - ) - return _obj diff --git a/src/together/generated/models/prompt_part_inner.py b/src/together/generated/models/prompt_part_inner.py deleted file mode 100644 index a999f700..00000000 --- a/src/together/generated/models/prompt_part_inner.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.logprobs_part import LogprobsPart -from typing import Optional, Set -from typing_extensions import Self - - -class PromptPartInner(BaseModel): - """ - PromptPartInner - """ # noqa: E501 - - text: Optional[StrictStr] = None - logprobs: Optional[LogprobsPart] = None - __properties: ClassVar[List[str]] = ["text", "logprobs"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of PromptPartInner from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of logprobs - if self.logprobs: - _dict["logprobs"] = self.logprobs.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of PromptPartInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "text": obj.get("text"), - "logprobs": ( - LogprobsPart.from_dict(obj["logprobs"]) - if obj.get("logprobs") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/rerank_request.py b/src/together/generated/models/rerank_request.py deleted file mode 100644 index 5a68173e..00000000 --- a/src/together/generated/models/rerank_request.py +++ /dev/null @@ -1,144 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictInt, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.rerank_request_documents import RerankRequestDocuments -from together.generated.models.rerank_request_model import RerankRequestModel -from typing import Optional, Set -from typing_extensions import Self - - -class RerankRequest(BaseModel): - """ - RerankRequest - """ # noqa: E501 - - model: RerankRequestModel - query: StrictStr = Field(description="The search query to be used for ranking.") - documents: RerankRequestDocuments - top_n: Optional[StrictInt] = Field( - default=None, description="The number of top results to return." - ) - return_documents: Optional[StrictBool] = Field( - default=None, - description="Whether to return supplied documents with the response.", - ) - rank_fields: Optional[List[StrictStr]] = Field( - default=None, - description="List of keys in the JSON Object document to rank by. 
Defaults to use all supplied keys for ranking.", - ) - additional_properties: Dict[str, Any] = {} - __properties: ClassVar[List[str]] = [ - "model", - "query", - "documents", - "top_n", - "return_documents", - "rank_fields", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of RerankRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - * Fields in `self.additional_properties` are added to the output dict. 
- """ - excluded_fields: Set[str] = set( - [ - "additional_properties", - ] - ) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of model - if self.model: - _dict["model"] = self.model.to_dict() - # override the default output from pydantic by calling `to_dict()` of documents - if self.documents: - _dict["documents"] = self.documents.to_dict() - # puts key-value pairs in additional_properties in the top level - if self.additional_properties is not None: - for _key, _value in self.additional_properties.items(): - _dict[_key] = _value - - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of RerankRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "model": ( - RerankRequestModel.from_dict(obj["model"]) - if obj.get("model") is not None - else None - ), - "query": obj.get("query"), - "documents": ( - RerankRequestDocuments.from_dict(obj["documents"]) - if obj.get("documents") is not None - else None - ), - "top_n": obj.get("top_n"), - "return_documents": obj.get("return_documents"), - "rank_fields": obj.get("rank_fields"), - } - ) - # store additional fields in additional_properties - for _key in obj.keys(): - if _key not in cls.__properties: - _obj.additional_properties[_key] = obj.get(_key) - - return _obj diff --git a/src/together/generated/models/rerank_request_documents.py b/src/together/generated/models/rerank_request_documents.py deleted file mode 100644 index f1e24d87..00000000 --- a/src/together/generated/models/rerank_request_documents.py +++ /dev/null @@ -1,171 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import json -import pprint -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Any, Dict, List, Optional -from pydantic import StrictStr, Field -from typing import Union, List, Set, Optional, Dict -from typing_extensions import Literal, Self - -RERANKREQUESTDOCUMENTS_ONE_OF_SCHEMAS = ["List[Dict[str, object]]", "List[str]"] - - -class RerankRequestDocuments(BaseModel): - """ - List of documents, which can be either strings or objects. - """ - - # data type: List[Dict[str, object]] - oneof_schema_1_validator: Optional[List[Dict[str, Any]]] = None - # data type: List[str] - oneof_schema_2_validator: Optional[List[StrictStr]] = None - actual_instance: Optional[Union[List[Dict[str, object]], List[str]]] = None - one_of_schemas: Set[str] = {"List[Dict[str, object]]", "List[str]"} - - model_config = ConfigDict( - validate_assignment=True, - protected_namespaces=(), - ) - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." 
- ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_oneof(cls, v): - instance = RerankRequestDocuments.model_construct() - error_messages = [] - match = 0 - # validate data type: List[Dict[str, object]] - try: - instance.oneof_schema_1_validator = v - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: List[str] - try: - instance.oneof_schema_2_validator = v - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when setting `actual_instance` in RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when setting `actual_instance` in RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - match = 0 - - # deserialize data into List[Dict[str, object]] - try: - # validation - instance.oneof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.oneof_schema_1_validator - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into List[str] - try: - # validation - instance.oneof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.oneof_schema_2_validator - match += 1 - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if match > 1: - # more than 1 match - raise ValueError( - "Multiple matches found when deserializing the JSON string into RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. Details: " - + ", ".join(error_messages) - ) - elif match == 0: - # no match - raise ValueError( - "No match found when deserializing the JSON string into RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict( - self, - ) -> Optional[Union[Dict[str, Any], List[Dict[str, object]], List[str]]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - # primitive type - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/rerank_request_model.py b/src/together/generated/models/rerank_request_model.py deleted file mode 100644 index 1f72a3a6..00000000 --- a/src/together/generated/models/rerank_request_model.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -from inspect import getfullargspec -import json -import pprint -import re # noqa: F401 -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictStr, - ValidationError, - field_validator, -) -from typing import Optional -from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict -from typing_extensions import Literal, Self -from pydantic import Field - -RERANKREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] - - -class RerankRequestModel(BaseModel): - """ - The model to be used for the rerank request.

[See all of Together AI's rerank models](https://docs.together.ai/docs/serverless-models#rerank-models) - """ - - # data type: str - anyof_schema_1_validator: Optional[StrictStr] = None - # data type: str - anyof_schema_2_validator: Optional[StrictStr] = None - if TYPE_CHECKING: - actual_instance: Optional[Union[str]] = None - else: - actual_instance: Any = None - any_of_schemas: Set[str] = {"str"} - - model_config = { - "validate_assignment": True, - "protected_namespaces": (), - } - - def __init__(self, *args, **kwargs) -> None: - if args: - if len(args) > 1: - raise ValueError( - "If a position argument is used, only 1 is allowed to set `actual_instance`" - ) - if kwargs: - raise ValueError( - "If a position argument is used, keyword arguments cannot be used." - ) - super().__init__(actual_instance=args[0]) - else: - super().__init__(**kwargs) - - @field_validator("actual_instance") - def actual_instance_must_validate_anyof(cls, v): - instance = RerankRequestModel.model_construct() - error_messages = [] - # validate data type: str - try: - instance.anyof_schema_1_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # validate data type: str - try: - instance.anyof_schema_2_validator = v - return v - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - if error_messages: - # no match - raise ValueError( - "No match found when setting the actual_instance in RerankRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return v - - @classmethod - def from_dict(cls, obj: Dict[str, Any]) -> Self: - return cls.from_json(json.dumps(obj)) - - @classmethod - def from_json(cls, json_str: str) -> Self: - """Returns the object represented by the json string""" - instance = cls.model_construct() - error_messages = [] - # deserialize data into str - try: - # validation - instance.anyof_schema_1_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_1_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - # deserialize data into str - try: - # validation - instance.anyof_schema_2_validator = json.loads(json_str) - # assign value to actual_instance - instance.actual_instance = instance.anyof_schema_2_validator - return instance - except (ValidationError, ValueError) as e: - error_messages.append(str(e)) - - if error_messages: - # no match - raise ValueError( - "No match found when deserializing the JSON string into RerankRequestModel with anyOf schemas: str. 
Details: " - + ", ".join(error_messages) - ) - else: - return instance - - def to_json(self) -> str: - """Returns the JSON representation of the actual instance""" - if self.actual_instance is None: - return "null" - - if hasattr(self.actual_instance, "to_json") and callable( - self.actual_instance.to_json - ): - return self.actual_instance.to_json() - else: - return json.dumps(self.actual_instance) - - def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: - """Returns the dict representation of the actual instance""" - if self.actual_instance is None: - return None - - if hasattr(self.actual_instance, "to_dict") and callable( - self.actual_instance.to_dict - ): - return self.actual_instance.to_dict() - else: - return self.actual_instance - - def to_str(self) -> str: - """Returns the string representation of the actual instance""" - return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/rerank_response.py b/src/together/generated/models/rerank_response.py deleted file mode 100644 index edfb2fd3..00000000 --- a/src/together/generated/models/rerank_response.py +++ /dev/null @@ -1,127 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.rerank_response_results_inner import ( - RerankResponseResultsInner, -) -from together.generated.models.usage_data import UsageData -from typing import Optional, Set -from typing_extensions import Self - - -class RerankResponse(BaseModel): - """ - RerankResponse - """ # noqa: E501 - - object: StrictStr = Field(description="Object type") - id: Optional[StrictStr] = Field(default=None, description="Request ID") - model: StrictStr = Field(description="The model to be used for the rerank request.") - results: List[RerankResponseResultsInner] - usage: Optional[UsageData] = None - __properties: ClassVar[List[str]] = ["object", "id", "model", "results", "usage"] - - @field_validator("object") - def object_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["rerank"]): - raise ValueError("must be one of enum values ('rerank')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of RerankResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each item in results (list) - _items = [] - if self.results: - for _item_results in self.results: - if _item_results: - _items.append(_item_results.to_dict()) - _dict["results"] = _items - # override the default output from pydantic by calling `to_dict()` of usage - if self.usage: - _dict["usage"] = self.usage.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of RerankResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "object": obj.get("object"), - "id": obj.get("id"), - "model": obj.get("model"), - "results": ( - [ - RerankResponseResultsInner.from_dict(_item) - for _item in obj["results"] - ] - if obj.get("results") is not None - else None - ), - "usage": ( - UsageData.from_dict(obj["usage"]) - if obj.get("usage") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/rerank_response_results_inner.py b/src/together/generated/models/rerank_response_results_inner.py deleted file mode 100644 index 51610442..00000000 --- a/src/together/generated/models/rerank_response_results_inner.py +++ /dev/null @@ -1,101 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt -from typing import Any, ClassVar, Dict, List, Union -from together.generated.models.rerank_response_results_inner_document import ( - RerankResponseResultsInnerDocument, -) -from typing import Optional, Set -from typing_extensions import Self - - -class RerankResponseResultsInner(BaseModel): - """ - RerankResponseResultsInner - """ # noqa: E501 - - index: StrictInt - relevance_score: Union[StrictFloat, StrictInt] - document: RerankResponseResultsInnerDocument - __properties: ClassVar[List[str]] = ["index", "relevance_score", "document"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of RerankResponseResultsInner from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of document - if self.document: - _dict["document"] = self.document.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of RerankResponseResultsInner from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "index": obj.get("index"), - "relevance_score": obj.get("relevance_score"), - "document": ( - RerankResponseResultsInnerDocument.from_dict(obj["document"]) - if obj.get("document") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/rerank_response_results_inner_document.py b/src/together/generated/models/rerank_response_results_inner_document.py deleted file mode 100644 index 51258d5a..00000000 --- a/src/together/generated/models/rerank_response_results_inner_document.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class RerankResponseResultsInnerDocument(BaseModel): - """ - RerankResponseResultsInnerDocument - """ # noqa: E501 - - text: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = ["text"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of RerankResponseResultsInnerDocument from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of RerankResponseResultsInnerDocument from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"text": obj.get("text")}) - return _obj diff --git a/src/together/generated/models/stream_sentinel.py b/src/together/generated/models/stream_sentinel.py deleted file mode 100644 index 54c925d4..00000000 --- a/src/together/generated/models/stream_sentinel.py +++ /dev/null @@ -1,90 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class StreamSentinel(BaseModel): - """ - StreamSentinel - """ # noqa: E501 - - data: StrictStr - __properties: ClassVar[List[str]] = ["data"] - - @field_validator("data") - def data_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["[DONE]"]): - raise ValueError("must be one of enum values ('[DONE]')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of StreamSentinel from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of StreamSentinel from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"data": obj.get("data")}) - return _obj diff --git a/src/together/generated/models/tool_choice.py b/src/together/generated/models/tool_choice.py deleted file mode 100644 index 2571dcf9..00000000 --- a/src/together/generated/models/tool_choice.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import ( - BaseModel, - ConfigDict, - StrictFloat, - StrictInt, - StrictStr, - field_validator, -) -from typing import Any, ClassVar, Dict, List, Union -from together.generated.models.tool_choice_function import ToolChoiceFunction -from typing import Optional, Set -from typing_extensions import Self - - -class ToolChoice(BaseModel): - """ - ToolChoice - """ # noqa: E501 - - index: Union[StrictFloat, StrictInt] - id: StrictStr - type: StrictStr - function: ToolChoiceFunction - __properties: ClassVar[List[str]] = ["index", "id", "type", "function"] - - @field_validator("type") - def type_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["function"]): - raise ValueError("must be one of enum values ('function')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def 
to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ToolChoice from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of function - if self.function: - _dict["function"] = self.function.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ToolChoice from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "index": obj.get("index"), - "id": obj.get("id"), - "type": obj.get("type"), - "function": ( - ToolChoiceFunction.from_dict(obj["function"]) - if obj.get("function") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/tool_choice_function.py b/src/together/generated/models/tool_choice_function.py deleted file mode 100644 index 308cbe71..00000000 --- a/src/together/generated/models/tool_choice_function.py +++ /dev/null @@ -1,86 +0,0 @@ -# coding: utf-8 - -""" - Together 
APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class ToolChoiceFunction(BaseModel): - """ - ToolChoiceFunction - """ # noqa: E501 - - name: StrictStr - arguments: StrictStr - __properties: ClassVar[List[str]] = ["name", "arguments"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ToolChoiceFunction from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ToolChoiceFunction from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"name": obj.get("name"), "arguments": obj.get("arguments")} - ) - return _obj diff --git a/src/together/generated/models/tools_part.py b/src/together/generated/models/tools_part.py deleted file mode 100644 index e26792c2..00000000 --- a/src/together/generated/models/tools_part.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.tools_part_function import ToolsPartFunction -from typing import Optional, Set -from typing_extensions import Self - - -class ToolsPart(BaseModel): - """ - ToolsPart - """ # noqa: E501 - - type: Optional[StrictStr] = None - function: Optional[ToolsPartFunction] = None - __properties: ClassVar[List[str]] = ["type", "function"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ToolsPart from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of function - if self.function: - _dict["function"] = self.function.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ToolsPart from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "type": obj.get("type"), - "function": ( - ToolsPartFunction.from_dict(obj["function"]) - if obj.get("function") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/tools_part_function.py b/src/together/generated/models/tools_part_function.py deleted file mode 100644 index cbb5d419..00000000 --- a/src/together/generated/models/tools_part_function.py +++ /dev/null @@ -1,93 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from typing import Optional, Set -from typing_extensions import Self - - -class ToolsPartFunction(BaseModel): - """ - ToolsPartFunction - """ # noqa: E501 - - description: Optional[StrictStr] = None - name: Optional[StrictStr] = None - parameters: Optional[Dict[str, Any]] = Field( - default=None, description="A map of parameter names to their values." 
- ) - __properties: ClassVar[List[str]] = ["description", "name", "parameters"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ToolsPartFunction from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ToolsPartFunction from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "description": obj.get("description"), - "name": obj.get("name"), - "parameters": obj.get("parameters"), - } - ) - return _obj diff --git a/src/together/generated/models/update_endpoint_request.py b/src/together/generated/models/update_endpoint_request.py deleted file mode 100644 index ee2d2ff0..00000000 --- a/src/together/generated/models/update_endpoint_request.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator -from typing import Any, ClassVar, Dict, List, Optional -from together.generated.models.autoscaling import Autoscaling -from typing import Optional, Set -from typing_extensions import Self - - -class UpdateEndpointRequest(BaseModel): - """ - UpdateEndpointRequest - """ # noqa: E501 - - display_name: Optional[StrictStr] = Field( - default=None, description="A human-readable name for the endpoint" - ) - state: Optional[StrictStr] = Field( - default=None, description="The desired state of the endpoint" - ) - autoscaling: Optional[Autoscaling] = Field( - default=None, description="New autoscaling configuration for the endpoint" - ) - __properties: ClassVar[List[str]] = ["display_name", "state", "autoscaling"] - - @field_validator("state") - def state_validate_enum(cls, value): - """Validates the enum""" - if value is None: - return value - - if value not in set(["STARTED", "STOPPED"]): - raise ValueError("must be one of enum values ('STARTED', 'STOPPED')") - return value - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UpdateEndpointRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. 
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of autoscaling - if self.autoscaling: - _dict["autoscaling"] = self.autoscaling.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UpdateEndpointRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "display_name": obj.get("display_name"), - "state": obj.get("state"), - "autoscaling": ( - Autoscaling.from_dict(obj["autoscaling"]) - if obj.get("autoscaling") is not None - else None - ), - } - ) - return _obj diff --git a/src/together/generated/models/usage_data.py b/src/together/generated/models/usage_data.py deleted file mode 100644 index 82a825c7..00000000 --- a/src/together/generated/models/usage_data.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, StrictInt -from typing import Any, ClassVar, Dict, List -from typing import Optional, Set -from typing_extensions import Self - - -class UsageData(BaseModel): - """ - UsageData - """ # noqa: E501 - - prompt_tokens: StrictInt - completion_tokens: StrictInt - total_tokens: StrictInt - __properties: ClassVar[List[str]] = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of UsageData from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of UsageData from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "prompt_tokens": obj.get("prompt_tokens"), - "completion_tokens": obj.get("completion_tokens"), - "total_tokens": obj.get("total_tokens"), - } - ) - return _obj diff --git a/src/together/generated/rest.py b/src/together/generated/rest.py deleted file mode 100644 index 0f92a615..00000000 --- a/src/together/generated/rest.py +++ /dev/null @@ -1,195 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import io -import json -import re -import ssl -from typing import Optional, Union - -import aiohttp -import aiohttp_retry - -from together.generated.exceptions import ApiException, ApiValueError - -RESTResponseType = aiohttp.ClientResponse - -ALLOW_RETRY_METHODS = frozenset({"DELETE", "GET", "HEAD", "OPTIONS", "PUT", "TRACE"}) - - -class RESTResponse(io.IOBase): - - def __init__(self, resp) -> None: - self.response = resp - self.status = resp.status - self.reason = resp.reason - self.data = None - - async def read(self): - if self.data is None: - self.data = await self.response.read() - return self.data - - def getheaders(self): - """Returns a CIMultiDictProxy of the response headers.""" - return self.response.headers - - def getheader(self, name, default=None): - """Returns a given response header.""" - return self.response.headers.get(name, default) - - -class RESTClientObject: - - def __init__(self, configuration) -> None: - - # maxsize is number of requests to host that are allowed in parallel - self.maxsize = configuration.connection_pool_maxsize - - self.ssl_context = ssl.create_default_context(cafile=configuration.ssl_ca_cert) - if configuration.cert_file: - self.ssl_context.load_cert_chain( - configuration.cert_file, keyfile=configuration.key_file - ) - - if not configuration.verify_ssl: - self.ssl_context.check_hostname = False - self.ssl_context.verify_mode = ssl.CERT_NONE - - self.proxy = configuration.proxy - self.proxy_headers = configuration.proxy_headers - - self.retries = configuration.retries - - self.pool_manager: Optional[aiohttp.ClientSession] = None - self.retry_client: Optional[aiohttp_retry.RetryClient] = None - - async def close(self) -> None: - if self.pool_manager: - await self.pool_manager.close() - if self.retry_client is not None: - await self.retry_client.close() - - async def request( - self, - method, - url, - headers=None, - body=None, - post_params=None, - _request_timeout=None, - ): - """Execute request - - 
:param method: http request method - :param url: http request url - :param headers: http request headers - :param body: request json body, for `application/json` - :param post_params: request post parameters, - `application/x-www-form-urlencoded` - and `multipart/form-data` - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - """ - method = method.upper() - assert method in ["GET", "HEAD", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"] - - if post_params and body: - raise ApiValueError( - "body parameter cannot be used with post_params parameter." - ) - - post_params = post_params or {} - headers = headers or {} - # url already contains the URL query string - timeout = _request_timeout or 5 * 60 - - if "Content-Type" not in headers: - headers["Content-Type"] = "application/json" - - args = {"method": method, "url": url, "timeout": timeout, "headers": headers} - - if self.proxy: - args["proxy"] = self.proxy - if self.proxy_headers: - args["proxy_headers"] = self.proxy_headers - - # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE` - if method in ["POST", "PUT", "PATCH", "OPTIONS", "DELETE"]: - if re.search("json", headers["Content-Type"], re.IGNORECASE): - if body is not None: - body = json.dumps(body) - args["data"] = body - elif headers["Content-Type"] == "application/x-www-form-urlencoded": - args["data"] = aiohttp.FormData(post_params) - elif headers["Content-Type"] == "multipart/form-data": - # must del headers['Content-Type'], or the correct - # Content-Type which generated by aiohttp - del headers["Content-Type"] - data = aiohttp.FormData() - for param in post_params: - k, v = param - if isinstance(v, tuple) and len(v) == 3: - data.add_field(k, value=v[1], filename=v[0], content_type=v[2]) - else: - # Ensures that dict objects are serialized - if isinstance(v, dict): - v = json.dumps(v) - elif isinstance(v, int): - v = str(v) - 
data.add_field(k, v) - args["data"] = data - - # Pass a `bytes` or `str` parameter directly in the body to support - # other content types than Json when `body` argument is provided - # in serialized form - elif isinstance(body, str) or isinstance(body, bytes): - args["data"] = body - else: - # Cannot generate the request from given parameters - msg = """Cannot prepare a request message for provided - arguments. Please check that your arguments match - declared content type.""" - raise ApiException(status=0, reason=msg) - - pool_manager: Union[aiohttp.ClientSession, aiohttp_retry.RetryClient] - - # https pool manager - if self.pool_manager is None: - self.pool_manager = aiohttp.ClientSession( - connector=aiohttp.TCPConnector( - limit=self.maxsize, ssl=self.ssl_context - ), - trust_env=True, - ) - pool_manager = self.pool_manager - - if self.retries is not None and method in ALLOW_RETRY_METHODS: - if self.retry_client is None: - self.retry_client = aiohttp_retry.RetryClient( - client_session=self.pool_manager, - retry_options=aiohttp_retry.ExponentialRetry( - attempts=self.retries, - factor=2.0, - start_timeout=0.1, - max_timeout=120.0, - ), - ) - pool_manager = self.retry_client - - r = await pool_manager.request(**args) - - return RESTResponse(r) diff --git a/src/together/generated/test/__init__.py b/src/together/generated/test/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/together/generated/test/test_audio_api.py b/src/together/generated/test/test_audio_api.py deleted file mode 100644 index deddc486..00000000 --- a/src/together/generated/test/test_audio_api.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.api.audio_api import AudioApi - - -class TestAudioApi(unittest.IsolatedAsyncioTestCase): - """AudioApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = AudioApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_audio_speech(self) -> None: - """Test case for audio_speech - - Create audio generation request - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_audio_speech_request.py b/src/together/generated/test/test_audio_speech_request.py deleted file mode 100644 index 43362fcd..00000000 --- a/src/together/generated/test/test_audio_speech_request.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.audio_speech_request import AudioSpeechRequest - - -class TestAudioSpeechRequest(unittest.TestCase): - """AudioSpeechRequest unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> AudioSpeechRequest: - """Test AudioSpeechRequest - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `AudioSpeechRequest` - """ - model = AudioSpeechRequest() - if include_optional: - return AudioSpeechRequest( - model = cartesia/sonic, - input = '', - voice = None, - response_format = 'wav', - language = 'en', - response_encoding = 'pcm_f32le', - sample_rate = 1.337, - stream = True - ) - else: - return AudioSpeechRequest( - model = cartesia/sonic, - input = '', - voice = None, - ) - """ - - def testAudioSpeechRequest(self): - """Test AudioSpeechRequest""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_audio_speech_request_model.py b/src/together/generated/test/test_audio_speech_request_model.py deleted file mode 100644 index beb8ec7c..00000000 --- a/src/together/generated/test/test_audio_speech_request_model.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel - - -class TestAudioSpeechRequestModel(unittest.TestCase): - """AudioSpeechRequestModel unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> AudioSpeechRequestModel: - """Test AudioSpeechRequestModel - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `AudioSpeechRequestModel` - """ - model = AudioSpeechRequestModel() - if include_optional: - return AudioSpeechRequestModel( - ) - else: - return AudioSpeechRequestModel( - ) - """ - - def testAudioSpeechRequestModel(self): - """Test AudioSpeechRequestModel""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_audio_speech_request_voice.py b/src/together/generated/test/test_audio_speech_request_voice.py deleted file mode 100644 index 744d89c5..00000000 --- a/src/together/generated/test/test_audio_speech_request_voice.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice - - -class TestAudioSpeechRequestVoice(unittest.TestCase): - """AudioSpeechRequestVoice unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> AudioSpeechRequestVoice: - """Test AudioSpeechRequestVoice - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `AudioSpeechRequestVoice` - """ - model = AudioSpeechRequestVoice() - if include_optional: - return AudioSpeechRequestVoice( - ) - else: - return AudioSpeechRequestVoice( - ) - """ - - def testAudioSpeechRequestVoice(self): - """Test AudioSpeechRequestVoice""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_audio_speech_stream_chunk.py b/src/together/generated/test/test_audio_speech_stream_chunk.py deleted file mode 100644 index 1335b885..00000000 --- a/src/together/generated/test/test_audio_speech_stream_chunk.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk - - -class TestAudioSpeechStreamChunk(unittest.TestCase): - """AudioSpeechStreamChunk unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> AudioSpeechStreamChunk: - """Test AudioSpeechStreamChunk - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `AudioSpeechStreamChunk` - """ - model = AudioSpeechStreamChunk() - if include_optional: - return AudioSpeechStreamChunk( - object = 'audio.tts.chunk', - model = 'cartesia/sonic', - b64 = '' - ) - else: - return AudioSpeechStreamChunk( - object = 'audio.tts.chunk', - model = 'cartesia/sonic', - b64 = '', - ) - """ - - def testAudioSpeechStreamChunk(self): - """Test AudioSpeechStreamChunk""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_audio_speech_stream_event.py b/src/together/generated/test/test_audio_speech_stream_event.py deleted file mode 100644 index 68337a10..00000000 --- a/src/together/generated/test/test_audio_speech_stream_event.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent - - -class TestAudioSpeechStreamEvent(unittest.TestCase): - """AudioSpeechStreamEvent unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> AudioSpeechStreamEvent: - """Test AudioSpeechStreamEvent - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `AudioSpeechStreamEvent` - """ - model = AudioSpeechStreamEvent() - if include_optional: - return AudioSpeechStreamEvent( - data = together.generated.models.audio_speech_stream_chunk.AudioSpeechStreamChunk( - object = 'audio.tts.chunk', - model = 'cartesia/sonic', - b64 = '', ) - ) - else: - return AudioSpeechStreamEvent( - data = together.generated.models.audio_speech_stream_chunk.AudioSpeechStreamChunk( - object = 'audio.tts.chunk', - model = 'cartesia/sonic', - b64 = '', ), - ) - """ - - def testAudioSpeechStreamEvent(self): - """Test AudioSpeechStreamEvent""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_audio_speech_stream_response.py b/src/together/generated/test/test_audio_speech_stream_response.py deleted file mode 100644 index e9e245bc..00000000 --- a/src/together/generated/test/test_audio_speech_stream_response.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.audio_speech_stream_response import ( - AudioSpeechStreamResponse, -) - - -class TestAudioSpeechStreamResponse(unittest.TestCase): - """AudioSpeechStreamResponse unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> AudioSpeechStreamResponse: - """Test AudioSpeechStreamResponse - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `AudioSpeechStreamResponse` - """ - model = AudioSpeechStreamResponse() - if include_optional: - return AudioSpeechStreamResponse( - data = '[DONE]' - ) - else: - return AudioSpeechStreamResponse( - data = '[DONE]', - ) - """ - - def testAudioSpeechStreamResponse(self): - """Test AudioSpeechStreamResponse""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_autoscaling.py b/src/together/generated/test/test_autoscaling.py deleted file mode 100644 index 4f8dff60..00000000 --- a/src/together/generated/test/test_autoscaling.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.autoscaling import Autoscaling - - -class TestAutoscaling(unittest.TestCase): - """Autoscaling unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> Autoscaling: - """Test Autoscaling - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `Autoscaling` - """ - model = Autoscaling() - if include_optional: - return Autoscaling( - min_replicas = 56, - max_replicas = 56 - ) - else: - return Autoscaling( - min_replicas = 56, - max_replicas = 56, - ) - """ - - def testAutoscaling(self): - """Test Autoscaling""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_api.py b/src/together/generated/test/test_chat_api.py deleted file mode 100644 index 55bb7cba..00000000 --- a/src/together/generated/test/test_chat_api.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.api.chat_api import ChatApi - - -class TestChatApi(unittest.IsolatedAsyncioTestCase): - """ChatApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = ChatApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_chat_completions(self) -> None: - """Test case for chat_completions - - Create chat completion - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_assistant_message_param.py b/src/together/generated/test/test_chat_completion_assistant_message_param.py deleted file mode 100644 index 072b5db2..00000000 --- a/src/together/generated/test/test_chat_completion_assistant_message_param.py +++ /dev/null @@ -1,70 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_assistant_message_param import ( - ChatCompletionAssistantMessageParam, -) - - -class TestChatCompletionAssistantMessageParam(unittest.TestCase): - """ChatCompletionAssistantMessageParam unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionAssistantMessageParam: - """Test ChatCompletionAssistantMessageParam - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionAssistantMessageParam` - """ - model = ChatCompletionAssistantMessageParam() - if include_optional: - return ChatCompletionAssistantMessageParam( - content = '', - role = 'assistant', - name = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( - arguments = '', - name = '', ) - ) - else: - return ChatCompletionAssistantMessageParam( - role = 'assistant', - ) - """ - - def testChatCompletionAssistantMessageParam(self): - """Test ChatCompletionAssistantMessageParam""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choice.py b/src/together/generated/test/test_chat_completion_choice.py deleted file mode 100644 index 9618c968..00000000 --- a/src/together/generated/test/test_chat_completion_choice.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The 
Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_choice import ChatCompletionChoice - - -class TestChatCompletionChoice(unittest.TestCase): - """ChatCompletionChoice unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionChoice: - """Test ChatCompletionChoice - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionChoice` - """ - model = ChatCompletionChoice() - if include_optional: - return ChatCompletionChoice( - index = 56, - finish_reason = 'stop', - logprobs = together.generated.models.logprobs_part.LogprobsPart( - token_ids = [ - 1.337 - ], - tokens = [ - '' - ], - token_logprobs = [ - 1.337 - ], ), - delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( - token_id = 56, - role = 'system', - content = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( - arguments = '', - name = '', ), ) - ) - else: - return ChatCompletionChoice( - index = 56, - finish_reason = 'stop', - delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( - token_id = 56, - role = 'system', - content = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type 
= 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( - arguments = '', - name = '', ), ), - ) - """ - - def testChatCompletionChoice(self): - """Test ChatCompletionChoice""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choice_delta.py b/src/together/generated/test/test_chat_completion_choice_delta.py deleted file mode 100644 index 6e430d9e..00000000 --- a/src/together/generated/test/test_chat_completion_choice_delta.py +++ /dev/null @@ -1,70 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_choice_delta import ( - ChatCompletionChoiceDelta, -) - - -class TestChatCompletionChoiceDelta(unittest.TestCase): - """ChatCompletionChoiceDelta unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionChoiceDelta: - """Test ChatCompletionChoiceDelta - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionChoiceDelta` - """ - model = ChatCompletionChoiceDelta() - if include_optional: - return ChatCompletionChoiceDelta( - token_id = 56, - role = 'system', - content = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( - arguments = '', - name = '', ) - ) - else: - return ChatCompletionChoiceDelta( - role = 'system', - ) - """ - - def testChatCompletionChoiceDelta(self): - """Test ChatCompletionChoiceDelta""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choice_delta_function_call.py b/src/together/generated/test/test_chat_completion_choice_delta_function_call.py deleted file mode 100644 index 0797b639..00000000 --- a/src/together/generated/test/test_chat_completion_choice_delta_function_call.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. 
Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_choice_delta_function_call import ( - ChatCompletionChoiceDeltaFunctionCall, -) - - -class TestChatCompletionChoiceDeltaFunctionCall(unittest.TestCase): - """ChatCompletionChoiceDeltaFunctionCall unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionChoiceDeltaFunctionCall: - """Test ChatCompletionChoiceDeltaFunctionCall - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionChoiceDeltaFunctionCall` - """ - model = ChatCompletionChoiceDeltaFunctionCall() - if include_optional: - return ChatCompletionChoiceDeltaFunctionCall( - arguments = '', - name = '' - ) - else: - return ChatCompletionChoiceDeltaFunctionCall( - arguments = '', - name = '', - ) - """ - - def testChatCompletionChoiceDeltaFunctionCall(self): - """Test ChatCompletionChoiceDeltaFunctionCall""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choices_data_inner.py b/src/together/generated/test/test_chat_completion_choices_data_inner.py deleted file mode 100644 index d61c850b..00000000 --- a/src/together/generated/test/test_chat_completion_choices_data_inner.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_choices_data_inner import ( - ChatCompletionChoicesDataInner, -) - - -class TestChatCompletionChoicesDataInner(unittest.TestCase): - """ChatCompletionChoicesDataInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionChoicesDataInner: - """Test ChatCompletionChoicesDataInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionChoicesDataInner` - """ - model = ChatCompletionChoicesDataInner() - if include_optional: - return ChatCompletionChoicesDataInner( - text = '', - index = 56, - seed = 56, - finish_reason = 'stop', - message = together.generated.models.chat_completion_message.ChatCompletionMessage( - content = '', - role = 'assistant', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( - arguments = '', - name = '', ), ), - logprobs = None - ) - else: - return ChatCompletionChoicesDataInner( - ) - """ - - def testChatCompletionChoicesDataInner(self): - """Test ChatCompletionChoicesDataInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py 
b/src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py deleted file mode 100644 index 88ae0977..00000000 --- a/src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_choices_data_inner_logprobs import ( - ChatCompletionChoicesDataInnerLogprobs, -) - - -class TestChatCompletionChoicesDataInnerLogprobs(unittest.TestCase): - """ChatCompletionChoicesDataInnerLogprobs unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionChoicesDataInnerLogprobs: - """Test ChatCompletionChoicesDataInnerLogprobs - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionChoicesDataInnerLogprobs` - """ - model = ChatCompletionChoicesDataInnerLogprobs() - if include_optional: - return ChatCompletionChoicesDataInnerLogprobs( - token_ids = [ - 1.337 - ], - tokens = [ - '' - ], - token_logprobs = [ - 1.337 - ] - ) - else: - return ChatCompletionChoicesDataInnerLogprobs( - ) - """ - - def testChatCompletionChoicesDataInnerLogprobs(self): - """Test ChatCompletionChoicesDataInnerLogprobs""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_chunk.py b/src/together/generated/test/test_chat_completion_chunk.py deleted file mode 100644 index 
f935abc8..00000000 --- a/src/together/generated/test/test_chat_completion_chunk.py +++ /dev/null @@ -1,108 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_chunk import ChatCompletionChunk - - -class TestChatCompletionChunk(unittest.TestCase): - """ChatCompletionChunk unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionChunk: - """Test ChatCompletionChunk - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionChunk` - """ - model = ChatCompletionChunk() - if include_optional: - return ChatCompletionChunk( - id = '', - object = 'chat.completion.chunk', - created = 56, - system_fingerprint = '', - model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', - choices = [ - together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( - index = 56, - finish_reason = 'stop', - logprobs = 1.337, - seed = 56, - delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( - token_id = 56, - role = 'system', - content = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( - arguments = '', - name = '', ), ), ) - ], - usage = None - ) - else: - return 
ChatCompletionChunk( - id = '', - object = 'chat.completion.chunk', - created = 56, - model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', - choices = [ - together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( - index = 56, - finish_reason = 'stop', - logprobs = 1.337, - seed = 56, - delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( - token_id = 56, - role = 'system', - content = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( - arguments = '', - name = '', ), ), ) - ], - ) - """ - - def testChatCompletionChunk(self): - """Test ChatCompletionChunk""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_chunk_choices_inner.py b/src/together/generated/test/test_chat_completion_chunk_choices_inner.py deleted file mode 100644 index e7317378..00000000 --- a/src/together/generated/test/test_chat_completion_chunk_choices_inner.py +++ /dev/null @@ -1,92 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_chunk_choices_inner import ( - ChatCompletionChunkChoicesInner, -) - - -class TestChatCompletionChunkChoicesInner(unittest.TestCase): - """ChatCompletionChunkChoicesInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionChunkChoicesInner: - """Test ChatCompletionChunkChoicesInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionChunkChoicesInner` - """ - model = ChatCompletionChunkChoicesInner() - if include_optional: - return ChatCompletionChunkChoicesInner( - index = 56, - finish_reason = 'stop', - logprobs = 1.337, - seed = 56, - delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( - token_id = 56, - role = 'system', - content = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( - arguments = '', - name = '', ), ) - ) - else: - return ChatCompletionChunkChoicesInner( - index = 56, - finish_reason = 'stop', - delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( - token_id = 56, - role = 'system', - content = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = 
together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( - arguments = '', - name = '', ), ), - ) - """ - - def testChatCompletionChunkChoicesInner(self): - """Test ChatCompletionChunkChoicesInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_event.py b/src/together/generated/test/test_chat_completion_event.py deleted file mode 100644 index c219c114..00000000 --- a/src/together/generated/test/test_chat_completion_event.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_event import ChatCompletionEvent - - -class TestChatCompletionEvent(unittest.TestCase): - """ChatCompletionEvent unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionEvent: - """Test ChatCompletionEvent - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionEvent` - """ - model = ChatCompletionEvent() - if include_optional: - return ChatCompletionEvent( - data = together.generated.models.chat_completion_chunk.ChatCompletionChunk( - id = '', - object = 'chat.completion.chunk', - created = 56, - system_fingerprint = '', - model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', - choices = [ - together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( - 
index = 56, - finish_reason = 'stop', - logprobs = 1.337, - seed = 56, - delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( - token_id = 56, - role = 'system', - content = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( - arguments = '', - name = '', ), ), ) - ], - usage = null, ) - ) - else: - return ChatCompletionEvent( - data = together.generated.models.chat_completion_chunk.ChatCompletionChunk( - id = '', - object = 'chat.completion.chunk', - created = 56, - system_fingerprint = '', - model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', - choices = [ - together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( - index = 56, - finish_reason = 'stop', - logprobs = 1.337, - seed = 56, - delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( - token_id = 56, - role = 'system', - content = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( - arguments = '', - name = '', ), ), ) - ], - usage = null, ), - ) - """ - - def testChatCompletionEvent(self): - """Test ChatCompletionEvent""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git 
a/src/together/generated/test/test_chat_completion_function_message_param.py b/src/together/generated/test/test_chat_completion_function_message_param.py deleted file mode 100644 index 07cfa130..00000000 --- a/src/together/generated/test/test_chat_completion_function_message_param.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_function_message_param import ( - ChatCompletionFunctionMessageParam, -) - - -class TestChatCompletionFunctionMessageParam(unittest.TestCase): - """ChatCompletionFunctionMessageParam unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionFunctionMessageParam: - """Test ChatCompletionFunctionMessageParam - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionFunctionMessageParam` - """ - model = ChatCompletionFunctionMessageParam() - if include_optional: - return ChatCompletionFunctionMessageParam( - role = 'function', - content = '', - name = '' - ) - else: - return ChatCompletionFunctionMessageParam( - role = 'function', - content = '', - name = '', - ) - """ - - def testChatCompletionFunctionMessageParam(self): - """Test ChatCompletionFunctionMessageParam""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_message.py b/src/together/generated/test/test_chat_completion_message.py 
deleted file mode 100644 index 6e60a844..00000000 --- a/src/together/generated/test/test_chat_completion_message.py +++ /dev/null @@ -1,68 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_message import ChatCompletionMessage - - -class TestChatCompletionMessage(unittest.TestCase): - """ChatCompletionMessage unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionMessage: - """Test ChatCompletionMessage - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionMessage` - """ - model = ChatCompletionMessage() - if include_optional: - return ChatCompletionMessage( - content = '', - role = 'assistant', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( - arguments = '', - name = '', ) - ) - else: - return ChatCompletionMessage( - content = '', - role = 'assistant', - ) - """ - - def testChatCompletionMessage(self): - """Test ChatCompletionMessage""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_message_function_call.py 
b/src/together/generated/test/test_chat_completion_message_function_call.py deleted file mode 100644 index 90f0dbc7..00000000 --- a/src/together/generated/test/test_chat_completion_message_function_call.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_message_function_call import ( - ChatCompletionMessageFunctionCall, -) - - -class TestChatCompletionMessageFunctionCall(unittest.TestCase): - """ChatCompletionMessageFunctionCall unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionMessageFunctionCall: - """Test ChatCompletionMessageFunctionCall - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionMessageFunctionCall` - """ - model = ChatCompletionMessageFunctionCall() - if include_optional: - return ChatCompletionMessageFunctionCall( - arguments = '', - name = '' - ) - else: - return ChatCompletionMessageFunctionCall( - arguments = '', - name = '', - ) - """ - - def testChatCompletionMessageFunctionCall(self): - """Test ChatCompletionMessageFunctionCall""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_message_param.py b/src/together/generated/test/test_chat_completion_message_param.py deleted file mode 100644 index d463e95d..00000000 --- 
a/src/together/generated/test/test_chat_completion_message_param.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_message_param import ( - ChatCompletionMessageParam, -) - - -class TestChatCompletionMessageParam(unittest.TestCase): - """ChatCompletionMessageParam unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionMessageParam: - """Test ChatCompletionMessageParam - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionMessageParam` - """ - model = ChatCompletionMessageParam() - if include_optional: - return ChatCompletionMessageParam( - content = '', - role = 'function', - name = '', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( - arguments = '', - name = '', ), - tool_call_id = '' - ) - else: - return ChatCompletionMessageParam( - content = '', - role = 'function', - name = '', - tool_call_id = '', - ) - """ - - def testChatCompletionMessageParam(self): - """Test ChatCompletionMessageParam""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() 
diff --git a/src/together/generated/test/test_chat_completion_request.py b/src/together/generated/test/test_chat_completion_request.py deleted file mode 100644 index d84e32d6..00000000 --- a/src/together/generated/test/test_chat_completion_request.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_request import ChatCompletionRequest - - -class TestChatCompletionRequest(unittest.TestCase): - """ChatCompletionRequest unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionRequest: - """Test ChatCompletionRequest - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionRequest` - """ - model = ChatCompletionRequest() - if include_optional: - return ChatCompletionRequest( - messages = [ - together.generated.models.chat_completion_request_messages_inner.ChatCompletionRequest_messages_inner( - role = 'system', - content = '', ) - ], - model = meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo, - max_tokens = 56, - stop = [ - '' - ], - temperature = 1.337, - top_p = 1.337, - top_k = 56, - context_length_exceeded_behavior = 'error', - repetition_penalty = 1.337, - stream = True, - logprobs = 0, - echo = True, - n = 1, - min_p = 1.337, - presence_penalty = 1.337, - frequency_penalty = 1.337, - logit_bias = {1024=-10.5, 105=21.4}, - seed = 42, - function_call = None, - response_format = together.generated.models.chat_completion_request_response_format.ChatCompletionRequest_response_format( - type = 'json', - schema = { 
- 'key' : '' - }, ), - tools = [ - together.generated.models.tools_part.ToolsPart( - type = 'tool_type', - function = together.generated.models.tools_part_function.ToolsPart_function( - description = 'A description of the function.', - name = 'function_name', - parameters = { }, ), ) - ], - tool_choice = None, - safety_model = 'safety_model_name' - ) - else: - return ChatCompletionRequest( - messages = [ - together.generated.models.chat_completion_request_messages_inner.ChatCompletionRequest_messages_inner( - role = 'system', - content = '', ) - ], - model = meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo, - ) - """ - - def testChatCompletionRequest(self): - """Test ChatCompletionRequest""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_function_call.py b/src/together/generated/test/test_chat_completion_request_function_call.py deleted file mode 100644 index 55125eb8..00000000 --- a/src/together/generated/test/test_chat_completion_request_function_call.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_request_function_call import ( - ChatCompletionRequestFunctionCall, -) - - -class TestChatCompletionRequestFunctionCall(unittest.TestCase): - """ChatCompletionRequestFunctionCall unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionRequestFunctionCall: - """Test ChatCompletionRequestFunctionCall - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionRequestFunctionCall` - """ - model = ChatCompletionRequestFunctionCall() - if include_optional: - return ChatCompletionRequestFunctionCall( - name = '' - ) - else: - return ChatCompletionRequestFunctionCall( - name = '', - ) - """ - - def testChatCompletionRequestFunctionCall(self): - """Test ChatCompletionRequestFunctionCall""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_function_call_one_of.py b/src/together/generated/test/test_chat_completion_request_function_call_one_of.py deleted file mode 100644 index 58ec5841..00000000 --- a/src/together/generated/test/test_chat_completion_request_function_call_one_of.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_request_function_call_one_of import ( - ChatCompletionRequestFunctionCallOneOf, -) - - -class TestChatCompletionRequestFunctionCallOneOf(unittest.TestCase): - """ChatCompletionRequestFunctionCallOneOf unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionRequestFunctionCallOneOf: - """Test ChatCompletionRequestFunctionCallOneOf - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionRequestFunctionCallOneOf` - """ - model = ChatCompletionRequestFunctionCallOneOf() - if include_optional: - return ChatCompletionRequestFunctionCallOneOf( - name = '' - ) - else: - return ChatCompletionRequestFunctionCallOneOf( - name = '', - ) - """ - - def testChatCompletionRequestFunctionCallOneOf(self): - """Test ChatCompletionRequestFunctionCallOneOf""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_messages_inner.py b/src/together/generated/test/test_chat_completion_request_messages_inner.py deleted file mode 100644 index 4d799742..00000000 --- a/src/together/generated/test/test_chat_completion_request_messages_inner.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_request_messages_inner import ( - ChatCompletionRequestMessagesInner, -) - - -class TestChatCompletionRequestMessagesInner(unittest.TestCase): - """ChatCompletionRequestMessagesInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionRequestMessagesInner: - """Test ChatCompletionRequestMessagesInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionRequestMessagesInner` - """ - model = ChatCompletionRequestMessagesInner() - if include_optional: - return ChatCompletionRequestMessagesInner( - role = 'system', - content = '' - ) - else: - return ChatCompletionRequestMessagesInner( - role = 'system', - content = '', - ) - """ - - def testChatCompletionRequestMessagesInner(self): - """Test ChatCompletionRequestMessagesInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_model.py b/src/together/generated/test/test_chat_completion_request_model.py deleted file mode 100644 index 1f18e0f7..00000000 --- a/src/together/generated/test/test_chat_completion_request_model.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_request_model import ( - ChatCompletionRequestModel, -) - - -class TestChatCompletionRequestModel(unittest.TestCase): - """ChatCompletionRequestModel unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionRequestModel: - """Test ChatCompletionRequestModel - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionRequestModel` - """ - model = ChatCompletionRequestModel() - if include_optional: - return ChatCompletionRequestModel( - ) - else: - return ChatCompletionRequestModel( - ) - """ - - def testChatCompletionRequestModel(self): - """Test ChatCompletionRequestModel""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_response_format.py b/src/together/generated/test/test_chat_completion_request_response_format.py deleted file mode 100644 index e6f5241b..00000000 --- a/src/together/generated/test/test_chat_completion_request_response_format.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_request_response_format import ( - ChatCompletionRequestResponseFormat, -) - - -class TestChatCompletionRequestResponseFormat(unittest.TestCase): - """ChatCompletionRequestResponseFormat unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionRequestResponseFormat: - """Test ChatCompletionRequestResponseFormat - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionRequestResponseFormat` - """ - model = ChatCompletionRequestResponseFormat() - if include_optional: - return ChatCompletionRequestResponseFormat( - type = 'json', - var_schema = { - 'key' : '' - } - ) - else: - return ChatCompletionRequestResponseFormat( - ) - """ - - def testChatCompletionRequestResponseFormat(self): - """Test ChatCompletionRequestResponseFormat""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_tool_choice.py b/src/together/generated/test/test_chat_completion_request_tool_choice.py deleted file mode 100644 index b04e7456..00000000 --- a/src/together/generated/test/test_chat_completion_request_tool_choice.py +++ /dev/null @@ -1,66 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_request_tool_choice import ( - ChatCompletionRequestToolChoice, -) - - -class TestChatCompletionRequestToolChoice(unittest.TestCase): - """ChatCompletionRequestToolChoice unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionRequestToolChoice: - """Test ChatCompletionRequestToolChoice - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionRequestToolChoice` - """ - model = ChatCompletionRequestToolChoice() - if include_optional: - return ChatCompletionRequestToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ) - ) - else: - return ChatCompletionRequestToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), - ) - """ - - def testChatCompletionRequestToolChoice(self): - """Test ChatCompletionRequestToolChoice""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_response.py b/src/together/generated/test/test_chat_completion_response.py deleted file mode 100644 index e02f8cb3..00000000 --- a/src/together/generated/test/test_chat_completion_response.py +++ /dev/null @@ -1,110 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_response import ChatCompletionResponse - - -class TestChatCompletionResponse(unittest.TestCase): - """ChatCompletionResponse unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionResponse: - """Test ChatCompletionResponse - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionResponse` - """ - model = ChatCompletionResponse() - if include_optional: - return ChatCompletionResponse( - id = '', - choices = [ - together.generated.models.chat_completion_choices_data_inner.ChatCompletionChoicesData_inner( - text = '', - index = 56, - seed = 56, - finish_reason = 'stop', - message = together.generated.models.chat_completion_message.ChatCompletionMessage( - content = '', - role = 'assistant', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( - arguments = '', - name = '', ), ), - logprobs = null, ) - ], - usage = together.generated.models.usage_data.UsageData( - prompt_tokens = 56, - completion_tokens = 56, - total_tokens = 56, ), - created = 56, - model = '', - object = 'chat.completion' - ) - else: - return ChatCompletionResponse( - id = '', - choices = [ - together.generated.models.chat_completion_choices_data_inner.ChatCompletionChoicesData_inner( - text = '', - index = 56, - seed = 56, - 
finish_reason = 'stop', - message = together.generated.models.chat_completion_message.ChatCompletionMessage( - content = '', - role = 'assistant', - tool_calls = [ - together.generated.models.tool_choice.ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), ) - ], - function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( - arguments = '', - name = '', ), ), - logprobs = null, ) - ], - created = 56, - model = '', - object = 'chat.completion', - ) - """ - - def testChatCompletionResponse(self): - """Test ChatCompletionResponse""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_stream.py b/src/together/generated/test/test_chat_completion_stream.py deleted file mode 100644 index 29643caa..00000000 --- a/src/together/generated/test/test_chat_completion_stream.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_stream import ChatCompletionStream - - -class TestChatCompletionStream(unittest.TestCase): - """ChatCompletionStream unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionStream: - """Test ChatCompletionStream - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionStream` - """ - model = ChatCompletionStream() - if include_optional: - return ChatCompletionStream( - data = '[DONE]' - ) - else: - return ChatCompletionStream( - data = '[DONE]', - ) - """ - - def testChatCompletionStream(self): - """Test ChatCompletionStream""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_system_message_param.py b/src/together/generated/test/test_chat_completion_system_message_param.py deleted file mode 100644 index 33b8a0c8..00000000 --- a/src/together/generated/test/test_chat_completion_system_message_param.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_system_message_param import ( - ChatCompletionSystemMessageParam, -) - - -class TestChatCompletionSystemMessageParam(unittest.TestCase): - """ChatCompletionSystemMessageParam unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionSystemMessageParam: - """Test ChatCompletionSystemMessageParam - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionSystemMessageParam` - """ - model = ChatCompletionSystemMessageParam() - if include_optional: - return ChatCompletionSystemMessageParam( - content = '', - role = 'system', - name = '' - ) - else: - return ChatCompletionSystemMessageParam( - content = '', - role = 'system', - ) - """ - - def testChatCompletionSystemMessageParam(self): - """Test ChatCompletionSystemMessageParam""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_token.py b/src/together/generated/test/test_chat_completion_token.py deleted file mode 100644 index 131f6c4d..00000000 --- a/src/together/generated/test/test_chat_completion_token.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_token import ChatCompletionToken - - -class TestChatCompletionToken(unittest.TestCase): - """ChatCompletionToken unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionToken: - """Test ChatCompletionToken - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionToken` - """ - model = ChatCompletionToken() - if include_optional: - return ChatCompletionToken( - id = 56, - text = '', - logprob = 1.337, - special = True - ) - else: - return ChatCompletionToken( - id = 56, - text = '', - logprob = 1.337, - special = True, - ) - """ - - def testChatCompletionToken(self): - """Test ChatCompletionToken""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_tool.py b/src/together/generated/test/test_chat_completion_tool.py deleted file mode 100644 index 2f795a90..00000000 --- a/src/together/generated/test/test_chat_completion_tool.py +++ /dev/null @@ -1,66 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_tool import ChatCompletionTool - - -class TestChatCompletionTool(unittest.TestCase): - """ChatCompletionTool unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionTool: - """Test ChatCompletionTool - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionTool` - """ - model = ChatCompletionTool() - if include_optional: - return ChatCompletionTool( - type = 'function', - function = together.generated.models.chat_completion_tool_function.ChatCompletionTool_function( - description = '', - name = '', - parameters = { - 'key' : null - }, ) - ) - else: - return ChatCompletionTool( - type = 'function', - function = together.generated.models.chat_completion_tool_function.ChatCompletionTool_function( - description = '', - name = '', - parameters = { - 'key' : null - }, ), - ) - """ - - def testChatCompletionTool(self): - """Test ChatCompletionTool""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_tool_function.py b/src/together/generated/test/test_chat_completion_tool_function.py deleted file mode 100644 index 0d370610..00000000 --- a/src/together/generated/test/test_chat_completion_tool_function.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_tool_function import ( - ChatCompletionToolFunction, -) - - -class TestChatCompletionToolFunction(unittest.TestCase): - """ChatCompletionToolFunction unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionToolFunction: - """Test ChatCompletionToolFunction - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionToolFunction` - """ - model = ChatCompletionToolFunction() - if include_optional: - return ChatCompletionToolFunction( - description = '', - name = '', - parameters = { - 'key' : null - } - ) - else: - return ChatCompletionToolFunction( - name = '', - ) - """ - - def testChatCompletionToolFunction(self): - """Test ChatCompletionToolFunction""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_tool_message_param.py b/src/together/generated/test/test_chat_completion_tool_message_param.py deleted file mode 100644 index 90aece7d..00000000 --- a/src/together/generated/test/test_chat_completion_tool_message_param.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_tool_message_param import ( - ChatCompletionToolMessageParam, -) - - -class TestChatCompletionToolMessageParam(unittest.TestCase): - """ChatCompletionToolMessageParam unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionToolMessageParam: - """Test ChatCompletionToolMessageParam - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionToolMessageParam` - """ - model = ChatCompletionToolMessageParam() - if include_optional: - return ChatCompletionToolMessageParam( - role = 'tool', - content = '', - tool_call_id = '' - ) - else: - return ChatCompletionToolMessageParam( - role = 'tool', - content = '', - tool_call_id = '', - ) - """ - - def testChatCompletionToolMessageParam(self): - """Test ChatCompletionToolMessageParam""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_chat_completion_user_message_param.py b/src/together/generated/test/test_chat_completion_user_message_param.py deleted file mode 100644 index 7571b70e..00000000 --- a/src/together/generated/test/test_chat_completion_user_message_param.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.chat_completion_user_message_param import ( - ChatCompletionUserMessageParam, -) - - -class TestChatCompletionUserMessageParam(unittest.TestCase): - """ChatCompletionUserMessageParam unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ChatCompletionUserMessageParam: - """Test ChatCompletionUserMessageParam - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ChatCompletionUserMessageParam` - """ - model = ChatCompletionUserMessageParam() - if include_optional: - return ChatCompletionUserMessageParam( - content = '', - role = 'user', - name = '' - ) - else: - return ChatCompletionUserMessageParam( - content = '', - role = 'user', - ) - """ - - def testChatCompletionUserMessageParam(self): - """Test ChatCompletionUserMessageParam""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_api.py b/src/together/generated/test/test_completion_api.py deleted file mode 100644 index 7a0eaeae..00000000 --- a/src/together/generated/test/test_completion_api.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.api.completion_api import CompletionApi - - -class TestCompletionApi(unittest.IsolatedAsyncioTestCase): - """CompletionApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = CompletionApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_completions(self) -> None: - """Test case for completions - - Create completion - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_choice.py b/src/together/generated/test/test_completion_choice.py deleted file mode 100644 index a3ce4b5c..00000000 --- a/src/together/generated/test/test_completion_choice.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_choice import CompletionChoice - - -class TestCompletionChoice(unittest.TestCase): - """CompletionChoice unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionChoice: - """Test CompletionChoice - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionChoice` - """ - model = CompletionChoice() - if include_optional: - return CompletionChoice( - text = '' - ) - else: - return CompletionChoice( - ) - """ - - def testCompletionChoice(self): - """Test CompletionChoice""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_choices_data_inner.py b/src/together/generated/test/test_completion_choices_data_inner.py deleted file mode 100644 index d71d81c7..00000000 --- a/src/together/generated/test/test_completion_choices_data_inner.py +++ /dev/null @@ -1,67 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_choices_data_inner import ( - CompletionChoicesDataInner, -) - - -class TestCompletionChoicesDataInner(unittest.TestCase): - """CompletionChoicesDataInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionChoicesDataInner: - """Test CompletionChoicesDataInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionChoicesDataInner` - """ - model = CompletionChoicesDataInner() - if include_optional: - return CompletionChoicesDataInner( - text = 'The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture.', - seed = 56, - finish_reason = 'stop', - logprobs = together.generated.models.logprobs_part.LogprobsPart( - token_ids = [ - 1.337 - ], - tokens = [ - '' - ], - token_logprobs = [ - 1.337 - ], ) - ) - else: - return CompletionChoicesDataInner( - ) - """ - - def testCompletionChoicesDataInner(self): - """Test CompletionChoicesDataInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_chunk.py b/src/together/generated/test/test_completion_chunk.py deleted file mode 100644 index 448b44ff..00000000 --- a/src/together/generated/test/test_completion_chunk.py +++ /dev/null @@ -1,77 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. 
Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_chunk import CompletionChunk - - -class TestCompletionChunk(unittest.TestCase): - """CompletionChunk unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionChunk: - """Test CompletionChunk - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionChunk` - """ - model = CompletionChunk() - if include_optional: - return CompletionChunk( - id = '', - token = together.generated.models.completion_token.CompletionToken( - id = 56, - text = '', - logprob = 1.337, - special = True, ), - choices = [ - together.generated.models.completion_choice.CompletionChoice( - text = '', ) - ], - usage = None, - seed = 56, - finish_reason = None - ) - else: - return CompletionChunk( - id = '', - token = together.generated.models.completion_token.CompletionToken( - id = 56, - text = '', - logprob = 1.337, - special = True, ), - choices = [ - together.generated.models.completion_choice.CompletionChoice( - text = '', ) - ], - usage = None, - finish_reason = None, - ) - """ - - def testCompletionChunk(self): - """Test CompletionChunk""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_chunk_usage.py b/src/together/generated/test/test_completion_chunk_usage.py deleted file mode 100644 index 09f1a850..00000000 --- a/src/together/generated/test/test_completion_chunk_usage.py +++ /dev/null @@ -1,58 +0,0 @@ 
-# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_chunk_usage import CompletionChunkUsage - - -class TestCompletionChunkUsage(unittest.TestCase): - """CompletionChunkUsage unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionChunkUsage: - """Test CompletionChunkUsage - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionChunkUsage` - """ - model = CompletionChunkUsage() - if include_optional: - return CompletionChunkUsage( - prompt_tokens = 56, - completion_tokens = 56, - total_tokens = 56 - ) - else: - return CompletionChunkUsage( - prompt_tokens = 56, - completion_tokens = 56, - total_tokens = 56, - ) - """ - - def testCompletionChunkUsage(self): - """Test CompletionChunkUsage""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_event.py b/src/together/generated/test/test_completion_event.py deleted file mode 100644 index 26181ffc..00000000 --- a/src/together/generated/test/test_completion_event.py +++ /dev/null @@ -1,80 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_event import CompletionEvent - - -class TestCompletionEvent(unittest.TestCase): - """CompletionEvent unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionEvent: - """Test CompletionEvent - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionEvent` - """ - model = CompletionEvent() - if include_optional: - return CompletionEvent( - data = together.generated.models.completion_chunk.CompletionChunk( - id = '', - token = together.generated.models.completion_token.CompletionToken( - id = 56, - text = '', - logprob = 1.337, - special = True, ), - choices = [ - together.generated.models.completion_choice.CompletionChoice( - text = '', ) - ], - usage = null, - seed = 56, - finish_reason = null, ) - ) - else: - return CompletionEvent( - data = together.generated.models.completion_chunk.CompletionChunk( - id = '', - token = together.generated.models.completion_token.CompletionToken( - id = 56, - text = '', - logprob = 1.337, - special = True, ), - choices = [ - together.generated.models.completion_choice.CompletionChoice( - text = '', ) - ], - usage = null, - seed = 56, - finish_reason = null, ), - ) - """ - - def testCompletionEvent(self): - """Test CompletionEvent""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_request.py b/src/together/generated/test/test_completion_request.py deleted file mode 100644 index 3a823073..00000000 --- a/src/together/generated/test/test_completion_request.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together 
REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_request import CompletionRequest - - -class TestCompletionRequest(unittest.TestCase): - """CompletionRequest unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionRequest: - """Test CompletionRequest - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionRequest` - """ - model = CompletionRequest() - if include_optional: - return CompletionRequest( - prompt = '[INST] What is the capital of France? [/INST]', - model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', - max_tokens = 56, - stop = [ - '' - ], - temperature = 1.337, - top_p = 1.337, - top_k = 56, - repetition_penalty = 1.337, - stream = True, - logprobs = 0, - echo = True, - n = 1, - safety_model = 'safety_model_name', - min_p = 1.337, - presence_penalty = 1.337, - frequency_penalty = 1.337, - logit_bias = {1024=-10.5, 105=21.4}, - seed = 42 - ) - else: - return CompletionRequest( - prompt = '[INST] What is the capital of France? 
[/INST]', - model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', - ) - """ - - def testCompletionRequest(self): - """Test CompletionRequest""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_request_model.py b/src/together/generated/test/test_completion_request_model.py deleted file mode 100644 index d3cfa734..00000000 --- a/src/together/generated/test/test_completion_request_model.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_request_model import CompletionRequestModel - - -class TestCompletionRequestModel(unittest.TestCase): - """CompletionRequestModel unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionRequestModel: - """Test CompletionRequestModel - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionRequestModel` - """ - model = CompletionRequestModel() - if include_optional: - return CompletionRequestModel( - ) - else: - return CompletionRequestModel( - ) - """ - - def testCompletionRequestModel(self): - """Test CompletionRequestModel""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_request_safety_model.py 
b/src/together/generated/test/test_completion_request_safety_model.py deleted file mode 100644 index fb7228a9..00000000 --- a/src/together/generated/test/test_completion_request_safety_model.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_request_safety_model import ( - CompletionRequestSafetyModel, -) - - -class TestCompletionRequestSafetyModel(unittest.TestCase): - """CompletionRequestSafetyModel unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionRequestSafetyModel: - """Test CompletionRequestSafetyModel - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionRequestSafetyModel` - """ - model = CompletionRequestSafetyModel() - if include_optional: - return CompletionRequestSafetyModel( - ) - else: - return CompletionRequestSafetyModel( - ) - """ - - def testCompletionRequestSafetyModel(self): - """Test CompletionRequestSafetyModel""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_response.py b/src/together/generated/test/test_completion_response.py deleted file mode 100644 index f4003cf5..00000000 --- a/src/together/generated/test/test_completion_response.py +++ /dev/null @@ -1,114 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_response import CompletionResponse - - -class TestCompletionResponse(unittest.TestCase): - """CompletionResponse unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionResponse: - """Test CompletionResponse - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionResponse` - """ - model = CompletionResponse() - if include_optional: - return CompletionResponse( - id = '', - choices = [ - together.generated.models.completion_choices_data_inner.CompletionChoicesData_inner( - text = 'The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture.', - seed = 56, - finish_reason = 'stop', - logprobs = together.generated.models.logprobs_part.LogprobsPart( - token_ids = [ - 1.337 - ], - tokens = [ - '' - ], - token_logprobs = [ - 1.337 - ], ), ) - ], - prompt = [ - together.generated.models.prompt_part_inner.PromptPart_inner( - text = '[INST] What is the capital of France? 
[/INST]', - logprobs = together.generated.models.logprobs_part.LogprobsPart( - token_ids = [ - 1.337 - ], - tokens = [ - '' - ], - token_logprobs = [ - 1.337 - ], ), ) - ], - usage = together.generated.models.usage_data.UsageData( - prompt_tokens = 56, - completion_tokens = 56, - total_tokens = 56, ), - created = 56, - model = '', - object = 'text_completion' - ) - else: - return CompletionResponse( - id = '', - choices = [ - together.generated.models.completion_choices_data_inner.CompletionChoicesData_inner( - text = 'The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture.', - seed = 56, - finish_reason = 'stop', - logprobs = together.generated.models.logprobs_part.LogprobsPart( - token_ids = [ - 1.337 - ], - tokens = [ - '' - ], - token_logprobs = [ - 1.337 - ], ), ) - ], - usage = together.generated.models.usage_data.UsageData( - prompt_tokens = 56, - completion_tokens = 56, - total_tokens = 56, ), - created = 56, - model = '', - object = 'text_completion', - ) - """ - - def testCompletionResponse(self): - """Test CompletionResponse""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_stream.py b/src/together/generated/test/test_completion_stream.py deleted file mode 100644 index 9edbd934..00000000 --- a/src/together/generated/test/test_completion_stream.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_stream import CompletionStream - - -class TestCompletionStream(unittest.TestCase): - """CompletionStream unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionStream: - """Test CompletionStream - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionStream` - """ - model = CompletionStream() - if include_optional: - return CompletionStream( - data = '[DONE]' - ) - else: - return CompletionStream( - data = '[DONE]', - ) - """ - - def testCompletionStream(self): - """Test CompletionStream""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_completion_token.py b/src/together/generated/test/test_completion_token.py deleted file mode 100644 index a15263c6..00000000 --- a/src/together/generated/test/test_completion_token.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.completion_token import CompletionToken - - -class TestCompletionToken(unittest.TestCase): - """CompletionToken unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CompletionToken: - """Test CompletionToken - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CompletionToken` - """ - model = CompletionToken() - if include_optional: - return CompletionToken( - id = 56, - text = '', - logprob = 1.337, - special = True - ) - else: - return CompletionToken( - id = 56, - text = '', - logprob = 1.337, - special = True, - ) - """ - - def testCompletionToken(self): - """Test CompletionToken""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_create_endpoint_request.py b/src/together/generated/test/test_create_endpoint_request.py deleted file mode 100644 index b5e35dd0..00000000 --- a/src/together/generated/test/test_create_endpoint_request.py +++ /dev/null @@ -1,66 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.create_endpoint_request import CreateEndpointRequest - - -class TestCreateEndpointRequest(unittest.TestCase): - """CreateEndpointRequest unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> CreateEndpointRequest: - """Test CreateEndpointRequest - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `CreateEndpointRequest` - """ - model = CreateEndpointRequest() - if include_optional: - return CreateEndpointRequest( - display_name = '', - model = '', - hardware = '', - autoscaling = together.generated.models.autoscaling.Autoscaling( - min_replicas = 56, - max_replicas = 56, ), - disable_prompt_cache = True, - disable_speculative_decoding = True, - state = 'STARTED' - ) - else: - return CreateEndpointRequest( - model = '', - hardware = '', - autoscaling = together.generated.models.autoscaling.Autoscaling( - min_replicas = 56, - max_replicas = 56, ), - ) - """ - - def testCreateEndpointRequest(self): - """Test CreateEndpointRequest""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_dedicated_endpoint.py b/src/together/generated/test/test_dedicated_endpoint.py deleted file mode 100644 index 61edfee9..00000000 --- a/src/together/generated/test/test_dedicated_endpoint.py +++ /dev/null @@ -1,78 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.dedicated_endpoint import DedicatedEndpoint - - -class TestDedicatedEndpoint(unittest.TestCase): - """DedicatedEndpoint unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> DedicatedEndpoint: - """Test DedicatedEndpoint - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `DedicatedEndpoint` - """ - model = DedicatedEndpoint() - if include_optional: - return DedicatedEndpoint( - object = 'endpoint', - id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', - name = 'devuser/meta-llama/Llama-3-8b-chat-hf-a32b82a1', - display_name = 'My Llama3 70b endpoint', - model = 'meta-llama/Llama-3-8b-chat-hf', - hardware = '1x_nvidia_a100_80gb_sxm', - type = 'dedicated', - owner = 'devuser', - state = 'STARTED', - autoscaling = together.generated.models.autoscaling.Autoscaling( - min_replicas = 56, - max_replicas = 56, ), - created_at = '2025-02-04T10:43:55.405Z' - ) - else: - return DedicatedEndpoint( - object = 'endpoint', - id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', - name = 'devuser/meta-llama/Llama-3-8b-chat-hf-a32b82a1', - display_name = 'My Llama3 70b endpoint', - model = 'meta-llama/Llama-3-8b-chat-hf', - hardware = '1x_nvidia_a100_80gb_sxm', - type = 'dedicated', - owner = 'devuser', - state = 'STARTED', - autoscaling = together.generated.models.autoscaling.Autoscaling( - min_replicas = 56, - max_replicas = 56, ), - created_at = '2025-02-04T10:43:55.405Z', - ) - """ - - def testDedicatedEndpoint(self): - """Test DedicatedEndpoint""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_embeddings_api.py 
b/src/together/generated/test/test_embeddings_api.py deleted file mode 100644 index c63a7b1e..00000000 --- a/src/together/generated/test/test_embeddings_api.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.api.embeddings_api import EmbeddingsApi - - -class TestEmbeddingsApi(unittest.IsolatedAsyncioTestCase): - """EmbeddingsApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = EmbeddingsApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_embeddings(self) -> None: - """Test case for embeddings - - Create embedding - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_embeddings_request.py b/src/together/generated/test/test_embeddings_request.py deleted file mode 100644 index 0652a2ff..00000000 --- a/src/together/generated/test/test_embeddings_request.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.embeddings_request import EmbeddingsRequest - - -class TestEmbeddingsRequest(unittest.TestCase): - """EmbeddingsRequest unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> EmbeddingsRequest: - """Test EmbeddingsRequest - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `EmbeddingsRequest` - """ - model = EmbeddingsRequest() - if include_optional: - return EmbeddingsRequest( - model = 'togethercomputer/m2-bert-80M-8k-retrieval', - input = Our solar system orbits the Milky Way galaxy at about 515,000 mph - ) - else: - return EmbeddingsRequest( - model = 'togethercomputer/m2-bert-80M-8k-retrieval', - input = Our solar system orbits the Milky Way galaxy at about 515,000 mph, - ) - """ - - def testEmbeddingsRequest(self): - """Test EmbeddingsRequest""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_embeddings_request_input.py b/src/together/generated/test/test_embeddings_request_input.py deleted file mode 100644 index bb51de9a..00000000 --- a/src/together/generated/test/test_embeddings_request_input.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.embeddings_request_input import EmbeddingsRequestInput - - -class TestEmbeddingsRequestInput(unittest.TestCase): - """EmbeddingsRequestInput unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> EmbeddingsRequestInput: - """Test EmbeddingsRequestInput - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `EmbeddingsRequestInput` - """ - model = EmbeddingsRequestInput() - if include_optional: - return EmbeddingsRequestInput( - ) - else: - return EmbeddingsRequestInput( - ) - """ - - def testEmbeddingsRequestInput(self): - """Test EmbeddingsRequestInput""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_embeddings_request_model.py b/src/together/generated/test/test_embeddings_request_model.py deleted file mode 100644 index e31f5837..00000000 --- a/src/together/generated/test/test_embeddings_request_model.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.embeddings_request_model import EmbeddingsRequestModel - - -class TestEmbeddingsRequestModel(unittest.TestCase): - """EmbeddingsRequestModel unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> EmbeddingsRequestModel: - """Test EmbeddingsRequestModel - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `EmbeddingsRequestModel` - """ - model = EmbeddingsRequestModel() - if include_optional: - return EmbeddingsRequestModel( - ) - else: - return EmbeddingsRequestModel( - ) - """ - - def testEmbeddingsRequestModel(self): - """Test EmbeddingsRequestModel""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_embeddings_response.py b/src/together/generated/test/test_embeddings_response.py deleted file mode 100644 index 0a09847c..00000000 --- a/src/together/generated/test/test_embeddings_response.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.embeddings_response import EmbeddingsResponse - - -class TestEmbeddingsResponse(unittest.TestCase): - """EmbeddingsResponse unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> EmbeddingsResponse: - """Test EmbeddingsResponse - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `EmbeddingsResponse` - """ - model = EmbeddingsResponse() - if include_optional: - return EmbeddingsResponse( - object = 'list', - model = '', - data = [ - together.generated.models.embeddings_response_data_inner.EmbeddingsResponse_data_inner( - object = 'embedding', - embedding = [ - 1.337 - ], - index = 56, ) - ] - ) - else: - return EmbeddingsResponse( - object = 'list', - model = '', - data = [ - together.generated.models.embeddings_response_data_inner.EmbeddingsResponse_data_inner( - object = 'embedding', - embedding = [ - 1.337 - ], - index = 56, ) - ], - ) - """ - - def testEmbeddingsResponse(self): - """Test EmbeddingsResponse""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_embeddings_response_data_inner.py b/src/together/generated/test/test_embeddings_response_data_inner.py deleted file mode 100644 index 88a95c8b..00000000 --- a/src/together/generated/test/test_embeddings_response_data_inner.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.embeddings_response_data_inner import ( - EmbeddingsResponseDataInner, -) - - -class TestEmbeddingsResponseDataInner(unittest.TestCase): - """EmbeddingsResponseDataInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> EmbeddingsResponseDataInner: - """Test EmbeddingsResponseDataInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `EmbeddingsResponseDataInner` - """ - model = EmbeddingsResponseDataInner() - if include_optional: - return EmbeddingsResponseDataInner( - object = 'embedding', - embedding = [ - 1.337 - ], - index = 56 - ) - else: - return EmbeddingsResponseDataInner( - object = 'embedding', - embedding = [ - 1.337 - ], - index = 56, - ) - """ - - def testEmbeddingsResponseDataInner(self): - """Test EmbeddingsResponseDataInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_endpoint_pricing.py b/src/together/generated/test/test_endpoint_pricing.py deleted file mode 100644 index e99001c4..00000000 --- a/src/together/generated/test/test_endpoint_pricing.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.endpoint_pricing import EndpointPricing - - -class TestEndpointPricing(unittest.TestCase): - """EndpointPricing unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> EndpointPricing: - """Test EndpointPricing - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `EndpointPricing` - """ - model = EndpointPricing() - if include_optional: - return EndpointPricing( - cents_per_minute = 1.337 - ) - else: - return EndpointPricing( - cents_per_minute = 1.337, - ) - """ - - def testEndpointPricing(self): - """Test EndpointPricing""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_endpoints_api.py b/src/together/generated/test/test_endpoints_api.py deleted file mode 100644 index 9d384219..00000000 --- a/src/together/generated/test/test_endpoints_api.py +++ /dev/null @@ -1,66 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.api.endpoints_api import EndpointsApi - - -class TestEndpointsApi(unittest.IsolatedAsyncioTestCase): - """EndpointsApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = EndpointsApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_create_endpoint(self) -> None: - """Test case for create_endpoint - - Create a dedicated endpoint, it will start automatically - """ - pass - - async def test_delete_endpoint(self) -> None: - """Test case for delete_endpoint - - Delete endpoint - """ - pass - - async def test_get_endpoint(self) -> None: - """Test case for get_endpoint - - Get endpoint by ID - """ - pass - - async def test_list_endpoints(self) -> None: - """Test case for list_endpoints - - List all endpoints, can be filtered by type - """ - pass - - async def test_update_endpoint(self) -> None: - """Test case for update_endpoint - - Update endpoint, this can also be used to start or stop a dedicated endpoint - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_error_data.py b/src/together/generated/test/test_error_data.py deleted file mode 100644 index 0f91ac2e..00000000 --- a/src/together/generated/test/test_error_data.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.error_data import ErrorData - - -class TestErrorData(unittest.TestCase): - """ErrorData unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ErrorData: - """Test ErrorData - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ErrorData` - """ - model = ErrorData() - if include_optional: - return ErrorData( - error = together.generated.models.error_data_error.ErrorData_error( - message = '', - type = '', - param = '', - code = '', ) - ) - else: - return ErrorData( - error = together.generated.models.error_data_error.ErrorData_error( - message = '', - type = '', - param = '', - code = '', ), - ) - """ - - def testErrorData(self): - """Test ErrorData""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_error_data_error.py b/src/together/generated/test/test_error_data_error.py deleted file mode 100644 index a6952f0a..00000000 --- a/src/together/generated/test/test_error_data_error.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.error_data_error import ErrorDataError - - -class TestErrorDataError(unittest.TestCase): - """ErrorDataError unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ErrorDataError: - """Test ErrorDataError - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ErrorDataError` - """ - model = ErrorDataError() - if include_optional: - return ErrorDataError( - message = '', - type = '', - param = '', - code = '' - ) - else: - return ErrorDataError( - message = '', - type = '', - ) - """ - - def testErrorDataError(self): - """Test ErrorDataError""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_file_delete_response.py b/src/together/generated/test/test_file_delete_response.py deleted file mode 100644 index 5e0cf618..00000000 --- a/src/together/generated/test/test_file_delete_response.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.file_delete_response import FileDeleteResponse - - -class TestFileDeleteResponse(unittest.TestCase): - """FileDeleteResponse unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FileDeleteResponse: - """Test FileDeleteResponse - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FileDeleteResponse` - """ - model = FileDeleteResponse() - if include_optional: - return FileDeleteResponse( - id = '', - deleted = True - ) - else: - return FileDeleteResponse( - ) - """ - - def testFileDeleteResponse(self): - """Test FileDeleteResponse""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_file_list.py b/src/together/generated/test/test_file_list.py deleted file mode 100644 index e3984f30..00000000 --- a/src/together/generated/test/test_file_list.py +++ /dev/null @@ -1,76 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.file_list import FileList - - -class TestFileList(unittest.TestCase): - """FileList unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FileList: - """Test FileList - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FileList` - """ - model = FileList() - if include_optional: - return FileList( - data = [ - together.generated.models.file_response.FileResponse( - id = '', - object = 'file', - created_at = 1715021438, - filename = 'my_file.jsonl', - bytes = 2664, - purpose = 'fine-tune', - processed = True, - file_type = 'jsonl', - line_count = 56, ) - ] - ) - else: - return FileList( - data = [ - together.generated.models.file_response.FileResponse( - id = '', - object = 'file', - created_at = 1715021438, - filename = 'my_file.jsonl', - bytes = 2664, - purpose = 'fine-tune', - processed = True, - file_type = 'jsonl', - line_count = 56, ) - ], - ) - """ - - def testFileList(self): - """Test FileList""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_file_object.py b/src/together/generated/test/test_file_object.py deleted file mode 100644 index a242cc02..00000000 --- a/src/together/generated/test/test_file_object.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.file_object import FileObject - - -class TestFileObject(unittest.TestCase): - """FileObject unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FileObject: - """Test FileObject - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FileObject` - """ - model = FileObject() - if include_optional: - return FileObject( - object = '', - id = '', - filename = '', - size = 56 - ) - else: - return FileObject( - ) - """ - - def testFileObject(self): - """Test FileObject""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_file_response.py b/src/together/generated/test/test_file_response.py deleted file mode 100644 index 06164546..00000000 --- a/src/together/generated/test/test_file_response.py +++ /dev/null @@ -1,70 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.file_response import FileResponse - - -class TestFileResponse(unittest.TestCase): - """FileResponse unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FileResponse: - """Test FileResponse - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FileResponse` - """ - model = FileResponse() - if include_optional: - return FileResponse( - id = '', - object = 'file', - created_at = 1715021438, - filename = 'my_file.jsonl', - bytes = 2664, - purpose = 'fine-tune', - processed = True, - file_type = 'jsonl', - line_count = 56 - ) - else: - return FileResponse( - id = '', - object = 'file', - created_at = 1715021438, - filename = 'my_file.jsonl', - bytes = 2664, - purpose = 'fine-tune', - processed = True, - file_type = 'jsonl', - line_count = 56, - ) - """ - - def testFileResponse(self): - """Test FileResponse""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_files_api.py b/src/together/generated/test/test_files_api.py deleted file mode 100644 index 0e5269ff..00000000 --- a/src/together/generated/test/test_files_api.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.api.files_api import FilesApi - - -class TestFilesApi(unittest.IsolatedAsyncioTestCase): - """FilesApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = FilesApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_files_get(self) -> None: - """Test case for files_get - - List all files - """ - pass - - async def test_files_id_content_get(self) -> None: - """Test case for files_id_content_get - - Get file contents - """ - pass - - async def test_files_id_delete(self) -> None: - """Test case for files_id_delete - - Delete a file - """ - pass - - async def test_files_id_get(self) -> None: - """Test case for files_id_get - - List file - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_fine_tune_event.py b/src/together/generated/test/test_fine_tune_event.py deleted file mode 100644 index f61a7b5f..00000000 --- a/src/together/generated/test/test_fine_tune_event.py +++ /dev/null @@ -1,79 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.fine_tune_event import FineTuneEvent - - -class TestFineTuneEvent(unittest.TestCase): - """FineTuneEvent unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FineTuneEvent: - """Test FineTuneEvent - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FineTuneEvent` - """ - model = FineTuneEvent() - if include_optional: - return FineTuneEvent( - object = 'fine-tune-event', - created_at = '', - level = ERROR_TO_EXAMPLE_VALUE, - message = '', - type = 'job_pending', - param_count = 56, - token_count = 56, - total_steps = 56, - wandb_url = '', - step = 56, - checkpoint_path = '', - model_path = '', - training_offset = 56, - hash = '' - ) - else: - return FineTuneEvent( - object = 'fine-tune-event', - created_at = '', - message = '', - type = 'job_pending', - param_count = 56, - token_count = 56, - total_steps = 56, - wandb_url = '', - step = 56, - checkpoint_path = '', - model_path = '', - training_offset = 56, - hash = '', - ) - """ - - def testFineTuneEvent(self): - """Test FineTuneEvent""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_fine_tunes_post_request.py b/src/together/generated/test/test_fine_tunes_post_request.py deleted file mode 100644 index 6e8a1471..00000000 --- a/src/together/generated/test/test_fine_tunes_post_request.py +++ /dev/null @@ -1,76 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.fine_tunes_post_request import FineTunesPostRequest - - -class TestFineTunesPostRequest(unittest.TestCase): - """FineTunesPostRequest unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FineTunesPostRequest: - """Test FineTunesPostRequest - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FineTunesPostRequest` - """ - model = FineTunesPostRequest() - if include_optional: - return FineTunesPostRequest( - training_file = '', - validation_file = '', - model = '', - n_epochs = 56, - n_checkpoints = 56, - n_evals = 56, - batch_size = 56, - learning_rate = 1.337, - lr_scheduler = together.generated.models.lr_scheduler.LRScheduler( - lr_scheduler_type = '', - lr_scheduler_args = together.generated.models.linear_lr_scheduler_args.LinearLRSchedulerArgs( - min_lr_ratio = 1.337, ), ), - warmup_ratio = 1.337, - max_grad_norm = 1.337, - weight_decay = 1.337, - suffix = '', - wandb_api_key = '', - wandb_base_url = '', - wandb_project_name = '', - wandb_name = '', - train_on_inputs = True, - training_type = together.generated.models._fine_tunes_post_request_training_type._fine_tunes_post_request_training_type() - ) - else: - return FineTunesPostRequest( - training_file = '', - model = '', - ) - """ - - def testFineTunesPostRequest(self): - """Test FineTunesPostRequest""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py 
b/src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py deleted file mode 100644 index 06d1d703..00000000 --- a/src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.fine_tunes_post_request_train_on_inputs import ( - FineTunesPostRequestTrainOnInputs, -) - - -class TestFineTunesPostRequestTrainOnInputs(unittest.TestCase): - """FineTunesPostRequestTrainOnInputs unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FineTunesPostRequestTrainOnInputs: - """Test FineTunesPostRequestTrainOnInputs - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FineTunesPostRequestTrainOnInputs` - """ - model = FineTunesPostRequestTrainOnInputs() - if include_optional: - return FineTunesPostRequestTrainOnInputs( - ) - else: - return FineTunesPostRequestTrainOnInputs( - ) - """ - - def testFineTunesPostRequestTrainOnInputs(self): - """Test FineTunesPostRequestTrainOnInputs""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_fine_tunes_post_request_training_type.py b/src/together/generated/test/test_fine_tunes_post_request_training_type.py deleted file mode 100644 index b92881ff..00000000 --- a/src/together/generated/test/test_fine_tunes_post_request_training_type.py +++ /dev/null @@ -1,62 +0,0 
@@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.fine_tunes_post_request_training_type import ( - FineTunesPostRequestTrainingType, -) - - -class TestFineTunesPostRequestTrainingType(unittest.TestCase): - """FineTunesPostRequestTrainingType unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FineTunesPostRequestTrainingType: - """Test FineTunesPostRequestTrainingType - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FineTunesPostRequestTrainingType` - """ - model = FineTunesPostRequestTrainingType() - if include_optional: - return FineTunesPostRequestTrainingType( - type = 'Full', - lora_r = 56, - lora_alpha = 56, - lora_dropout = 1.337, - lora_trainable_modules = 'all-linear' - ) - else: - return FineTunesPostRequestTrainingType( - type = 'Full', - lora_r = 56, - lora_alpha = 56, - ) - """ - - def testFineTunesPostRequestTrainingType(self): - """Test FineTunesPostRequestTrainingType""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_fine_tuning_api.py b/src/together/generated/test/test_fine_tuning_api.py deleted file mode 100644 index dab43fa7..00000000 --- a/src/together/generated/test/test_fine_tuning_api.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.api.fine_tuning_api import FineTuningApi - - -class TestFineTuningApi(unittest.IsolatedAsyncioTestCase): - """FineTuningApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = FineTuningApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_fine_tunes_get(self) -> None: - """Test case for fine_tunes_get - - List all jobs - """ - pass - - async def test_fine_tunes_id_cancel_post(self) -> None: - """Test case for fine_tunes_id_cancel_post - - Cancel job - """ - pass - - async def test_fine_tunes_id_events_get(self) -> None: - """Test case for fine_tunes_id_events_get - - List job events - """ - pass - - async def test_fine_tunes_id_get(self) -> None: - """Test case for fine_tunes_id_get - - List job - """ - pass - - async def test_fine_tunes_post(self) -> None: - """Test case for fine_tunes_post - - Create job - """ - pass - - async def test_finetune_download_get(self) -> None: - """Test case for finetune_download_get - - Download model - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_finetune_download_result.py b/src/together/generated/test/test_finetune_download_result.py deleted file mode 100644 index a0136246..00000000 --- a/src/together/generated/test/test_finetune_download_result.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.finetune_download_result import FinetuneDownloadResult - - -class TestFinetuneDownloadResult(unittest.TestCase): - """FinetuneDownloadResult unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FinetuneDownloadResult: - """Test FinetuneDownloadResult - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FinetuneDownloadResult` - """ - model = FinetuneDownloadResult() - if include_optional: - return FinetuneDownloadResult( - object = ERROR_TO_EXAMPLE_VALUE, - id = '', - checkpoint_step = 56, - filename = '', - size = 56 - ) - else: - return FinetuneDownloadResult( - ) - """ - - def testFinetuneDownloadResult(self): - """Test FinetuneDownloadResult""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_finetune_event_levels.py b/src/together/generated/test/test_finetune_event_levels.py deleted file mode 100644 index c82e8354..00000000 --- a/src/together/generated/test/test_finetune_event_levels.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.finetune_event_levels import FinetuneEventLevels - - -class TestFinetuneEventLevels(unittest.TestCase): - """FinetuneEventLevels unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def testFinetuneEventLevels(self): - """Test FinetuneEventLevels""" - # inst = FinetuneEventLevels() - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_finetune_event_type.py b/src/together/generated/test/test_finetune_event_type.py deleted file mode 100644 index 6340f74d..00000000 --- a/src/together/generated/test/test_finetune_event_type.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.finetune_event_type import FinetuneEventType - - -class TestFinetuneEventType(unittest.TestCase): - """FinetuneEventType unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def testFinetuneEventType(self): - """Test FinetuneEventType""" - # inst = FinetuneEventType() - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_finetune_job_status.py b/src/together/generated/test/test_finetune_job_status.py deleted file mode 100644 index 2bbee5ee..00000000 --- a/src/together/generated/test/test_finetune_job_status.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.finetune_job_status import FinetuneJobStatus - - -class TestFinetuneJobStatus(unittest.TestCase): - """FinetuneJobStatus unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def testFinetuneJobStatus(self): - """Test FinetuneJobStatus""" - # inst = FinetuneJobStatus() - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_finetune_list.py b/src/together/generated/test/test_finetune_list.py deleted file mode 100644 index 40d16304..00000000 --- a/src/together/generated/test/test_finetune_list.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.finetune_list import FinetuneList - - -class TestFinetuneList(unittest.TestCase): - """FinetuneList unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FinetuneList: - """Test FinetuneList - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FinetuneList` - """ - model = FinetuneList() - if include_optional: - return FinetuneList( - data = ERROR_TO_EXAMPLE_VALUE - ) - else: - return FinetuneList( - data = ERROR_TO_EXAMPLE_VALUE, - ) - """ - - def testFinetuneList(self): - """Test FinetuneList""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_finetune_list_events.py 
b/src/together/generated/test/test_finetune_list_events.py deleted file mode 100644 index 5170de82..00000000 --- a/src/together/generated/test/test_finetune_list_events.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.finetune_list_events import FinetuneListEvents - - -class TestFinetuneListEvents(unittest.TestCase): - """FinetuneListEvents unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FinetuneListEvents: - """Test FinetuneListEvents - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FinetuneListEvents` - """ - model = FinetuneListEvents() - if include_optional: - return FinetuneListEvents( - data = ERROR_TO_EXAMPLE_VALUE - ) - else: - return FinetuneListEvents( - data = ERROR_TO_EXAMPLE_VALUE, - ) - """ - - def testFinetuneListEvents(self): - """Test FinetuneListEvents""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_finetune_response.py b/src/together/generated/test/test_finetune_response.py deleted file mode 100644 index 288a4a9b..00000000 --- a/src/together/generated/test/test_finetune_response.py +++ /dev/null @@ -1,89 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.finetune_response import FinetuneResponse - - -class TestFinetuneResponse(unittest.TestCase): - """FinetuneResponse unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FinetuneResponse: - """Test FinetuneResponse - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FinetuneResponse` - """ - model = FinetuneResponse() - if include_optional: - return FinetuneResponse( - id = '', - training_file = '', - validation_file = '', - model = '', - model_output_name = '', - model_output_path = '', - trainingfile_numlines = 56, - trainingfile_size = 56, - created_at = '', - updated_at = '', - n_epochs = 56, - n_checkpoints = 56, - n_evals = 56, - batch_size = 56, - learning_rate = 1.337, - lr_scheduler = together.generated.models.lr_scheduler.LRScheduler( - lr_scheduler_type = '', - lr_scheduler_args = together.generated.models.linear_lr_scheduler_args.LinearLRSchedulerArgs( - min_lr_ratio = 1.337, ), ), - warmup_ratio = 1.337, - max_grad_norm = 1.337, - weight_decay = 1.337, - eval_steps = 56, - train_on_inputs = None, - training_type = together.generated.models._fine_tunes_post_request_training_type._fine_tunes_post_request_training_type(), - status = 'pending', - job_id = '', - events = ERROR_TO_EXAMPLE_VALUE, - token_count = 56, - param_count = 56, - total_price = 56, - epochs_completed = 56, - queue_depth = 56, - wandb_project_name = '', - wandb_url = '' - ) - else: - return FinetuneResponse( - id = '', - status = 'pending', - ) - """ - - def testFinetuneResponse(self): - """Test FinetuneResponse""" - # inst_req_only = 
self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_finetune_response_train_on_inputs.py b/src/together/generated/test/test_finetune_response_train_on_inputs.py deleted file mode 100644 index f6133122..00000000 --- a/src/together/generated/test/test_finetune_response_train_on_inputs.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.finetune_response_train_on_inputs import ( - FinetuneResponseTrainOnInputs, -) - - -class TestFinetuneResponseTrainOnInputs(unittest.TestCase): - """FinetuneResponseTrainOnInputs unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FinetuneResponseTrainOnInputs: - """Test FinetuneResponseTrainOnInputs - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FinetuneResponseTrainOnInputs` - """ - model = FinetuneResponseTrainOnInputs() - if include_optional: - return FinetuneResponseTrainOnInputs( - ) - else: - return FinetuneResponseTrainOnInputs( - ) - """ - - def testFinetuneResponseTrainOnInputs(self): - """Test FinetuneResponseTrainOnInputs""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_finish_reason.py b/src/together/generated/test/test_finish_reason.py deleted file mode 
100644 index 02204fb6..00000000 --- a/src/together/generated/test/test_finish_reason.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.finish_reason import FinishReason - - -class TestFinishReason(unittest.TestCase): - """FinishReason unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def testFinishReason(self): - """Test FinishReason""" - # inst = FinishReason() - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_full_training_type.py b/src/together/generated/test/test_full_training_type.py deleted file mode 100644 index 303187e3..00000000 --- a/src/together/generated/test/test_full_training_type.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.full_training_type import FullTrainingType - - -class TestFullTrainingType(unittest.TestCase): - """FullTrainingType unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FullTrainingType: - """Test FullTrainingType - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `FullTrainingType` - """ - model = FullTrainingType() - if include_optional: - return FullTrainingType( - type = 'Full' - ) - else: - return FullTrainingType( - type = 'Full', - ) - """ - - def testFullTrainingType(self): - """Test FullTrainingType""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_hardware_api.py b/src/together/generated/test/test_hardware_api.py deleted file mode 100644 index a347ff1c..00000000 --- a/src/together/generated/test/test_hardware_api.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.api.hardware_api import HardwareApi - - -class TestHardwareApi(unittest.IsolatedAsyncioTestCase): - """HardwareApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = HardwareApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_list_hardware(self) -> None: - """Test case for list_hardware - - List available hardware configurations - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_hardware_availability.py b/src/together/generated/test/test_hardware_availability.py deleted file mode 100644 index cf7d4016..00000000 --- a/src/together/generated/test/test_hardware_availability.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.hardware_availability import HardwareAvailability - - -class TestHardwareAvailability(unittest.TestCase): - """HardwareAvailability unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> HardwareAvailability: - """Test HardwareAvailability - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `HardwareAvailability` - """ - model = HardwareAvailability() - if include_optional: - return HardwareAvailability( - status = 'available' - ) - else: - return HardwareAvailability( - status = 'available', - ) - """ - - def testHardwareAvailability(self): - """Test HardwareAvailability""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_hardware_spec.py b/src/together/generated/test/test_hardware_spec.py deleted file mode 100644 index f9888c33..00000000 --- a/src/together/generated/test/test_hardware_spec.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.hardware_spec import HardwareSpec - - -class TestHardwareSpec(unittest.TestCase): - """HardwareSpec unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> HardwareSpec: - """Test HardwareSpec - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `HardwareSpec` - """ - model = HardwareSpec() - if include_optional: - return HardwareSpec( - gpu_type = '', - gpu_link = '', - gpu_memory = 1.337, - gpu_count = 56 - ) - else: - return HardwareSpec( - gpu_type = '', - gpu_link = '', - gpu_memory = 1.337, - gpu_count = 56, - ) - """ - - def testHardwareSpec(self): - """Test HardwareSpec""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_hardware_with_status.py b/src/together/generated/test/test_hardware_with_status.py deleted file mode 100644 index 8727755f..00000000 --- a/src/together/generated/test/test_hardware_with_status.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.hardware_with_status import HardwareWithStatus - - -class TestHardwareWithStatus(unittest.TestCase): - """HardwareWithStatus unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> HardwareWithStatus: - """Test HardwareWithStatus - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `HardwareWithStatus` - """ - model = HardwareWithStatus() - if include_optional: - return HardwareWithStatus( - object = 'hardware', - id = '', - pricing = together.generated.models.endpoint_pricing.EndpointPricing( - cents_per_minute = 1.337, ), - specs = together.generated.models.hardware_spec.HardwareSpec( - gpu_type = '', - gpu_link = '', - gpu_memory = 1.337, - gpu_count = 56, ), - availability = together.generated.models.hardware_availability.HardwareAvailability( - status = 'available', ), - updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f') - ) - else: - return HardwareWithStatus( - object = 'hardware', - id = '', - pricing = together.generated.models.endpoint_pricing.EndpointPricing( - cents_per_minute = 1.337, ), - specs = together.generated.models.hardware_spec.HardwareSpec( - gpu_type = '', - gpu_link = '', - gpu_memory = 1.337, - gpu_count = 56, ), - updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), - ) - """ - - def testHardwareWithStatus(self): - """Test HardwareWithStatus""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_image_response.py b/src/together/generated/test/test_image_response.py deleted file mode 100644 index fd124ab7..00000000 
--- a/src/together/generated/test/test_image_response.py +++ /dev/null @@ -1,70 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.image_response import ImageResponse - - -class TestImageResponse(unittest.TestCase): - """ImageResponse unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ImageResponse: - """Test ImageResponse - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ImageResponse` - """ - model = ImageResponse() - if include_optional: - return ImageResponse( - id = '', - model = '', - object = 'list', - data = [ - together.generated.models.image_response_data_inner.ImageResponse_data_inner( - index = 56, - b64_json = '', - url = '', ) - ] - ) - else: - return ImageResponse( - id = '', - model = '', - object = 'list', - data = [ - together.generated.models.image_response_data_inner.ImageResponse_data_inner( - index = 56, - b64_json = '', - url = '', ) - ], - ) - """ - - def testImageResponse(self): - """Test ImageResponse""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_image_response_data_inner.py b/src/together/generated/test/test_image_response_data_inner.py deleted file mode 100644 index f12a697c..00000000 --- a/src/together/generated/test/test_image_response_data_inner.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. 
Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.image_response_data_inner import ImageResponseDataInner - - -class TestImageResponseDataInner(unittest.TestCase): - """ImageResponseDataInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ImageResponseDataInner: - """Test ImageResponseDataInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ImageResponseDataInner` - """ - model = ImageResponseDataInner() - if include_optional: - return ImageResponseDataInner( - index = 56, - b64_json = '', - url = '' - ) - else: - return ImageResponseDataInner( - index = 56, - ) - """ - - def testImageResponseDataInner(self): - """Test ImageResponseDataInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_images_api.py b/src/together/generated/test/test_images_api.py deleted file mode 100644 index 14888a0f..00000000 --- a/src/together/generated/test/test_images_api.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.api.images_api import ImagesApi - - -class TestImagesApi(unittest.IsolatedAsyncioTestCase): - """ImagesApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = ImagesApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_images_generations_post(self) -> None: - """Test case for images_generations_post - - Create image - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_images_generations_post_request.py b/src/together/generated/test/test_images_generations_post_request.py deleted file mode 100644 index 4376a118..00000000 --- a/src/together/generated/test/test_images_generations_post_request.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.images_generations_post_request import ( - ImagesGenerationsPostRequest, -) - - -class TestImagesGenerationsPostRequest(unittest.TestCase): - """ImagesGenerationsPostRequest unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ImagesGenerationsPostRequest: - """Test ImagesGenerationsPostRequest - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ImagesGenerationsPostRequest` - """ - model = ImagesGenerationsPostRequest() - if include_optional: - return ImagesGenerationsPostRequest( - prompt = 'cat floating in space, cinematic', - model = 'black-forest-labs/FLUX.1-schnell', - steps = 56, - image_url = '', - seed = 56, - n = 56, - height = 56, - width = 56, - negative_prompt = '', - response_format = 'base64', - guidance = 1.337, - output_format = 'jpeg', - image_loras = [ - together.generated.models._images_generations_post_request_image_loras_inner._images_generations_post_request_image_loras_inner( - path = '', - scale = 1.337, ) - ] - ) - else: - return ImagesGenerationsPostRequest( - prompt = 'cat floating in space, cinematic', - model = 'black-forest-labs/FLUX.1-schnell', - ) - """ - - def testImagesGenerationsPostRequest(self): - """Test ImagesGenerationsPostRequest""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_images_generations_post_request_image_loras_inner.py b/src/together/generated/test/test_images_generations_post_request_image_loras_inner.py deleted file mode 100644 index 1fa6c7b0..00000000 --- a/src/together/generated/test/test_images_generations_post_request_image_loras_inner.py +++ 
/dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.images_generations_post_request_image_loras_inner import ( - ImagesGenerationsPostRequestImageLorasInner, -) - - -class TestImagesGenerationsPostRequestImageLorasInner(unittest.TestCase): - """ImagesGenerationsPostRequestImageLorasInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance( - self, include_optional - ) -> ImagesGenerationsPostRequestImageLorasInner: - """Test ImagesGenerationsPostRequestImageLorasInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ImagesGenerationsPostRequestImageLorasInner` - """ - model = ImagesGenerationsPostRequestImageLorasInner() - if include_optional: - return ImagesGenerationsPostRequestImageLorasInner( - path = '', - scale = 1.337 - ) - else: - return ImagesGenerationsPostRequestImageLorasInner( - path = '', - scale = 1.337, - ) - """ - - def testImagesGenerationsPostRequestImageLorasInner(self): - """Test ImagesGenerationsPostRequestImageLorasInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_images_generations_post_request_model.py b/src/together/generated/test/test_images_generations_post_request_model.py deleted file mode 100644 index 3cc3c613..00000000 --- a/src/together/generated/test/test_images_generations_post_request_model.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - 
Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.images_generations_post_request_model import ( - ImagesGenerationsPostRequestModel, -) - - -class TestImagesGenerationsPostRequestModel(unittest.TestCase): - """ImagesGenerationsPostRequestModel unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ImagesGenerationsPostRequestModel: - """Test ImagesGenerationsPostRequestModel - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ImagesGenerationsPostRequestModel` - """ - model = ImagesGenerationsPostRequestModel() - if include_optional: - return ImagesGenerationsPostRequestModel( - ) - else: - return ImagesGenerationsPostRequestModel( - ) - """ - - def testImagesGenerationsPostRequestModel(self): - """Test ImagesGenerationsPostRequestModel""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_linear_lr_scheduler_args.py b/src/together/generated/test/test_linear_lr_scheduler_args.py deleted file mode 100644 index a1181988..00000000 --- a/src/together/generated/test/test_linear_lr_scheduler_args.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs - - -class TestLinearLRSchedulerArgs(unittest.TestCase): - """LinearLRSchedulerArgs unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> LinearLRSchedulerArgs: - """Test LinearLRSchedulerArgs - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `LinearLRSchedulerArgs` - """ - model = LinearLRSchedulerArgs() - if include_optional: - return LinearLRSchedulerArgs( - min_lr_ratio = 1.337 - ) - else: - return LinearLRSchedulerArgs( - ) - """ - - def testLinearLRSchedulerArgs(self): - """Test LinearLRSchedulerArgs""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_list_endpoint.py b/src/together/generated/test/test_list_endpoint.py deleted file mode 100644 index 54ad619a..00000000 --- a/src/together/generated/test/test_list_endpoint.py +++ /dev/null @@ -1,68 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.list_endpoint import ListEndpoint - - -class TestListEndpoint(unittest.TestCase): - """ListEndpoint unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ListEndpoint: - """Test ListEndpoint - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ListEndpoint` - """ - model = ListEndpoint() - if include_optional: - return ListEndpoint( - object = 'endpoint', - id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', - name = 'allenai/OLMo-7B', - model = 'allenai/OLMo-7B', - type = 'serverless', - owner = 'together', - state = 'STARTED', - created_at = '2024-02-28T21:34:35.444Z' - ) - else: - return ListEndpoint( - object = 'endpoint', - id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', - name = 'allenai/OLMo-7B', - model = 'allenai/OLMo-7B', - type = 'serverless', - owner = 'together', - state = 'STARTED', - created_at = '2024-02-28T21:34:35.444Z', - ) - """ - - def testListEndpoint(self): - """Test ListEndpoint""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_list_endpoints200_response.py b/src/together/generated/test/test_list_endpoints200_response.py deleted file mode 100644 index 246d4f99..00000000 --- a/src/together/generated/test/test_list_endpoints200_response.py +++ /dev/null @@ -1,78 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.list_endpoints200_response import ( - ListEndpoints200Response, -) - - -class TestListEndpoints200Response(unittest.TestCase): - """ListEndpoints200Response unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ListEndpoints200Response: - """Test ListEndpoints200Response - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ListEndpoints200Response` - """ - model = ListEndpoints200Response() - if include_optional: - return ListEndpoints200Response( - object = 'list', - data = [ - together.generated.models.list_endpoint.ListEndpoint( - object = 'endpoint', - id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', - name = 'allenai/OLMo-7B', - model = 'allenai/OLMo-7B', - type = 'serverless', - owner = 'together', - state = 'STARTED', - created_at = '2024-02-28T21:34:35.444Z', ) - ] - ) - else: - return ListEndpoints200Response( - object = 'list', - data = [ - together.generated.models.list_endpoint.ListEndpoint( - object = 'endpoint', - id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', - name = 'allenai/OLMo-7B', - model = 'allenai/OLMo-7B', - type = 'serverless', - owner = 'together', - state = 'STARTED', - created_at = '2024-02-28T21:34:35.444Z', ) - ], - ) - """ - - def testListEndpoints200Response(self): - """Test ListEndpoints200Response""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response.py b/src/together/generated/test/test_list_hardware200_response.py deleted file mode 100644 index 90843a2e..00000000 --- a/src/together/generated/test/test_list_hardware200_response.py +++ 
/dev/null @@ -1,84 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.list_hardware200_response import ListHardware200Response - - -class TestListHardware200Response(unittest.TestCase): - """ListHardware200Response unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ListHardware200Response: - """Test ListHardware200Response - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ListHardware200Response` - """ - model = ListHardware200Response() - if include_optional: - return ListHardware200Response( - object = 'list', - data = [ - together.generated.models.hardware_with_status.HardwareWithStatus( - object = 'hardware', - id = '', - pricing = together.generated.models.endpoint_pricing.EndpointPricing( - cents_per_minute = 1.337, ), - specs = together.generated.models.hardware_spec.HardwareSpec( - gpu_type = '', - gpu_link = '', - gpu_memory = 1.337, - gpu_count = 56, ), - availability = together.generated.models.hardware_availability.HardwareAvailability( - status = 'available', ), - updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ) - ] - ) - else: - return ListHardware200Response( - object = 'list', - data = [ - together.generated.models.hardware_with_status.HardwareWithStatus( - object = 'hardware', - id = '', - pricing = together.generated.models.endpoint_pricing.EndpointPricing( - cents_per_minute = 1.337, ), - specs = together.generated.models.hardware_spec.HardwareSpec( - gpu_type = '', - gpu_link = '', - gpu_memory = 
1.337, - gpu_count = 56, ), - availability = together.generated.models.hardware_availability.HardwareAvailability( - status = 'available', ), - updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ) - ], - ) - """ - - def testListHardware200Response(self): - """Test ListHardware200Response""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_lo_ra_training_type.py b/src/together/generated/test/test_lo_ra_training_type.py deleted file mode 100644 index dcd0309c..00000000 --- a/src/together/generated/test/test_lo_ra_training_type.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.lo_ra_training_type import LoRATrainingType - - -class TestLoRATrainingType(unittest.TestCase): - """LoRATrainingType unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> LoRATrainingType: - """Test LoRATrainingType - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `LoRATrainingType` - """ - model = LoRATrainingType() - if include_optional: - return LoRATrainingType( - type = 'Lora', - lora_r = 56, - lora_alpha = 56, - lora_dropout = 1.337, - lora_trainable_modules = 'all-linear' - ) - else: - return LoRATrainingType( - type = 'Lora', - lora_r = 56, - lora_alpha = 56, - ) - """ - - def testLoRATrainingType(self): - """Test LoRATrainingType""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_logprobs_part.py b/src/together/generated/test/test_logprobs_part.py deleted file mode 100644 index b37d38e9..00000000 --- a/src/together/generated/test/test_logprobs_part.py +++ /dev/null @@ -1,61 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.logprobs_part import LogprobsPart - - -class TestLogprobsPart(unittest.TestCase): - """LogprobsPart unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> LogprobsPart: - """Test LogprobsPart - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `LogprobsPart` - """ - model = LogprobsPart() - if include_optional: - return LogprobsPart( - token_ids = [ - 1.337 - ], - tokens = [ - '' - ], - token_logprobs = [ - 1.337 - ] - ) - else: - return LogprobsPart( - ) - """ - - def testLogprobsPart(self): - """Test LogprobsPart""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_lr_scheduler.py b/src/together/generated/test/test_lr_scheduler.py deleted file mode 100644 index 281f4102..00000000 --- a/src/together/generated/test/test_lr_scheduler.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.lr_scheduler import LRScheduler - - -class TestLRScheduler(unittest.TestCase): - """LRScheduler unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> LRScheduler: - """Test LRScheduler - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `LRScheduler` - """ - model = LRScheduler() - if include_optional: - return LRScheduler( - lr_scheduler_type = '', - lr_scheduler_args = together.generated.models.linear_lr_scheduler_args.LinearLRSchedulerArgs( - min_lr_ratio = 1.337, ) - ) - else: - return LRScheduler( - lr_scheduler_type = '', - ) - """ - - def testLRScheduler(self): - """Test LRScheduler""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_model_info.py b/src/together/generated/test/test_model_info.py deleted file mode 100644 index 24c4f5ca..00000000 --- a/src/together/generated/test/test_model_info.py +++ /dev/null @@ -1,71 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.model_info import ModelInfo - - -class TestModelInfo(unittest.TestCase): - """ModelInfo unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ModelInfo: - """Test ModelInfo - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ModelInfo` - """ - model = ModelInfo() - if include_optional: - return ModelInfo( - id = 'Austism/chronos-hermes-13b', - object = 'model', - created = 1692896905, - type = 'chat', - display_name = 'Chronos Hermes (13B)', - organization = 'Austism', - link = '', - license = 'other', - context_length = 2048, - pricing = together.generated.models.pricing.Pricing( - hourly = 0, - input = 0.3, - output = 0.3, - base = 0, - finetune = 0, ) - ) - else: - return ModelInfo( - id = 'Austism/chronos-hermes-13b', - object = 'model', - created = 1692896905, - type = 'chat', - ) - """ - - def testModelInfo(self): - """Test ModelInfo""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_models_api.py b/src/together/generated/test/test_models_api.py deleted file mode 100644 index 0ba1e2b4..00000000 --- a/src/together/generated/test/test_models_api.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.api.models_api import ModelsApi - - -class TestModelsApi(unittest.IsolatedAsyncioTestCase): - """ModelsApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = ModelsApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_models(self) -> None: - """Test case for models - - List all models - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_pricing.py b/src/together/generated/test/test_pricing.py deleted file mode 100644 index 8cf572bd..00000000 --- a/src/together/generated/test/test_pricing.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.pricing import Pricing - - -class TestPricing(unittest.TestCase): - """Pricing unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> Pricing: - """Test Pricing - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `Pricing` - """ - model = Pricing() - if include_optional: - return Pricing( - hourly = 0, - input = 0.3, - output = 0.3, - base = 0, - finetune = 0 - ) - else: - return Pricing( - hourly = 0, - input = 0.3, - output = 0.3, - base = 0, - finetune = 0, - ) - """ - - def testPricing(self): - """Test Pricing""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git 
a/src/together/generated/test/test_prompt_part_inner.py b/src/together/generated/test/test_prompt_part_inner.py deleted file mode 100644 index 5d588326..00000000 --- a/src/together/generated/test/test_prompt_part_inner.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.prompt_part_inner import PromptPartInner - - -class TestPromptPartInner(unittest.TestCase): - """PromptPartInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> PromptPartInner: - """Test PromptPartInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `PromptPartInner` - """ - model = PromptPartInner() - if include_optional: - return PromptPartInner( - text = '[INST] What is the capital of France? [/INST]', - logprobs = together.generated.models.logprobs_part.LogprobsPart( - token_ids = [ - 1.337 - ], - tokens = [ - '' - ], - token_logprobs = [ - 1.337 - ], ) - ) - else: - return PromptPartInner( - ) - """ - - def testPromptPartInner(self): - """Test PromptPartInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_rerank_api.py b/src/together/generated/test/test_rerank_api.py deleted file mode 100644 index 2acd64cd..00000000 --- a/src/together/generated/test/test_rerank_api.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. 
Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.api.rerank_api import RerankApi - - -class TestRerankApi(unittest.IsolatedAsyncioTestCase): - """RerankApi unit test stubs""" - - async def asyncSetUp(self) -> None: - self.api = RerankApi() - - async def asyncTearDown(self) -> None: - await self.api.api_client.close() - - async def test_rerank(self) -> None: - """Test case for rerank - - Create a rerank request - """ - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_rerank_request.py b/src/together/generated/test/test_rerank_request.py deleted file mode 100644 index c8489b9f..00000000 --- a/src/together/generated/test/test_rerank_request.py +++ /dev/null @@ -1,61 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.rerank_request import RerankRequest - - -class TestRerankRequest(unittest.TestCase): - """RerankRequest unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> RerankRequest: - """Test RerankRequest - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `RerankRequest` - """ - model = RerankRequest() - if include_optional: - return RerankRequest( - model = 'Salesforce/Llama-Rank-V1', - query = 'What animals can I find near Peru?', - documents = [{title=Llama, text=The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era.}, {title=Panda, text=The giant panda (Ailuropoda melanoleuca), also known as the panda bear or simply panda, is a bear species endemic to China.}, {title=Guanaco, text=The guanaco is a camelid native to South America, closely related to the llama. 
Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations.}, {title=Wild Bactrian camel, text=The wild Bactrian camel (Camelus ferus) is an endangered species of camel endemic to Northwest China and southwestern Mongolia.}], - top_n = 2, - return_documents = True, - rank_fields = [title, text] - ) - else: - return RerankRequest( - model = 'Salesforce/Llama-Rank-V1', - query = 'What animals can I find near Peru?', - documents = [{title=Llama, text=The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era.}, {title=Panda, text=The giant panda (Ailuropoda melanoleuca), also known as the panda bear or simply panda, is a bear species endemic to China.}, {title=Guanaco, text=The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations.}, {title=Wild Bactrian camel, text=The wild Bactrian camel (Camelus ferus) is an endangered species of camel endemic to Northwest China and southwestern Mongolia.}], - ) - """ - - def testRerankRequest(self): - """Test RerankRequest""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_rerank_request_documents.py b/src/together/generated/test/test_rerank_request_documents.py deleted file mode 100644 index 53fe08af..00000000 --- a/src/together/generated/test/test_rerank_request_documents.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.rerank_request_documents import RerankRequestDocuments - - -class TestRerankRequestDocuments(unittest.TestCase): - """RerankRequestDocuments unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> RerankRequestDocuments: - """Test RerankRequestDocuments - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `RerankRequestDocuments` - """ - model = RerankRequestDocuments() - if include_optional: - return RerankRequestDocuments( - ) - else: - return RerankRequestDocuments( - ) - """ - - def testRerankRequestDocuments(self): - """Test RerankRequestDocuments""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_rerank_request_model.py b/src/together/generated/test/test_rerank_request_model.py deleted file mode 100644 index 285741b2..00000000 --- a/src/together/generated/test/test_rerank_request_model.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.rerank_request_model import RerankRequestModel - - -class TestRerankRequestModel(unittest.TestCase): - """RerankRequestModel unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> RerankRequestModel: - """Test RerankRequestModel - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `RerankRequestModel` - """ - model = RerankRequestModel() - if include_optional: - return RerankRequestModel( - ) - else: - return RerankRequestModel( - ) - """ - - def testRerankRequestModel(self): - """Test RerankRequestModel""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_rerank_response.py b/src/together/generated/test/test_rerank_response.py deleted file mode 100644 index 187a5ac1..00000000 --- a/src/together/generated/test/test_rerank_response.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.rerank_response import RerankResponse - - -class TestRerankResponse(unittest.TestCase): - """RerankResponse unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> RerankResponse: - """Test RerankResponse - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `RerankResponse` - """ - model = RerankResponse() - if include_optional: - return RerankResponse( - object = 'rerank', - id = '9dfa1a09-5ebc-4a40-970f-586cb8f4ae47', - model = 'salesforce/turboranker-0.8-3778-6328', - results = [{index=0, relevance_score=0.29980177813003117, document={text={"title":"Llama","text":"The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."}}}, {index=2, relevance_score=0.2752447527354349, document={text={"title":"Guanaco","text":"The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations."}}}], - usage = {prompt_tokens=1837, completion_tokens=0, total_tokens=1837} - ) - else: - return RerankResponse( - object = 'rerank', - model = 'salesforce/turboranker-0.8-3778-6328', - results = [{index=0, relevance_score=0.29980177813003117, document={text={"title":"Llama","text":"The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."}}}, {index=2, relevance_score=0.2752447527354349, document={text={"title":"Guanaco","text":"The guanaco is a camelid native to South America, closely related to the llama. 
Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations."}}}], - ) - """ - - def testRerankResponse(self): - """Test RerankResponse""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_rerank_response_results_inner.py b/src/together/generated/test/test_rerank_response_results_inner.py deleted file mode 100644 index 1ff263c5..00000000 --- a/src/together/generated/test/test_rerank_response_results_inner.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.rerank_response_results_inner import ( - RerankResponseResultsInner, -) - - -class TestRerankResponseResultsInner(unittest.TestCase): - """RerankResponseResultsInner unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> RerankResponseResultsInner: - """Test RerankResponseResultsInner - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `RerankResponseResultsInner` - """ - model = RerankResponseResultsInner() - if include_optional: - return RerankResponseResultsInner( - index = 56, - relevance_score = 1.337, - document = together.generated.models.rerank_response_results_inner_document.RerankResponse_results_inner_document( - text = '', ) - ) - else: - return RerankResponseResultsInner( - index = 56, - relevance_score = 1.337, - document = 
together.generated.models.rerank_response_results_inner_document.RerankResponse_results_inner_document( - text = '', ), - ) - """ - - def testRerankResponseResultsInner(self): - """Test RerankResponseResultsInner""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_rerank_response_results_inner_document.py b/src/together/generated/test/test_rerank_response_results_inner_document.py deleted file mode 100644 index 02fb87ce..00000000 --- a/src/together/generated/test/test_rerank_response_results_inner_document.py +++ /dev/null @@ -1,55 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.rerank_response_results_inner_document import ( - RerankResponseResultsInnerDocument, -) - - -class TestRerankResponseResultsInnerDocument(unittest.TestCase): - """RerankResponseResultsInnerDocument unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> RerankResponseResultsInnerDocument: - """Test RerankResponseResultsInnerDocument - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `RerankResponseResultsInnerDocument` - """ - model = RerankResponseResultsInnerDocument() - if include_optional: - return RerankResponseResultsInnerDocument( - text = '' - ) - else: - return RerankResponseResultsInnerDocument( - ) - """ - - def testRerankResponseResultsInnerDocument(self): - """Test 
RerankResponseResultsInnerDocument""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_stream_sentinel.py b/src/together/generated/test/test_stream_sentinel.py deleted file mode 100644 index 58961b3f..00000000 --- a/src/together/generated/test/test_stream_sentinel.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.stream_sentinel import StreamSentinel - - -class TestStreamSentinel(unittest.TestCase): - """StreamSentinel unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> StreamSentinel: - """Test StreamSentinel - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `StreamSentinel` - """ - model = StreamSentinel() - if include_optional: - return StreamSentinel( - data = '[DONE]' - ) - else: - return StreamSentinel( - data = '[DONE]', - ) - """ - - def testStreamSentinel(self): - """Test StreamSentinel""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_tool_choice.py b/src/together/generated/test/test_tool_choice.py deleted file mode 100644 index b34a312a..00000000 --- a/src/together/generated/test/test_tool_choice.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding: utf-8 - -""" - Together APIs 
- - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.tool_choice import ToolChoice - - -class TestToolChoice(unittest.TestCase): - """ToolChoice unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ToolChoice: - """Test ToolChoice - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ToolChoice` - """ - model = ToolChoice() - if include_optional: - return ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ) - ) - else: - return ToolChoice( - index = 1.337, - id = '', - type = 'function', - function = together.generated.models.tool_choice_function.ToolChoice_function( - name = 'function_name', - arguments = '', ), - ) - """ - - def testToolChoice(self): - """Test ToolChoice""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_tool_choice_function.py b/src/together/generated/test/test_tool_choice_function.py deleted file mode 100644 index d7a2a8fa..00000000 --- a/src/together/generated/test/test_tool_choice_function.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. 
- - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from together.generated.models.tool_choice_function import ToolChoiceFunction - - -class TestToolChoiceFunction(unittest.TestCase): - """ToolChoiceFunction unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ToolChoiceFunction: - """Test ToolChoiceFunction - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ToolChoiceFunction` - """ - model = ToolChoiceFunction() - if include_optional: - return ToolChoiceFunction( - name = 'function_name', - arguments = '' - ) - else: - return ToolChoiceFunction( - name = 'function_name', - arguments = '', - ) - """ - - def testToolChoiceFunction(self): - """Test ToolChoiceFunction""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_tools_part.py b/src/together/generated/test/test_tools_part.py deleted file mode 100644 index 6f3aad82..00000000 --- a/src/together/generated/test/test_tools_part.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.tools_part import ToolsPart - - -class TestToolsPart(unittest.TestCase): - """ToolsPart unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ToolsPart: - """Test ToolsPart - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ToolsPart` - """ - model = ToolsPart() - if include_optional: - return ToolsPart( - type = 'tool_type', - function = together.generated.models.tools_part_function.ToolsPart_function( - description = 'A description of the function.', - name = 'function_name', - parameters = { }, ) - ) - else: - return ToolsPart( - ) - """ - - def testToolsPart(self): - """Test ToolsPart""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_tools_part_function.py b/src/together/generated/test/test_tools_part_function.py deleted file mode 100644 index 35de3a5b..00000000 --- a/src/together/generated/test/test_tools_part_function.py +++ /dev/null @@ -1,55 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.tools_part_function import ToolsPartFunction - - -class TestToolsPartFunction(unittest.TestCase): - """ToolsPartFunction unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> ToolsPartFunction: - """Test ToolsPartFunction - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `ToolsPartFunction` - """ - model = ToolsPartFunction() - if include_optional: - return ToolsPartFunction( - description = 'A description of the function.', - name = 'function_name', - parameters = { } - ) - else: - return ToolsPartFunction( - ) - """ - - def testToolsPartFunction(self): - """Test ToolsPartFunction""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_update_endpoint_request.py b/src/together/generated/test/test_update_endpoint_request.py deleted file mode 100644 index 3ce2db6f..00000000 --- a/src/together/generated/test/test_update_endpoint_request.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.update_endpoint_request import UpdateEndpointRequest - - -class TestUpdateEndpointRequest(unittest.TestCase): - """UpdateEndpointRequest unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> UpdateEndpointRequest: - """Test UpdateEndpointRequest - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `UpdateEndpointRequest` - """ - model = UpdateEndpointRequest() - if include_optional: - return UpdateEndpointRequest( - display_name = 'My Llama3 70b endpoint', - state = 'STARTED', - autoscaling = together.generated.models.autoscaling.Autoscaling( - min_replicas = 56, - max_replicas = 56, ) - ) - else: - return UpdateEndpointRequest( - ) - """ - - def testUpdateEndpointRequest(self): - """Test UpdateEndpointRequest""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/generated/test/test_usage_data.py b/src/together/generated/test/test_usage_data.py deleted file mode 100644 index e8de8bd8..00000000 --- a/src/together/generated/test/test_usage_data.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 - -""" - Together APIs - - The Together REST API. Please see https://docs.together.ai for more details. - - The version of the OpenAPI document: 2.0.0 - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from together.generated.models.usage_data import UsageData - - -class TestUsageData(unittest.TestCase): - """UsageData unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> UsageData: - """Test UsageData - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included""" - # uncomment below to create an instance of `UsageData` - """ - model = UsageData() - if include_optional: - return UsageData( - prompt_tokens = 56, - completion_tokens = 56, - total_tokens = 56 - ) - else: - return UsageData( - prompt_tokens = 56, - completion_tokens = 56, - total_tokens = 56, - ) - """ - - def testUsageData(self): - """Test UsageData""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/together/resources/endpoints.py b/src/together/resources/endpoints.py index 21ed47ed..176894f5 100644 --- a/src/together/resources/endpoints.py +++ b/src/together/resources/endpoints.py @@ -1,50 +1,52 @@ from __future__ import annotations -import asyncio -from typing import Any, Dict, List, Literal, Optional - -from together.generated.api.endpoints_api import EndpointsApi -from together.generated.api.hardware_api import HardwareApi -from together.generated.api_client import ApiClient -from together.generated.configuration import Configuration -from together.generated.models.autoscaling import Autoscaling -from together.generated.models.create_endpoint_request import CreateEndpointRequest -from together.generated.models.dedicated_endpoint import DedicatedEndpoint -from together.generated.models.hardware_with_status import HardwareWithStatus -from together.generated.models.list_endpoint import ListEndpoint -from together.generated.models.update_endpoint_request 
import UpdateEndpointRequest -from together.types import TogetherClient - - -class BaseEndpoints: - """Base class containing common endpoint functionality and documentation.""" - - def _get_api_client( - self, client: TogetherClient - ) -> tuple[ApiClient, EndpointsApi, HardwareApi]: - api_client = ApiClient( - configuration=Configuration( - host=client.base_url.rstrip("/") if client.base_url else "", - ), - header_name="Authorization", - header_value=f"Bearer {client.api_key}" if client.api_key else None, - ) - return api_client, EndpointsApi(api_client), HardwareApi(api_client) +from typing import Dict, List, Literal, Optional, Union +from together.abstract import api_requestor +from together.together_response import TogetherResponse +from together.types import TogetherClient, TogetherRequest +from together.types.endpoints import DedicatedEndpoint, HardwareWithStatus, ListEndpoint -class Endpoints(BaseEndpoints): - """Synchronous endpoints client.""" +class Endpoints: def __init__(self, client: TogetherClient) -> None: - self.api_client, self._api, self._hardware_api = self._get_api_client(client) - self._loop = asyncio.new_event_loop() - asyncio.set_event_loop(self._loop) + self._client = client + + def list( + self, type: Optional[Literal["dedicated", "serverless"]] = None + ) -> List[ListEndpoint]: + """ + List all endpoints, can be filtered by type. - def __del__(self) -> None: - if hasattr(self, "api_client"): - # Using type: ignore since close() is untyped in the library - self._loop.run_until_complete(self.api_client.close()) # type: ignore - self._loop.close() + Args: + type (str, optional): Filter endpoints by type ("dedicated" or "serverless"). Defaults to None. 
+ + Returns: + List[ListEndpoint]: List of endpoint objects + """ + requestor = api_requestor.APIRequestor( + client=self._client, + ) + + params = {} + if type is not None: + params["type"] = type + + response, _, _ = requestor.request( + options=TogetherRequest( + method="GET", + url="endpoints", + params=params, + ), + stream=False, + ) + + response.data = response.data["data"] + + assert isinstance(response, TogetherResponse) + assert isinstance(response.data, list) + + return [ListEndpoint(**endpoint) for endpoint in response.data] def create( self, @@ -74,41 +76,37 @@ def create( Returns: DedicatedEndpoint: Object containing endpoint information """ + requestor = api_requestor.APIRequestor( + client=self._client, + ) - async def _create() -> DedicatedEndpoint: - request = CreateEndpointRequest( - model=model, - hardware=hardware, - autoscaling=Autoscaling( - min_replicas=min_replicas, max_replicas=max_replicas - ), - display_name=display_name, - disable_prompt_cache=disable_prompt_cache, - disable_speculative_decoding=disable_speculative_decoding, - state=state, - ) - return await self._api.create_endpoint(create_endpoint_request=request) - - return self._loop.run_until_complete(_create()) - - def list( - self, type: Literal["dedicated", "serverless"] | None = None - ) -> List[ListEndpoint]: - """ - List all endpoints. + data: Dict[str, Union[str, bool, Dict[str, int]]] = { + "model": model, + "hardware": hardware, + "autoscaling": { + "min_replicas": min_replicas, + "max_replicas": max_replicas, + }, + "disable_prompt_cache": disable_prompt_cache, + "disable_speculative_decoding": disable_speculative_decoding, + "state": state, + } - Args: - type (str, optional): Filter endpoints by type ("dedicated" or "serverless"). Defaults to None. 
+ if display_name is not None: + data["display_name"] = display_name - Returns: - Dict[str, Any]: Response containing list of endpoints in the data field - """ + response, _, _ = requestor.request( + options=TogetherRequest( + method="POST", + url="endpoints", + params=data, + ), + stream=False, + ) - async def _list() -> List[ListEndpoint]: - response = await self._api.list_endpoints(type=type) - return response.data + assert isinstance(response, TogetherResponse) - return self._loop.run_until_complete(_list()) + return DedicatedEndpoint(**response.data) def get(self, endpoint_id: str) -> DedicatedEndpoint: """ @@ -120,11 +118,21 @@ def get(self, endpoint_id: str) -> DedicatedEndpoint: Returns: DedicatedEndpoint: Object containing endpoint information """ + requestor = api_requestor.APIRequestor( + client=self._client, + ) + + response, _, _ = requestor.request( + options=TogetherRequest( + method="GET", + url=f"endpoints/{endpoint_id}", + ), + stream=False, + ) - async def _get() -> DedicatedEndpoint: - return await self._api.get_endpoint(endpoint_id=endpoint_id) + assert isinstance(response, TogetherResponse) - return self._loop.run_until_complete(_get()) + return DedicatedEndpoint(**response.data) def delete(self, endpoint_id: str) -> None: """ @@ -133,11 +141,17 @@ def delete(self, endpoint_id: str) -> None: Args: endpoint_id (str): ID of the endpoint to delete """ + requestor = api_requestor.APIRequestor( + client=self._client, + ) - async def _delete() -> None: - return await self._api.delete_endpoint(endpoint_id=endpoint_id) - - return self._loop.run_until_complete(_delete()) + requestor.request( + options=TogetherRequest( + method="DELETE", + url=f"endpoints/{endpoint_id}", + ), + stream=False, + ) def update( self, @@ -161,56 +175,116 @@ def update( Returns: DedicatedEndpoint: Object containing endpoint information """ + requestor = api_requestor.APIRequestor( + client=self._client, + ) - async def _update() -> DedicatedEndpoint: - kwargs: Dict[str, Any] 
= {} - if min_replicas is not None or max_replicas is not None: - current_min = min_replicas - current_max = max_replicas - if current_min is None or current_max is None: - # Get current values if only one is specified - current = await self._api.get_endpoint(endpoint_id=endpoint_id) - current_min = current_min or current.autoscaling.min_replicas - current_max = current_max or current.autoscaling.max_replicas - kwargs["autoscaling"] = Autoscaling( - min_replicas=current_min, - max_replicas=current_max, - ) - if state is not None: - kwargs["state"] = state - if display_name is not None: - kwargs["display_name"] = display_name - - request = UpdateEndpointRequest(**kwargs) - return await self._api.update_endpoint( - endpoint_id=endpoint_id, update_endpoint_request=request - ) - - return self._loop.run_until_complete(_update()) + data: Dict[str, Union[str, Dict[str, int]]] = {} + + if min_replicas is not None or max_replicas is not None: + current_min = min_replicas + current_max = max_replicas + if current_min is None or current_max is None: + # Get current values if only one is specified + current = self.get(endpoint_id=endpoint_id) + current_min = current_min or current.autoscaling.min_replicas + current_max = current_max or current.autoscaling.max_replicas + data["autoscaling"] = { + "min_replicas": current_min, + "max_replicas": current_max, + } + + if state is not None: + data["state"] = state + + if display_name is not None: + data["display_name"] = display_name + + response, _, _ = requestor.request( + options=TogetherRequest( + method="PATCH", + url=f"endpoints/{endpoint_id}", + params=data, + ), + stream=False, + ) + + assert isinstance(response, TogetherResponse) + + return DedicatedEndpoint(**response.data) def list_hardware(self, model: Optional[str] = None) -> List[HardwareWithStatus]: """ List available hardware configurations. Args: - model (str, optional): Filter hardware configurations by model compatibility. Defaults to None. 
+ model (str, optional): Filter hardware configurations by model compatibility. When provided, + the response includes availability status for each compatible configuration. Returns: - List[HardwareWithStatus]: List of hardware configurations with their availability status + List[HardwareWithStatus]: List of hardware configurations with their status """ + requestor = api_requestor.APIRequestor( + client=self._client, + ) - async def _list_hardware() -> List[HardwareWithStatus]: - response = await self._hardware_api.list_hardware(model=model) - return response.data + params = {} + if model is not None: + params["model"] = model - return self._loop.run_until_complete(_list_hardware()) + response, _, _ = requestor.request( + options=TogetherRequest( + method="GET", + url="hardware", + params=params, + ), + stream=False, + ) + assert isinstance(response, TogetherResponse) + assert isinstance(response.data, dict) + assert isinstance(response.data["data"], list) -class AsyncEndpoints(BaseEndpoints): - """Asynchronous endpoints client.""" + return [HardwareWithStatus(**item) for item in response.data["data"]] + +class AsyncEndpoints: def __init__(self, client: TogetherClient) -> None: - self.api_client, self._api, self._hardware_api = self._get_api_client(client) + self._client = client + + async def list( + self, type: Optional[Literal["dedicated", "serverless"]] = None + ) -> List[ListEndpoint]: + """ + List all endpoints, can be filtered by type. + + Args: + type (str, optional): Filter endpoints by type ("dedicated" or "serverless"). Defaults to None. 
+ + Returns: + List[ListEndpoint]: List of endpoint objects + """ + requestor = api_requestor.APIRequestor( + client=self._client, + ) + + params = {} + if type is not None: + params["type"] = type + + response, _, _ = await requestor.arequest( + options=TogetherRequest( + method="GET", + url="endpoints", + params=params, + ), + stream=False, + ) + + assert isinstance(response, TogetherResponse) + assert isinstance(response.data, list) + + return [ListEndpoint(**endpoint) for endpoint in response.data] async def create( self, @@ -240,33 +314,37 @@ async def create( Returns: DedicatedEndpoint: Object containing endpoint information """ - request = CreateEndpointRequest( - model=model, - hardware=hardware, - autoscaling=Autoscaling( - min_replicas=min_replicas, max_replicas=max_replicas - ), - display_name=display_name, - disable_prompt_cache=disable_prompt_cache, - disable_speculative_decoding=disable_speculative_decoding, - state=state, + requestor = api_requestor.APIRequestor( + client=self._client, ) - return await self._api.create_endpoint(create_endpoint_request=request) - async def list( - self, type: Literal["dedicated", "serverless"] | None = None - ) -> List[ListEndpoint]: - """ - List all endpoints. + data: Dict[str, Union[str, bool, Dict[str, int]]] = { + "model": model, + "hardware": hardware, + "autoscaling": { + "min_replicas": min_replicas, + "max_replicas": max_replicas, + }, + "disable_prompt_cache": disable_prompt_cache, + "disable_speculative_decoding": disable_speculative_decoding, + "state": state, + } - Args: - type (str, optional): Filter endpoints by type ("dedicated" or "serverless"). Defaults to None. 
+ if display_name is not None: + data["display_name"] = display_name - Returns: - Dict[str, Any]: Response containing list of endpoints in the data field - """ - response = await self._api.list_endpoints(type=type) - return response.data + response, _, _ = await requestor.arequest( + options=TogetherRequest( + method="POST", + url="endpoints", + params=data, + ), + stream=False, + ) + + assert isinstance(response, TogetherResponse) + + return DedicatedEndpoint(**response.data) async def get(self, endpoint_id: str) -> DedicatedEndpoint: """ @@ -278,7 +356,21 @@ async def get(self, endpoint_id: str) -> DedicatedEndpoint: Returns: DedicatedEndpoint: Object containing endpoint information """ - return await self._api.get_endpoint(endpoint_id=endpoint_id) + requestor = api_requestor.APIRequestor( + client=self._client, + ) + + response, _, _ = await requestor.arequest( + options=TogetherRequest( + method="GET", + url=f"endpoints/{endpoint_id}", + ), + stream=False, + ) + + assert isinstance(response, TogetherResponse) + + return DedicatedEndpoint(**response.data) async def delete(self, endpoint_id: str) -> None: """ @@ -287,7 +379,17 @@ async def delete(self, endpoint_id: str) -> None: Args: endpoint_id (str): ID of the endpoint to delete """ - return await self._api.delete_endpoint(endpoint_id=endpoint_id) + requestor = api_requestor.APIRequestor( + client=self._client, + ) + + await requestor.arequest( + options=TogetherRequest( + method="DELETE", + url=f"endpoints/{endpoint_id}", + ), + stream=False, + ) async def update( self, @@ -311,29 +413,44 @@ async def update( Returns: DedicatedEndpoint: Object containing endpoint information """ - kwargs: Dict[str, Any] = {} + requestor = api_requestor.APIRequestor( + client=self._client, + ) + + data: Dict[str, Union[str, Dict[str, int]]] = {} + if min_replicas is not None or max_replicas is not None: current_min = min_replicas current_max = max_replicas if current_min is None or current_max is None: # Get current values if 
only one is specified - current = await self._api.get_endpoint(endpoint_id=endpoint_id) + current = await self.get(endpoint_id=endpoint_id) current_min = current_min or current.autoscaling.min_replicas current_max = current_max or current.autoscaling.max_replicas - kwargs["autoscaling"] = Autoscaling( - min_replicas=current_min, - max_replicas=current_max, - ) + data["autoscaling"] = { + "min_replicas": current_min, + "max_replicas": current_max, + } + if state is not None: - kwargs["state"] = state + data["state"] = state + if display_name is not None: - kwargs["display_name"] = display_name + data["display_name"] = display_name - request = UpdateEndpointRequest(**kwargs) - return await self._api.update_endpoint( - endpoint_id=endpoint_id, update_endpoint_request=request + response, _, _ = await requestor.arequest( + options=TogetherRequest( + method="PATCH", + url=f"endpoints/{endpoint_id}", + params=data, + ), + stream=False, ) + assert isinstance(response, TogetherResponse) + + return DedicatedEndpoint(**response.data) + async def list_hardware( self, model: Optional[str] = None ) -> List[HardwareWithStatus]: @@ -341,10 +458,31 @@ async def list_hardware( List available hardware configurations. Args: - model (str, optional): Filter hardware configurations by model compatibility. Defaults to None. + model (str, optional): Filter hardware configurations by model compatibility. When provided, + the response includes availability status for each compatible configuration. 
Returns: - List[HardwareWithStatus]: List of hardware configurations with their availability status + List[HardwareWithStatus]: List of hardware configurations with their status """ - response = await self._hardware_api.list_hardware(model=model) - return response.data + requestor = api_requestor.APIRequestor( + client=self._client, + ) + + params = {} + if model is not None: + params["model"] = model + + response, _, _ = await requestor.arequest( + options=TogetherRequest( + method="GET", + url="hardware", + params=params, + ), + stream=False, + ) + + assert isinstance(response, TogetherResponse) + assert isinstance(response.data, dict) + assert isinstance(response.data["data"], list) + + return [HardwareWithStatus(**item) for item in response.data["data"]] diff --git a/src/together/types/endpoints.py b/src/together/types/endpoints.py index 42299bb4..f669e73c 100644 --- a/src/together/types/endpoints.py +++ b/src/together/types/endpoints.py @@ -1,12 +1,120 @@ from __future__ import annotations -from together.generated.models.autoscaling import Autoscaling -from together.generated.models.dedicated_endpoint import DedicatedEndpoint -from together.generated.models.list_endpoint import ListEndpoint +from datetime import datetime +from typing import Any, Dict, Literal, Optional, Union + +from pydantic import BaseModel, Field + + +class TogetherJSONModel(BaseModel): + """Base model with JSON serialization support.""" + + def model_dump(self, **kwargs: Any) -> Dict[str, Any]: + exclude_none = kwargs.pop("exclude_none", True) + data = super().model_dump(exclude_none=exclude_none, **kwargs) + + # Convert datetime objects to ISO format strings + for key, value in data.items(): + if isinstance(value, datetime): + data[key] = value.isoformat() + + return data + + +class Autoscaling(TogetherJSONModel): + """Configuration for automatic scaling of replicas based on demand.""" + + min_replicas: int = Field( + description="The minimum number of replicas to maintain, even when 
there is no load" + ) + max_replicas: int = Field( + description="The maximum number of replicas to scale up to under load" + ) + + +class EndpointPricing(TogetherJSONModel): + """Pricing details for using an endpoint.""" + + cents_per_minute: float = Field( + description="Cost per minute of endpoint uptime in cents" + ) + + +class HardwareSpec(TogetherJSONModel): + """Detailed specifications of a hardware configuration.""" + + gpu_type: str = Field(description="The type/model of GPU") + gpu_link: str = Field(description="The GPU interconnect technology") + gpu_memory: Union[float, int] = Field(description="Amount of GPU memory in GB") + gpu_count: int = Field(description="Number of GPUs in this configuration") + + +class HardwareAvailability(TogetherJSONModel): + """Indicates the current availability status of a hardware configuration.""" + + status: Literal["available", "unavailable", "insufficient"] = Field( + description="The availability status of the hardware configuration" + ) + + +class HardwareWithStatus(TogetherJSONModel): + """Hardware configuration details with optional availability status.""" + + object: Literal["hardware"] = Field(description="The type of object") + id: str = Field(description="Unique identifier for the hardware configuration") + pricing: EndpointPricing = Field( + description="Pricing details for this hardware configuration" + ) + specs: HardwareSpec = Field(description="Detailed specifications of this hardware") + availability: Optional[HardwareAvailability] = Field( + default=None, + description="Current availability status of this hardware configuration", + ) + updated_at: datetime = Field( + description="Timestamp of when the hardware status was last updated" + ) + + +class BaseEndpoint(TogetherJSONModel): + """Base class for endpoint models with common fields.""" + + object: Literal["endpoint"] = Field(description="The type of object") + id: str = Field(description="Unique identifier for the endpoint") + name: str = 
Field(description="System name for the endpoint") + model: str = Field(description="The model deployed on this endpoint") + type: str = Field(description="The type of endpoint") + owner: str = Field(description="The owner of this endpoint") + state: Literal["PENDING", "STARTING", "STARTED", "STOPPING", "STOPPED", "ERROR"] = ( + Field(description="Current state of the endpoint") + ) + created_at: datetime = Field(description="Timestamp when the endpoint was created") + + +class ListEndpoint(BaseEndpoint): + """Details about an endpoint when listed via the list endpoint.""" + + type: Literal["dedicated", "serverless"] = Field(description="The type of endpoint") + + +class DedicatedEndpoint(BaseEndpoint): + """Details about a dedicated endpoint deployment.""" + + type: Literal["dedicated"] = Field(description="The type of endpoint") + display_name: str = Field(description="Human-readable name for the endpoint") + hardware: str = Field( + description="The hardware configuration used for this endpoint" + ) + autoscaling: Autoscaling = Field( + description="Configuration for automatic scaling of the endpoint" + ) __all__ = [ "DedicatedEndpoint", "ListEndpoint", "Autoscaling", + "EndpointPricing", + "HardwareSpec", + "HardwareAvailability", + "HardwareWithStatus", ] From 1b1390e85b2a8d99498f4c31a48bc52a71c1ed50 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 19:43:46 +0000 Subject: [PATCH 26/29] manually update type for now --- src/together/cli/api/endpoints.py | 53 ++++++++++++------------------- src/together/types/endpoints.py | 5 ++- 2 files changed, 24 insertions(+), 34 deletions(-) diff --git a/src/together/cli/api/endpoints.py b/src/together/cli/api/endpoints.py index 5380d112..3d306063 100644 --- a/src/together/cli/api/endpoints.py +++ b/src/together/cli/api/endpoints.py @@ -13,36 +13,9 @@ def print_endpoint( - endpoint: Union[DedicatedEndpoint, ListEndpoint], json: bool = False + endpoint: Union[DedicatedEndpoint, ListEndpoint], ) -> None: 
"""Print endpoint details in a Docker-like format or JSON.""" - if json: - import json as json_lib - - output: Dict[str, Any] = { - "id": endpoint.id, - "name": endpoint.name, - "model": endpoint.model, - "type": endpoint.type, - "owner": endpoint.owner, - "state": endpoint.state, - "created_at": endpoint.created_at.isoformat(), - } - - if isinstance(endpoint, DedicatedEndpoint): - output.update( - { - "display_name": endpoint.display_name, - "hardware": endpoint.hardware, - "autoscaling": { - "min_replicas": endpoint.autoscaling.min_replicas, - "max_replicas": endpoint.autoscaling.max_replicas, - }, - } - ) - - click.echo(json_lib.dumps(output, indent=2)) - return # Print header info click.echo(f"ID:\t\t{endpoint.id}") @@ -244,7 +217,12 @@ def create( def get(client: Together, endpoint_id: str, json: bool) -> None: """Get a dedicated inference endpoint.""" endpoint = client.endpoints.get(endpoint_id) - print_endpoint(endpoint, json=json) + if json: + import json as json_lib + + click.echo(json_lib.dumps(endpoint.model_dump(), indent=2)) + else: + print_endpoint(endpoint) @endpoints.command() @@ -362,10 +340,19 @@ def list( click.echo("No dedicated endpoints found", err=True) return - click.echo("Dedicated endpoints:", err=True) - for endpoint in endpoints: - print_endpoint(endpoint, json=json) - click.echo() + click.echo("Endpoints:", err=True) + if json: + import json as json_lib + + click.echo( + json_lib.dumps([endpoint.model_dump() for endpoint in endpoints], indent=2) + ) + else: + for endpoint in endpoints: + print_endpoint( + endpoint, + ) + click.echo() @endpoints.command() diff --git a/src/together/types/endpoints.py b/src/together/types/endpoints.py index f669e73c..22ab1934 100644 --- a/src/together/types/endpoints.py +++ b/src/together/types/endpoints.py @@ -79,7 +79,9 @@ class BaseEndpoint(TogetherJSONModel): """Base class for endpoint models with common fields.""" object: Literal["endpoint"] = Field(description="The type of object") - id: str = 
Field(description="Unique identifier for the endpoint") + id: Optional[str] = Field( + default=None, description="Unique identifier for the endpoint" + ) name: str = Field(description="System name for the endpoint") model: str = Field(description="The model deployed on this endpoint") type: str = Field(description="The type of endpoint") @@ -99,6 +101,7 @@ class ListEndpoint(BaseEndpoint): class DedicatedEndpoint(BaseEndpoint): """Details about a dedicated endpoint deployment.""" + id: str = Field(description="Unique identifier for the endpoint") type: Literal["dedicated"] = Field(description="The type of endpoint") display_name: str = Field(description="Human-readable name for the endpoint") hardware: str = Field( From 724dee93db94de537333501e66998319b598665e Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 19:45:55 +0000 Subject: [PATCH 27/29] revert poetry --- poetry.lock | 297 ++++++++++++++++++++++++++-------------------------- 1 file changed, 149 insertions(+), 148 deletions(-) diff --git a/poetry.lock b/poetry.lock index e66ce00c..3e932655 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,103 +2,98 @@ [[package]] name = "aiohappyeyeballs" -version = "2.4.6" +version = "2.4.4" description = "Happy Eyeballs for asyncio" optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "aiohappyeyeballs-2.4.6-py3-none-any.whl", hash = "sha256:147ec992cf873d74f5062644332c539fcd42956dc69453fe5204195e560517e1"}, - {file = "aiohappyeyeballs-2.4.6.tar.gz", hash = "sha256:9b05052f9042985d32ecbe4b59a77ae19c006a78f1344d7fdad69d28ded3d0b0"}, + {file = "aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8"}, + {file = "aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745"}, ] [[package]] name = "aiohttp" -version = "3.11.12" +version = "3.11.11" description = "Async http client/server framework 
(asyncio)" optional = false python-versions = ">=3.9" files = [ - {file = "aiohttp-3.11.12-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:aa8a8caca81c0a3e765f19c6953416c58e2f4cc1b84829af01dd1c771bb2f91f"}, - {file = "aiohttp-3.11.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:84ede78acde96ca57f6cf8ccb8a13fbaf569f6011b9a52f870c662d4dc8cd854"}, - {file = "aiohttp-3.11.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:584096938a001378484aa4ee54e05dc79c7b9dd933e271c744a97b3b6f644957"}, - {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:392432a2dde22b86f70dd4a0e9671a349446c93965f261dbaecfaf28813e5c42"}, - {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:88d385b8e7f3a870146bf5ea31786ef7463e99eb59e31db56e2315535d811f55"}, - {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b10a47e5390c4b30a0d58ee12581003be52eedd506862ab7f97da7a66805befb"}, - {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b5263dcede17b6b0c41ef0c3ccce847d82a7da98709e75cf7efde3e9e3b5cae"}, - {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50c5c7b8aa5443304c55c262c5693b108c35a3b61ef961f1e782dd52a2f559c7"}, - {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d1c031a7572f62f66f1257db37ddab4cb98bfaf9b9434a3b4840bf3560f5e788"}, - {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:7e44eba534381dd2687be50cbd5f2daded21575242ecfdaf86bbeecbc38dae8e"}, - {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:145a73850926018ec1681e734cedcf2716d6a8697d90da11284043b745c286d5"}, - {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:2c311e2f63e42c1bf86361d11e2c4a59f25d9e7aabdbdf53dc38b885c5435cdb"}, - {file 
= "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ea756b5a7bac046d202a9a3889b9a92219f885481d78cd318db85b15cc0b7bcf"}, - {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:526c900397f3bbc2db9cb360ce9c35134c908961cdd0ac25b1ae6ffcaa2507ff"}, - {file = "aiohttp-3.11.12-cp310-cp310-win32.whl", hash = "sha256:b8d3bb96c147b39c02d3db086899679f31958c5d81c494ef0fc9ef5bb1359b3d"}, - {file = "aiohttp-3.11.12-cp310-cp310-win_amd64.whl", hash = "sha256:7fe3d65279bfbee8de0fb4f8c17fc4e893eed2dba21b2f680e930cc2b09075c5"}, - {file = "aiohttp-3.11.12-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:87a2e00bf17da098d90d4145375f1d985a81605267e7f9377ff94e55c5d769eb"}, - {file = "aiohttp-3.11.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b34508f1cd928ce915ed09682d11307ba4b37d0708d1f28e5774c07a7674cac9"}, - {file = "aiohttp-3.11.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:936d8a4f0f7081327014742cd51d320296b56aa6d324461a13724ab05f4b2933"}, - {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de1378f72def7dfb5dbd73d86c19eda0ea7b0a6873910cc37d57e80f10d64e1"}, - {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9d45dbb3aaec05cf01525ee1a7ac72de46a8c425cb75c003acd29f76b1ffe94"}, - {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:930ffa1925393381e1e0a9b82137fa7b34c92a019b521cf9f41263976666a0d6"}, - {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8340def6737118f5429a5df4e88f440746b791f8f1c4ce4ad8a595f42c980bd5"}, - {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4016e383f91f2814e48ed61e6bda7d24c4d7f2402c75dd28f7e1027ae44ea204"}, - {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:3c0600bcc1adfaaac321422d615939ef300df81e165f6522ad096b73439c0f58"}, - {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:0450ada317a65383b7cce9576096150fdb97396dcfe559109b403c7242faffef"}, - {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:850ff6155371fd802a280f8d369d4e15d69434651b844bde566ce97ee2277420"}, - {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8fd12d0f989c6099e7b0f30dc6e0d1e05499f3337461f0b2b0dadea6c64b89df"}, - {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:76719dd521c20a58a6c256d058547b3a9595d1d885b830013366e27011ffe804"}, - {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:97fe431f2ed646a3b56142fc81d238abcbaff08548d6912acb0b19a0cadc146b"}, - {file = "aiohttp-3.11.12-cp311-cp311-win32.whl", hash = "sha256:e10c440d142fa8b32cfdb194caf60ceeceb3e49807072e0dc3a8887ea80e8c16"}, - {file = "aiohttp-3.11.12-cp311-cp311-win_amd64.whl", hash = "sha256:246067ba0cf5560cf42e775069c5d80a8989d14a7ded21af529a4e10e3e0f0e6"}, - {file = "aiohttp-3.11.12-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e392804a38353900c3fd8b7cacbea5132888f7129f8e241915e90b85f00e3250"}, - {file = "aiohttp-3.11.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8fa1510b96c08aaad49303ab11f8803787c99222288f310a62f493faf883ede1"}, - {file = "aiohttp-3.11.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dc065a4285307607df3f3686363e7f8bdd0d8ab35f12226362a847731516e42c"}, - {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddb31f8474695cd61fc9455c644fc1606c164b93bff2490390d90464b4655df"}, - {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9dec0000d2d8621d8015c293e24589d46fa218637d820894cb7356c77eca3259"}, - {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:e3552fe98e90fdf5918c04769f338a87fa4f00f3b28830ea9b78b1bdc6140e0d"}, - {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dfe7f984f28a8ae94ff3a7953cd9678550dbd2a1f9bda5dd9c5ae627744c78e"}, - {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a481a574af914b6e84624412666cbfbe531a05667ca197804ecc19c97b8ab1b0"}, - {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1987770fb4887560363b0e1a9b75aa303e447433c41284d3af2840a2f226d6e0"}, - {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:a4ac6a0f0f6402854adca4e3259a623f5c82ec3f0c049374133bcb243132baf9"}, - {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c96a43822f1f9f69cc5c3706af33239489a6294be486a0447fb71380070d4d5f"}, - {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a5e69046f83c0d3cb8f0d5bd9b8838271b1bc898e01562a04398e160953e8eb9"}, - {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:68d54234c8d76d8ef74744f9f9fc6324f1508129e23da8883771cdbb5818cbef"}, - {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c9fd9dcf9c91affe71654ef77426f5cf8489305e1c66ed4816f5a21874b094b9"}, - {file = "aiohttp-3.11.12-cp312-cp312-win32.whl", hash = "sha256:0ed49efcd0dc1611378beadbd97beb5d9ca8fe48579fc04a6ed0844072261b6a"}, - {file = "aiohttp-3.11.12-cp312-cp312-win_amd64.whl", hash = "sha256:54775858c7f2f214476773ce785a19ee81d1294a6bedc5cc17225355aab74802"}, - {file = "aiohttp-3.11.12-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:413ad794dccb19453e2b97c2375f2ca3cdf34dc50d18cc2693bd5aed7d16f4b9"}, - {file = "aiohttp-3.11.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4a93d28ed4b4b39e6f46fd240896c29b686b75e39cc6992692e3922ff6982b4c"}, - {file = "aiohttp-3.11.12-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:d589264dbba3b16e8951b6f145d1e6b883094075283dafcab4cdd564a9e353a0"}, - {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5148ca8955affdfeb864aca158ecae11030e952b25b3ae15d4e2b5ba299bad2"}, - {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:525410e0790aab036492eeea913858989c4cb070ff373ec3bc322d700bdf47c1"}, - {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bd8695be2c80b665ae3f05cb584093a1e59c35ecb7d794d1edd96e8cc9201d7"}, - {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0203433121484b32646a5f5ea93ae86f3d9559d7243f07e8c0eab5ff8e3f70e"}, - {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40cd36749a1035c34ba8d8aaf221b91ca3d111532e5ccb5fa8c3703ab1b967ed"}, - {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a7442662afebbf7b4c6d28cb7aab9e9ce3a5df055fc4116cc7228192ad6cb484"}, - {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:8a2fb742ef378284a50766e985804bd6adb5adb5aa781100b09befdbfa757b65"}, - {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2cee3b117a8d13ab98b38d5b6bdcd040cfb4181068d05ce0c474ec9db5f3c5bb"}, - {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f6a19bcab7fbd8f8649d6595624856635159a6527861b9cdc3447af288a00c00"}, - {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e4cecdb52aaa9994fbed6b81d4568427b6002f0a91c322697a4bfcc2b2363f5a"}, - {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:30f546358dfa0953db92ba620101fefc81574f87b2346556b90b5f3ef16e55ce"}, - {file = "aiohttp-3.11.12-cp313-cp313-win32.whl", hash = "sha256:ce1bb21fc7d753b5f8a5d5a4bae99566386b15e716ebdb410154c16c91494d7f"}, - {file 
= "aiohttp-3.11.12-cp313-cp313-win_amd64.whl", hash = "sha256:f7914ab70d2ee8ab91c13e5402122edbc77821c66d2758abb53aabe87f013287"}, - {file = "aiohttp-3.11.12-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c3623053b85b4296cd3925eeb725e386644fd5bc67250b3bb08b0f144803e7b"}, - {file = "aiohttp-3.11.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:67453e603cea8e85ed566b2700efa1f6916aefbc0c9fcb2e86aaffc08ec38e78"}, - {file = "aiohttp-3.11.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6130459189e61baac5a88c10019b21e1f0c6d00ebc770e9ce269475650ff7f73"}, - {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9060addfa4ff753b09392efe41e6af06ea5dd257829199747b9f15bfad819460"}, - {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34245498eeb9ae54c687a07ad7f160053911b5745e186afe2d0c0f2898a1ab8a"}, - {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8dc0fba9a74b471c45ca1a3cb6e6913ebfae416678d90529d188886278e7f3f6"}, - {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a478aa11b328983c4444dacb947d4513cb371cd323f3845e53caeda6be5589d5"}, - {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c160a04283c8c6f55b5bf6d4cad59bb9c5b9c9cd08903841b25f1f7109ef1259"}, - {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:edb69b9589324bdc40961cdf0657815df674f1743a8d5ad9ab56a99e4833cfdd"}, - {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4ee84c2a22a809c4f868153b178fe59e71423e1f3d6a8cd416134bb231fbf6d3"}, - {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:bf4480a5438f80e0f1539e15a7eb8b5f97a26fe087e9828e2c0ec2be119a9f72"}, - {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:e6b2732ef3bafc759f653a98881b5b9cdef0716d98f013d376ee8dfd7285abf1"}, - {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f752e80606b132140883bb262a457c475d219d7163d996dc9072434ffb0784c4"}, - {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ab3247d58b393bda5b1c8f31c9edece7162fc13265334217785518dd770792b8"}, - {file = "aiohttp-3.11.12-cp39-cp39-win32.whl", hash = "sha256:0d5176f310a7fe6f65608213cc74f4228e4f4ce9fd10bcb2bb6da8fc66991462"}, - {file = "aiohttp-3.11.12-cp39-cp39-win_amd64.whl", hash = "sha256:74bd573dde27e58c760d9ca8615c41a57e719bff315c9adb6f2a4281a28e8798"}, - {file = "aiohttp-3.11.12.tar.gz", hash = "sha256:7603ca26d75b1b86160ce1bbe2787a0b706e592af5b2504e12caa88a217767b0"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a60804bff28662cbcf340a4d61598891f12eea3a66af48ecfdc975ceec21e3c8"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b4fa1cb5f270fb3eab079536b764ad740bb749ce69a94d4ec30ceee1b5940d5"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:731468f555656767cda219ab42e033355fe48c85fbe3ba83a349631541715ba2"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb23d8bb86282b342481cad4370ea0853a39e4a32a0042bb52ca6bdde132df43"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f047569d655f81cb70ea5be942ee5d4421b6219c3f05d131f64088c73bb0917f"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd7659baae9ccf94ae5fe8bfaa2c7bc2e94d24611528395ce88d009107e00c6d"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af01e42ad87ae24932138f154105e88da13ce7d202a6de93fafdafb2883a00ef"}, + {file = 
"aiohttp-3.11.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5854be2f3e5a729800bac57a8d76af464e160f19676ab6aea74bde18ad19d438"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6526e5fb4e14f4bbf30411216780c9967c20c5a55f2f51d3abd6de68320cc2f3"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:85992ee30a31835fc482468637b3e5bd085fa8fe9392ba0bdcbdc1ef5e9e3c55"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:88a12ad8ccf325a8a5ed80e6d7c3bdc247d66175afedbe104ee2aaca72960d8e"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0a6d3fbf2232e3a08c41eca81ae4f1dff3d8f1a30bae415ebe0af2d2458b8a33"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84a585799c58b795573c7fa9b84c455adf3e1d72f19a2bf498b54a95ae0d194c"}, + {file = "aiohttp-3.11.11-cp310-cp310-win32.whl", hash = "sha256:bfde76a8f430cf5c5584553adf9926534352251d379dcb266ad2b93c54a29745"}, + {file = "aiohttp-3.11.11-cp310-cp310-win_amd64.whl", hash = "sha256:0fd82b8e9c383af11d2b26f27a478640b6b83d669440c0a71481f7c865a51da9"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ba74ec819177af1ef7f59063c6d35a214a8fde6f987f7661f4f0eecc468a8f76"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4af57160800b7a815f3fe0eba9b46bf28aafc195555f1824555fa2cfab6c1538"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffa336210cf9cd8ed117011085817d00abe4c08f99968deef0013ea283547204"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81b8fe282183e4a3c7a1b72f5ade1094ed1c6345a8f153506d114af5bf8accd9"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af41686ccec6a0f2bdc66686dc0f403c41ac2089f80e2214a0f82d001052c03"}, + 
{file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70d1f9dde0e5dd9e292a6d4d00058737052b01f3532f69c0c65818dac26dc287"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:249cc6912405917344192b9f9ea5cd5b139d49e0d2f5c7f70bdfaf6b4dbf3a2e"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0eb98d90b6690827dcc84c246811feeb4e1eea683c0eac6caed7549be9c84665"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec82bf1fda6cecce7f7b915f9196601a1bd1a3079796b76d16ae4cce6d0ef89b"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9fd46ce0845cfe28f108888b3ab17abff84ff695e01e73657eec3f96d72eef34"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:bd176afcf8f5d2aed50c3647d4925d0db0579d96f75a31e77cbaf67d8a87742d"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:ec2aa89305006fba9ffb98970db6c8221541be7bee4c1d027421d6f6df7d1ce2"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:92cde43018a2e17d48bb09c79e4d4cb0e236de5063ce897a5e40ac7cb4878773"}, + {file = "aiohttp-3.11.11-cp311-cp311-win32.whl", hash = "sha256:aba807f9569455cba566882c8938f1a549f205ee43c27b126e5450dc9f83cc62"}, + {file = "aiohttp-3.11.11-cp311-cp311-win_amd64.whl", hash = "sha256:ae545f31489548c87b0cced5755cfe5a5308d00407000e72c4fa30b19c3220ac"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e595c591a48bbc295ebf47cb91aebf9bd32f3ff76749ecf282ea7f9f6bb73886"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3ea1b59dc06396b0b424740a10a0a63974c725b1c64736ff788a3689d36c02d2"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:8811f3f098a78ffa16e0ea36dffd577eb031aea797cbdba81be039a4169e242c"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7227b87a355ce1f4bf83bfae4399b1f5bb42e0259cb9405824bd03d2f4336a"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d40f9da8cabbf295d3a9dae1295c69975b86d941bc20f0a087f0477fa0a66231"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffb3dc385f6bb1568aa974fe65da84723210e5d9707e360e9ecb51f59406cd2e"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8f5f7515f3552d899c61202d99dcb17d6e3b0de777900405611cd747cecd1b8"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3499c7ffbfd9c6a3d8d6a2b01c26639da7e43d47c7b4f788016226b1e711caa8"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8e2bf8029dbf0810c7bfbc3e594b51c4cc9101fbffb583a3923aea184724203c"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b6212a60e5c482ef90f2d788835387070a88d52cf6241d3916733c9176d39eab"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d119fafe7b634dbfa25a8c597718e69a930e4847f0b88e172744be24515140da"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:6fba278063559acc730abf49845d0e9a9e1ba74f85f0ee6efd5803f08b285853"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:92fc484e34b733704ad77210c7957679c5c3877bd1e6b6d74b185e9320cc716e"}, + {file = "aiohttp-3.11.11-cp312-cp312-win32.whl", hash = "sha256:9f5b3c1ed63c8fa937a920b6c1bec78b74ee09593b3f5b979ab2ae5ef60d7600"}, + {file = "aiohttp-3.11.11-cp312-cp312-win_amd64.whl", hash = "sha256:1e69966ea6ef0c14ee53ef7a3d68b564cc408121ea56c0caa2dc918c1b2f553d"}, + {file = 
"aiohttp-3.11.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:541d823548ab69d13d23730a06f97460f4238ad2e5ed966aaf850d7c369782d9"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:929f3ed33743a49ab127c58c3e0a827de0664bfcda566108989a14068f820194"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0882c2820fd0132240edbb4a51eb8ceb6eef8181db9ad5291ab3332e0d71df5f"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b63de12e44935d5aca7ed7ed98a255a11e5cb47f83a9fded7a5e41c40277d104"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa54f8ef31d23c506910c21163f22b124facb573bff73930735cf9fe38bf7dff"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a344d5dc18074e3872777b62f5f7d584ae4344cd6006c17ba12103759d407af3"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7fb429ab1aafa1f48578eb315ca45bd46e9c37de11fe45c7f5f4138091e2f1"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c341c7d868750e31961d6d8e60ff040fb9d3d3a46d77fd85e1ab8e76c3e9a5c4"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed9ee95614a71e87f1a70bc81603f6c6760128b140bc4030abe6abaa988f1c3d"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:de8d38f1c2810fa2a4f1d995a2e9c70bb8737b18da04ac2afbf3971f65781d87"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a9b7371665d4f00deb8f32208c7c5e652059b0fda41cf6dbcac6114a041f1cc2"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:620598717fce1b3bd14dd09947ea53e1ad510317c85dda2c9c65b622edc96b12"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:bf8d9bfee991d8acc72d060d53860f356e07a50f0e0d09a8dfedea1c554dd0d5"}, + {file = "aiohttp-3.11.11-cp313-cp313-win32.whl", hash = "sha256:9d73ee3725b7a737ad86c2eac5c57a4a97793d9f442599bea5ec67ac9f4bdc3d"}, + {file = "aiohttp-3.11.11-cp313-cp313-win_amd64.whl", hash = "sha256:c7a06301c2fb096bdb0bd25fe2011531c1453b9f2c163c8031600ec73af1cc99"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3e23419d832d969f659c208557de4a123e30a10d26e1e14b73431d3c13444c2e"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:21fef42317cf02e05d3b09c028712e1d73a9606f02467fd803f7c1f39cc59add"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1f21bb8d0235fc10c09ce1d11ffbd40fc50d3f08a89e4cf3a0c503dc2562247a"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1642eceeaa5ab6c9b6dfeaaa626ae314d808188ab23ae196a34c9d97efb68350"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2170816e34e10f2fd120f603e951630f8a112e1be3b60963a1f159f5699059a6"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8be8508d110d93061197fd2d6a74f7401f73b6d12f8822bbcd6d74f2b55d71b1"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eed954b161e6b9b65f6be446ed448ed3921763cc432053ceb606f89d793927e"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6c9af134da4bc9b3bd3e6a70072509f295d10ee60c697826225b60b9959acdd"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:44167fc6a763d534a6908bdb2592269b4bf30a03239bcb1654781adf5e49caf1"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:479b8c6ebd12aedfe64563b85920525d05d394b85f166b7873c8bde6da612f9c"}, + {file = 
"aiohttp-3.11.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:10b4ff0ad793d98605958089fabfa350e8e62bd5d40aa65cdc69d6785859f94e"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b540bd67cfb54e6f0865ceccd9979687210d7ed1a1cc8c01f8e67e2f1e883d28"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dac54e8ce2ed83b1f6b1a54005c87dfed139cf3f777fdc8afc76e7841101226"}, + {file = "aiohttp-3.11.11-cp39-cp39-win32.whl", hash = "sha256:568c1236b2fde93b7720f95a890741854c1200fba4a3471ff48b2934d2d93fd3"}, + {file = "aiohttp-3.11.11-cp39-cp39-win_amd64.whl", hash = "sha256:943a8b052e54dfd6439fd7989f67fc6a7f2138d0a2cf0a7de5f18aa4fe7eb3b1"}, + {file = "aiohttp-3.11.11.tar.gz", hash = "sha256:bb49c7f1e6ebf3821a42d81d494f538107610c3a705987f53068546b0e90303e"}, ] [package.dependencies] @@ -686,13 +681,13 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t [[package]] name = "identify" -version = "2.6.7" +version = "2.6.6" description = "File identification library for Python" optional = false python-versions = ">=3.9" files = [ - {file = "identify-2.6.7-py2.py3-none-any.whl", hash = "sha256:155931cb617a401807b09ecec6635d6c692d180090a1cedca8ef7d58ba5b6aa0"}, - {file = "identify-2.6.7.tar.gz", hash = "sha256:3fa266b42eba321ee0b2bb0936a6a6b9e36a1351cbb69055b3082f4193035684"}, + {file = "identify-2.6.6-py2.py3-none-any.whl", hash = "sha256:cbd1810bce79f8b671ecb20f53ee0ae8e86ae84b557de31d89709dc2a48ba881"}, + {file = "identify-2.6.6.tar.gz", hash = "sha256:7bec12768ed44ea4761efb47806f0a41f86e7c0a5fdf5950d4648c90eca7e251"}, ] [package.extras] @@ -888,43 +883,49 @@ dill = ">=0.3.8" [[package]] name = "mypy" -version = "1.15.0" +version = "1.14.1" description = "Optional static typing for Python" optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, - {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, - {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, - {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, - {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, - {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, - {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, - {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, - {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, - {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, - {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, - {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, - {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, - {file = 
"mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, - {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, - {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, - {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, - {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, - {file = "mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, - {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, - {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, - {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, - {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, - {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, - {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, - {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, - {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, - {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, - {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, - {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, - {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, - {file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, + {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, + {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, + {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, + {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, + {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, + {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, + {file = 
"mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, + {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, + {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, + {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, + {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, + {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, + {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, + {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, + {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, ] [package.dependencies] @@ -1918,29 +1919,29 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "ruff" -version = "0.9.6" +version = "0.9.4" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.9.6-py3-none-linux_armv6l.whl", hash = "sha256:2f218f356dd2d995839f1941322ff021c72a492c470f0b26a34f844c29cdf5ba"}, - {file = "ruff-0.9.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b908ff4df65dad7b251c9968a2e4560836d8f5487c2f0cc238321ed951ea0504"}, - {file = "ruff-0.9.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b109c0ad2ececf42e75fa99dc4043ff72a357436bb171900714a9ea581ddef83"}, - {file = "ruff-0.9.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1de4367cca3dac99bcbd15c161404e849bb0bfd543664db39232648dc00112dc"}, - {file = "ruff-0.9.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3ee4d7c2c92ddfdaedf0bf31b2b176fa7aa8950efc454628d477394d35638b"}, - {file = "ruff-0.9.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc1edd1775270e6aa2386119aea692039781429f0be1e0949ea5884e011aa8e"}, - {file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4a091729086dffa4bd070aa5dab7e39cc6b9d62eb2bef8f3d91172d30d599666"}, - {file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1bbc6808bf7b15796cef0815e1dfb796fbd383e7dbd4334709642649625e7c5"}, - {file = "ruff-0.9.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:589d1d9f25b5754ff230dce914a174a7c951a85a4e9270613a2b74231fdac2f5"}, - {file = "ruff-0.9.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc61dd5131742e21103fbbdcad683a8813be0e3c204472d520d9a5021ca8b217"}, - {file = "ruff-0.9.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5e2d9126161d0357e5c8f30b0bd6168d2c3872372f14481136d13de9937f79b6"}, - {file = "ruff-0.9.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:68660eab1a8e65babb5229a1f97b46e3120923757a68b5413d8561f8a85d4897"}, - {file = "ruff-0.9.6-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:c4cae6c4cc7b9b4017c71114115db0445b00a16de3bcde0946273e8392856f08"}, - {file = "ruff-0.9.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19f505b643228b417c1111a2a536424ddde0db4ef9023b9e04a46ed8a1cb4656"}, - {file = "ruff-0.9.6-py3-none-win32.whl", hash = "sha256:194d8402bceef1b31164909540a597e0d913c0e4952015a5b40e28c146121b5d"}, - {file = "ruff-0.9.6-py3-none-win_amd64.whl", hash = "sha256:03482d5c09d90d4ee3f40d97578423698ad895c87314c4de39ed2af945633caa"}, - {file = "ruff-0.9.6-py3-none-win_arm64.whl", hash = "sha256:0e2bb706a2be7ddfea4a4af918562fdc1bcb16df255e5fa595bbd800ce322a5a"}, - {file = "ruff-0.9.6.tar.gz", hash = "sha256:81761592f72b620ec8fa1068a6fd00e98a5ebee342a3642efd84454f3031dca9"}, + {file = "ruff-0.9.4-py3-none-linux_armv6l.whl", hash = "sha256:64e73d25b954f71ff100bb70f39f1ee09e880728efb4250c632ceed4e4cdf706"}, + {file = "ruff-0.9.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6ce6743ed64d9afab4fafeaea70d3631b4d4b28b592db21a5c2d1f0ef52934bf"}, + {file = "ruff-0.9.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:54499fb08408e32b57360f6f9de7157a5fec24ad79cb3f42ef2c3f3f728dfe2b"}, + {file = "ruff-0.9.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37c892540108314a6f01f105040b5106aeb829fa5fb0561d2dcaf71485021137"}, + {file = "ruff-0.9.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de9edf2ce4b9ddf43fd93e20ef635a900e25f622f87ed6e3047a664d0e8f810e"}, + {file = "ruff-0.9.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87c90c32357c74f11deb7fbb065126d91771b207bf9bfaaee01277ca59b574ec"}, + {file = "ruff-0.9.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:56acd6c694da3695a7461cc55775f3a409c3815ac467279dfa126061d84b314b"}, + {file = "ruff-0.9.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0c93e7d47ed951b9394cf352d6695b31498e68fd5782d6cbc282425655f687a"}, + {file = 
"ruff-0.9.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d4c8772670aecf037d1bf7a07c39106574d143b26cfe5ed1787d2f31e800214"}, + {file = "ruff-0.9.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfc5f1d7afeda8d5d37660eeca6d389b142d7f2b5a1ab659d9214ebd0e025231"}, + {file = "ruff-0.9.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:faa935fc00ae854d8b638c16a5f1ce881bc3f67446957dd6f2af440a5fc8526b"}, + {file = "ruff-0.9.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a6c634fc6f5a0ceae1ab3e13c58183978185d131a29c425e4eaa9f40afe1e6d6"}, + {file = "ruff-0.9.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:433dedf6ddfdec7f1ac7575ec1eb9844fa60c4c8c2f8887a070672b8d353d34c"}, + {file = "ruff-0.9.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d612dbd0f3a919a8cc1d12037168bfa536862066808960e0cc901404b77968f0"}, + {file = "ruff-0.9.4-py3-none-win32.whl", hash = "sha256:db1192ddda2200671f9ef61d9597fcef89d934f5d1705e571a93a67fb13a4402"}, + {file = "ruff-0.9.4-py3-none-win_amd64.whl", hash = "sha256:05bebf4cdbe3ef75430d26c375773978950bbf4ee3c95ccb5448940dc092408e"}, + {file = "ruff-0.9.4-py3-none-win_arm64.whl", hash = "sha256:585792f1e81509e38ac5123492f8875fbc36f3ede8185af0a26df348e5154f41"}, + {file = "ruff-0.9.4.tar.gz", hash = "sha256:6907ee3529244bb0ed066683e075f09285b38dd5b4039370df6ff06041ca19e7"}, ] [[package]] @@ -2139,13 +2140,13 @@ telegram = ["requests"] [[package]] name = "transformers" -version = "4.48.3" +version = "4.48.2" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.9.0" files = [ - {file = "transformers-4.48.3-py3-none-any.whl", hash = "sha256:78697f990f5ef350c23b46bf86d5081ce96b49479ab180b2de7687267de8fd36"}, - {file = "transformers-4.48.3.tar.gz", hash = "sha256:a5e8f1e9a6430aa78215836be70cecd3f872d99eeda300f41ad6cc841724afdb"}, + {file = "transformers-4.48.2-py3-none-any.whl", hash = 
"sha256:493bc5b0268b116eff305edf6656367fc89cf570e7a9d5891369e04751db698a"}, + {file = "transformers-4.48.2.tar.gz", hash = "sha256:dcfb73473e61f22fb3366fe2471ed2e42779ecdd49527a1bdf1937574855d516"}, ] [package.dependencies] @@ -2303,13 +2304,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.29.2" +version = "20.29.1" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.8" files = [ - {file = "virtualenv-20.29.2-py3-none-any.whl", hash = "sha256:febddfc3d1ea571bdb1dc0f98d7b45d24def7428214d4fb73cc486c9568cce6a"}, - {file = "virtualenv-20.29.2.tar.gz", hash = "sha256:fdaabebf6d03b5ba83ae0a02cfe96f48a716f4fae556461d180825866f75b728"}, + {file = "virtualenv-20.29.1-py3-none-any.whl", hash = "sha256:4e4cb403c0b0da39e13b46b1b2476e505cb0046b25f242bee80f62bf990b2779"}, + {file = "virtualenv-20.29.1.tar.gz", hash = "sha256:b8b8970138d32fb606192cb97f6cd4bb644fa486be9308fb9b63f81091b5dc35"}, ] [package.dependencies] From c752304d46aba437d074c648131fcf6a4e3833b3 Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 19:48:21 +0000 Subject: [PATCH 28/29] remove some unnecessary changes --- Makefile | 1 + scripts/.gitignore | 1 - src/together/resources/models.py | 6 +++++- 3 files changed, 6 insertions(+), 2 deletions(-) delete mode 100644 scripts/.gitignore diff --git a/Makefile b/Makefile index a9390125..4d63680a 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,7 @@ install: format: poetry run pre-commit run --all-files + # Documentation html: diff --git a/scripts/.gitignore b/scripts/.gitignore deleted file mode 100644 index c3f732c5..00000000 --- a/scripts/.gitignore +++ /dev/null @@ -1 +0,0 @@ -openapi-generator-cli.jar diff --git a/src/together/resources/models.py b/src/together/resources/models.py index 6b4f955b..9a85e9bb 100644 --- a/src/together/resources/models.py +++ b/src/together/resources/models.py @@ -4,7 +4,11 @@ from together.abstract import api_requestor from 
together.together_response import TogetherResponse -from together.types import ModelObject, TogetherClient, TogetherRequest +from together.types import ( + ModelObject, + TogetherClient, + TogetherRequest, +) class Models: From fb69922947b3b5bd80150db5d43d6c54f0b86a2c Mon Sep 17 00:00:00 2001 From: orangetin <126978607+orangetin@users.noreply.github.com> Date: Thu, 13 Feb 2025 12:08:30 -0800 Subject: [PATCH 29/29] bump together-python package version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 0a4534cd..be4a95ca 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api" [tool.poetry] name = "together" -version = "1.4.0" +version = "1.4.1" authors = [ "Together AI " ]