From 6d7a32b64f23ca24a677f0ec76fe00c4900ff3d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 12:10:40 +0000 Subject: [PATCH 1/9] build(deps): bump github.com/databricks/databricks-sdk-go Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.71.0 to 0.72.0. - [Release notes](https://github.com/databricks/databricks-sdk-go/releases) - [Changelog](https://github.com/databricks/databricks-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/databricks/databricks-sdk-go/compare/v0.71.0...v0.72.0) --- updated-dependencies: - dependency-name: github.com/databricks/databricks-sdk-go dependency-version: 0.72.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4d304427d3..b4958c97a5 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/BurntSushi/toml v1.5.0 // MIT github.com/Masterminds/semver/v3 v3.3.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.71.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.72.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/gorilla/mux v1.8.1 // BSD 3-Clause diff --git a/go.sum b/go.sum index e1ccc0bf14..0b620a2308 100644 --- a/go.sum +++ b/go.sum @@ -36,8 +36,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/databricks/databricks-sdk-go v0.71.0 h1:YVNcvQUcgzlKesxDolDXSQPbNcCldubYLvM71hzVmUY= -github.com/databricks/databricks-sdk-go v0.71.0/go.mod h1:xBtjeP9nq+6MgTewZW1EcbRkD7aDY9gZvcRPcwPhZjw= +github.com/databricks/databricks-sdk-go v0.72.0 h1:vNS4zlpvNYiXsy/7/lzV7cuu/yOcT/1xpfuJw3+W3TA= +github.com/databricks/databricks-sdk-go v0.72.0/go.mod h1:xBtjeP9nq+6MgTewZW1EcbRkD7aDY9gZvcRPcwPhZjw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 2012a2522652cbe734a943b4a5978b318e0648a8 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Jun 2025 14:11:14 +0200 Subject: [PATCH 2/9] regenerate --- .codegen/_openapi_sha | 2 +- .gitattributes | 7 +- bundle/config/variable/resolve_metastore.go | 23 +- .../config/variable/resolve_metastore_test.go | 15 +- .../internal/schema/annotations_openapi.yml | 56 +- bundle/schema/jsonschema.json | 70 +- .../ip-access-lists/ip-access-lists.go | 3 + cmd/account/log-delivery/log-delivery.go | 89 +- .../metastore-assignments.go | 2 + .../network-connectivity.go | 62 +- .../network-policies/network-policies.go | 5 +- .../service-principal-federation-policy.go | 2 + .../service-principal-secrets.go | 1 + .../workspace-assignment.go | 3 + .../workspace-network-configuration.go | 42 +- cmd/account/workspaces/workspaces.go | 3 + cmd/workspace/alerts-v2/alerts-v2.go | 9 +- .../artifact-allowlists.go | 1 + .../clean-room-assets/clean-room-assets.go | 15 
+- cmd/workspace/cmd.go | 10 +- cmd/workspace/custom-llms/custom-llms.go | 287 +++++ .../dashboard-email-subscriptions.go | 218 ++++ cmd/workspace/database/database.go | 1082 +++++++++++++++++ cmd/workspace/experiments/experiments.go | 221 +--- cmd/workspace/forecasting/forecasting.go | 1 + cmd/workspace/genie/genie.go | 60 + .../git-credentials/git-credentials.go | 3 + cmd/workspace/grants/grants.go | 92 +- .../ip-access-lists/ip-access-lists.go | 3 + cmd/workspace/jobs/jobs.go | 10 + .../lakeview-embedded/lakeview-embedded.go | 62 - cmd/workspace/metastores/metastores.go | 114 +- .../model-registry/model-registry.go | 14 +- .../model-versions/model-versions.go | 3 + .../permission-migration.go | 1 + cmd/workspace/pipelines/pipelines.go | 4 +- .../policy-compliance-for-jobs.go | 1 + .../provider-personalization-requests.go | 1 + cmd/workspace/providers/providers.go | 3 +- .../quality-monitor-v2/quality-monitor-v2.go | 400 ++++++ cmd/workspace/recipients/recipients.go | 4 +- .../registered-models/registered-models.go | 1 + cmd/workspace/repos/repos.go | 3 + cmd/workspace/schemas/schemas.go | 68 +- cmd/workspace/secrets/secrets.go | 1 + .../serving-endpoints/serving-endpoints.go | 1 + cmd/workspace/settings/settings.go | 4 + .../sql-results-download.go | 218 ++++ .../table-constraints/table-constraints.go | 1 + .../vector-search-endpoints.go | 1 + .../vector-search-indexes.go | 1 + cmd/workspace/volumes/volumes.go | 1 + .../bundles/compute/_models/environment.py | 40 +- .../databricks/bundles/jobs/__init__.py | 8 + .../bundles/jobs/_models/dashboard_task.py | 5 +- .../bundles/jobs/_models/dbt_cloud_task.py | 50 + .../databricks/bundles/jobs/_models/job.py | 5 +- .../databricks/bundles/jobs/_models/task.py | 18 + .../pipelines/_models/ingestion_config.py | 5 +- .../_models/ingestion_source_type.py | 2 + .../bundles/pipelines/_models/pipeline.py | 14 + 61 files changed, 2730 insertions(+), 721 deletions(-) create mode 100755 cmd/workspace/custom-llms/custom-llms.go create mode 100755 cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go create mode 100755 cmd/workspace/database/database.go create mode 100755 cmd/workspace/quality-monitor-v2/quality-monitor-v2.go create mode 100755 cmd/workspace/sql-results-download/sql-results-download.go create mode 100644 experimental/python/databricks/bundles/jobs/_models/dbt_cloud_task.py diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 3e67081803..ac1c24d104 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -2cee201b2e8d656f7306b2f9ec98edfa721e9829 \ No newline at end of file +a8f547d3728fba835fbdda301e846829c5cbbef5 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 629be14230..66a1ee60c5 100755 --- a/.gitattributes +++ b/.gitattributes @@ -63,10 +63,12 @@ cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true cmd/workspace/credentials/credentials.go linguist-generated=true cmd/workspace/current-user/current-user.go linguist-generated=true +cmd/workspace/custom-llms/custom-llms.go linguist-generated=true +cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go linguist-generated=true cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true cmd/workspace/data-sources/data-sources.go linguist-generated=true 
-cmd/workspace/database-instances/database-instances.go linguist-generated=true +cmd/workspace/database/database.go linguist-generated=true cmd/workspace/default-namespace/default-namespace.go linguist-generated=true cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go linguist-generated=true @@ -110,10 +112,10 @@ cmd/workspace/provider-personalization-requests/provider-personalization-request cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go linguist-generated=true cmd/workspace/provider-providers/provider-providers.go linguist-generated=true cmd/workspace/providers/providers.go linguist-generated=true +cmd/workspace/quality-monitor-v2/quality-monitor-v2.go linguist-generated=true cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true cmd/workspace/queries-legacy/queries-legacy.go linguist-generated=true cmd/workspace/queries/queries.go linguist-generated=true -cmd/workspace/query-execution/query-execution.go linguist-generated=true cmd/workspace/query-history/query-history.go linguist-generated=true cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go linguist-generated=true cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true @@ -131,6 +133,7 @@ cmd/workspace/service-principals/service-principals.go linguist-generated=true cmd/workspace/serving-endpoints/serving-endpoints.go linguist-generated=true cmd/workspace/settings/settings.go linguist-generated=true cmd/workspace/shares/shares.go linguist-generated=true +cmd/workspace/sql-results-download/sql-results-download.go linguist-generated=true cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true cmd/workspace/system-schemas/system-schemas.go linguist-generated=true cmd/workspace/table-constraints/table-constraints.go linguist-generated=true diff --git a/bundle/config/variable/resolve_metastore.go b/bundle/config/variable/resolve_metastore.go index 8a0a8c7edb..5460ccb3d3 100644 --- a/bundle/config/variable/resolve_metastore.go +++ b/bundle/config/variable/resolve_metastore.go @@ -2,8 +2,10 @@ package variable import ( "context" + "fmt" "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/catalog" ) type resolveMetastore struct { @@ -11,11 +13,28 @@ type resolveMetastore struct { } func (l resolveMetastore) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { - entity, err := w.Metastores.GetByName(ctx, l.name) + result, err := w.Metastores.ListAll(ctx, catalog.ListMetastoresRequest{}) if err != nil { return "", err } - return entity.MetastoreId, nil + + // Collect all metastores with the given name. + var entities []catalog.MetastoreInfo + for _, entity := range result { + if entity.Name == l.name { + entities = append(entities, entity) + } + } + + // Return the ID of the first matching metastore. 
+ switch len(entities) {
+ case 0:
+ return "", fmt.Errorf("metastore named %q does not exist", l.name)
+ case 1:
+ return entities[0].MetastoreId, nil
+ default:
+ return "", fmt.Errorf("there are %d instances of metastores named %q", len(entities), l.name)
+ }
 }
 
 func (l resolveMetastore) String() string {
diff --git a/bundle/config/variable/resolve_metastore_test.go b/bundle/config/variable/resolve_metastore_test.go
index 55c4d92d09..5d772e65bf 100644
--- a/bundle/config/variable/resolve_metastore_test.go
+++ b/bundle/config/variable/resolve_metastore_test.go
@@ -4,7 +4,6 @@ import (
 "context"
 "testing"
 
- "github.com/databricks/databricks-sdk-go/apierr"
 "github.com/databricks/databricks-sdk-go/experimental/mocks"
 "github.com/databricks/databricks-sdk-go/service/catalog"
 "github.com/stretchr/testify/assert"
@@ -17,9 +16,9 @@ func TestResolveMetastore_ResolveSuccess(t *testing.T) {
 api := m.GetMockMetastoresAPI()
 api.EXPECT().
- GetByName(mock.Anything, "metastore").
- Return(&catalog.MetastoreInfo{
- MetastoreId: "abcd",
+ ListAll(mock.Anything, mock.Anything).
+ Return([]catalog.MetastoreInfo{
+ {MetastoreId: "abcd", Name: "metastore"},
 }, nil)
 
 ctx := context.Background()
@@ -34,13 +33,15 @@ func TestResolveMetastore_ResolveNotFound(t *testing.T) {
 api := m.GetMockMetastoresAPI()
 api.EXPECT().
- GetByName(mock.Anything, "metastore").
- Return(nil, &apierr.APIError{StatusCode: 404})
+ ListAll(mock.Anything, mock.Anything).
+ Return([]catalog.MetastoreInfo{
+ {MetastoreId: "abcd", Name: "different"},
+ }, nil)
 
 ctx := context.Background()
 l := resolveMetastore{name: "metastore"}
 _, err := l.Resolve(ctx, m.WorkspaceClient)
- require.ErrorIs(t, err, apierr.ErrNotFound)
+ require.ErrorContains(t, err, "metastore named \"metastore\" does not exist")
 }
 
 func TestResolveMetastore_String(t *testing.T) {
diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml
index 395284c861..3f84637d46 100644
--- a/bundle/internal/schema/annotations_openapi.yml
+++ b/bundle/internal/schema/annotations_openapi.yml
@@ -534,6 +534,11 @@ github.com/databricks/cli/bundle/config/resources.Pipeline:
 "storage":
 "description": |-
 DBFS root directory for storing checkpoints and tables.
+ "tags":
+ "description": |-
+ A map of tags associated with the pipeline.
+ These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations.
+ A maximum of 25 tags can be added to the pipeline.
 "target":
 "description": |-
 Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field.
@@ -1425,24 +1430,19 @@ github.com/databricks/databricks-sdk-go/service/compute.Environment:
 In this minimal environment spec, only pip dependencies are supported.
 "client":
 "description": |-
- Client version used by the environment
- The client is the user-facing environment of the runtime.
- Each client comes with a specific set of pre-installed libraries.
- The version is a string, consisting of the major client version.
+ Use `environment_version` instead.
+ "deprecation_message": |-
+ This field is deprecated
 "dependencies":
 "description": |-
 List of pip dependencies, as supported by the version of pip in this environment. 
- Each dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/ - Allowed dependency could be , , (WSFS or Volumes in Databricks), - E.g. dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"] + Each dependency is a valid pip requirements file line per https://pip.pypa.io/en/stable/reference/requirements-file-format/. + Allowed dependencies include a requirement specifier, an archive URL, a local project path (such as WSFS or UC Volumes in Databricks), or a VCS project URL. "environment_version": "description": |- - We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized - correctly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field - for any other purpose, e.g. notebook storage. - This field is not yet exposed to customers (e.g. in the jobs API). - "x-databricks-preview": |- - PRIVATE + Required. Environment version used by the environment. + Each version comes with a specific Python version and a set of Python packages. + The version is a string, consisting of an integer. "jar_dependencies": "description": |- List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`. @@ -1787,6 +1787,13 @@ github.com/databricks/databricks-sdk-go/service/jobs.DashboardTask: "description": |- Optional: The warehouse id to execute the dashboard with for the schedule. If not specified, the default warehouse of the dashboard will be used. +github.com/databricks/databricks-sdk-go/service/jobs.DbtCloudTask: + "connection_resource_name": + "description": |- + The resource name of the UC connection that authenticates the dbt Cloud for this task + "dbt_cloud_job_id": + "description": |- + Id of the dbt Cloud job to be triggered github.com/databricks/databricks-sdk-go/service/jobs.DbtTask: "catalog": "description": |- @@ -2540,6 +2547,11 @@ github.com/databricks/databricks-sdk-go/service/jobs.Task: "dashboard_task": "description": |- The task refreshes a dashboard and sends a snapshot to subscribers. + "dbt_cloud_task": + "description": |- + Task type for dbt cloud + "x-databricks-preview": |- + PRIVATE "dbt_task": "description": |- The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse. @@ -2878,6 +2890,8 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: MANAGED_POSTGRESQL - |- ORACLE + - |- + TERADATA - |- SHAREPOINT - |- @@ -3692,9 +3706,15 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput: "instance_profile_arn": "description": |- ARN of the instance profile that the served entity uses to access AWS resources. + "max_provisioned_concurrency": + "description": |- + The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified. "max_provisioned_throughput": "description": |- The maximum tokens per second that the endpoint can scale up to. + "min_provisioned_concurrency": + "description": |- + The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified. "min_provisioned_throughput": "description": |- The minimum tokens per second that the endpoint can scale down to. 
@@ -3709,7 +3729,7 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput: Whether the compute resources for the served entity should scale down to zero. "workload_size": "description": |- - The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. + The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified. "workload_type": "description": |- The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). @@ -3720,9 +3740,15 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput: "instance_profile_arn": "description": |- ARN of the instance profile that the served entity uses to access AWS resources. + "max_provisioned_concurrency": + "description": |- + The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified. "max_provisioned_throughput": "description": |- The maximum tokens per second that the endpoint can scale up to. + "min_provisioned_concurrency": + "description": |- + The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified. "min_provisioned_throughput": "description": |- The minimum tokens per second that the endpoint can scale down to. @@ -3739,7 +3765,7 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput: Whether the compute resources for the served entity should scale down to zero. "workload_size": "description": |- - The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. + The workload size of the served entity. 
The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified. "workload_type": "description": |- The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index cf79d61226..5b220c5c0b 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -1083,6 +1083,10 @@ "description": "DBFS root directory for storing checkpoints and tables.", "$ref": "#/$defs/string" }, + "tags": { + "description": "A map of tags associated with the pipeline.\nThese are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations.\nA maximum of 25 tags can be added to the pipeline.", + "$ref": "#/$defs/map/string" + }, "target": { "description": "Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field.", "$ref": "#/$defs/string" @@ -3325,18 +3329,18 @@ "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", "properties": { "client": { - "description": "Client version used by the environment\nThe client is the user-facing environment of the runtime.\nEach client comes with a specific set of pre-installed libraries.\nThe version is a string, consisting of the major client version.", - "$ref": "#/$defs/string" + "description": "Use `environment_version` instead.", + "$ref": "#/$defs/string", + "deprecationMessage": "This field is deprecated", + "deprecated": true }, "dependencies": { "description": "List of pip dependencies, as supported by the version of pip in this environment.", "$ref": "#/$defs/slice/string" }, "environment_version": { - "description": "We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized\ncorrectly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field\nfor any other purpose, e.g. notebook storage.\nThis field is not yet exposed to customers (e.g. in the jobs API).", - "$ref": "#/$defs/string", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "description": "Required. 
Environment version used by the environment.\nEach version comes with a specific Python version and a set of Python packages.\nThe version is a string, consisting of an integer.", + "$ref": "#/$defs/string" }, "jar_dependencies": { "description": "List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`.", @@ -3345,10 +3349,7 @@ "doNotSuggest": true } }, - "additionalProperties": false, - "required": [ - "client" - ] + "additionalProperties": false }, { "type": "string", @@ -4009,6 +4010,28 @@ } ] }, + "jobs.DbtCloudTask": { + "oneOf": [ + { + "type": "object", + "properties": { + "connection_resource_name": { + "description": "The resource name of the UC connection that authenticates the dbt Cloud for this task", + "$ref": "#/$defs/string" + }, + "dbt_cloud_job_id": { + "description": "Id of the dbt Cloud job to be triggered", + "$ref": "#/$defs/int64" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "jobs.DbtTask": { "oneOf": [ { @@ -5382,6 +5405,12 @@ "description": "The task refreshes a dashboard and sends a snapshot to subscribers.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DashboardTask" }, + "dbt_cloud_task": { + "description": "Task type for dbt cloud", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DbtCloudTask", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "dbt_task": { "description": "The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DbtTask" @@ -5982,6 +6011,7 @@ "SERVICENOW", "MANAGED_POSTGRESQL", "ORACLE", + "TERADATA", "SHAREPOINT", "DYNAMICS365" ] @@ -7412,10 +7442,18 @@ "description": "ARN of the instance profile that the served entity uses to access AWS resources.", "$ref": "#/$defs/string" }, + "max_provisioned_concurrency": { + "description": "The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified.", + "$ref": "#/$defs/int" + }, "max_provisioned_throughput": { "description": "The maximum tokens per second that the endpoint can scale up to.", "$ref": "#/$defs/int" }, + "min_provisioned_concurrency": { + "description": "The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified.", + "$ref": "#/$defs/int" + }, "min_provisioned_throughput": { "description": "The minimum tokens per second that the endpoint can scale down to.", "$ref": "#/$defs/int" @@ -7433,7 +7471,7 @@ "$ref": "#/$defs/bool" }, "workload_size": { - "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.", + "description": "The workload size of the served entity. 
The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.", "$ref": "#/$defs/string" }, "workload_type": { @@ -7462,10 +7500,18 @@ "description": "ARN of the instance profile that the served entity uses to access AWS resources.", "$ref": "#/$defs/string" }, + "max_provisioned_concurrency": { + "description": "The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified.", + "$ref": "#/$defs/int" + }, "max_provisioned_throughput": { "description": "The maximum tokens per second that the endpoint can scale up to.", "$ref": "#/$defs/int" }, + "min_provisioned_concurrency": { + "description": "The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified.", + "$ref": "#/$defs/int" + }, "min_provisioned_throughput": { "description": "The minimum tokens per second that the endpoint can scale down to.", "$ref": "#/$defs/int" @@ -7489,7 +7535,7 @@ "$ref": "#/$defs/bool" }, "workload_size": { - "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.", + "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. 
Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.", "$ref": "#/$defs/string" }, "workload_type": { diff --git a/cmd/account/ip-access-lists/ip-access-lists.go b/cmd/account/ip-access-lists/ip-access-lists.go index f738af42c2..ccd2d8bbea 100755 --- a/cmd/account/ip-access-lists/ip-access-lists.go +++ b/cmd/account/ip-access-lists/ip-access-lists.go @@ -150,6 +150,7 @@ func newCreate() *cobra.Command { createReq.Label = args[0] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &createReq.ListType) if err != nil { return fmt.Errorf("invalid LIST_TYPE: %s", args[1]) @@ -437,12 +438,14 @@ func newReplace() *cobra.Command { replaceReq.Label = args[1] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &replaceReq.ListType) if err != nil { return fmt.Errorf("invalid LIST_TYPE: %s", args[2]) } } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[3], &replaceReq.Enabled) if err != nil { return fmt.Errorf("invalid ENABLED: %s", args[3]) diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index e2833263b2..f35f5b795b 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -20,66 +20,10 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "log-delivery", - Short: `These APIs manage log delivery configurations for this account.`, - Long: `These APIs manage log delivery configurations for this account. The two - supported log types for this API are _billable usage logs_ and _audit logs_. - This feature is in Public Preview. This feature works with all account ID - types. - - Log delivery works with all account types. However, if your account is on the - E2 version of the platform or on a select custom plan that allows multiple - workspaces per account, you can optionally configure different storage - destinations for each workspace. Log delivery status is also provided to know - the latest status of log delivery attempts. The high-level flow of billable - usage delivery: - - 1. **Create storage**: In AWS, [create a new AWS S3 bucket] with a specific - bucket policy. Using Databricks APIs, call the Account API to create a - [storage configuration object](:method:Storage/Create) that uses the bucket - name. 2. **Create credentials**: In AWS, create the appropriate AWS IAM role. - For full details, including the required IAM role policies and trust - relationship, see [Billable usage log delivery]. Using Databricks APIs, call - the Account API to create a [credential configuration - object](:method:Credentials/Create) that uses the IAM role"s ARN. 3. **Create - log delivery configuration**: Using Databricks APIs, call the Account API to - [create a log delivery configuration](:method:LogDelivery/Create) that uses - the credential and storage configuration objects from previous steps. You can - specify if the logs should include all events of that log type in your account - (_Account level_ delivery) or only events for a specific set of workspaces - (_workspace level_ delivery). Account level log delivery applies to all - current and future workspaces plus account level logs, while workspace level - log delivery solely delivers logs related to the specified workspaces. You can - create multiple types of delivery configurations per account. - - For billable usage delivery: * For more information about billable usage logs, - see [Billable usage log delivery]. 
For the CSV schema, see the [Usage page]. * - The delivery location is //billable-usage/csv/, where - is the name of the optional delivery path prefix you set up during - log delivery configuration. Files are named - workspaceId=-usageMonth=.csv. * All billable usage logs - apply to specific workspaces (_workspace level_ logs). You can aggregate usage - for your entire account by creating an _account level_ delivery configuration - that delivers logs for all current and future workspaces in your account. * - The files are delivered daily by overwriting the month's CSV file for each - workspace. - - For audit log delivery: * For more information about about audit log delivery, - see [Audit log delivery], which includes information about the used JSON - schema. * The delivery location is - //workspaceId=/date=/auditlogs_.json. - Files may get overwritten with the same content multiple times to achieve - exactly-once delivery. * If the audit log delivery configuration included - specific workspace IDs, only _workspace-level_ audit logs for those workspaces - are delivered. If the log delivery configuration applies to the entire account - (_account level_ delivery configuration), the audit log delivery includes - workspace-level audit logs for all workspaces in the account as well as - account-level audit logs. See [Audit log delivery] for details. * Auditable - events are typically available in logs within 15 minutes. - - [Audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html - [Billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html - [Usage page]: https://docs.databricks.com/administration-guide/account-settings/usage.html - [create a new AWS S3 bucket]: https://docs.databricks.com/administration-guide/account-api/aws-storage.html`, + Short: `These APIs manage Log delivery configurations for this account.`, + Long: `These APIs manage Log delivery configurations for this account. Log delivery + configs enable you to configure the delivery of the specified type of logs to + your storage account.`, GroupID: "billing", Annotations: map[string]string{ "package": "billing", @@ -119,8 +63,6 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - // TODO: complex arg: log_delivery_configuration - cmd.Use = "create" cmd.Short = `Create a new log delivery configuration.` cmd.Long = `Create a new log delivery configuration. @@ -153,11 +95,6 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(0) - return check(cmd, args) - } - cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -174,6 +111,8 @@ func newCreate() *cobra.Command { return err } } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } response, err := a.LogDelivery.Create(ctx, createReq) @@ -219,7 +158,7 @@ func newGet() *cobra.Command { specified by ID. 
Arguments: - LOG_DELIVERY_CONFIGURATION_ID: Databricks log delivery configuration ID` + LOG_DELIVERY_CONFIGURATION_ID: The log delivery configuration id of customer` cmd.Annotations = make(map[string]string) @@ -236,14 +175,14 @@ func newGet() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Log Delivery drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "Databricks log delivery configuration ID") + id, err := cmdio.Select(ctx, names, "The log delivery configuration id of customer") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have databricks log delivery configuration id") + return fmt.Errorf("expected to have the log delivery configuration id of customer") } getReq.LogDeliveryConfigurationId = args[0] @@ -282,9 +221,10 @@ func newList() *cobra.Command { // TODO: short flags - cmd.Flags().StringVar(&listReq.CredentialsId, "credentials-id", listReq.CredentialsId, `Filter by credential configuration ID.`) - cmd.Flags().Var(&listReq.Status, "status", `Filter by status ENABLED or DISABLED. Supported values: [DISABLED, ENABLED]`) - cmd.Flags().StringVar(&listReq.StorageConfigurationId, "storage-configuration-id", listReq.StorageConfigurationId, `Filter by storage configuration ID.`) + cmd.Flags().StringVar(&listReq.CredentialsId, "credentials-id", listReq.CredentialsId, `The Credentials id to filter the search results with.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token received from a previous get all budget configurations call.`) + cmd.Flags().Var(&listReq.Status, "status", `The log delivery status to filter the search results with. Supported values: [DISABLED, ENABLED]`) + cmd.Flags().StringVar(&listReq.StorageConfigurationId, "storage-configuration-id", listReq.StorageConfigurationId, `The Storage Configuration id to filter the search results with.`) cmd.Use = "list" cmd.Short = `Get all log delivery configurations.` @@ -350,7 +290,7 @@ func newPatchStatus() *cobra.Command { [Create log delivery](:method:LogDelivery/Create). Arguments: - LOG_DELIVERY_CONFIGURATION_ID: Databricks log delivery configuration ID + LOG_DELIVERY_CONFIGURATION_ID: The log delivery configuration id of customer STATUS: Status of log delivery configuration. Set to ENABLED (enabled) or DISABLED (disabled). Defaults to ENABLED. You can [enable or disable the configuration](#operation/patch-log-delivery-config-status) later. 
@@ -391,6 +331,7 @@ func newPatchStatus() *cobra.Command { } patchStatusReq.LogDeliveryConfigurationId = args[0] if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &patchStatusReq.Status) if err != nil { return fmt.Errorf("invalid STATUS: %s", args[1]) diff --git a/cmd/account/metastore-assignments/metastore-assignments.go b/cmd/account/metastore-assignments/metastore-assignments.go index 8dc3171df1..fb4ff141ab 100755 --- a/cmd/account/metastore-assignments/metastore-assignments.go +++ b/cmd/account/metastore-assignments/metastore-assignments.go @@ -98,6 +98,7 @@ func newCreate() *cobra.Command { } } } + _, err = fmt.Sscan(args[0], &createReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) @@ -362,6 +363,7 @@ func newUpdate() *cobra.Command { } } } + _, err = fmt.Sscan(args[0], &updateReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index 5b098ed01d..b7682f8780 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -46,7 +46,7 @@ func New() *cobra.Command { cmd.AddCommand(newGetPrivateEndpointRule()) cmd.AddCommand(newListNetworkConnectivityConfigurations()) cmd.AddCommand(newListPrivateEndpointRules()) - cmd.AddCommand(newUpdateNccAzurePrivateEndpointRulePublic()) + cmd.AddCommand(newUpdatePrivateEndpointRule()) // Apply optional overrides to this command. for _, fn := range cmdOverrides { @@ -178,9 +178,12 @@ func newCreatePrivateEndpointRule() *cobra.Command { cmd.Flags().Var(&createPrivateEndpointRuleJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: domain_names - cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.GroupId, "group-id", createPrivateEndpointRuleReq.PrivateEndpointRule.GroupId, `Only used by private endpoints to Azure first-party services.`) + cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.EndpointService, "endpoint-service", createPrivateEndpointRuleReq.PrivateEndpointRule.EndpointService, `The full target AWS endpoint service name that connects to the destination resources of the private endpoint.`) + cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.GroupId, "group-id", createPrivateEndpointRuleReq.PrivateEndpointRule.GroupId, `Not used by customer-managed private endpoint services.`) + cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.ResourceId, "resource-id", createPrivateEndpointRuleReq.PrivateEndpointRule.ResourceId, `The Azure resource ID of the target resource.`) + // TODO: array: resource_names - cmd.Use = "create-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID RESOURCE_ID" + cmd.Use = "create-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID" cmd.Short = `Create a private endpoint rule.` cmd.Long = `Create a private endpoint rule. @@ -196,20 +199,12 @@ func newCreatePrivateEndpointRule() *cobra.Command { [serverless private link]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security/serverless-private-link Arguments: - NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID. 
- RESOURCE_ID: The Azure resource ID of the target resource.` + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(1)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, provide only NETWORK_CONNECTIVITY_CONFIG_ID as positional arguments. Provide 'resource_id' in your JSON input") - } - return nil - } - check := root.ExactArgs(2) + check := root.ExactArgs(1) return check(cmd, args) } @@ -231,9 +226,6 @@ func newCreatePrivateEndpointRule() *cobra.Command { } } createPrivateEndpointRuleReq.NetworkConnectivityConfigId = args[0] - if !cmd.Flags().Changed("json") { - createPrivateEndpointRuleReq.PrivateEndpointRule.ResourceId = args[1] - } response, err := a.NetworkConnectivity.CreatePrivateEndpointRule(ctx, createPrivateEndpointRuleReq) if err != nil { @@ -604,28 +596,30 @@ func newListPrivateEndpointRules() *cobra.Command { return cmd } -// start update-ncc-azure-private-endpoint-rule-public command +// start update-private-endpoint-rule command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var updateNccAzurePrivateEndpointRulePublicOverrides []func( +var updatePrivateEndpointRuleOverrides []func( *cobra.Command, - *settings.UpdateNccAzurePrivateEndpointRulePublicRequest, + *settings.UpdateNccPrivateEndpointRuleRequest, ) -func newUpdateNccAzurePrivateEndpointRulePublic() *cobra.Command { +func newUpdatePrivateEndpointRule() *cobra.Command { cmd := &cobra.Command{} - var updateNccAzurePrivateEndpointRulePublicReq settings.UpdateNccAzurePrivateEndpointRulePublicRequest - updateNccAzurePrivateEndpointRulePublicReq.PrivateEndpointRule = settings.UpdatePrivateEndpointRule{} - var updateNccAzurePrivateEndpointRulePublicJson flags.JsonFlag + var updatePrivateEndpointRuleReq settings.UpdateNccPrivateEndpointRuleRequest + updatePrivateEndpointRuleReq.PrivateEndpointRule = settings.UpdatePrivateEndpointRule{} + var updatePrivateEndpointRuleJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&updateNccAzurePrivateEndpointRulePublicJson, "json", `either inline JSON string or @path/to/file.json with request body`) + cmd.Flags().Var(&updatePrivateEndpointRuleJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: domain_names + cmd.Flags().BoolVar(&updatePrivateEndpointRuleReq.PrivateEndpointRule.Enabled, "enabled", updatePrivateEndpointRuleReq.PrivateEndpointRule.Enabled, `Only used by private endpoints towards an AWS S3 service.`) + // TODO: array: resource_names - cmd.Use = "update-ncc-azure-private-endpoint-rule-public NETWORK_CONNECTIVITY_CONFIG_ID PRIVATE_ENDPOINT_RULE_ID" + cmd.Use = "update-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID PRIVATE_ENDPOINT_RULE_ID" cmd.Short = `Update a private endpoint rule.` cmd.Long = `Update a private endpoint rule. @@ -633,12 +627,10 @@ func newUpdateNccAzurePrivateEndpointRulePublic() *cobra.Command { customer-managed resources is allowed to be updated. Arguments: - NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID. + NETWORK_CONNECTIVITY_CONFIG_ID: The ID of a network connectivity configuration, which is the parent + resource of this private endpoint rule object. 
PRIVATE_ENDPOINT_RULE_ID: Your private endpoint rule ID.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -652,7 +644,7 @@ func newUpdateNccAzurePrivateEndpointRulePublic() *cobra.Command { a := cmdctx.AccountClient(ctx) if cmd.Flags().Changed("json") { - diags := updateNccAzurePrivateEndpointRulePublicJson.Unmarshal(&updateNccAzurePrivateEndpointRulePublicReq.PrivateEndpointRule) + diags := updatePrivateEndpointRuleJson.Unmarshal(&updatePrivateEndpointRuleReq.PrivateEndpointRule) if diags.HasError() { return diags.Error() } @@ -663,10 +655,10 @@ func newUpdateNccAzurePrivateEndpointRulePublic() *cobra.Command { } } } - updateNccAzurePrivateEndpointRulePublicReq.NetworkConnectivityConfigId = args[0] - updateNccAzurePrivateEndpointRulePublicReq.PrivateEndpointRuleId = args[1] + updatePrivateEndpointRuleReq.NetworkConnectivityConfigId = args[0] + updatePrivateEndpointRuleReq.PrivateEndpointRuleId = args[1] - response, err := a.NetworkConnectivity.UpdateNccAzurePrivateEndpointRulePublic(ctx, updateNccAzurePrivateEndpointRulePublicReq) + response, err := a.NetworkConnectivity.UpdatePrivateEndpointRule(ctx, updatePrivateEndpointRuleReq) if err != nil { return err } @@ -678,8 +670,8 @@ func newUpdateNccAzurePrivateEndpointRulePublic() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range updateNccAzurePrivateEndpointRulePublicOverrides { - fn(cmd, &updateNccAzurePrivateEndpointRulePublicReq) + for _, fn := range updatePrivateEndpointRuleOverrides { + fn(cmd, &updatePrivateEndpointRuleReq) } return cmd diff --git a/cmd/account/network-policies/network-policies.go b/cmd/account/network-policies/network-policies.go index 00f5a72f71..c22c6576bb 100755 --- a/cmd/account/network-policies/network-policies.go +++ b/cmd/account/network-policies/network-policies.go @@ -30,10 +30,7 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "settings", }, - - // This service is being previewed; hide from help output. 
- Hidden: true, - RunE: root.ReportUnknownSubcommand, + RunE: root.ReportUnknownSubcommand, } // Add methods diff --git a/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go index abd425acc5..079dc94c8b 100755 --- a/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go +++ b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go @@ -152,6 +152,7 @@ func newCreate() *cobra.Command { } } } + _, err = fmt.Sscan(args[0], &createReq.ServicePrincipalId) if err != nil { return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) @@ -413,6 +414,7 @@ func newUpdate() *cobra.Command { } } } + _, err = fmt.Sscan(args[0], &updateReq.ServicePrincipalId) if err != nil { return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) diff --git a/cmd/account/service-principal-secrets/service-principal-secrets.go b/cmd/account/service-principal-secrets/service-principal-secrets.go index b28fa4ef23..cfeb70bcf9 100755 --- a/cmd/account/service-principal-secrets/service-principal-secrets.go +++ b/cmd/account/service-principal-secrets/service-principal-secrets.go @@ -107,6 +107,7 @@ func newCreate() *cobra.Command { } } } + _, err = fmt.Sscan(args[0], &createReq.ServicePrincipalId) if err != nil { return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index 41998765a0..fd0d16bef4 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -87,6 +87,7 @@ func newDelete() *cobra.Command { if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) } + _, err = fmt.Sscan(args[1], &deleteReq.PrincipalId) if err != nil { return fmt.Errorf("invalid PRINCIPAL_ID: %s", args[1]) @@ -286,10 +287,12 @@ func newUpdate() *cobra.Command { } } } + _, err = fmt.Sscan(args[0], &updateReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) } + _, err = fmt.Sscan(args[1], &updateReq.PrincipalId) if err != nil { return fmt.Errorf("invalid PRINCIPAL_ID: %s", args[1]) diff --git a/cmd/account/workspace-network-configuration/workspace-network-configuration.go b/cmd/account/workspace-network-configuration/workspace-network-configuration.go index c94ac5a96f..946b8aae0d 100755 --- a/cmd/account/workspace-network-configuration/workspace-network-configuration.go +++ b/cmd/account/workspace-network-configuration/workspace-network-configuration.go @@ -20,22 +20,19 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "workspace-network-configuration", - Short: `These APIs allow configuration of network settings for Databricks workspaces.`, - Long: `These APIs allow configuration of network settings for Databricks workspaces. - Each workspace is always associated with exactly one network policy that - controls which network destinations can be accessed from the Databricks - environment. By default, workspaces are associated with the 'default-policy' - network policy. 
You cannot create or delete a workspace's network - configuration, only update it to associate the workspace with a different - policy.`, + Short: `These APIs allow configuration of network settings for Databricks workspaces by selecting which network policy to associate with the workspace.`, + Long: `These APIs allow configuration of network settings for Databricks workspaces + by selecting which network policy to associate with the workspace. Each + workspace is always associated with exactly one network policy that controls + which network destinations can be accessed from the Databricks environment. By + default, workspaces are associated with the 'default-policy' network policy. + You cannot create or delete a workspace's network option, only update it to + associate the workspace with a different policy`, GroupID: "settings", Annotations: map[string]string{ "package": "settings", }, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, + RunE: root.ReportUnknownSubcommand, } // Add methods @@ -67,12 +64,12 @@ func newGetWorkspaceNetworkOptionRpc() *cobra.Command { // TODO: short flags cmd.Use = "get-workspace-network-option-rpc WORKSPACE_ID" - cmd.Short = `Get workspace network configuration.` - cmd.Long = `Get workspace network configuration. + cmd.Short = `Get workspace network option.` + cmd.Long = `Get workspace network option. - Gets the network configuration for a workspace. Every workspace has exactly - one network policy binding, with 'default-policy' used if no explicit - assignment exists. + Gets the network option for a workspace. Every workspace has exactly one + network policy binding, with 'default-policy' used if no explicit assignment + exists. Arguments: WORKSPACE_ID: The workspace ID.` @@ -136,12 +133,12 @@ func newUpdateWorkspaceNetworkOptionRpc() *cobra.Command { cmd.Flags().Int64Var(&updateWorkspaceNetworkOptionRpcReq.WorkspaceNetworkOption.WorkspaceId, "workspace-id", updateWorkspaceNetworkOptionRpcReq.WorkspaceNetworkOption.WorkspaceId, `The workspace ID.`) cmd.Use = "update-workspace-network-option-rpc WORKSPACE_ID" - cmd.Short = `Update workspace network configuration.` - cmd.Long = `Update workspace network configuration. + cmd.Short = `Update workspace network option.` + cmd.Long = `Update workspace network option. - Updates the network configuration for a workspace. This operation associates - the workspace with the specified network policy. To revert to the default - policy, specify 'default-policy' as the network_policy_id. + Updates the network option for a workspace. This operation associates the + workspace with the specified network policy. To revert to the default policy, + specify 'default-policy' as the network_policy_id. 
Arguments: WORKSPACE_ID: The workspace ID.` @@ -170,6 +167,7 @@ func newUpdateWorkspaceNetworkOptionRpc() *cobra.Command { } } } + _, err = fmt.Sscan(args[0], &updateWorkspaceNetworkOptionRpcReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 1ed97595ae..bb8a304b38 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -238,6 +238,7 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have workspace id") } + _, err = fmt.Sscan(args[0], &deleteReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) @@ -324,6 +325,7 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have workspace id") } + _, err = fmt.Sscan(args[0], &getReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) @@ -589,6 +591,7 @@ func newUpdate() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have workspace id") } + _, err = fmt.Sscan(args[0], &updateReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/workspace/alerts-v2/alerts-v2.go b/cmd/workspace/alerts-v2/alerts-v2.go index 37db596a0e..1e1267fc4e 100755 --- a/cmd/workspace/alerts-v2/alerts-v2.go +++ b/cmd/workspace/alerts-v2/alerts-v2.go @@ -20,16 +20,13 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "alerts-v2", - Short: `TODO: Add description.`, - Long: `TODO: Add description`, + Short: `New version of SQL Alerts.`, + Long: `New version of SQL Alerts`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", }, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, + RunE: root.ReportUnknownSubcommand, } // Add methods diff --git a/cmd/workspace/artifact-allowlists/artifact-allowlists.go b/cmd/workspace/artifact-allowlists/artifact-allowlists.go index ff5a3c02c9..b8b8a2ca28 100755 --- a/cmd/workspace/artifact-allowlists/artifact-allowlists.go +++ b/cmd/workspace/artifact-allowlists/artifact-allowlists.go @@ -162,6 +162,7 @@ func newUpdate() *cobra.Command { } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } + _, err = fmt.Sscan(args[0], &updateReq.ArtifactType) if err != nil { return fmt.Errorf("invalid ARTIFACT_TYPE: %s", args[0]) diff --git a/cmd/workspace/clean-room-assets/clean-room-assets.go b/cmd/workspace/clean-room-assets/clean-room-assets.go index e8c4b9cb3b..e03fb52ef7 100755 --- a/cmd/workspace/clean-room-assets/clean-room-assets.go +++ b/cmd/workspace/clean-room-assets/clean-room-assets.go @@ -149,7 +149,7 @@ func newDelete() *cobra.Command { // TODO: short flags - cmd.Use = "delete CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME" + cmd.Use = "delete CLEAN_ROOM_NAME ASSET_TYPE NAME" cmd.Short = `Delete an asset.` cmd.Long = `Delete an asset. @@ -159,7 +159,7 @@ func newDelete() *cobra.Command { CLEAN_ROOM_NAME: Name of the clean room. ASSET_TYPE: The type of the asset. 
Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME] - ASSET_FULL_NAME: The fully qualified name of the asset, it is same as the name field in + NAME: The fully qualified name of the asset, it is same as the name field in CleanRoomAsset.` cmd.Annotations = make(map[string]string) @@ -175,11 +175,12 @@ func newDelete() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) deleteReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &deleteReq.AssetType) if err != nil { return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) } - deleteReq.AssetFullName = args[2] + deleteReq.Name = args[2] err = w.CleanRoomAssets.Delete(ctx, deleteReq) if err != nil { @@ -216,7 +217,7 @@ func newGet() *cobra.Command { // TODO: short flags - cmd.Use = "get CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME" + cmd.Use = "get CLEAN_ROOM_NAME ASSET_TYPE NAME" cmd.Short = `Get an asset.` cmd.Long = `Get an asset. @@ -226,7 +227,7 @@ func newGet() *cobra.Command { CLEAN_ROOM_NAME: Name of the clean room. ASSET_TYPE: The type of the asset. Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME] - ASSET_FULL_NAME: The fully qualified name of the asset, it is same as the name field in + NAME: The fully qualified name of the asset, it is same as the name field in CleanRoomAsset.` cmd.Annotations = make(map[string]string) @@ -242,11 +243,12 @@ func newGet() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) getReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &getReq.AssetType) if err != nil { return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) } - getReq.AssetFullName = args[2] + getReq.Name = args[2] response, err := w.CleanRoomAssets.Get(ctx, getReq) if err != nil { @@ -396,6 +398,7 @@ func newUpdate() *cobra.Command { } } updateReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &updateReq.AssetType) if err != nil { return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index c496d588bb..545b9c486e 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -24,10 +24,11 @@ import ( credentials "github.com/databricks/cli/cmd/workspace/credentials" credentials_manager "github.com/databricks/cli/cmd/workspace/credentials-manager" current_user "github.com/databricks/cli/cmd/workspace/current-user" + custom_llms "github.com/databricks/cli/cmd/workspace/custom-llms" dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets" dashboards "github.com/databricks/cli/cmd/workspace/dashboards" data_sources "github.com/databricks/cli/cmd/workspace/data-sources" - database_instances "github.com/databricks/cli/cmd/workspace/database-instances" + database "github.com/databricks/cli/cmd/workspace/database" experiments "github.com/databricks/cli/cmd/workspace/experiments" external_locations "github.com/databricks/cli/cmd/workspace/external-locations" forecasting "github.com/databricks/cli/cmd/workspace/forecasting" @@ -63,10 +64,10 @@ import ( provider_provider_analytics_dashboards "github.com/databricks/cli/cmd/workspace/provider-provider-analytics-dashboards" provider_providers "github.com/databricks/cli/cmd/workspace/provider-providers" providers "github.com/databricks/cli/cmd/workspace/providers" + quality_monitor_v2 "github.com/databricks/cli/cmd/workspace/quality-monitor-v2" quality_monitors "github.com/databricks/cli/cmd/workspace/quality-monitors" queries "github.com/databricks/cli/cmd/workspace/queries" queries_legacy "github.com/databricks/cli/cmd/workspace/queries-legacy" - query_execution 
"github.com/databricks/cli/cmd/workspace/query-execution" query_history "github.com/databricks/cli/cmd/workspace/query-history" query_visualizations "github.com/databricks/cli/cmd/workspace/query-visualizations" query_visualizations_legacy "github.com/databricks/cli/cmd/workspace/query-visualizations-legacy" @@ -125,10 +126,11 @@ func All() []*cobra.Command { out = append(out, credentials.New()) out = append(out, credentials_manager.New()) out = append(out, current_user.New()) + out = append(out, custom_llms.New()) out = append(out, dashboard_widgets.New()) out = append(out, dashboards.New()) out = append(out, data_sources.New()) - out = append(out, database_instances.New()) + out = append(out, database.New()) out = append(out, experiments.New()) out = append(out, external_locations.New()) out = append(out, functions.New()) @@ -163,10 +165,10 @@ func All() []*cobra.Command { out = append(out, provider_provider_analytics_dashboards.New()) out = append(out, provider_providers.New()) out = append(out, providers.New()) + out = append(out, quality_monitor_v2.New()) out = append(out, quality_monitors.New()) out = append(out, queries.New()) out = append(out, queries_legacy.New()) - out = append(out, query_execution.New()) out = append(out, query_history.New()) out = append(out, query_visualizations.New()) out = append(out, query_visualizations_legacy.New()) diff --git a/cmd/workspace/custom-llms/custom-llms.go b/cmd/workspace/custom-llms/custom-llms.go new file mode 100755 index 0000000000..34ad043881 --- /dev/null +++ b/cmd/workspace/custom-llms/custom-llms.go @@ -0,0 +1,287 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package custom_llms + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/aibuilder" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "custom-llms", + Short: `The Custom LLMs service manages state and powers the UI for the Custom LLM product.`, + Long: `The Custom LLMs service manages state and powers the UI for the Custom LLM + product.`, + GroupID: "aibuilder", + Annotations: map[string]string{ + "package": "aibuilder", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newCancel()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start cancel command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cancelOverrides []func( + *cobra.Command, + *aibuilder.CancelCustomLlmOptimizationRunRequest, +) + +func newCancel() *cobra.Command { + cmd := &cobra.Command{} + + var cancelReq aibuilder.CancelCustomLlmOptimizationRunRequest + + // TODO: short flags + + cmd.Use = "cancel ID" + cmd.Short = `Cancel a Custom LLM Optimization Run.` + cmd.Long = `Cancel a Custom LLM Optimization Run.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + cancelReq.Id = args[0] + + err = w.CustomLlms.Cancel(ctx, cancelReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range cancelOverrides { + fn(cmd, &cancelReq) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *aibuilder.StartCustomLlmOptimizationRunRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq aibuilder.StartCustomLlmOptimizationRunRequest + + // TODO: short flags + + cmd.Use = "create ID" + cmd.Short = `Start a Custom LLM Optimization Run.` + cmd.Long = `Start a Custom LLM Optimization Run. + + Arguments: + ID: The Id of the tile.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + createReq.Id = args[0] + + response, err := w.CustomLlms.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *aibuilder.GetCustomLlmRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq aibuilder.GetCustomLlmRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get a Custom LLM.` + cmd.Long = `Get a Custom LLM. 
+ + Arguments: + ID: The id of the custom llm` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getReq.Id = args[0] + + response, err := w.CustomLlms.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *aibuilder.UpdateCustomLlmRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq aibuilder.UpdateCustomLlmRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Update a Custom LLM.` + cmd.Long = `Update a Custom LLM. + + Arguments: + ID: The id of the custom llm` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.CustomLlms.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CustomLlms diff --git a/cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go b/cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go new file mode 100755 index 0000000000..0da11badd3 --- /dev/null +++ b/cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go @@ -0,0 +1,218 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package dashboard_email_subscriptions + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. 
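The custom-llms commands added above are thin wrappers over the new CustomLlms service in databricks-sdk-go. Below is a minimal standalone sketch of calling that service directly, assuming the v0.72.0 WorkspaceClient exposes CustomLlms exactly as the generated command code uses it; databricks.NewWorkspaceClient, environment-based authentication, and the "my-custom-llm-id" value are assumptions or placeholders rather than anything taken from this patch:

    package main

    import (
        "context"
        "log"

        "github.com/databricks/databricks-sdk-go"
        "github.com/databricks/databricks-sdk-go/service/aibuilder"
    )

    func main() {
        ctx := context.Background()

        // Credentials are resolved from the environment (DATABRICKS_HOST, DATABRICKS_TOKEN, ...).
        w, err := databricks.NewWorkspaceClient()
        if err != nil {
            log.Fatal(err)
        }

        // Fetch a custom LLM by id, mirroring `databricks custom-llms get ID`.
        llm, err := w.CustomLlms.Get(ctx, aibuilder.GetCustomLlmRequest{Id: "my-custom-llm-id"})
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("custom llm: %+v", llm)
    }

The equivalent CLI invocation would be `databricks custom-llms get my-custom-llm-id`; note the command group is hidden from help output while the service is in preview.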
+// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "dashboard-email-subscriptions", + Short: `Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can send subscription emails containing PDFs and/or images of the dashboard.`, + Long: `Controls whether schedules or workload tasks for refreshing AI/BI Dashboards + in the workspace can send subscription emails containing PDFs and/or images of + the dashboard. By default, this setting is enabled (set to true)`, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteDashboardEmailSubscriptionsRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteDashboardEmailSubscriptionsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the Dashboard Email Subscriptions setting.` + cmd.Long = `Delete the Dashboard Email Subscriptions setting. + + Reverts the Dashboard Email Subscriptions setting to its default value.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Settings.DashboardEmailSubscriptions().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetDashboardEmailSubscriptionsRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetDashboardEmailSubscriptionsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the Dashboard Email Subscriptions setting.` + cmd.Long = `Get the Dashboard Email Subscriptions setting. 
+ + Gets the Dashboard Email Subscriptions setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Settings.DashboardEmailSubscriptions().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateDashboardEmailSubscriptionsRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateDashboardEmailSubscriptionsRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the Dashboard Email Subscriptions setting.` + cmd.Long = `Update the Dashboard Email Subscriptions setting. + + Updates the Dashboard Email Subscriptions setting.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.DashboardEmailSubscriptions().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service DashboardEmailSubscriptions diff --git a/cmd/workspace/database/database.go b/cmd/workspace/database/database.go new file mode 100755 index 0000000000..f955d5953f --- /dev/null +++ b/cmd/workspace/database/database.go @@ -0,0 +1,1082 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package database + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/database" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "database", + Short: `Database Instances provide access to a database via REST API or direct SQL.`, + Long: `Database Instances provide access to a database via REST API or direct SQL.`, + GroupID: "database", + Annotations: map[string]string{ + "package": "database", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newCreateDatabaseCatalog()) + cmd.AddCommand(newCreateDatabaseInstance()) + cmd.AddCommand(newCreateDatabaseTable()) + cmd.AddCommand(newCreateSyncedDatabaseTable()) + cmd.AddCommand(newDeleteDatabaseCatalog()) + cmd.AddCommand(newDeleteDatabaseInstance()) + cmd.AddCommand(newDeleteDatabaseTable()) + cmd.AddCommand(newDeleteSyncedDatabaseTable()) + cmd.AddCommand(newFindDatabaseInstanceByUid()) + cmd.AddCommand(newGenerateDatabaseCredential()) + cmd.AddCommand(newGetDatabaseCatalog()) + cmd.AddCommand(newGetDatabaseInstance()) + cmd.AddCommand(newGetDatabaseTable()) + cmd.AddCommand(newGetSyncedDatabaseTable()) + cmd.AddCommand(newListDatabaseInstances()) + cmd.AddCommand(newUpdateDatabaseInstance()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create-database-catalog command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createDatabaseCatalogOverrides []func( + *cobra.Command, + *database.CreateDatabaseCatalogRequest, +) + +func newCreateDatabaseCatalog() *cobra.Command { + cmd := &cobra.Command{} + + var createDatabaseCatalogReq database.CreateDatabaseCatalogRequest + createDatabaseCatalogReq.Catalog = database.DatabaseCatalog{} + var createDatabaseCatalogJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createDatabaseCatalogJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().BoolVar(&createDatabaseCatalogReq.Catalog.CreateDatabaseIfNotExists, "create-database-if-not-exists", createDatabaseCatalogReq.Catalog.CreateDatabaseIfNotExists, ``) + + cmd.Use = "create-database-catalog NAME DATABASE_INSTANCE_NAME DATABASE_NAME" + cmd.Short = `Create a Database Catalog.` + cmd.Long = `Create a Database Catalog. + + Arguments: + NAME: The name of the catalog in UC. + DATABASE_INSTANCE_NAME: The name of the DatabaseInstance housing the database. + DATABASE_NAME: The name of the database (in a instance) associated with the catalog.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'database_instance_name', 'database_name' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createDatabaseCatalogJson.Unmarshal(&createDatabaseCatalogReq.Catalog) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createDatabaseCatalogReq.Catalog.Name = args[0] + } + if !cmd.Flags().Changed("json") { + createDatabaseCatalogReq.Catalog.DatabaseInstanceName = args[1] + } + if !cmd.Flags().Changed("json") { + createDatabaseCatalogReq.Catalog.DatabaseName = args[2] + } + + response, err := w.Database.CreateDatabaseCatalog(ctx, createDatabaseCatalogReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createDatabaseCatalogOverrides { + fn(cmd, &createDatabaseCatalogReq) + } + + return cmd +} + +// start create-database-instance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createDatabaseInstanceOverrides []func( + *cobra.Command, + *database.CreateDatabaseInstanceRequest, +) + +func newCreateDatabaseInstance() *cobra.Command { + cmd := &cobra.Command{} + + var createDatabaseInstanceReq database.CreateDatabaseInstanceRequest + createDatabaseInstanceReq.DatabaseInstance = database.DatabaseInstance{} + var createDatabaseInstanceJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createDatabaseInstanceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", createDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) + cmd.Flags().BoolVar(&createDatabaseInstanceReq.DatabaseInstance.Stopped, "stopped", createDatabaseInstanceReq.DatabaseInstance.Stopped, `Whether the instance is stopped.`) + + cmd.Use = "create-database-instance NAME" + cmd.Short = `Create a Database Instance.` + cmd.Long = `Create a Database Instance. + + Arguments: + NAME: The name of the instance. This is the unique identifier for the instance.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createDatabaseInstanceJson.Unmarshal(&createDatabaseInstanceReq.DatabaseInstance) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createDatabaseInstanceReq.DatabaseInstance.Name = args[0] + } + + response, err := w.Database.CreateDatabaseInstance(ctx, createDatabaseInstanceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createDatabaseInstanceOverrides { + fn(cmd, &createDatabaseInstanceReq) + } + + return cmd +} + +// start create-database-table command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createDatabaseTableOverrides []func( + *cobra.Command, + *database.CreateDatabaseTableRequest, +) + +func newCreateDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var createDatabaseTableReq database.CreateDatabaseTableRequest + createDatabaseTableReq.Table = database.DatabaseTable{} + var createDatabaseTableJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createDatabaseTableJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createDatabaseTableReq.Table.DatabaseInstanceName, "database-instance-name", createDatabaseTableReq.Table.DatabaseInstanceName, `Name of the target database instance.`) + cmd.Flags().StringVar(&createDatabaseTableReq.Table.LogicalDatabaseName, "logical-database-name", createDatabaseTableReq.Table.LogicalDatabaseName, `Target Postgres database object (logical database) name for this table.`) + + cmd.Use = "create-database-table NAME" + cmd.Short = `Create a Database Table.` + cmd.Long = `Create a Database Table. + + Arguments: + NAME: Full three-part (catalog, schema, table) name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createDatabaseTableJson.Unmarshal(&createDatabaseTableReq.Table) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createDatabaseTableReq.Table.Name = args[0] + } + + response, err := w.Database.CreateDatabaseTable(ctx, createDatabaseTableReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createDatabaseTableOverrides { + fn(cmd, &createDatabaseTableReq) + } + + return cmd +} + +// start create-synced-database-table command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createSyncedDatabaseTableOverrides []func( + *cobra.Command, + *database.CreateSyncedDatabaseTableRequest, +) + +func newCreateSyncedDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var createSyncedDatabaseTableReq database.CreateSyncedDatabaseTableRequest + createSyncedDatabaseTableReq.SyncedTable = database.SyncedDatabaseTable{} + var createSyncedDatabaseTableJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createSyncedDatabaseTableJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: data_synchronization_status + cmd.Flags().StringVar(&createSyncedDatabaseTableReq.SyncedTable.DatabaseInstanceName, "database-instance-name", createSyncedDatabaseTableReq.SyncedTable.DatabaseInstanceName, `Name of the target database instance.`) + cmd.Flags().StringVar(&createSyncedDatabaseTableReq.SyncedTable.LogicalDatabaseName, "logical-database-name", createSyncedDatabaseTableReq.SyncedTable.LogicalDatabaseName, `Target Postgres database object (logical database) name for this table.`) + // TODO: complex arg: spec + + cmd.Use = "create-synced-database-table NAME" + cmd.Short = `Create a Synced Database Table.` + cmd.Long = `Create a Synced Database Table. + + Arguments: + NAME: Full three-part (catalog, schema, table) name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createSyncedDatabaseTableJson.Unmarshal(&createSyncedDatabaseTableReq.SyncedTable) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createSyncedDatabaseTableReq.SyncedTable.Name = args[0] + } + + response, err := w.Database.CreateSyncedDatabaseTable(ctx, createSyncedDatabaseTableReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createSyncedDatabaseTableOverrides { + fn(cmd, &createSyncedDatabaseTableReq) + } + + return cmd +} + +// start delete-database-catalog command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteDatabaseCatalogOverrides []func( + *cobra.Command, + *database.DeleteDatabaseCatalogRequest, +) + +func newDeleteDatabaseCatalog() *cobra.Command { + cmd := &cobra.Command{} + + var deleteDatabaseCatalogReq database.DeleteDatabaseCatalogRequest + + // TODO: short flags + + cmd.Use = "delete-database-catalog NAME" + cmd.Short = `Delete a Database Catalog.` + cmd.Long = `Delete a Database Catalog.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteDatabaseCatalogReq.Name = args[0] + + err = w.Database.DeleteDatabaseCatalog(ctx, deleteDatabaseCatalogReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteDatabaseCatalogOverrides { + fn(cmd, &deleteDatabaseCatalogReq) + } + + return cmd +} + +// start delete-database-instance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteDatabaseInstanceOverrides []func( + *cobra.Command, + *database.DeleteDatabaseInstanceRequest, +) + +func newDeleteDatabaseInstance() *cobra.Command { + cmd := &cobra.Command{} + + var deleteDatabaseInstanceReq database.DeleteDatabaseInstanceRequest + + // TODO: short flags + + cmd.Flags().BoolVar(&deleteDatabaseInstanceReq.Force, "force", deleteDatabaseInstanceReq.Force, `By default, a instance cannot be deleted if it has descendant instances created via PITR.`) + cmd.Flags().BoolVar(&deleteDatabaseInstanceReq.Purge, "purge", deleteDatabaseInstanceReq.Purge, `If false, the database instance is soft deleted.`) + + cmd.Use = "delete-database-instance NAME" + cmd.Short = `Delete a Database Instance.` + cmd.Long = `Delete a Database Instance. + + Arguments: + NAME: Name of the instance to delete.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteDatabaseInstanceReq.Name = args[0] + + err = w.Database.DeleteDatabaseInstance(ctx, deleteDatabaseInstanceReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteDatabaseInstanceOverrides { + fn(cmd, &deleteDatabaseInstanceReq) + } + + return cmd +} + +// start delete-database-table command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteDatabaseTableOverrides []func( + *cobra.Command, + *database.DeleteDatabaseTableRequest, +) + +func newDeleteDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var deleteDatabaseTableReq database.DeleteDatabaseTableRequest + + // TODO: short flags + + cmd.Use = "delete-database-table NAME" + cmd.Short = `Delete a Database Table.` + cmd.Long = `Delete a Database Table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteDatabaseTableReq.Name = args[0] + + err = w.Database.DeleteDatabaseTable(ctx, deleteDatabaseTableReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteDatabaseTableOverrides { + fn(cmd, &deleteDatabaseTableReq) + } + + return cmd +} + +// start delete-synced-database-table command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteSyncedDatabaseTableOverrides []func( + *cobra.Command, + *database.DeleteSyncedDatabaseTableRequest, +) + +func newDeleteSyncedDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var deleteSyncedDatabaseTableReq database.DeleteSyncedDatabaseTableRequest + + // TODO: short flags + + cmd.Use = "delete-synced-database-table NAME" + cmd.Short = `Delete a Synced Database Table.` + cmd.Long = `Delete a Synced Database Table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteSyncedDatabaseTableReq.Name = args[0] + + err = w.Database.DeleteSyncedDatabaseTable(ctx, deleteSyncedDatabaseTableReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteSyncedDatabaseTableOverrides { + fn(cmd, &deleteSyncedDatabaseTableReq) + } + + return cmd +} + +// start find-database-instance-by-uid command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var findDatabaseInstanceByUidOverrides []func( + *cobra.Command, + *database.FindDatabaseInstanceByUidRequest, +) + +func newFindDatabaseInstanceByUid() *cobra.Command { + cmd := &cobra.Command{} + + var findDatabaseInstanceByUidReq database.FindDatabaseInstanceByUidRequest + + // TODO: short flags + + cmd.Flags().StringVar(&findDatabaseInstanceByUidReq.Uid, "uid", findDatabaseInstanceByUidReq.Uid, `UID of the cluster to get.`) + + cmd.Use = "find-database-instance-by-uid" + cmd.Short = `Find a Database Instance by uid.` + cmd.Long = `Find a Database Instance by uid.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Database.FindDatabaseInstanceByUid(ctx, findDatabaseInstanceByUidReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range findDatabaseInstanceByUidOverrides { + fn(cmd, &findDatabaseInstanceByUidReq) + } + + return cmd +} + +// start generate-database-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var generateDatabaseCredentialOverrides []func( + *cobra.Command, + *database.GenerateDatabaseCredentialRequest, +) + +func newGenerateDatabaseCredential() *cobra.Command { + cmd := &cobra.Command{} + + var generateDatabaseCredentialReq database.GenerateDatabaseCredentialRequest + var generateDatabaseCredentialJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&generateDatabaseCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: instance_names + cmd.Flags().StringVar(&generateDatabaseCredentialReq.RequestId, "request-id", generateDatabaseCredentialReq.RequestId, ``) + + cmd.Use = "generate-database-credential" + cmd.Short = `Generates a credential that can be used to access database instances.` + cmd.Long = `Generates a credential that can be used to access database instances.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := generateDatabaseCredentialJson.Unmarshal(&generateDatabaseCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.Database.GenerateDatabaseCredential(ctx, generateDatabaseCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range generateDatabaseCredentialOverrides { + fn(cmd, &generateDatabaseCredentialReq) + } + + return cmd +} + +// start get-database-catalog command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getDatabaseCatalogOverrides []func( + *cobra.Command, + *database.GetDatabaseCatalogRequest, +) + +func newGetDatabaseCatalog() *cobra.Command { + cmd := &cobra.Command{} + + var getDatabaseCatalogReq database.GetDatabaseCatalogRequest + + // TODO: short flags + + cmd.Use = "get-database-catalog NAME" + cmd.Short = `Get a Database Catalog.` + cmd.Long = `Get a Database Catalog.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getDatabaseCatalogReq.Name = args[0] + + response, err := w.Database.GetDatabaseCatalog(ctx, getDatabaseCatalogReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range getDatabaseCatalogOverrides { + fn(cmd, &getDatabaseCatalogReq) + } + + return cmd +} + +// start get-database-instance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getDatabaseInstanceOverrides []func( + *cobra.Command, + *database.GetDatabaseInstanceRequest, +) + +func newGetDatabaseInstance() *cobra.Command { + cmd := &cobra.Command{} + + var getDatabaseInstanceReq database.GetDatabaseInstanceRequest + + // TODO: short flags + + cmd.Use = "get-database-instance NAME" + cmd.Short = `Get a Database Instance.` + cmd.Long = `Get a Database Instance. + + Arguments: + NAME: Name of the cluster to get.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getDatabaseInstanceReq.Name = args[0] + + response, err := w.Database.GetDatabaseInstance(ctx, getDatabaseInstanceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getDatabaseInstanceOverrides { + fn(cmd, &getDatabaseInstanceReq) + } + + return cmd +} + +// start get-database-table command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getDatabaseTableOverrides []func( + *cobra.Command, + *database.GetDatabaseTableRequest, +) + +func newGetDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var getDatabaseTableReq database.GetDatabaseTableRequest + + // TODO: short flags + + cmd.Use = "get-database-table NAME" + cmd.Short = `Get a Database Table.` + cmd.Long = `Get a Database Table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getDatabaseTableReq.Name = args[0] + + response, err := w.Database.GetDatabaseTable(ctx, getDatabaseTableReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getDatabaseTableOverrides { + fn(cmd, &getDatabaseTableReq) + } + + return cmd +} + +// start get-synced-database-table command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getSyncedDatabaseTableOverrides []func( + *cobra.Command, + *database.GetSyncedDatabaseTableRequest, +) + +func newGetSyncedDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var getSyncedDatabaseTableReq database.GetSyncedDatabaseTableRequest + + // TODO: short flags + + cmd.Use = "get-synced-database-table NAME" + cmd.Short = `Get a Synced Database Table.` + cmd.Long = `Get a Synced Database Table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getSyncedDatabaseTableReq.Name = args[0] + + response, err := w.Database.GetSyncedDatabaseTable(ctx, getSyncedDatabaseTableReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getSyncedDatabaseTableOverrides { + fn(cmd, &getSyncedDatabaseTableReq) + } + + return cmd +} + +// start list-database-instances command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listDatabaseInstancesOverrides []func( + *cobra.Command, + *database.ListDatabaseInstancesRequest, +) + +func newListDatabaseInstances() *cobra.Command { + cmd := &cobra.Command{} + + var listDatabaseInstancesReq database.ListDatabaseInstancesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listDatabaseInstancesReq.PageSize, "page-size", listDatabaseInstancesReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listDatabaseInstancesReq.PageToken, "page-token", listDatabaseInstancesReq.PageToken, `Pagination token to go to the next page of Database Instances.`) + + cmd.Use = "list-database-instances" + cmd.Short = `List Database Instances.` + cmd.Long = `List Database Instances.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response := w.Database.ListDatabaseInstances(ctx, listDatabaseInstancesReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listDatabaseInstancesOverrides { + fn(cmd, &listDatabaseInstancesReq) + } + + return cmd +} + +// start update-database-instance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateDatabaseInstanceOverrides []func( + *cobra.Command, + *database.UpdateDatabaseInstanceRequest, +) + +func newUpdateDatabaseInstance() *cobra.Command { + cmd := &cobra.Command{} + + var updateDatabaseInstanceReq database.UpdateDatabaseInstanceRequest + updateDatabaseInstanceReq.DatabaseInstance = database.DatabaseInstance{} + var updateDatabaseInstanceJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateDatabaseInstanceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", updateDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) + cmd.Flags().BoolVar(&updateDatabaseInstanceReq.DatabaseInstance.Stopped, "stopped", updateDatabaseInstanceReq.DatabaseInstance.Stopped, `Whether the instance is stopped.`) + + cmd.Use = "update-database-instance NAME" + cmd.Short = `Update a Database Instance.` + cmd.Long = `Update a Database Instance. + + Arguments: + NAME: The name of the instance. This is the unique identifier for the instance.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateDatabaseInstanceJson.Unmarshal(&updateDatabaseInstanceReq.DatabaseInstance) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateDatabaseInstanceReq.Name = args[0] + + response, err := w.Database.UpdateDatabaseInstance(ctx, updateDatabaseInstanceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateDatabaseInstanceOverrides { + fn(cmd, &updateDatabaseInstanceReq) + } + + return cmd +} + +// end service Database diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index d4a4738d07..fec727b883 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -49,8 +49,6 @@ func New() *cobra.Command { cmd.AddCommand(newDeleteTag()) cmd.AddCommand(newFinalizeLoggedModel()) cmd.AddCommand(newGetByName()) - cmd.AddCommand(newGetCredentialsForTraceDataDownload()) - cmd.AddCommand(newGetCredentialsForTraceDataUpload()) cmd.AddCommand(newGetExperiment()) cmd.AddCommand(newGetHistory()) cmd.AddCommand(newGetLoggedModel()) @@ -59,7 +57,6 @@ func New() *cobra.Command { cmd.AddCommand(newGetRun()) cmd.AddCommand(newListArtifacts()) cmd.AddCommand(newListExperiments()) - cmd.AddCommand(newListLoggedModelArtifacts()) cmd.AddCommand(newLogBatch()) cmd.AddCommand(newLogInputs()) cmd.AddCommand(newLogLoggedModelParams()) @@ -209,9 +206,6 @@ func newCreateLoggedModel() *cobra.Command { Arguments: EXPERIMENT_ID: The ID of the experiment that owns the model.` - // This command is being previewed; hide from help output. 
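The database service wired up above can likewise be called directly from Go. Below is a minimal sketch, assuming databricks.NewWorkspaceClient with environment-based authentication and using placeholder values for the instance name and capacity SKU (valid capacity values are not listed in this patch); the request and field names mirror the generated command code:

    package main

    import (
        "context"
        "log"

        "github.com/databricks/databricks-sdk-go"
        "github.com/databricks/databricks-sdk-go/service/database"
    )

    func main() {
        ctx := context.Background()
        w, err := databricks.NewWorkspaceClient()
        if err != nil {
            log.Fatal(err)
        }

        // Create an instance, mirroring `databricks database create-database-instance NAME --capacity ...`.
        created, err := w.Database.CreateDatabaseInstance(ctx, database.CreateDatabaseInstanceRequest{
            DatabaseInstance: database.DatabaseInstance{
                Name:     "my-instance", // placeholder instance name
                Capacity: "CU_1",        // placeholder SKU; check the service docs for valid values
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("created: %+v", created)

        // Read the instance back, mirroring `databricks database get-database-instance NAME`.
        got, err := w.Database.GetDatabaseInstance(ctx, database.GetDatabaseInstanceRequest{Name: "my-instance"})
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("instance: %+v", got)
    }

As with custom-llms, the database command group replaces the earlier database-instances group registered in cmd.go and stays hidden from help output while the service is in preview.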
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -449,9 +443,6 @@ func newDeleteLoggedModel() *cobra.Command { Arguments: MODEL_ID: The ID of the logged model to delete.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -509,9 +500,6 @@ func newDeleteLoggedModelTag() *cobra.Command { MODEL_ID: The ID of the logged model to delete the tag from. TAG_KEY: The tag key.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -696,6 +684,7 @@ func newDeleteRuns() *cobra.Command { deleteRunsReq.ExperimentId = args[0] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &deleteRunsReq.MaxTimestampMillis) if err != nil { return fmt.Errorf("invalid MAX_TIMESTAMP_MILLIS: %s", args[1]) @@ -833,12 +822,9 @@ func newFinalizeLoggedModel() *cobra.Command { MODEL_ID: The ID of the logged model to finalize. STATUS: Whether or not the model is ready for use. "LOGGED_MODEL_UPLOAD_FAILED" indicates that something went wrong when - logging the model weights / agent code). + logging the model weights / agent code. Supported values: [LOGGED_MODEL_PENDING, LOGGED_MODEL_READY, LOGGED_MODEL_UPLOAD_FAILED]` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -872,6 +858,7 @@ func newFinalizeLoggedModel() *cobra.Command { } finalizeLoggedModelReq.ModelId = args[0] if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &finalizeLoggedModelReq.Status) if err != nil { return fmt.Errorf("invalid STATUS: %s", args[1]) @@ -963,124 +950,6 @@ func newGetByName() *cobra.Command { return cmd } -// start get-credentials-for-trace-data-download command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getCredentialsForTraceDataDownloadOverrides []func( - *cobra.Command, - *ml.GetCredentialsForTraceDataDownloadRequest, -) - -func newGetCredentialsForTraceDataDownload() *cobra.Command { - cmd := &cobra.Command{} - - var getCredentialsForTraceDataDownloadReq ml.GetCredentialsForTraceDataDownloadRequest - - // TODO: short flags - - cmd.Use = "get-credentials-for-trace-data-download REQUEST_ID" - cmd.Short = `Get credentials to download trace data.` - cmd.Long = `Get credentials to download trace data. - - Arguments: - REQUEST_ID: The ID of the trace to fetch artifact download credentials for.` - - // This command is being previewed; hide from help output. - cmd.Hidden = true - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - getCredentialsForTraceDataDownloadReq.RequestId = args[0] - - response, err := w.Experiments.GetCredentialsForTraceDataDownload(ctx, getCredentialsForTraceDataDownloadReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. 
- // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getCredentialsForTraceDataDownloadOverrides { - fn(cmd, &getCredentialsForTraceDataDownloadReq) - } - - return cmd -} - -// start get-credentials-for-trace-data-upload command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getCredentialsForTraceDataUploadOverrides []func( - *cobra.Command, - *ml.GetCredentialsForTraceDataUploadRequest, -) - -func newGetCredentialsForTraceDataUpload() *cobra.Command { - cmd := &cobra.Command{} - - var getCredentialsForTraceDataUploadReq ml.GetCredentialsForTraceDataUploadRequest - - // TODO: short flags - - cmd.Use = "get-credentials-for-trace-data-upload REQUEST_ID" - cmd.Short = `Get credentials to upload trace data.` - cmd.Long = `Get credentials to upload trace data. - - Arguments: - REQUEST_ID: The ID of the trace to fetch artifact upload credentials for.` - - // This command is being previewed; hide from help output. - cmd.Hidden = true - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - getCredentialsForTraceDataUploadReq.RequestId = args[0] - - response, err := w.Experiments.GetCredentialsForTraceDataUpload(ctx, getCredentialsForTraceDataUploadReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getCredentialsForTraceDataUploadOverrides { - fn(cmd, &getCredentialsForTraceDataUploadReq) - } - - return cmd -} - // start get-experiment command // Slice with functions to override default command behavior. @@ -1222,9 +1091,6 @@ func newGetLoggedModel() *cobra.Command { Arguments: MODEL_ID: The ID of the logged model to retrieve.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -1554,72 +1420,6 @@ func newListExperiments() *cobra.Command { return cmd } -// start list-logged-model-artifacts command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var listLoggedModelArtifactsOverrides []func( - *cobra.Command, - *ml.ListLoggedModelArtifactsRequest, -) - -func newListLoggedModelArtifacts() *cobra.Command { - cmd := &cobra.Command{} - - var listLoggedModelArtifactsReq ml.ListLoggedModelArtifactsRequest - - // TODO: short flags - - cmd.Flags().StringVar(&listLoggedModelArtifactsReq.ArtifactDirectoryPath, "artifact-directory-path", listLoggedModelArtifactsReq.ArtifactDirectoryPath, `Filter artifacts matching this path (a relative path from the root artifact directory).`) - cmd.Flags().StringVar(&listLoggedModelArtifactsReq.PageToken, "page-token", listLoggedModelArtifactsReq.PageToken, `Token indicating the page of artifact results to fetch.`) - - cmd.Use = "list-logged-model-artifacts MODEL_ID" - cmd.Short = `List artifacts for a logged model.` - cmd.Long = `List artifacts for a logged model. - - List artifacts for a logged model. Takes an optional - artifact_directory_path prefix which if specified, the response contains - only artifacts with the specified prefix. - - Arguments: - MODEL_ID: The ID of the logged model for which to list the artifacts.` - - // This command is being previewed; hide from help output. - cmd.Hidden = true - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - listLoggedModelArtifactsReq.ModelId = args[0] - - response, err := w.Experiments.ListLoggedModelArtifacts(ctx, listLoggedModelArtifactsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range listLoggedModelArtifactsOverrides { - fn(cmd, &listLoggedModelArtifactsReq) - } - - return cmd -} - // start log-batch command // Slice with functions to override default command behavior. @@ -1854,9 +1654,6 @@ func newLogLoggedModelParams() *cobra.Command { Arguments: MODEL_ID: The ID of the logged model to log params for.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -1975,12 +1772,14 @@ func newLogMetric() *cobra.Command { logMetricReq.Key = args[0] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &logMetricReq.Value) if err != nil { return fmt.Errorf("invalid VALUE: %s", args[1]) } } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &logMetricReq.Timestamp) if err != nil { return fmt.Errorf("invalid TIMESTAMP: %s", args[2]) @@ -2110,9 +1909,6 @@ func newLogOutputs() *cobra.Command { Arguments: RUN_ID: The ID of the Run from which to log outputs.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -2498,6 +2294,7 @@ func newRestoreRuns() *cobra.Command { restoreRunsReq.ExperimentId = args[0] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &restoreRunsReq.MinTimestampMillis) if err != nil { return fmt.Errorf("invalid MIN_TIMESTAMP_MILLIS: %s", args[1]) @@ -2625,9 +2422,6 @@ func newSearchLoggedModels() *cobra.Command { Search for Logged Models that satisfy specified search criteria.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -2863,9 +2657,6 @@ func newSetLoggedModelTags() *cobra.Command { Arguments: MODEL_ID: The ID of the logged model to set the tags on.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/forecasting/forecasting.go b/cmd/workspace/forecasting/forecasting.go index f414c92087..4303043584 100755 --- a/cmd/workspace/forecasting/forecasting.go +++ b/cmd/workspace/forecasting/forecasting.go @@ -149,6 +149,7 @@ func newCreateExperiment() *cobra.Command { createExperimentReq.ForecastGranularity = args[3] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[4], &createExperimentReq.ForecastHorizon) if err != nil { return fmt.Errorf("invalid FORECAST_HORIZON: %s", args[4]) diff --git a/cmd/workspace/genie/genie.go b/cmd/workspace/genie/genie.go index 1b58dbb81b..bc17c02bff 100755 --- a/cmd/workspace/genie/genie.go +++ b/cmd/workspace/genie/genie.go @@ -45,6 +45,7 @@ func New() *cobra.Command { cmd.AddCommand(newGetMessageQueryResult()) cmd.AddCommand(newGetMessageQueryResultByAttachment()) cmd.AddCommand(newGetSpace()) + cmd.AddCommand(newListSpaces()) cmd.AddCommand(newStartConversation()) // Apply optional overrides to this command. @@ -766,6 +767,65 @@ func newGetSpace() *cobra.Command { return cmd } +// start list-spaces command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listSpacesOverrides []func( + *cobra.Command, + *dashboards.GenieListSpacesRequest, +) + +func newListSpaces() *cobra.Command { + cmd := &cobra.Command{} + + var listSpacesReq dashboards.GenieListSpacesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listSpacesReq.PageSize, "page-size", listSpacesReq.PageSize, `Maximum number of spaces to return per page.`) + cmd.Flags().StringVar(&listSpacesReq.PageToken, "page-token", listSpacesReq.PageToken, `Pagination token for getting the next page of results.`) + + cmd.Use = "list-spaces" + cmd.Short = `List Genie spaces.` + cmd.Long = `List Genie spaces. + + Get list of Genie Spaces.` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Genie.ListSpaces(ctx, listSpacesReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listSpacesOverrides { + fn(cmd, &listSpacesReq) + } + + return cmd +} + // start start-conversation command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index d003851a05..031b1c3f5b 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -188,6 +188,7 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the id for the corresponding credential to access") } + _, err = fmt.Sscan(args[0], &deleteReq.CredentialId) if err != nil { return fmt.Errorf("invalid CREDENTIAL_ID: %s", args[0]) @@ -261,6 +262,7 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the id for the corresponding credential to access") } + _, err = fmt.Sscan(args[0], &getReq.CredentialId) if err != nil { return fmt.Errorf("invalid CREDENTIAL_ID: %s", args[0]) @@ -390,6 +392,7 @@ func newUpdate() *cobra.Command { } } } + _, err = fmt.Sscan(args[0], &updateReq.CredentialId) if err != nil { return fmt.Errorf("invalid CREDENTIAL_ID: %s", args[0]) diff --git a/cmd/workspace/grants/grants.go b/cmd/workspace/grants/grants.go index 9abbef1cf1..9fa89b72d5 100755 --- a/cmd/workspace/grants/grants.go +++ b/cmd/workspace/grants/grants.go @@ -3,8 +3,6 @@ package grants import ( - "fmt" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" @@ -68,36 +66,18 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().IntVar(&getReq.MaxResults, "max-results", getReq.MaxResults, `Specifies the maximum number of privileges to return (page length).`) + cmd.Flags().StringVar(&getReq.PageToken, "page-token", getReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) cmd.Flags().StringVar(&getReq.Principal, "principal", getReq.Principal, `If provided, only the permissions for the specified principal (user or group) are returned.`) cmd.Use = "get SECURABLE_TYPE FULL_NAME" cmd.Short = `Get permissions.` cmd.Long = `Get permissions. - Gets the permissions for a securable. + Gets the permissions for a securable. Does not include inherited permissions. Arguments: - SECURABLE_TYPE: Type of securable. - Supported values: [ - CATALOG, - CLEAN_ROOM, - CONNECTION, - CREDENTIAL, - EXTERNAL_LOCATION, - EXTERNAL_METADATA, - FUNCTION, - METASTORE, - PIPELINE, - PROVIDER, - RECIPIENT, - SCHEMA, - SHARE, - STAGING_TABLE, - STORAGE_CREDENTIAL, - TABLE, - UNKNOWN_SECURABLE_TYPE, - VOLUME, - ] + SECURABLE_TYPE: Type of securable. 
FULL_NAME: Full name of securable.` cmd.Annotations = make(map[string]string) @@ -112,10 +92,7 @@ func newGet() *cobra.Command { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - _, err = fmt.Sscan(args[0], &getReq.SecurableType) - if err != nil { - return fmt.Errorf("invalid SECURABLE_TYPE: %s", args[0]) - } + getReq.SecurableType = args[0] getReq.FullName = args[1] response, err := w.Grants.Get(ctx, getReq) @@ -153,36 +130,19 @@ func newGetEffective() *cobra.Command { // TODO: short flags + cmd.Flags().IntVar(&getEffectiveReq.MaxResults, "max-results", getEffectiveReq.MaxResults, `Specifies the maximum number of privileges to return (page length).`) + cmd.Flags().StringVar(&getEffectiveReq.PageToken, "page-token", getEffectiveReq.PageToken, `Opaque token for the next page of results (pagination).`) cmd.Flags().StringVar(&getEffectiveReq.Principal, "principal", getEffectiveReq.Principal, `If provided, only the effective permissions for the specified principal (user or group) are returned.`) cmd.Use = "get-effective SECURABLE_TYPE FULL_NAME" cmd.Short = `Get effective permissions.` cmd.Long = `Get effective permissions. - Gets the effective permissions for a securable. + Gets the effective permissions for a securable. Includes inherited permissions + from any parent securables. Arguments: - SECURABLE_TYPE: Type of securable. - Supported values: [ - CATALOG, - CLEAN_ROOM, - CONNECTION, - CREDENTIAL, - EXTERNAL_LOCATION, - EXTERNAL_METADATA, - FUNCTION, - METASTORE, - PIPELINE, - PROVIDER, - RECIPIENT, - SCHEMA, - SHARE, - STAGING_TABLE, - STORAGE_CREDENTIAL, - TABLE, - UNKNOWN_SECURABLE_TYPE, - VOLUME, - ] + SECURABLE_TYPE: Type of securable. FULL_NAME: Full name of securable.` cmd.Annotations = make(map[string]string) @@ -197,10 +157,7 @@ func newGetEffective() *cobra.Command { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - _, err = fmt.Sscan(args[0], &getEffectiveReq.SecurableType) - if err != nil { - return fmt.Errorf("invalid SECURABLE_TYPE: %s", args[0]) - } + getEffectiveReq.SecurableType = args[0] getEffectiveReq.FullName = args[1] response, err := w.Grants.GetEffective(ctx, getEffectiveReq) @@ -249,27 +206,7 @@ func newUpdate() *cobra.Command { Updates the permissions for a securable. Arguments: - SECURABLE_TYPE: Type of securable. - Supported values: [ - CATALOG, - CLEAN_ROOM, - CONNECTION, - CREDENTIAL, - EXTERNAL_LOCATION, - EXTERNAL_METADATA, - FUNCTION, - METASTORE, - PIPELINE, - PROVIDER, - RECIPIENT, - SCHEMA, - SHARE, - STAGING_TABLE, - STORAGE_CREDENTIAL, - TABLE, - UNKNOWN_SECURABLE_TYPE, - VOLUME, - ] + SECURABLE_TYPE: Type of securable. 
FULL_NAME: Full name of securable.` cmd.Annotations = make(map[string]string) @@ -296,10 +233,7 @@ func newUpdate() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &updateReq.SecurableType) - if err != nil { - return fmt.Errorf("invalid SECURABLE_TYPE: %s", args[0]) - } + updateReq.SecurableType = args[0] updateReq.FullName = args[1] response, err := w.Grants.Update(ctx, updateReq) diff --git a/cmd/workspace/ip-access-lists/ip-access-lists.go b/cmd/workspace/ip-access-lists/ip-access-lists.go index 07f7010264..db6ddfe8a1 100755 --- a/cmd/workspace/ip-access-lists/ip-access-lists.go +++ b/cmd/workspace/ip-access-lists/ip-access-lists.go @@ -151,6 +151,7 @@ func newCreate() *cobra.Command { createReq.Label = args[0] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &createReq.ListType) if err != nil { return fmt.Errorf("invalid LIST_TYPE: %s", args[1]) @@ -440,12 +441,14 @@ func newReplace() *cobra.Command { replaceReq.Label = args[1] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &replaceReq.ListType) if err != nil { return fmt.Errorf("invalid LIST_TYPE: %s", args[2]) } } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[3], &replaceReq.Enabled) if err != nil { return fmt.Errorf("invalid ENABLED: %s", args[3]) diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index f36bf44ee5..dbc98009f1 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -229,6 +229,7 @@ func newCancelRun() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have this field is required") } + _, err = fmt.Sscan(args[0], &cancelRunReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -412,6 +413,7 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier of the job to delete") } + _, err = fmt.Sscan(args[0], &deleteReq.JobId) if err != nil { return fmt.Errorf("invalid JOB_ID: %s", args[0]) @@ -511,6 +513,7 @@ func newDeleteRun() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have id of the run to delete") } + _, err = fmt.Sscan(args[0], &deleteRunReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -587,6 +590,7 @@ func newExportRun() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier for the run") } + _, err = fmt.Sscan(args[0], &exportRunReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -672,6 +676,7 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier of the job to retrieve information about") } + _, err = fmt.Sscan(args[0], &getReq.JobId) if err != nil { return fmt.Errorf("invalid JOB_ID: %s", args[0]) @@ -900,6 +905,7 @@ func newGetRun() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier of the run for which to retrieve the metadata") } + _, err = fmt.Sscan(args[0], &getRunReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -982,6 +988,7 @@ func newGetRunOutput() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier for the run") } + _, err = fmt.Sscan(args[0], &getRunOutputReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -1219,6 +1226,7 @@ func newRepairRun() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the job run id of the run to repair") } + _, err = fmt.Sscan(args[0], 
&repairRunReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -1422,6 +1430,7 @@ func newRunNow() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the id of the job to be executed") } + _, err = fmt.Sscan(args[0], &runNowReq.JobId) if err != nil { return fmt.Errorf("invalid JOB_ID: %s", args[0]) @@ -1742,6 +1751,7 @@ func newUpdate() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier of the job to update") } + _, err = fmt.Sscan(args[0], &updateReq.JobId) if err != nil { return fmt.Errorf("invalid JOB_ID: %s", args[0]) diff --git a/cmd/workspace/lakeview-embedded/lakeview-embedded.go b/cmd/workspace/lakeview-embedded/lakeview-embedded.go index 782b4effcf..06ed7f1f73 100755 --- a/cmd/workspace/lakeview-embedded/lakeview-embedded.go +++ b/cmd/workspace/lakeview-embedded/lakeview-embedded.go @@ -27,7 +27,6 @@ func New() *cobra.Command { } // Add methods - cmd.AddCommand(newGetPublishedDashboardEmbedded()) cmd.AddCommand(newGetPublishedDashboardTokenInfo()) // Apply optional overrides to this command. @@ -38,67 +37,6 @@ func New() *cobra.Command { return cmd } -// start get-published-dashboard-embedded command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getPublishedDashboardEmbeddedOverrides []func( - *cobra.Command, - *dashboards.GetPublishedDashboardEmbeddedRequest, -) - -func newGetPublishedDashboardEmbedded() *cobra.Command { - cmd := &cobra.Command{} - - var getPublishedDashboardEmbeddedReq dashboards.GetPublishedDashboardEmbeddedRequest - - // TODO: short flags - - cmd.Use = "get-published-dashboard-embedded DASHBOARD_ID" - cmd.Short = `Read a published dashboard in an embedded ui.` - cmd.Long = `Read a published dashboard in an embedded ui. - - Get the current published dashboard within an embedded context. - - Arguments: - DASHBOARD_ID: UUID identifying the published dashboard.` - - // This command is being previewed; hide from help output. - cmd.Hidden = true - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - getPublishedDashboardEmbeddedReq.DashboardId = args[0] - - err = w.LakeviewEmbedded.GetPublishedDashboardEmbedded(ctx, getPublishedDashboardEmbeddedReq) - if err != nil { - return err - } - return nil - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getPublishedDashboardEmbeddedOverrides { - fn(cmd, &getPublishedDashboardEmbeddedReq) - } - - return cmd -} - // start get-published-dashboard-token-info command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index 99dc7cfbb3..daa980915e 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -90,9 +90,9 @@ func newAssign() *cobra.Command { Arguments: WORKSPACE_ID: A workspace ID. METASTORE_ID: The unique ID of the metastore. 
- DEFAULT_CATALOG_NAME: The name of the default catalog in the metastore. This field is depracted. - Please use "Default Namespace API" to configure the default catalog for a - Databricks workspace.` + DEFAULT_CATALOG_NAME: The name of the default catalog in the metastore. This field is + deprecated. Please use "Default Namespace API" to configure the default + catalog for a Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -125,6 +125,7 @@ func newAssign() *cobra.Command { } } } + _, err = fmt.Sscan(args[0], &assignReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) @@ -314,28 +315,16 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Metastores drop-down." - names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Unique ID of the metastore") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have unique id of the metastore") - } deleteReq.Id = args[0] err = w.Metastores.Delete(ctx, deleteReq) @@ -385,28 +374,16 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Metastores drop-down." - names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Unique ID of the metastore") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have unique id of the metastore") - } getReq.Id = args[0] response, err := w.Metastores.Get(ctx, getReq) @@ -434,11 +411,19 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *catalog.ListMetastoresRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq catalog.ListMetastoresRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of metastores to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "list" cmd.Short = `List metastores.` cmd.Long = `List metastores. 
@@ -449,11 +434,17 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - response := w.Metastores.List(ctx) + + response := w.Metastores.List(ctx, listReq) return cmdio.RenderIterator(ctx, response) } @@ -463,7 +454,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd @@ -614,6 +605,11 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -631,23 +627,6 @@ func newUpdate() *cobra.Command { } } } - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Metastores drop-down." - names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Unique ID of the metastore") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have unique id of the metastore") - } updateReq.Id = args[0] response, err := w.Metastores.Update(ctx, updateReq) @@ -704,6 +683,11 @@ func newUpdateAssignment() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -721,23 +705,7 @@ func newUpdateAssignment() *cobra.Command { } } } - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Metastores drop-down." - names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "A workspace ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have a workspace id") - } + _, err = fmt.Sscan(args[0], &updateAssignmentReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index 4fbabec8f0..c93fbb929d 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -162,12 +162,14 @@ func newApproveTransitionRequest() *cobra.Command { approveTransitionRequestReq.Version = args[1] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &approveTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) } } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[3], &approveTransitionRequestReq.ArchiveExistingVersions) if err != nil { return fmt.Errorf("invalid ARCHIVE_EXISTING_VERSIONS: %s", args[3]) @@ -539,6 +541,7 @@ func newCreateTransitionRequest() *cobra.Command { createTransitionRequestReq.Version = args[1] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &createTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) @@ -585,7 +588,7 @@ func newCreateWebhook() *cobra.Command { cmd.Flags().StringVar(&createWebhookReq.Description, "description", createWebhookReq.Description, `User-specified description for the webhook.`) // TODO: complex arg: http_url_spec // TODO: complex arg: job_spec - cmd.Flags().StringVar(&createWebhookReq.ModelName, "model-name", createWebhookReq.ModelName, `Name of the model whose events would trigger this webhook.`) + cmd.Flags().StringVar(&createWebhookReq.ModelName, "model-name", createWebhookReq.ModelName, `If model name is not specified, a registry-wide webhook is created that listens for the specified events across all versions of all registered models.`) cmd.Flags().Var(&createWebhookReq.Status, "status", `Enable or disable triggering the webhook, or put the webhook into test mode. Supported values: [ACTIVE, DISABLED, TEST_MODE]`) cmd.Use = "create-webhook" @@ -657,7 +660,10 @@ func newDeleteComment() *cobra.Command { cmd.Short = `Delete a comment.` cmd.Long = `Delete a comment. - Deletes a comment on a model version.` + Deletes a comment on a model version. 
+ + Arguments: + ID: Unique identifier of an activity` cmd.Annotations = make(map[string]string) @@ -989,6 +995,7 @@ func newDeleteTransitionRequest() *cobra.Command { deleteTransitionRequestReq.Name = args[0] deleteTransitionRequestReq.Version = args[1] + _, err = fmt.Sscan(args[2], &deleteTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) @@ -1695,6 +1702,7 @@ func newRejectTransitionRequest() *cobra.Command { rejectTransitionRequestReq.Version = args[1] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &rejectTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) @@ -2363,12 +2371,14 @@ func newTransitionStage() *cobra.Command { transitionStageReq.Version = args[1] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &transitionStageReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) } } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[3], &transitionStageReq.ArchiveExistingVersions) if err != nil { return fmt.Errorf("invalid ARCHIVE_EXISTING_VERSIONS: %s", args[3]) diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index 773eb4c8be..e827e5ec9b 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -95,6 +95,7 @@ func newDelete() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) deleteReq.FullName = args[0] + _, err = fmt.Sscan(args[1], &deleteReq.Version) if err != nil { return fmt.Errorf("invalid VERSION: %s", args[1]) @@ -166,6 +167,7 @@ func newGet() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) getReq.FullName = args[0] + _, err = fmt.Sscan(args[1], &getReq.Version) if err != nil { return fmt.Errorf("invalid VERSION: %s", args[1]) @@ -391,6 +393,7 @@ func newUpdate() *cobra.Command { } } updateReq.FullName = args[0] + _, err = fmt.Sscan(args[1], &updateReq.Version) if err != nil { return fmt.Errorf("invalid VERSION: %s", args[1]) diff --git a/cmd/workspace/permission-migration/permission-migration.go b/cmd/workspace/permission-migration/permission-migration.go index f18428fff0..feb7e2a011 100755 --- a/cmd/workspace/permission-migration/permission-migration.go +++ b/cmd/workspace/permission-migration/permission-migration.go @@ -106,6 +106,7 @@ func newMigratePermissions() *cobra.Command { } } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[0], &migratePermissionsReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 3000842482..29a9a7d5fe 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -153,7 +153,8 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a pipeline.` cmd.Long = `Delete a pipeline. - Deletes a pipeline.` + Deletes a pipeline. Deleting a pipeline is a permanent action that stops and + removes the pipeline and its tables. 
You cannot undo this action.` cmd.Annotations = make(map[string]string) @@ -980,6 +981,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Schema, "schema", updateReq.Schema, `The default schema (database) where tables are read from or published to.`) cmd.Flags().BoolVar(&updateReq.Serverless, "serverless", updateReq.Serverless, `Whether serverless compute is enabled for this pipeline.`) cmd.Flags().StringVar(&updateReq.Storage, "storage", updateReq.Storage, `DBFS root directory for storing checkpoints and tables.`) + // TODO: map via StringToStringVar: tags cmd.Flags().StringVar(&updateReq.Target, "target", updateReq.Target, `Target schema (database) to add tables in this pipeline to.`) // TODO: complex arg: trigger diff --git a/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go b/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go index 9de9733957..6fda5e053e 100755 --- a/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go +++ b/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go @@ -118,6 +118,7 @@ func newEnforceCompliance() *cobra.Command { } } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[0], &enforceComplianceReq.JobId) if err != nil { return fmt.Errorf("invalid JOB_ID: %s", args[0]) diff --git a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go index e62462d78f..094a959827 100755 --- a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go +++ b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go @@ -158,6 +158,7 @@ func newUpdate() *cobra.Command { updateReq.ListingId = args[0] updateReq.RequestId = args[1] if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &updateReq.Status) if err != nil { return fmt.Errorf("invalid STATUS: %s", args[2]) diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index b91638478d..925e923d6f 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -79,7 +79,7 @@ func newCreate() *cobra.Command { Arguments: NAME: The name of the Provider. AUTHENTICATION_TYPE: The delta sharing authentication type. - Supported values: [DATABRICKS, OAUTH_CLIENT_CREDENTIALS, TOKEN]` + Supported values: [DATABRICKS, OAUTH_CLIENT_CREDENTIALS, OIDC_FEDERATION, TOKEN]` cmd.Annotations = make(map[string]string) @@ -116,6 +116,7 @@ func newCreate() *cobra.Command { createReq.Name = args[0] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &createReq.AuthenticationType) if err != nil { return fmt.Errorf("invalid AUTHENTICATION_TYPE: %s", args[1]) diff --git a/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go b/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go new file mode 100755 index 0000000000..ea0175fdd5 --- /dev/null +++ b/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go @@ -0,0 +1,400 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package quality_monitor_v2 + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/qualitymonitorv2" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "quality-monitor-v2", + Short: `Manage data quality of UC objects (currently support schema).`, + Long: `Manage data quality of UC objects (currently support schema)`, + GroupID: "qualitymonitorv2", + Annotations: map[string]string{ + "package": "qualitymonitorv2", + }, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newCreateQualityMonitor()) + cmd.AddCommand(newDeleteQualityMonitor()) + cmd.AddCommand(newGetQualityMonitor()) + cmd.AddCommand(newListQualityMonitor()) + cmd.AddCommand(newUpdateQualityMonitor()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create-quality-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createQualityMonitorOverrides []func( + *cobra.Command, + *qualitymonitorv2.CreateQualityMonitorRequest, +) + +func newCreateQualityMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var createQualityMonitorReq qualitymonitorv2.CreateQualityMonitorRequest + createQualityMonitorReq.QualityMonitor = qualitymonitorv2.QualityMonitor{} + var createQualityMonitorJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createQualityMonitorJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: anomaly_detection_config + + cmd.Use = "create-quality-monitor OBJECT_TYPE OBJECT_ID" + cmd.Short = `Create a quality monitor.` + cmd.Long = `Create a quality monitor. + + Create a quality monitor on UC object + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. + OBJECT_ID: The uuid of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'object_type', 'object_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createQualityMonitorJson.Unmarshal(&createQualityMonitorReq.QualityMonitor) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createQualityMonitorReq.QualityMonitor.ObjectType = args[0] + } + if !cmd.Flags().Changed("json") { + createQualityMonitorReq.QualityMonitor.ObjectId = args[1] + } + + response, err := w.QualityMonitorV2.CreateQualityMonitor(ctx, createQualityMonitorReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range createQualityMonitorOverrides { + fn(cmd, &createQualityMonitorReq) + } + + return cmd +} + +// start delete-quality-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteQualityMonitorOverrides []func( + *cobra.Command, + *qualitymonitorv2.DeleteQualityMonitorRequest, +) + +func newDeleteQualityMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var deleteQualityMonitorReq qualitymonitorv2.DeleteQualityMonitorRequest + + // TODO: short flags + + cmd.Use = "delete-quality-monitor OBJECT_TYPE OBJECT_ID" + cmd.Short = `Delete a quality monitor.` + cmd.Long = `Delete a quality monitor. + + Delete a quality monitor on UC object + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. + OBJECT_ID: The uuid of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteQualityMonitorReq.ObjectType = args[0] + deleteQualityMonitorReq.ObjectId = args[1] + + err = w.QualityMonitorV2.DeleteQualityMonitor(ctx, deleteQualityMonitorReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteQualityMonitorOverrides { + fn(cmd, &deleteQualityMonitorReq) + } + + return cmd +} + +// start get-quality-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getQualityMonitorOverrides []func( + *cobra.Command, + *qualitymonitorv2.GetQualityMonitorRequest, +) + +func newGetQualityMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var getQualityMonitorReq qualitymonitorv2.GetQualityMonitorRequest + + // TODO: short flags + + cmd.Use = "get-quality-monitor OBJECT_TYPE OBJECT_ID" + cmd.Short = `Read a quality monitor.` + cmd.Long = `Read a quality monitor. + + Read a quality monitor on UC object + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. + OBJECT_ID: The uuid of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getQualityMonitorReq.ObjectType = args[0] + getQualityMonitorReq.ObjectId = args[1] + + response, err := w.QualityMonitorV2.GetQualityMonitor(ctx, getQualityMonitorReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range getQualityMonitorOverrides { + fn(cmd, &getQualityMonitorReq) + } + + return cmd +} + +// start list-quality-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listQualityMonitorOverrides []func( + *cobra.Command, + *qualitymonitorv2.ListQualityMonitorRequest, +) + +func newListQualityMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var listQualityMonitorReq qualitymonitorv2.ListQualityMonitorRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listQualityMonitorReq.PageSize, "page-size", listQualityMonitorReq.PageSize, ``) + cmd.Flags().StringVar(&listQualityMonitorReq.PageToken, "page-token", listQualityMonitorReq.PageToken, ``) + + cmd.Use = "list-quality-monitor" + cmd.Short = `List quality monitors.` + cmd.Long = `List quality monitors. + + (Unimplemented) List quality monitors` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response := w.QualityMonitorV2.ListQualityMonitor(ctx, listQualityMonitorReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listQualityMonitorOverrides { + fn(cmd, &listQualityMonitorReq) + } + + return cmd +} + +// start update-quality-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateQualityMonitorOverrides []func( + *cobra.Command, + *qualitymonitorv2.UpdateQualityMonitorRequest, +) + +func newUpdateQualityMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var updateQualityMonitorReq qualitymonitorv2.UpdateQualityMonitorRequest + updateQualityMonitorReq.QualityMonitor = qualitymonitorv2.QualityMonitor{} + var updateQualityMonitorJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateQualityMonitorJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: anomaly_detection_config + + cmd.Use = "update-quality-monitor OBJECT_TYPE OBJECT_ID OBJECT_TYPE OBJECT_ID" + cmd.Short = `Update a quality monitor.` + cmd.Long = `Update a quality monitor. + + (Unimplemented) Update a quality monitor on UC object + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. + OBJECT_ID: The uuid of the request object. For example, schema id. + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. + OBJECT_ID: The uuid of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(2)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only OBJECT_TYPE, OBJECT_ID as positional arguments. 
Provide 'object_type', 'object_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(4) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateQualityMonitorJson.Unmarshal(&updateQualityMonitorReq.QualityMonitor) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateQualityMonitorReq.ObjectType = args[0] + updateQualityMonitorReq.ObjectId = args[1] + if !cmd.Flags().Changed("json") { + updateQualityMonitorReq.QualityMonitor.ObjectType = args[2] + } + if !cmd.Flags().Changed("json") { + updateQualityMonitorReq.QualityMonitor.ObjectId = args[3] + } + + response, err := w.QualityMonitorV2.UpdateQualityMonitor(ctx, updateQualityMonitorReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateQualityMonitorOverrides { + fn(cmd, &updateQualityMonitorReq) + } + + return cmd +} + +// end service QualityMonitorV2 diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 8b036de7af..2ac9ac895b 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -99,7 +99,7 @@ func newCreate() *cobra.Command { Arguments: NAME: Name of Recipient. AUTHENTICATION_TYPE: The delta sharing authentication type. 
- Supported values: [DATABRICKS, OAUTH_CLIENT_CREDENTIALS, TOKEN]` + Supported values: [DATABRICKS, OAUTH_CLIENT_CREDENTIALS, OIDC_FEDERATION, TOKEN]` cmd.Annotations = make(map[string]string) @@ -136,6 +136,7 @@ func newCreate() *cobra.Command { createReq.Name = args[0] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &createReq.AuthenticationType) if err != nil { return fmt.Errorf("invalid AUTHENTICATION_TYPE: %s", args[1]) @@ -402,6 +403,7 @@ func newRotateToken() *cobra.Command { } rotateTokenReq.Name = args[0] if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &rotateTokenReq.ExistingTokenExpireInSeconds) if err != nil { return fmt.Errorf("invalid EXISTING_TOKEN_EXPIRE_IN_SECONDS: %s", args[1]) diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index b327444e60..fea7b7820a 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -531,6 +531,7 @@ func newSetAlias() *cobra.Command { setAliasReq.FullName = args[0] setAliasReq.Alias = args[1] if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &setAliasReq.VersionNum) if err != nil { return fmt.Errorf("invalid VERSION_NUM: %s", args[2]) diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index 547afb97f1..169afffd11 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -199,6 +199,7 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the id for the corresponding repo to delete") } + _, err = fmt.Sscan(args[0], &deleteReq.RepoId) if err != nil { return fmt.Errorf("invalid REPO_ID: %s", args[0]) @@ -272,6 +273,7 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have id of the git folder (repo) object in the workspace") } + _, err = fmt.Sscan(args[0], &getReq.RepoId) if err != nil { return fmt.Errorf("invalid REPO_ID: %s", args[0]) @@ -647,6 +649,7 @@ func newUpdate() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have id of the git folder (repo) object in the workspace") } + _, err = fmt.Sscan(args[0], &updateReq.RepoId) if err != nil { return fmt.Errorf("invalid REPO_ID: %s", args[0]) diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index bcd3273c66..9c3425d6b1 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -169,28 +169,16 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME argument specified. Loading names for Schemas drop-down." - names, err := w.Schemas.SchemaInfoNameToFullNameMap(ctx, catalog.ListSchemasRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Schemas drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Full name of the schema") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have full name of the schema") - } deleteReq.FullName = args[0] err = w.Schemas.Delete(ctx, deleteReq) @@ -243,28 +231,16 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME argument specified. Loading names for Schemas drop-down." - names, err := w.Schemas.SchemaInfoNameToFullNameMap(ctx, catalog.ListSchemasRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Schemas drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Full name of the schema") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have full name of the schema") - } getReq.FullName = args[0] response, err := w.Schemas.Get(ctx, getReq) @@ -368,7 +344,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) - cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `. Supported values: [DISABLE, ENABLE, INHERIT]`) + cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. Supported values: [DISABLE, ENABLE, INHERIT]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the schema.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of schema.`) // TODO: map via StringToStringVar: properties @@ -388,6 +364,11 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -405,23 +386,6 @@ func newUpdate() *cobra.Command { } } } - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME argument specified. Loading names for Schemas drop-down." - names, err := w.Schemas.SchemaInfoNameToFullNameMap(ctx, catalog.ListSchemasRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Schemas drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Full name of the schema") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have full name of the schema") - } updateReq.FullName = args[0] response, err := w.Schemas.Update(ctx, updateReq) diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index 2e882ad94e..36b2717bf2 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -804,6 +804,7 @@ func newPutAcl() *cobra.Command { putAclReq.Principal = args[1] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &putAclReq.Permission) if err != nil { return fmt.Errorf("invalid PERMISSION: %s", args[2]) diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 29f94f6036..80991abe75 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -723,6 +723,7 @@ func newHttpRequest() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) httpRequestReq.ConnectionName = args[0] + _, err = fmt.Sscan(args[1], &httpRequestReq.Method) if err != nil { return fmt.Errorf("invalid METHOD: %s", args[1]) diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 50519f2adf..2754412a77 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -10,6 +10,7 @@ import ( aibi_dashboard_embedding_approved_domains "github.com/databricks/cli/cmd/workspace/aibi-dashboard-embedding-approved-domains" automatic_cluster_update "github.com/databricks/cli/cmd/workspace/automatic-cluster-update" compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile" + dashboard_email_subscriptions "github.com/databricks/cli/cmd/workspace/dashboard-email-subscriptions" default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace" disable_legacy_access "github.com/databricks/cli/cmd/workspace/disable-legacy-access" disable_legacy_dbfs "github.com/databricks/cli/cmd/workspace/disable-legacy-dbfs" @@ -19,6 +20,7 @@ import ( enhanced_security_monitoring "github.com/databricks/cli/cmd/workspace/enhanced-security-monitoring" llm_proxy_partner_powered_workspace "github.com/databricks/cli/cmd/workspace/llm-proxy-partner-powered-workspace" restrict_workspace_admins "github.com/databricks/cli/cmd/workspace/restrict-workspace-admins" + sql_results_download "github.com/databricks/cli/cmd/workspace/sql-results-download" ) // Slice with functions to override default command behavior. @@ -42,6 +44,7 @@ func New() *cobra.Command { cmd.AddCommand(aibi_dashboard_embedding_approved_domains.New()) cmd.AddCommand(automatic_cluster_update.New()) cmd.AddCommand(compliance_security_profile.New()) + cmd.AddCommand(dashboard_email_subscriptions.New()) cmd.AddCommand(default_namespace.New()) cmd.AddCommand(disable_legacy_access.New()) cmd.AddCommand(disable_legacy_dbfs.New()) @@ -51,6 +54,7 @@ func New() *cobra.Command { cmd.AddCommand(enhanced_security_monitoring.New()) cmd.AddCommand(llm_proxy_partner_powered_workspace.New()) cmd.AddCommand(restrict_workspace_admins.New()) + cmd.AddCommand(sql_results_download.New()) // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { diff --git a/cmd/workspace/sql-results-download/sql-results-download.go b/cmd/workspace/sql-results-download/sql-results-download.go new file mode 100755 index 0000000000..b807a767a2 --- /dev/null +++ b/cmd/workspace/sql-results-download/sql-results-download.go @@ -0,0 +1,218 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package sql_results_download + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "sql-results-download", + Short: `Controls whether users within the workspace are allowed to download results from the SQL Editor and AI/BI Dashboards UIs.`, + Long: `Controls whether users within the workspace are allowed to download results + from the SQL Editor and AI/BI Dashboards UIs. By default, this setting is + enabled (set to true)`, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteSqlResultsDownloadRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteSqlResultsDownloadRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the SQL Results Download setting.` + cmd.Long = `Delete the SQL Results Download setting. + + Reverts the SQL Results Download setting to its default value.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Settings.SqlResultsDownload().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getOverrides []func( + *cobra.Command, + *settings.GetSqlResultsDownloadRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetSqlResultsDownloadRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the SQL Results Download setting.` + cmd.Long = `Get the SQL Results Download setting. + + Gets the SQL Results Download setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Settings.SqlResultsDownload().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateSqlResultsDownloadRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateSqlResultsDownloadRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the SQL Results Download setting.` + cmd.Long = `Update the SQL Results Download setting. + + Updates the SQL Results Download setting.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.SqlResultsDownload().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service SqlResultsDownload diff --git a/cmd/workspace/table-constraints/table-constraints.go b/cmd/workspace/table-constraints/table-constraints.go index d5e60e2b01..a784d64232 100755 --- a/cmd/workspace/table-constraints/table-constraints.go +++ b/cmd/workspace/table-constraints/table-constraints.go @@ -178,6 +178,7 @@ func newDelete() *cobra.Command { deleteReq.FullName = args[0] deleteReq.ConstraintName = args[1] + _, err = fmt.Sscan(args[2], &deleteReq.Cascade) if err != nil { return fmt.Errorf("invalid CASCADE: %s", args[2]) diff --git a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go index f8b848bbae..a1011b66c9 100755 --- a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go +++ b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go @@ -117,6 +117,7 @@ func newCreateEndpoint() *cobra.Command { createEndpointReq.Name = args[0] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[1], &createEndpointReq.EndpointType) if err != nil { return fmt.Errorf("invalid ENDPOINT_TYPE: %s", args[1]) diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index 13b3f483ad..e25be419ad 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -137,6 +137,7 @@ func newCreateIndex() *cobra.Command { createIndexReq.PrimaryKey = args[2] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[3], &createIndexReq.IndexType) if err != nil { return fmt.Errorf("invalid INDEX_TYPE: %s", args[3]) diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index ecb18762f0..50c87a36d3 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -148,6 +148,7 @@ func newCreate() *cobra.Command { createReq.Name = args[2] } if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[3], &createReq.VolumeType) if err != nil { return fmt.Errorf("invalid VOLUME_TYPE: %s", args[3]) diff --git a/experimental/python/databricks/bundles/compute/_models/environment.py b/experimental/python/databricks/bundles/compute/_models/environment.py index 16cc9d7140..c8bdee0917 100644 --- a/experimental/python/databricks/bundles/compute/_models/environment.py +++ b/experimental/python/databricks/bundles/compute/_models/environment.py @@ -3,11 +3,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value -from databricks.bundles.core._variable import ( - VariableOr, - VariableOrList, - VariableOrOptional, -) +from databricks.bundles.core._variable import VariableOrList, VariableOrOptional if TYPE_CHECKING: from typing_extensions import Self @@ -20,14 +16,6 @@ class Environment: In this minimal environment spec, only pip dependencies are supported. """ - client: VariableOr[str] - """ - Client version used by the environment - The client is the user-facing environment of the runtime. - Each client comes with a specific set of pre-installed libraries. - The version is a string, consisting of the major client version. - """ - dependencies: VariableOrList[str] = field(default_factory=list) """ List of pip dependencies, as supported by the version of pip in this environment. 
@@ -35,12 +23,9 @@ class Environment: environment_version: VariableOrOptional[str] = None """ - :meta private: [EXPERIMENTAL] - - We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized - correctly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field - for any other purpose, e.g. notebook storage. - This field is not yet exposed to customers (e.g. in the jobs API). + Required. Environment version used by the environment. + Each version comes with a specific Python version and a set of Python packages. + The version is a string, consisting of an integer. """ jar_dependencies: VariableOrList[str] = field(default_factory=list) @@ -61,14 +46,6 @@ def as_dict(self) -> "EnvironmentDict": class EnvironmentDict(TypedDict, total=False): """""" - client: VariableOr[str] - """ - Client version used by the environment - The client is the user-facing environment of the runtime. - Each client comes with a specific set of pre-installed libraries. - The version is a string, consisting of the major client version. - """ - dependencies: VariableOrList[str] """ List of pip dependencies, as supported by the version of pip in this environment. @@ -76,12 +53,9 @@ class EnvironmentDict(TypedDict, total=False): environment_version: VariableOrOptional[str] """ - :meta private: [EXPERIMENTAL] - - We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized - correctly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field - for any other purpose, e.g. notebook storage. - This field is not yet exposed to customers (e.g. in the jobs API). + Required. Environment version used by the environment. + Each version comes with a specific Python version and a set of Python packages. + The version is a string, consisting of an integer. 
""" jar_dependencies: VariableOrList[str] diff --git a/experimental/python/databricks/bundles/jobs/__init__.py b/experimental/python/databricks/bundles/jobs/__init__.py index 32fd0e6599..3eb3d43185 100644 --- a/experimental/python/databricks/bundles/jobs/__init__.py +++ b/experimental/python/databricks/bundles/jobs/__init__.py @@ -53,6 +53,9 @@ "DbfsStorageInfo", "DbfsStorageInfoDict", "DbfsStorageInfoParam", + "DbtCloudTask", + "DbtCloudTaskDict", + "DbtCloudTaskParam", "DbtTask", "DbtTaskDict", "DbtTaskParam", @@ -445,6 +448,11 @@ DashboardTaskDict, DashboardTaskParam, ) +from databricks.bundles.jobs._models.dbt_cloud_task import ( + DbtCloudTask, + DbtCloudTaskDict, + DbtCloudTaskParam, +) from databricks.bundles.jobs._models.dbt_task import DbtTask, DbtTaskDict, DbtTaskParam from databricks.bundles.jobs._models.file_arrival_trigger_configuration import ( FileArrivalTriggerConfiguration, diff --git a/experimental/python/databricks/bundles/jobs/_models/dashboard_task.py b/experimental/python/databricks/bundles/jobs/_models/dashboard_task.py index 6284ca36d3..b42ef0bdd5 100644 --- a/experimental/python/databricks/bundles/jobs/_models/dashboard_task.py +++ b/experimental/python/databricks/bundles/jobs/_models/dashboard_task.py @@ -4,10 +4,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrOptional -from databricks.bundles.jobs._models.subscription import ( - Subscription, - SubscriptionParam, -) +from databricks.bundles.jobs._models.subscription import Subscription, SubscriptionParam if TYPE_CHECKING: from typing_extensions import Self diff --git a/experimental/python/databricks/bundles/jobs/_models/dbt_cloud_task.py b/experimental/python/databricks/bundles/jobs/_models/dbt_cloud_task.py new file mode 100644 index 0000000000..d1d862c7ef --- /dev/null +++ b/experimental/python/databricks/bundles/jobs/_models/dbt_cloud_task.py @@ -0,0 +1,50 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrOptional + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class DbtCloudTask: + """ + :meta private: [EXPERIMENTAL] + """ + + connection_resource_name: VariableOrOptional[str] = None + """ + The resource name of the UC connection that authenticates the dbt Cloud for this task + """ + + dbt_cloud_job_id: VariableOrOptional[int] = None + """ + Id of the dbt Cloud job to be triggered + """ + + @classmethod + def from_dict(cls, value: "DbtCloudTaskDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "DbtCloudTaskDict": + return _transform_to_json_value(self) # type:ignore + + +class DbtCloudTaskDict(TypedDict, total=False): + """""" + + connection_resource_name: VariableOrOptional[str] + """ + The resource name of the UC connection that authenticates the dbt Cloud for this task + """ + + dbt_cloud_job_id: VariableOrOptional[int] + """ + Id of the dbt Cloud job to be triggered + """ + + +DbtCloudTaskParam = DbtCloudTaskDict | DbtCloudTask diff --git a/experimental/python/databricks/bundles/jobs/_models/job.py b/experimental/python/databricks/bundles/jobs/_models/job.py index c72a20a329..ca40311133 100644 --- a/experimental/python/databricks/bundles/jobs/_models/job.py +++ 
b/experimental/python/databricks/bundles/jobs/_models/job.py @@ -17,10 +17,7 @@ CronSchedule, CronScheduleParam, ) -from databricks.bundles.jobs._models.git_source import ( - GitSource, - GitSourceParam, -) +from databricks.bundles.jobs._models.git_source import GitSource, GitSourceParam from databricks.bundles.jobs._models.job_cluster import JobCluster, JobClusterParam from databricks.bundles.jobs._models.job_email_notifications import ( JobEmailNotifications, diff --git a/experimental/python/databricks/bundles/jobs/_models/task.py b/experimental/python/databricks/bundles/jobs/_models/task.py index 7120c970a8..8da07a4ab3 100644 --- a/experimental/python/databricks/bundles/jobs/_models/task.py +++ b/experimental/python/databricks/bundles/jobs/_models/task.py @@ -28,6 +28,10 @@ DashboardTask, DashboardTaskParam, ) +from databricks.bundles.jobs._models.dbt_cloud_task import ( + DbtCloudTask, + DbtCloudTaskParam, +) from databricks.bundles.jobs._models.dbt_task import DbtTask, DbtTaskParam from databricks.bundles.jobs._models.for_each_task import ( ForEachTask, @@ -121,6 +125,13 @@ class Task: The task refreshes a dashboard and sends a snapshot to subscribers. """ + dbt_cloud_task: VariableOrOptional[DbtCloudTask] = None + """ + :meta private: [EXPERIMENTAL] + + Task type for dbt cloud + """ + dbt_task: VariableOrOptional[DbtTask] = None """ The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse. @@ -319,6 +330,13 @@ class TaskDict(TypedDict, total=False): The task refreshes a dashboard and sends a snapshot to subscribers. """ + dbt_cloud_task: VariableOrOptional[DbtCloudTaskParam] + """ + :meta private: [EXPERIMENTAL] + + Task type for dbt cloud + """ + dbt_task: VariableOrOptional[DbtTaskParam] """ The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse. 
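For readers of this patch, a minimal sketch (not part of the change itself) of how the new experimental dbt_cloud_task field could be populated through the Python bundles API; the task key, UC connection name, and dbt Cloud job id are placeholder values, and it assumes Task and DbtCloudTask are both exported from databricks.bundles.jobs as the updated __init__.py suggests:

from databricks.bundles.jobs import DbtCloudTask, Task

# Hypothetical task definition; every literal below is a placeholder.
example_task = Task(
    task_key="dbt_cloud_example",
    dbt_cloud_task=DbtCloudTask(
        # UC connection that authenticates to dbt Cloud (placeholder name)
        connection_resource_name="my_dbt_cloud_connection",
        # Id of the dbt Cloud job to trigger (placeholder id)
        dbt_cloud_job_id=12345,
    ),
)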
diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py index 988227c43e..c452222df9 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py @@ -4,10 +4,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrOptional -from databricks.bundles.pipelines._models.report_spec import ( - ReportSpec, - ReportSpecParam, -) +from databricks.bundles.pipelines._models.report_spec import ReportSpec, ReportSpecParam from databricks.bundles.pipelines._models.schema_spec import SchemaSpec, SchemaSpecParam from databricks.bundles.pipelines._models.table_spec import TableSpec, TableSpecParam diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py index 50754bee6a..b5ed997cfd 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py @@ -13,6 +13,7 @@ class IngestionSourceType(Enum): SERVICENOW = "SERVICENOW" MANAGED_POSTGRESQL = "MANAGED_POSTGRESQL" ORACLE = "ORACLE" + TERADATA = "TERADATA" SHAREPOINT = "SHAREPOINT" DYNAMICS365 = "DYNAMICS365" @@ -29,6 +30,7 @@ class IngestionSourceType(Enum): "SERVICENOW", "MANAGED_POSTGRESQL", "ORACLE", + "TERADATA", "SHAREPOINT", "DYNAMICS365", ] diff --git a/experimental/python/databricks/bundles/pipelines/_models/pipeline.py b/experimental/python/databricks/bundles/pipelines/_models/pipeline.py index 8bf25fd1f1..936842ea92 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/pipeline.py +++ b/experimental/python/databricks/bundles/pipelines/_models/pipeline.py @@ -182,6 +182,13 @@ class Pipeline(Resource): DBFS root directory for storing checkpoints and tables. """ + tags: VariableOrDict[str] = field(default_factory=dict) + """ + A map of tags associated with the pipeline. + These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. + A maximum of 25 tags can be added to the pipeline. + """ + target: VariableOrOptional[str] = None """ Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field. @@ -325,6 +332,13 @@ class PipelineDict(TypedDict, total=False): DBFS root directory for storing checkpoints and tables. """ + tags: VariableOrDict[str] + """ + A map of tags associated with the pipeline. + These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. + A maximum of 25 tags can be added to the pipeline. + """ + target: VariableOrOptional[str] """ Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field. 
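For context on the tags field added to Pipeline above, a minimal sketch (not part of the patch) of how tags might be set through the Python bundles API; the pipeline name and tag values are placeholders, and it assumes Pipeline is exported from databricks.bundles.pipelines:

from databricks.bundles.pipelines import Pipeline

# Hypothetical pipeline definition; tags are forwarded to the cluster as
# cluster tags, so the usual cluster-tag limits apply (at most 25 tags).
example_pipeline = Pipeline(
    name="example_pipeline",                  # placeholder name
    tags={"team": "data-eng", "env": "dev"},  # placeholder tag map
)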
From cc0c70d77203ec6eeb36383d90bd74c1430e1d61 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Jun 2025 14:25:37 +0200 Subject: [PATCH 3/9] remove unnecessary new line --- .codegen/service.go.tmpl | 2 +- cmd/account/ip-access-lists/ip-access-lists.go | 3 --- cmd/account/log-delivery/log-delivery.go | 1 - .../metastore-assignments/metastore-assignments.go | 2 -- .../service-principal-federation-policy.go | 2 -- .../service-principal-secrets.go | 1 - .../workspace-assignment/workspace-assignment.go | 3 --- .../workspace-network-configuration.go | 1 - cmd/account/workspaces/workspaces.go | 3 --- .../artifact-allowlists/artifact-allowlists.go | 1 - cmd/workspace/clean-room-assets/clean-room-assets.go | 3 --- cmd/workspace/experiments/experiments.go | 5 ----- cmd/workspace/forecasting/forecasting.go | 1 - cmd/workspace/git-credentials/git-credentials.go | 3 --- cmd/workspace/ip-access-lists/ip-access-lists.go | 3 --- cmd/workspace/jobs/jobs.go | 10 ---------- cmd/workspace/metastores/metastores.go | 2 -- cmd/workspace/model-registry/model-registry.go | 7 ------- cmd/workspace/model-versions/model-versions.go | 3 --- .../permission-migration/permission-migration.go | 1 - .../policy-compliance-for-jobs.go | 1 - .../provider-personalization-requests.go | 1 - cmd/workspace/providers/providers.go | 1 - cmd/workspace/recipients/recipients.go | 2 -- cmd/workspace/registered-models/registered-models.go | 1 - cmd/workspace/repos/repos.go | 3 --- cmd/workspace/secrets/secrets.go | 1 - cmd/workspace/serving-endpoints/serving-endpoints.go | 1 - cmd/workspace/table-constraints/table-constraints.go | 1 - .../vector-search-endpoints/vector-search-endpoints.go | 1 - .../vector-search-indexes/vector-search-indexes.go | 1 - cmd/workspace/volumes/volumes.go | 1 - 32 files changed, 1 insertion(+), 71 deletions(-) diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index b28e565c19..6e411f06d0 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -412,7 +412,7 @@ func new{{.PascalName}}() *cobra.Command { {{- if $optionalIfJsonIsUsed }} if !cmd.Flags().Changed("json") { {{- end }} - {{if and (not $field.Entity.IsString) (not $field.Entity.IsFieldMask) (not $field.Entity.IsTimestamp) (not $field.Entity.IsDuration) -}} {{/* TODO: add support for well known types */}} + {{if and (not $field.Entity.IsString) (not $field.Entity.IsFieldMask) (not $field.Entity.IsTimestamp) (not $field.Entity.IsDuration) -}} {{/* TODO: add support for well known types */ -}} _, err = fmt.Sscan(args[{{$arg}}], &{{- template "request-body-obj" (dict "Method" $method "Field" $field)}}) if err != nil { return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}]) diff --git a/cmd/account/ip-access-lists/ip-access-lists.go b/cmd/account/ip-access-lists/ip-access-lists.go index ccd2d8bbea..f738af42c2 100755 --- a/cmd/account/ip-access-lists/ip-access-lists.go +++ b/cmd/account/ip-access-lists/ip-access-lists.go @@ -150,7 +150,6 @@ func newCreate() *cobra.Command { createReq.Label = args[0] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &createReq.ListType) if err != nil { return fmt.Errorf("invalid LIST_TYPE: %s", args[1]) @@ -438,14 +437,12 @@ func newReplace() *cobra.Command { replaceReq.Label = args[1] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &replaceReq.ListType) if err != nil { return fmt.Errorf("invalid LIST_TYPE: %s", args[2]) } } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[3], &replaceReq.Enabled) if err != nil { return 
fmt.Errorf("invalid ENABLED: %s", args[3]) diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index f35f5b795b..036ea0c449 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -331,7 +331,6 @@ func newPatchStatus() *cobra.Command { } patchStatusReq.LogDeliveryConfigurationId = args[0] if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &patchStatusReq.Status) if err != nil { return fmt.Errorf("invalid STATUS: %s", args[1]) diff --git a/cmd/account/metastore-assignments/metastore-assignments.go b/cmd/account/metastore-assignments/metastore-assignments.go index fb4ff141ab..8dc3171df1 100755 --- a/cmd/account/metastore-assignments/metastore-assignments.go +++ b/cmd/account/metastore-assignments/metastore-assignments.go @@ -98,7 +98,6 @@ func newCreate() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &createReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) @@ -363,7 +362,6 @@ func newUpdate() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &updateReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go index 079dc94c8b..abd425acc5 100755 --- a/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go +++ b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go @@ -152,7 +152,6 @@ func newCreate() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &createReq.ServicePrincipalId) if err != nil { return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) @@ -414,7 +413,6 @@ func newUpdate() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &updateReq.ServicePrincipalId) if err != nil { return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) diff --git a/cmd/account/service-principal-secrets/service-principal-secrets.go b/cmd/account/service-principal-secrets/service-principal-secrets.go index cfeb70bcf9..b28fa4ef23 100755 --- a/cmd/account/service-principal-secrets/service-principal-secrets.go +++ b/cmd/account/service-principal-secrets/service-principal-secrets.go @@ -107,7 +107,6 @@ func newCreate() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &createReq.ServicePrincipalId) if err != nil { return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index fd0d16bef4..41998765a0 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -87,7 +87,6 @@ func newDelete() *cobra.Command { if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) } - _, err = fmt.Sscan(args[1], &deleteReq.PrincipalId) if err != nil { return fmt.Errorf("invalid PRINCIPAL_ID: %s", args[1]) @@ -287,12 +286,10 @@ func newUpdate() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &updateReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) } - _, err = fmt.Sscan(args[1], &updateReq.PrincipalId) if err != nil { return fmt.Errorf("invalid PRINCIPAL_ID: %s", args[1]) diff --git a/cmd/account/workspace-network-configuration/workspace-network-configuration.go 
b/cmd/account/workspace-network-configuration/workspace-network-configuration.go index 946b8aae0d..dee1c53af5 100755 --- a/cmd/account/workspace-network-configuration/workspace-network-configuration.go +++ b/cmd/account/workspace-network-configuration/workspace-network-configuration.go @@ -167,7 +167,6 @@ func newUpdateWorkspaceNetworkOptionRpc() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &updateWorkspaceNetworkOptionRpcReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index bb8a304b38..1ed97595ae 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -238,7 +238,6 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have workspace id") } - _, err = fmt.Sscan(args[0], &deleteReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) @@ -325,7 +324,6 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have workspace id") } - _, err = fmt.Sscan(args[0], &getReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) @@ -591,7 +589,6 @@ func newUpdate() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have workspace id") } - _, err = fmt.Sscan(args[0], &updateReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/workspace/artifact-allowlists/artifact-allowlists.go b/cmd/workspace/artifact-allowlists/artifact-allowlists.go index b8b8a2ca28..ff5a3c02c9 100755 --- a/cmd/workspace/artifact-allowlists/artifact-allowlists.go +++ b/cmd/workspace/artifact-allowlists/artifact-allowlists.go @@ -162,7 +162,6 @@ func newUpdate() *cobra.Command { } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } - _, err = fmt.Sscan(args[0], &updateReq.ArtifactType) if err != nil { return fmt.Errorf("invalid ARTIFACT_TYPE: %s", args[0]) diff --git a/cmd/workspace/clean-room-assets/clean-room-assets.go b/cmd/workspace/clean-room-assets/clean-room-assets.go index e03fb52ef7..14242dd59b 100755 --- a/cmd/workspace/clean-room-assets/clean-room-assets.go +++ b/cmd/workspace/clean-room-assets/clean-room-assets.go @@ -175,7 +175,6 @@ func newDelete() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) deleteReq.CleanRoomName = args[0] - _, err = fmt.Sscan(args[1], &deleteReq.AssetType) if err != nil { return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) @@ -243,7 +242,6 @@ func newGet() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) getReq.CleanRoomName = args[0] - _, err = fmt.Sscan(args[1], &getReq.AssetType) if err != nil { return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) @@ -398,7 +396,6 @@ func newUpdate() *cobra.Command { } } updateReq.CleanRoomName = args[0] - _, err = fmt.Sscan(args[1], &updateReq.AssetType) if err != nil { return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index fec727b883..1f94baf770 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -684,7 +684,6 @@ func newDeleteRuns() *cobra.Command { deleteRunsReq.ExperimentId = args[0] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &deleteRunsReq.MaxTimestampMillis) if err != nil { return fmt.Errorf("invalid MAX_TIMESTAMP_MILLIS: %s", args[1]) @@ -858,7 +857,6 @@ func 
newFinalizeLoggedModel() *cobra.Command { } finalizeLoggedModelReq.ModelId = args[0] if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &finalizeLoggedModelReq.Status) if err != nil { return fmt.Errorf("invalid STATUS: %s", args[1]) @@ -1772,14 +1770,12 @@ func newLogMetric() *cobra.Command { logMetricReq.Key = args[0] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &logMetricReq.Value) if err != nil { return fmt.Errorf("invalid VALUE: %s", args[1]) } } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &logMetricReq.Timestamp) if err != nil { return fmt.Errorf("invalid TIMESTAMP: %s", args[2]) @@ -2294,7 +2290,6 @@ func newRestoreRuns() *cobra.Command { restoreRunsReq.ExperimentId = args[0] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &restoreRunsReq.MinTimestampMillis) if err != nil { return fmt.Errorf("invalid MIN_TIMESTAMP_MILLIS: %s", args[1]) diff --git a/cmd/workspace/forecasting/forecasting.go b/cmd/workspace/forecasting/forecasting.go index 4303043584..f414c92087 100755 --- a/cmd/workspace/forecasting/forecasting.go +++ b/cmd/workspace/forecasting/forecasting.go @@ -149,7 +149,6 @@ func newCreateExperiment() *cobra.Command { createExperimentReq.ForecastGranularity = args[3] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[4], &createExperimentReq.ForecastHorizon) if err != nil { return fmt.Errorf("invalid FORECAST_HORIZON: %s", args[4]) diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index 031b1c3f5b..d003851a05 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -188,7 +188,6 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the id for the corresponding credential to access") } - _, err = fmt.Sscan(args[0], &deleteReq.CredentialId) if err != nil { return fmt.Errorf("invalid CREDENTIAL_ID: %s", args[0]) @@ -262,7 +261,6 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the id for the corresponding credential to access") } - _, err = fmt.Sscan(args[0], &getReq.CredentialId) if err != nil { return fmt.Errorf("invalid CREDENTIAL_ID: %s", args[0]) @@ -392,7 +390,6 @@ func newUpdate() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &updateReq.CredentialId) if err != nil { return fmt.Errorf("invalid CREDENTIAL_ID: %s", args[0]) diff --git a/cmd/workspace/ip-access-lists/ip-access-lists.go b/cmd/workspace/ip-access-lists/ip-access-lists.go index db6ddfe8a1..07f7010264 100755 --- a/cmd/workspace/ip-access-lists/ip-access-lists.go +++ b/cmd/workspace/ip-access-lists/ip-access-lists.go @@ -151,7 +151,6 @@ func newCreate() *cobra.Command { createReq.Label = args[0] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &createReq.ListType) if err != nil { return fmt.Errorf("invalid LIST_TYPE: %s", args[1]) @@ -441,14 +440,12 @@ func newReplace() *cobra.Command { replaceReq.Label = args[1] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &replaceReq.ListType) if err != nil { return fmt.Errorf("invalid LIST_TYPE: %s", args[2]) } } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[3], &replaceReq.Enabled) if err != nil { return fmt.Errorf("invalid ENABLED: %s", args[3]) diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index dbc98009f1..f36bf44ee5 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -229,7 +229,6 @@ func 
newCancelRun() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have this field is required") } - _, err = fmt.Sscan(args[0], &cancelRunReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -413,7 +412,6 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier of the job to delete") } - _, err = fmt.Sscan(args[0], &deleteReq.JobId) if err != nil { return fmt.Errorf("invalid JOB_ID: %s", args[0]) @@ -513,7 +511,6 @@ func newDeleteRun() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have id of the run to delete") } - _, err = fmt.Sscan(args[0], &deleteRunReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -590,7 +587,6 @@ func newExportRun() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier for the run") } - _, err = fmt.Sscan(args[0], &exportRunReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -676,7 +672,6 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier of the job to retrieve information about") } - _, err = fmt.Sscan(args[0], &getReq.JobId) if err != nil { return fmt.Errorf("invalid JOB_ID: %s", args[0]) @@ -905,7 +900,6 @@ func newGetRun() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier of the run for which to retrieve the metadata") } - _, err = fmt.Sscan(args[0], &getRunReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -988,7 +982,6 @@ func newGetRunOutput() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier for the run") } - _, err = fmt.Sscan(args[0], &getRunOutputReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -1226,7 +1219,6 @@ func newRepairRun() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the job run id of the run to repair") } - _, err = fmt.Sscan(args[0], &repairRunReq.RunId) if err != nil { return fmt.Errorf("invalid RUN_ID: %s", args[0]) @@ -1430,7 +1422,6 @@ func newRunNow() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the id of the job to be executed") } - _, err = fmt.Sscan(args[0], &runNowReq.JobId) if err != nil { return fmt.Errorf("invalid JOB_ID: %s", args[0]) @@ -1751,7 +1742,6 @@ func newUpdate() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the canonical identifier of the job to update") } - _, err = fmt.Sscan(args[0], &updateReq.JobId) if err != nil { return fmt.Errorf("invalid JOB_ID: %s", args[0]) diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index daa980915e..fe5bfb5178 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -125,7 +125,6 @@ func newAssign() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &assignReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) @@ -705,7 +704,6 @@ func newUpdateAssignment() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &updateAssignmentReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index c93fbb929d..a5a41d7f59 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ 
b/cmd/workspace/model-registry/model-registry.go @@ -162,14 +162,12 @@ func newApproveTransitionRequest() *cobra.Command { approveTransitionRequestReq.Version = args[1] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &approveTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) } } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[3], &approveTransitionRequestReq.ArchiveExistingVersions) if err != nil { return fmt.Errorf("invalid ARCHIVE_EXISTING_VERSIONS: %s", args[3]) @@ -541,7 +539,6 @@ func newCreateTransitionRequest() *cobra.Command { createTransitionRequestReq.Version = args[1] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &createTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) @@ -995,7 +992,6 @@ func newDeleteTransitionRequest() *cobra.Command { deleteTransitionRequestReq.Name = args[0] deleteTransitionRequestReq.Version = args[1] - _, err = fmt.Sscan(args[2], &deleteTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) @@ -1702,7 +1698,6 @@ func newRejectTransitionRequest() *cobra.Command { rejectTransitionRequestReq.Version = args[1] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &rejectTransitionRequestReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) @@ -2371,14 +2366,12 @@ func newTransitionStage() *cobra.Command { transitionStageReq.Version = args[1] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &transitionStageReq.Stage) if err != nil { return fmt.Errorf("invalid STAGE: %s", args[2]) } } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[3], &transitionStageReq.ArchiveExistingVersions) if err != nil { return fmt.Errorf("invalid ARCHIVE_EXISTING_VERSIONS: %s", args[3]) diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index e827e5ec9b..773eb4c8be 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -95,7 +95,6 @@ func newDelete() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) deleteReq.FullName = args[0] - _, err = fmt.Sscan(args[1], &deleteReq.Version) if err != nil { return fmt.Errorf("invalid VERSION: %s", args[1]) @@ -167,7 +166,6 @@ func newGet() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) getReq.FullName = args[0] - _, err = fmt.Sscan(args[1], &getReq.Version) if err != nil { return fmt.Errorf("invalid VERSION: %s", args[1]) @@ -393,7 +391,6 @@ func newUpdate() *cobra.Command { } } updateReq.FullName = args[0] - _, err = fmt.Sscan(args[1], &updateReq.Version) if err != nil { return fmt.Errorf("invalid VERSION: %s", args[1]) diff --git a/cmd/workspace/permission-migration/permission-migration.go b/cmd/workspace/permission-migration/permission-migration.go index feb7e2a011..f18428fff0 100755 --- a/cmd/workspace/permission-migration/permission-migration.go +++ b/cmd/workspace/permission-migration/permission-migration.go @@ -106,7 +106,6 @@ func newMigratePermissions() *cobra.Command { } } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[0], &migratePermissionsReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go b/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go index 6fda5e053e..9de9733957 100755 --- 
a/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go +++ b/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go @@ -118,7 +118,6 @@ func newEnforceCompliance() *cobra.Command { } } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[0], &enforceComplianceReq.JobId) if err != nil { return fmt.Errorf("invalid JOB_ID: %s", args[0]) diff --git a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go index 094a959827..e62462d78f 100755 --- a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go +++ b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go @@ -158,7 +158,6 @@ func newUpdate() *cobra.Command { updateReq.ListingId = args[0] updateReq.RequestId = args[1] if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &updateReq.Status) if err != nil { return fmt.Errorf("invalid STATUS: %s", args[2]) diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index 925e923d6f..5228982aec 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -116,7 +116,6 @@ func newCreate() *cobra.Command { createReq.Name = args[0] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &createReq.AuthenticationType) if err != nil { return fmt.Errorf("invalid AUTHENTICATION_TYPE: %s", args[1]) diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 2ac9ac895b..1864707766 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -136,7 +136,6 @@ func newCreate() *cobra.Command { createReq.Name = args[0] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &createReq.AuthenticationType) if err != nil { return fmt.Errorf("invalid AUTHENTICATION_TYPE: %s", args[1]) @@ -403,7 +402,6 @@ func newRotateToken() *cobra.Command { } rotateTokenReq.Name = args[0] if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &rotateTokenReq.ExistingTokenExpireInSeconds) if err != nil { return fmt.Errorf("invalid EXISTING_TOKEN_EXPIRE_IN_SECONDS: %s", args[1]) diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index fea7b7820a..b327444e60 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -531,7 +531,6 @@ func newSetAlias() *cobra.Command { setAliasReq.FullName = args[0] setAliasReq.Alias = args[1] if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &setAliasReq.VersionNum) if err != nil { return fmt.Errorf("invalid VERSION_NUM: %s", args[2]) diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index 169afffd11..547afb97f1 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -199,7 +199,6 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the id for the corresponding repo to delete") } - _, err = fmt.Sscan(args[0], &deleteReq.RepoId) if err != nil { return fmt.Errorf("invalid REPO_ID: %s", args[0]) @@ -273,7 +272,6 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have id of the git folder (repo) object in the workspace") } - _, err = fmt.Sscan(args[0], &getReq.RepoId) if err != nil { return fmt.Errorf("invalid REPO_ID: %s", args[0]) @@ 
-649,7 +647,6 @@ func newUpdate() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have id of the git folder (repo) object in the workspace") } - _, err = fmt.Sscan(args[0], &updateReq.RepoId) if err != nil { return fmt.Errorf("invalid REPO_ID: %s", args[0]) diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index 36b2717bf2..2e882ad94e 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -804,7 +804,6 @@ func newPutAcl() *cobra.Command { putAclReq.Principal = args[1] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &putAclReq.Permission) if err != nil { return fmt.Errorf("invalid PERMISSION: %s", args[2]) diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 80991abe75..29f94f6036 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -723,7 +723,6 @@ func newHttpRequest() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) httpRequestReq.ConnectionName = args[0] - _, err = fmt.Sscan(args[1], &httpRequestReq.Method) if err != nil { return fmt.Errorf("invalid METHOD: %s", args[1]) diff --git a/cmd/workspace/table-constraints/table-constraints.go b/cmd/workspace/table-constraints/table-constraints.go index a784d64232..d5e60e2b01 100755 --- a/cmd/workspace/table-constraints/table-constraints.go +++ b/cmd/workspace/table-constraints/table-constraints.go @@ -178,7 +178,6 @@ func newDelete() *cobra.Command { deleteReq.FullName = args[0] deleteReq.ConstraintName = args[1] - _, err = fmt.Sscan(args[2], &deleteReq.Cascade) if err != nil { return fmt.Errorf("invalid CASCADE: %s", args[2]) diff --git a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go index a1011b66c9..f8b848bbae 100755 --- a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go +++ b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go @@ -117,7 +117,6 @@ func newCreateEndpoint() *cobra.Command { createEndpointReq.Name = args[0] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &createEndpointReq.EndpointType) if err != nil { return fmt.Errorf("invalid ENDPOINT_TYPE: %s", args[1]) diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index e25be419ad..13b3f483ad 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -137,7 +137,6 @@ func newCreateIndex() *cobra.Command { createIndexReq.PrimaryKey = args[2] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[3], &createIndexReq.IndexType) if err != nil { return fmt.Errorf("invalid INDEX_TYPE: %s", args[3]) diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 50c87a36d3..ecb18762f0 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -148,7 +148,6 @@ func newCreate() *cobra.Command { createReq.Name = args[2] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[3], &createReq.VolumeType) if err != nil { return fmt.Errorf("invalid VOLUME_TYPE: %s", args[3]) From b1892496fc9e57c594d0e9678aea12de8e4c12b6 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Jun 2025 16:06:14 +0200 Subject: [PATCH 4/9] removed obsolete commands --- 
.../database-instances/database-instances.go | 824 ------------------ cmd/workspace/metastores/overrides.go | 3 +- .../query-execution/query-execution.go | 247 ------ 3 files changed, 2 insertions(+), 1072 deletions(-) delete mode 100755 cmd/workspace/database-instances/database-instances.go delete mode 100755 cmd/workspace/query-execution/query-execution.go diff --git a/cmd/workspace/database-instances/database-instances.go b/cmd/workspace/database-instances/database-instances.go deleted file mode 100755 index 64fe1d7f82..0000000000 --- a/cmd/workspace/database-instances/database-instances.go +++ /dev/null @@ -1,824 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package database_instances - -import ( - "fmt" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdctx" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/catalog" - "github.com/spf13/cobra" -) - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var cmdOverrides []func(*cobra.Command) - -func New() *cobra.Command { - cmd := &cobra.Command{ - Use: "database-instances", - Short: `Database Instances provide access to a database via REST API or direct SQL.`, - Long: `Database Instances provide access to a database via REST API or direct SQL.`, - GroupID: "catalog", - Annotations: map[string]string{ - "package": "catalog", - }, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, - } - - // Add methods - cmd.AddCommand(newCreateDatabaseCatalog()) - cmd.AddCommand(newCreateDatabaseInstance()) - cmd.AddCommand(newCreateSyncedDatabaseTable()) - cmd.AddCommand(newDeleteDatabaseCatalog()) - cmd.AddCommand(newDeleteDatabaseInstance()) - cmd.AddCommand(newDeleteSyncedDatabaseTable()) - cmd.AddCommand(newFindDatabaseInstanceByUid()) - cmd.AddCommand(newGetDatabaseCatalog()) - cmd.AddCommand(newGetDatabaseInstance()) - cmd.AddCommand(newGetSyncedDatabaseTable()) - cmd.AddCommand(newListDatabaseInstances()) - cmd.AddCommand(newUpdateDatabaseInstance()) - - // Apply optional overrides to this command. - for _, fn := range cmdOverrides { - fn(cmd) - } - - return cmd -} - -// start create-database-catalog command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var createDatabaseCatalogOverrides []func( - *cobra.Command, - *catalog.CreateDatabaseCatalogRequest, -) - -func newCreateDatabaseCatalog() *cobra.Command { - cmd := &cobra.Command{} - - var createDatabaseCatalogReq catalog.CreateDatabaseCatalogRequest - createDatabaseCatalogReq.Catalog = catalog.DatabaseCatalog{} - var createDatabaseCatalogJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&createDatabaseCatalogJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().BoolVar(&createDatabaseCatalogReq.Catalog.CreateDatabaseIfNotExists, "create-database-if-not-exists", createDatabaseCatalogReq.Catalog.CreateDatabaseIfNotExists, ``) - - cmd.Use = "create-database-catalog NAME DATABASE_INSTANCE_NAME DATABASE_NAME" - cmd.Short = `Create a Database Catalog.` - cmd.Long = `Create a Database Catalog. - - Arguments: - NAME: The name of the catalog in UC. 
- DATABASE_INSTANCE_NAME: The name of the DatabaseInstance housing the database. - DATABASE_NAME: The name of the database (in a instance) associated with the catalog.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'database_instance_name', 'database_name' in your JSON input") - } - return nil - } - check := root.ExactArgs(3) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := createDatabaseCatalogJson.Unmarshal(&createDatabaseCatalogReq.Catalog) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } - if !cmd.Flags().Changed("json") { - createDatabaseCatalogReq.Catalog.Name = args[0] - } - if !cmd.Flags().Changed("json") { - createDatabaseCatalogReq.Catalog.DatabaseInstanceName = args[1] - } - if !cmd.Flags().Changed("json") { - createDatabaseCatalogReq.Catalog.DatabaseName = args[2] - } - - response, err := w.DatabaseInstances.CreateDatabaseCatalog(ctx, createDatabaseCatalogReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range createDatabaseCatalogOverrides { - fn(cmd, &createDatabaseCatalogReq) - } - - return cmd -} - -// start create-database-instance command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var createDatabaseInstanceOverrides []func( - *cobra.Command, - *catalog.CreateDatabaseInstanceRequest, -) - -func newCreateDatabaseInstance() *cobra.Command { - cmd := &cobra.Command{} - - var createDatabaseInstanceReq catalog.CreateDatabaseInstanceRequest - createDatabaseInstanceReq.DatabaseInstance = catalog.DatabaseInstance{} - var createDatabaseInstanceJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&createDatabaseInstanceJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.AdminPassword, "admin-password", createDatabaseInstanceReq.DatabaseInstance.AdminPassword, `Password for admin user to create.`) - cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.AdminRolename, "admin-rolename", createDatabaseInstanceReq.DatabaseInstance.AdminRolename, `Name of the admin role for the instance.`) - cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", createDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) - cmd.Flags().BoolVar(&createDatabaseInstanceReq.DatabaseInstance.Stopped, "stopped", createDatabaseInstanceReq.DatabaseInstance.Stopped, `Whether the instance is stopped.`) - - cmd.Use = "create-database-instance NAME" - cmd.Short = `Create a Database Instance.` - cmd.Long = `Create a Database Instance. - - Arguments: - NAME: The name of the instance. 
This is the unique identifier for the instance.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") - } - return nil - } - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := createDatabaseInstanceJson.Unmarshal(&createDatabaseInstanceReq.DatabaseInstance) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } - if !cmd.Flags().Changed("json") { - createDatabaseInstanceReq.DatabaseInstance.Name = args[0] - } - - response, err := w.DatabaseInstances.CreateDatabaseInstance(ctx, createDatabaseInstanceReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range createDatabaseInstanceOverrides { - fn(cmd, &createDatabaseInstanceReq) - } - - return cmd -} - -// start create-synced-database-table command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var createSyncedDatabaseTableOverrides []func( - *cobra.Command, - *catalog.CreateSyncedDatabaseTableRequest, -) - -func newCreateSyncedDatabaseTable() *cobra.Command { - cmd := &cobra.Command{} - - var createSyncedDatabaseTableReq catalog.CreateSyncedDatabaseTableRequest - createSyncedDatabaseTableReq.SyncedTable = catalog.SyncedDatabaseTable{} - var createSyncedDatabaseTableJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&createSyncedDatabaseTableJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - // TODO: complex arg: data_synchronization_status - cmd.Flags().StringVar(&createSyncedDatabaseTableReq.SyncedTable.DatabaseInstanceName, "database-instance-name", createSyncedDatabaseTableReq.SyncedTable.DatabaseInstanceName, `Name of the target database instance.`) - cmd.Flags().StringVar(&createSyncedDatabaseTableReq.SyncedTable.LogicalDatabaseName, "logical-database-name", createSyncedDatabaseTableReq.SyncedTable.LogicalDatabaseName, `Target Postgres database object (logical database) name for this table.`) - // TODO: complex arg: spec - - cmd.Use = "create-synced-database-table NAME" - cmd.Short = `Create a Synced Database Table.` - cmd.Long = `Create a Synced Database Table. - - Arguments: - NAME: Full three-part (catalog, schema, table) name of the table.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") - } - return nil - } - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := createSyncedDatabaseTableJson.Unmarshal(&createSyncedDatabaseTableReq.SyncedTable) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } - if !cmd.Flags().Changed("json") { - createSyncedDatabaseTableReq.SyncedTable.Name = args[0] - } - - response, err := w.DatabaseInstances.CreateSyncedDatabaseTable(ctx, createSyncedDatabaseTableReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range createSyncedDatabaseTableOverrides { - fn(cmd, &createSyncedDatabaseTableReq) - } - - return cmd -} - -// start delete-database-catalog command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var deleteDatabaseCatalogOverrides []func( - *cobra.Command, - *catalog.DeleteDatabaseCatalogRequest, -) - -func newDeleteDatabaseCatalog() *cobra.Command { - cmd := &cobra.Command{} - - var deleteDatabaseCatalogReq catalog.DeleteDatabaseCatalogRequest - - // TODO: short flags - - cmd.Use = "delete-database-catalog NAME" - cmd.Short = `Delete a Database Catalog.` - cmd.Long = `Delete a Database Catalog.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - deleteDatabaseCatalogReq.Name = args[0] - - err = w.DatabaseInstances.DeleteDatabaseCatalog(ctx, deleteDatabaseCatalogReq) - if err != nil { - return err - } - return nil - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteDatabaseCatalogOverrides { - fn(cmd, &deleteDatabaseCatalogReq) - } - - return cmd -} - -// start delete-database-instance command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var deleteDatabaseInstanceOverrides []func( - *cobra.Command, - *catalog.DeleteDatabaseInstanceRequest, -) - -func newDeleteDatabaseInstance() *cobra.Command { - cmd := &cobra.Command{} - - var deleteDatabaseInstanceReq catalog.DeleteDatabaseInstanceRequest - - // TODO: short flags - - cmd.Flags().BoolVar(&deleteDatabaseInstanceReq.Force, "force", deleteDatabaseInstanceReq.Force, `By default, a instance cannot be deleted if it has descendant instances created via PITR.`) - cmd.Flags().BoolVar(&deleteDatabaseInstanceReq.Purge, "purge", deleteDatabaseInstanceReq.Purge, `If false, the database instance is soft deleted.`) - - cmd.Use = "delete-database-instance NAME" - cmd.Short = `Delete a Database Instance.` - cmd.Long = `Delete a Database Instance. - - Arguments: - NAME: Name of the instance to delete.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - deleteDatabaseInstanceReq.Name = args[0] - - err = w.DatabaseInstances.DeleteDatabaseInstance(ctx, deleteDatabaseInstanceReq) - if err != nil { - return err - } - return nil - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteDatabaseInstanceOverrides { - fn(cmd, &deleteDatabaseInstanceReq) - } - - return cmd -} - -// start delete-synced-database-table command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var deleteSyncedDatabaseTableOverrides []func( - *cobra.Command, - *catalog.DeleteSyncedDatabaseTableRequest, -) - -func newDeleteSyncedDatabaseTable() *cobra.Command { - cmd := &cobra.Command{} - - var deleteSyncedDatabaseTableReq catalog.DeleteSyncedDatabaseTableRequest - - // TODO: short flags - - cmd.Use = "delete-synced-database-table NAME" - cmd.Short = `Delete a Synced Database Table.` - cmd.Long = `Delete a Synced Database Table.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - deleteSyncedDatabaseTableReq.Name = args[0] - - err = w.DatabaseInstances.DeleteSyncedDatabaseTable(ctx, deleteSyncedDatabaseTableReq) - if err != nil { - return err - } - return nil - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteSyncedDatabaseTableOverrides { - fn(cmd, &deleteSyncedDatabaseTableReq) - } - - return cmd -} - -// start find-database-instance-by-uid command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var findDatabaseInstanceByUidOverrides []func( - *cobra.Command, - *catalog.FindDatabaseInstanceByUidRequest, -) - -func newFindDatabaseInstanceByUid() *cobra.Command { - cmd := &cobra.Command{} - - var findDatabaseInstanceByUidReq catalog.FindDatabaseInstanceByUidRequest - - // TODO: short flags - - cmd.Flags().StringVar(&findDatabaseInstanceByUidReq.Uid, "uid", findDatabaseInstanceByUidReq.Uid, `UID of the cluster to get.`) - - cmd.Use = "find-database-instance-by-uid" - cmd.Short = `Find a Database Instance by uid.` - cmd.Long = `Find a Database Instance by uid.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - response, err := w.DatabaseInstances.FindDatabaseInstanceByUid(ctx, findDatabaseInstanceByUidReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range findDatabaseInstanceByUidOverrides { - fn(cmd, &findDatabaseInstanceByUidReq) - } - - return cmd -} - -// start get-database-catalog command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getDatabaseCatalogOverrides []func( - *cobra.Command, - *catalog.GetDatabaseCatalogRequest, -) - -func newGetDatabaseCatalog() *cobra.Command { - cmd := &cobra.Command{} - - var getDatabaseCatalogReq catalog.GetDatabaseCatalogRequest - - // TODO: short flags - - cmd.Use = "get-database-catalog NAME" - cmd.Short = `Get a Database Catalog.` - cmd.Long = `Get a Database Catalog.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - getDatabaseCatalogReq.Name = args[0] - - response, err := w.DatabaseInstances.GetDatabaseCatalog(ctx, getDatabaseCatalogReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getDatabaseCatalogOverrides { - fn(cmd, &getDatabaseCatalogReq) - } - - return cmd -} - -// start get-database-instance command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getDatabaseInstanceOverrides []func( - *cobra.Command, - *catalog.GetDatabaseInstanceRequest, -) - -func newGetDatabaseInstance() *cobra.Command { - cmd := &cobra.Command{} - - var getDatabaseInstanceReq catalog.GetDatabaseInstanceRequest - - // TODO: short flags - - cmd.Use = "get-database-instance NAME" - cmd.Short = `Get a Database Instance.` - cmd.Long = `Get a Database Instance. 
- - Arguments: - NAME: Name of the cluster to get.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - getDatabaseInstanceReq.Name = args[0] - - response, err := w.DatabaseInstances.GetDatabaseInstance(ctx, getDatabaseInstanceReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getDatabaseInstanceOverrides { - fn(cmd, &getDatabaseInstanceReq) - } - - return cmd -} - -// start get-synced-database-table command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getSyncedDatabaseTableOverrides []func( - *cobra.Command, - *catalog.GetSyncedDatabaseTableRequest, -) - -func newGetSyncedDatabaseTable() *cobra.Command { - cmd := &cobra.Command{} - - var getSyncedDatabaseTableReq catalog.GetSyncedDatabaseTableRequest - - // TODO: short flags - - cmd.Use = "get-synced-database-table NAME" - cmd.Short = `Get a Synced Database Table.` - cmd.Long = `Get a Synced Database Table.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - getSyncedDatabaseTableReq.Name = args[0] - - response, err := w.DatabaseInstances.GetSyncedDatabaseTable(ctx, getSyncedDatabaseTableReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getSyncedDatabaseTableOverrides { - fn(cmd, &getSyncedDatabaseTableReq) - } - - return cmd -} - -// start list-database-instances command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var listDatabaseInstancesOverrides []func( - *cobra.Command, - *catalog.ListDatabaseInstancesRequest, -) - -func newListDatabaseInstances() *cobra.Command { - cmd := &cobra.Command{} - - var listDatabaseInstancesReq catalog.ListDatabaseInstancesRequest - - // TODO: short flags - - cmd.Flags().IntVar(&listDatabaseInstancesReq.PageSize, "page-size", listDatabaseInstancesReq.PageSize, `Upper bound for items returned.`) - cmd.Flags().StringVar(&listDatabaseInstancesReq.PageToken, "page-token", listDatabaseInstancesReq.PageToken, `Pagination token to go to the next page of Database Instances.`) - - cmd.Use = "list-database-instances" - cmd.Short = `List Database Instances.` - cmd.Long = `List Database Instances.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - response := w.DatabaseInstances.ListDatabaseInstances(ctx, listDatabaseInstancesReq) - return cmdio.RenderIterator(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range listDatabaseInstancesOverrides { - fn(cmd, &listDatabaseInstancesReq) - } - - return cmd -} - -// start update-database-instance command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var updateDatabaseInstanceOverrides []func( - *cobra.Command, - *catalog.UpdateDatabaseInstanceRequest, -) - -func newUpdateDatabaseInstance() *cobra.Command { - cmd := &cobra.Command{} - - var updateDatabaseInstanceReq catalog.UpdateDatabaseInstanceRequest - updateDatabaseInstanceReq.DatabaseInstance = catalog.DatabaseInstance{} - var updateDatabaseInstanceJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updateDatabaseInstanceJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.AdminPassword, "admin-password", updateDatabaseInstanceReq.DatabaseInstance.AdminPassword, `Password for admin user to create.`) - cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.AdminRolename, "admin-rolename", updateDatabaseInstanceReq.DatabaseInstance.AdminRolename, `Name of the admin role for the instance.`) - cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", updateDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) - cmd.Flags().BoolVar(&updateDatabaseInstanceReq.DatabaseInstance.Stopped, "stopped", updateDatabaseInstanceReq.DatabaseInstance.Stopped, `Whether the instance is stopped.`) - - cmd.Use = "update-database-instance NAME" - cmd.Short = `Update a Database Instance.` - cmd.Long = `Update a Database Instance. - - Arguments: - NAME: The name of the instance. 
This is the unique identifier for the instance.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := updateDatabaseInstanceJson.Unmarshal(&updateDatabaseInstanceReq.DatabaseInstance) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } - updateDatabaseInstanceReq.Name = args[0] - - response, err := w.DatabaseInstances.UpdateDatabaseInstance(ctx, updateDatabaseInstanceReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range updateDatabaseInstanceOverrides { - fn(cmd, &updateDatabaseInstanceReq) - } - - return cmd -} - -// end service DatabaseInstances diff --git a/cmd/workspace/metastores/overrides.go b/cmd/workspace/metastores/overrides.go index 3ee6a10714..4f81c5ce5b 100644 --- a/cmd/workspace/metastores/overrides.go +++ b/cmd/workspace/metastores/overrides.go @@ -2,10 +2,11 @@ package metastores import ( "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/spf13/cobra" ) -func listOverride(listCmd *cobra.Command) { +func listOverride(listCmd *cobra.Command, req *catalog.ListMetastoresRequest) { listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` {{header "ID"}} {{header "Name"}} {{"Region"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` diff --git a/cmd/workspace/query-execution/query-execution.go b/cmd/workspace/query-execution/query-execution.go deleted file mode 100755 index 63d57bba3d..0000000000 --- a/cmd/workspace/query-execution/query-execution.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package query_execution - -import ( - "fmt" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdctx" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/dashboards" - "github.com/spf13/cobra" -) - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var cmdOverrides []func(*cobra.Command) - -func New() *cobra.Command { - cmd := &cobra.Command{ - Use: "query-execution", - Short: `Query execution APIs for AI / BI Dashboards.`, - Long: `Query execution APIs for AI / BI Dashboards`, - GroupID: "dashboards", - Annotations: map[string]string{ - "package": "dashboards", - }, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, - } - - // Add methods - cmd.AddCommand(newCancelPublishedQueryExecution()) - cmd.AddCommand(newExecutePublishedDashboardQuery()) - cmd.AddCommand(newPollPublishedQueryStatus()) - - // Apply optional overrides to this command. 
- for _, fn := range cmdOverrides { - fn(cmd) - } - - return cmd -} - -// start cancel-published-query-execution command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var cancelPublishedQueryExecutionOverrides []func( - *cobra.Command, - *dashboards.CancelPublishedQueryExecutionRequest, -) - -func newCancelPublishedQueryExecution() *cobra.Command { - cmd := &cobra.Command{} - - var cancelPublishedQueryExecutionReq dashboards.CancelPublishedQueryExecutionRequest - - // TODO: short flags - - // TODO: array: tokens - - cmd.Use = "cancel-published-query-execution DASHBOARD_NAME DASHBOARD_REVISION_ID" - cmd.Short = `Cancel the results for the a query for a published, embedded dashboard.` - cmd.Long = `Cancel the results for the a query for a published, embedded dashboard.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(2) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - cancelPublishedQueryExecutionReq.DashboardName = args[0] - cancelPublishedQueryExecutionReq.DashboardRevisionId = args[1] - - response, err := w.QueryExecution.CancelPublishedQueryExecution(ctx, cancelPublishedQueryExecutionReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range cancelPublishedQueryExecutionOverrides { - fn(cmd, &cancelPublishedQueryExecutionReq) - } - - return cmd -} - -// start execute-published-dashboard-query command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var executePublishedDashboardQueryOverrides []func( - *cobra.Command, - *dashboards.ExecutePublishedDashboardQueryRequest, -) - -func newExecutePublishedDashboardQuery() *cobra.Command { - cmd := &cobra.Command{} - - var executePublishedDashboardQueryReq dashboards.ExecutePublishedDashboardQueryRequest - var executePublishedDashboardQueryJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&executePublishedDashboardQueryJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().StringVar(&executePublishedDashboardQueryReq.OverrideWarehouseId, "override-warehouse-id", executePublishedDashboardQueryReq.OverrideWarehouseId, `A dashboard schedule can override the warehouse used as compute for processing the published dashboard queries.`) - - cmd.Use = "execute-published-dashboard-query DASHBOARD_NAME DASHBOARD_REVISION_ID" - cmd.Short = `Execute a query for a published dashboard.` - cmd.Long = `Execute a query for a published dashboard. 
- - Arguments: - DASHBOARD_NAME: Dashboard name and revision_id is required to retrieve - PublishedDatasetDataModel which contains the list of datasets, - warehouse_id, and embedded_credentials - DASHBOARD_REVISION_ID: ` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'dashboard_name', 'dashboard_revision_id' in your JSON input") - } - return nil - } - check := root.ExactArgs(2) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := executePublishedDashboardQueryJson.Unmarshal(&executePublishedDashboardQueryReq) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } - if !cmd.Flags().Changed("json") { - executePublishedDashboardQueryReq.DashboardName = args[0] - } - if !cmd.Flags().Changed("json") { - executePublishedDashboardQueryReq.DashboardRevisionId = args[1] - } - - err = w.QueryExecution.ExecutePublishedDashboardQuery(ctx, executePublishedDashboardQueryReq) - if err != nil { - return err - } - return nil - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range executePublishedDashboardQueryOverrides { - fn(cmd, &executePublishedDashboardQueryReq) - } - - return cmd -} - -// start poll-published-query-status command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var pollPublishedQueryStatusOverrides []func( - *cobra.Command, - *dashboards.PollPublishedQueryStatusRequest, -) - -func newPollPublishedQueryStatus() *cobra.Command { - cmd := &cobra.Command{} - - var pollPublishedQueryStatusReq dashboards.PollPublishedQueryStatusRequest - - // TODO: short flags - - // TODO: array: tokens - - cmd.Use = "poll-published-query-status DASHBOARD_NAME DASHBOARD_REVISION_ID" - cmd.Short = `Poll the results for the a query for a published, embedded dashboard.` - cmd.Long = `Poll the results for the a query for a published, embedded dashboard.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(2) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - pollPublishedQueryStatusReq.DashboardName = args[0] - pollPublishedQueryStatusReq.DashboardRevisionId = args[1] - - response, err := w.QueryExecution.PollPublishedQueryStatus(ctx, pollPublishedQueryStatusReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. 
- for _, fn := range pollPublishedQueryStatusOverrides { - fn(cmd, &pollPublishedQueryStatusReq) - } - - return cmd -} - -// end service QueryExecution From e4f52100446599bb3088149445109c04a1290337 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Jun 2025 16:11:41 +0200 Subject: [PATCH 5/9] added missing group --- cmd/workspace/groups.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 8827682fa6..1cb736986b 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -76,5 +76,9 @@ func Groups() []cobra.Group { ID: "cleanrooms", Title: "Clean Rooms", }, + { + ID: "aibuilder", + Title: "AI Builder", + }, } } From 39081987855450b648795793fc3591079a088360 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Jun 2025 16:19:08 +0200 Subject: [PATCH 6/9] added missing database group --- cmd/workspace/groups.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 1cb736986b..3088f2e6bb 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -80,5 +80,9 @@ func Groups() []cobra.Group { ID: "aibuilder", Title: "AI Builder", }, + { + ID: "database", + Title: "Database", + }, } } From 61fdfb8042a84a8777c351c39c3ee8539341a734 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Jun 2025 16:24:12 +0200 Subject: [PATCH 7/9] added missing qualitymonitorv2 group id --- cmd/workspace/groups.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 3088f2e6bb..817f915345 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -84,5 +84,9 @@ func Groups() []cobra.Group { ID: "database", Title: "Database", }, + { + ID: "qualitymonitorv2", + Title: "Quality Monitor v2", + }, } } From acd3d0236ca4c8557e3eea7a3e4e1692b3de1d55 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Jun 2025 16:44:06 +0200 Subject: [PATCH 8/9] do not show empty groups --- acceptance/help/output.txt | 4 +++ cmd/cmd.go | 52 +++++++++++++++++++++++++++++++++----- 2 files changed, 49 insertions(+), 7 deletions(-) diff --git a/acceptance/help/output.txt b/acceptance/help/output.txt index 4f2f1163aa..cdfa4d5a82 100644 --- a/acceptance/help/output.txt +++ b/acceptance/help/output.txt @@ -44,6 +44,7 @@ Identity and Access Management Databricks SQL alerts The alerts API can be used to perform CRUD operations on alerts. alerts-legacy The alerts API can be used to perform CRUD operations on alerts. + alerts-v2 New version of SQL Alerts. dashboards In general, there is little need to modify dashboards using the API. data-sources This API is provided to assist you in making new query objects. queries The queries API can be used to perform CRUD operations on queries. @@ -123,6 +124,9 @@ Clean Rooms clean-room-task-runs Clean room task runs are the executions of notebooks in a clean room. clean-rooms A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other’s data. +Quality Monitor v2 + quality-monitor-v2 Manage data quality of UC objects (currently support schema). 
+ Additional Commands: account Databricks Account Commands api Perform Databricks API call diff --git a/cmd/cmd.go b/cmd/cmd.go index 4f5337fd3c..acd8968df4 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -24,6 +24,41 @@ const ( permissionsGroup = "permissions" ) +// filterGroups returns command groups that have at least one available (non-hidden) command. +// Empty groups or groups with only hidden commands are filtered out from the help output. +// Commands that belong to filtered groups will have their GroupID cleared. +func filterGroups(groups []cobra.Group, allCommands []*cobra.Command) []cobra.Group { + var filteredGroups []cobra.Group + + // Create a map to track which groups have available commands + groupHasAvailableCommands := make(map[string]bool) + + // Check each command to see if it belongs to a group and is available + for _, cmd := range allCommands { + if cmd.GroupID != "" && cmd.IsAvailableCommand() { + groupHasAvailableCommands[cmd.GroupID] = true + } + } + + // Collect groups that have available commands + validGroupIDs := make(map[string]bool) + for _, group := range groups { + if groupHasAvailableCommands[group.ID] { + filteredGroups = append(filteredGroups, group) + validGroupIDs[group.ID] = true + } + } + + // Clear GroupID for commands that belong to filtered groups + for _, cmd := range allCommands { + if cmd.GroupID != "" && !validGroupIDs[cmd.GroupID] { + cmd.GroupID = "" + } + } + + return filteredGroups +} + func New(ctx context.Context) *cobra.Command { cli := root.New(ctx) @@ -31,7 +66,8 @@ func New(ctx context.Context) *cobra.Command { cli.AddCommand(account.New()) // Add workspace subcommands. - for _, cmd := range workspace.All() { + workspaceCommands := workspace.All() + for _, cmd := range workspaceCommands { // Built-in groups for the workspace commands. groups := []cobra.Group{ { @@ -60,12 +96,6 @@ func New(ctx context.Context) *cobra.Command { cli.AddCommand(cmd) } - // Add workspace command groups. - groups := workspace.Groups() - for i := range groups { - cli.AddGroup(&groups[i]) - } - // Add other subcommands. cli.AddCommand(api.New()) cli.AddCommand(auth.New()) @@ -77,5 +107,13 @@ func New(ctx context.Context) *cobra.Command { cli.AddCommand(version.New()) cli.AddCommand(selftest.New()) + // Add workspace command groups, filtering out empty groups or groups with only hidden commands. + allGroups := workspace.Groups() + allCommands := cli.Commands() + filteredGroups := filterGroups(allGroups, allCommands) + for i := range filteredGroups { + cli.AddGroup(&filteredGroups[i]) + } + return cli } From 7ac6474af6047ce468fa8454685b7346e1126346 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 10 Jun 2025 16:52:05 +0200 Subject: [PATCH 9/9] fixed walktype test --- libs/structwalk/walktype_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/structwalk/walktype_test.go b/libs/structwalk/walktype_test.go index fd20c8f663..def9ddd095 100644 --- a/libs/structwalk/walktype_test.go +++ b/libs/structwalk/walktype_test.go @@ -117,7 +117,7 @@ func TestTypeJobSettings(t *testing.T) { func TestTypeRoot(t *testing.T) { testStruct(t, reflect.TypeOf(config.Root{}), - 3400, 3500, // 3487 at this time + 3500, 3600, // 3516 at this time map[string]any{ ".bundle.target": "", `.variables[*].lookup.dashboard`: "",