From 1f51c0db12ad0b9b326329a6be9fc8ee3427b208 Mon Sep 17 00:00:00 2001 From: Anton Nekipelov <226657+anton-107@users.noreply.github.com> Date: Wed, 15 Oct 2025 15:35:43 +0200 Subject: [PATCH 1/7] Upgrade go sdk to 0.85.0 --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + .../internal/schema/annotations_openapi.yml | 265 ++++-- .../schema/annotations_openapi_overrides.yml | 20 + .../validation/generated/enum_fields.go | 2 + .../validation/generated/required_fields.go | 2 +- bundle/schema/jsonschema.json | 275 +++++- cmd/account/cmd.go | 19 +- cmd/account/credentials/credentials.go | 58 +- .../encryption-keys/encryption-keys.go | 22 +- .../metastore-assignments.go | 18 +- cmd/account/metastores/metastores.go | 8 +- cmd/account/networks/networks.go | 82 +- cmd/account/private-access/private-access.go | 206 ++-- .../storage-credentials.go | 19 +- cmd/account/storage/storage.go | 90 +- cmd/account/vpc-endpoints/vpc-endpoints.go | 105 +- cmd/account/workspaces/workspaces.go | 349 ++----- cmd/workspace/apps/apps.go | 181 ++++ cmd/workspace/catalogs/catalogs.go | 12 +- cmd/workspace/cmd.go | 2 + cmd/workspace/connections/connections.go | 11 +- cmd/workspace/credentials/credentials.go | 1 + cmd/workspace/data-quality/data-quality.go | 895 ++++++++++++++++++ cmd/workspace/database/database.go | 31 +- .../external-locations/external-locations.go | 12 +- .../external-metadata/external-metadata.go | 2 + cmd/workspace/functions/functions.go | 17 +- .../git-credentials/git-credentials.go | 6 +- cmd/workspace/jobs/jobs.go | 34 +- cmd/workspace/metastores/metastores.go | 11 +- .../model-versions/model-versions.go | 16 + cmd/workspace/permissions/permissions.go | 16 +- cmd/workspace/pipelines/pipelines.go | 38 +- .../registered-models/registered-models.go | 58 +- cmd/workspace/schemas/schemas.go | 9 + .../serving-endpoints/serving-endpoints.go | 75 ++ .../storage-credentials.go | 12 +- .../system-schemas/system-schemas.go | 9 + cmd/workspace/tables/tables.go | 9 + cmd/workspace/volumes/volumes.go | 9 +- cmd/workspace/warehouses/warehouses.go | 15 +- .../bundles/jobs/_models/condition.py | 4 - .../bundles/jobs/_models/environment.py | 4 +- .../table_update_trigger_configuration.py | 8 +- .../bundles/jobs/_models/trigger_settings.py | 6 - .../bundles/volumes/_models/volume_type.py | 8 +- go.mod | 2 +- go.sum | 4 +- internal/genkit/tagging.py | 112 +-- 50 files changed, 2202 insertions(+), 970 deletions(-) create mode 100755 cmd/workspace/data-quality/data-quality.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index bdd95ecce2..20842dcedb 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -f2843dd06e095a39dda2d454a97ceaf6767a2bf3 \ No newline at end of file +c4784cea599325a13472b1455e7434d639362d8b \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index df4740a1f1..5af3377116 100755 --- a/.gitattributes +++ b/.gitattributes @@ -72,6 +72,7 @@ cmd/workspace/current-user/current-user.go linguist-generated=true cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go linguist-generated=true cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true +cmd/workspace/data-quality/data-quality.go linguist-generated=true cmd/workspace/data-sources/data-sources.go linguist-generated=true cmd/workspace/database/database.go linguist-generated=true cmd/workspace/default-namespace/default-namespace.go linguist-generated=true diff --git 
a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index e0f6063a8b..10a05e4368 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -10,6 +10,7 @@ github.com/databricks/cli/bundle/config/resources.App: "x-databricks-field-behaviors_output_only": |- true "budget_policy_id": {} + "compute_size": {} "compute_status": "x-databricks-field-behaviors_output_only": |- true @@ -319,39 +320,51 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: The email of the creator of the instance. "x-databricks-field-behaviors_output_only": |- true + "custom_tags": + "description": |- + Custom tags associated with the instance. This field is only included on create and update responses. + "effective_capacity": + "description": |- + Deprecated. The sku of the instance; this field will always match the value of capacity. + "deprecation_message": |- + This field is deprecated + "x-databricks-field-behaviors_output_only": |- + true + "effective_custom_tags": + "description": |- + The recorded custom tags associated with the instance. + "x-databricks-field-behaviors_output_only": |- + true "effective_enable_pg_native_login": "description": |- - xref AIP-129. `enable_pg_native_login` is owned by the client, while `effective_enable_pg_native_login` is owned by the server. - `enable_pg_native_login` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_enable_pg_native_login` on the other hand will always bet set in all response messages (Create/Update/Get/List). + Whether the instance has PG native password login enabled. "x-databricks-field-behaviors_output_only": |- true "effective_enable_readable_secondaries": "description": |- - xref AIP-129. `enable_readable_secondaries` is owned by the client, while `effective_enable_readable_secondaries` is owned by the server. - `enable_readable_secondaries` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_enable_readable_secondaries` on the other hand will always bet set in all response messages (Create/Update/Get/List). + Whether secondaries serving read-only traffic are enabled. Defaults to false. "x-databricks-field-behaviors_output_only": |- true "effective_node_count": "description": |- - xref AIP-129. `node_count` is owned by the client, while `effective_node_count` is owned by the server. - `node_count` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_node_count` on the other hand will always bet set in all response messages (Create/Update/Get/List). + The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to + 1 primary and 0 secondaries. "x-databricks-field-behaviors_output_only": |- true "effective_retention_window_in_days": "description": |- - xref AIP-129. `retention_window_in_days` is owned by the client, while `effective_retention_window_in_days` is owned by the server. - `retention_window_in_days` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_retention_window_in_days` on the other hand will always bet set in all response messages (Create/Update/Get/List). + The retention window for the instance. This is the time window in days + for which the historical data is retained. 
"x-databricks-field-behaviors_output_only": |- true "effective_stopped": "description": |- - xref AIP-129. `stopped` is owned by the client, while `effective_stopped` is owned by the server. - `stopped` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_stopped` on the other hand will always bet set in all response messages (Create/Update/Get/List). + Whether the instance is stopped. + "x-databricks-field-behaviors_output_only": |- + true + "effective_usage_policy_id": + "description": |- + The policy that is applied to the instance. "x-databricks-field-behaviors_output_only": |- true "enable_pg_native_login": @@ -407,6 +420,9 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: An immutable UUID identifier for the instance. "x-databricks-field-behaviors_output_only": |- true + "usage_policy_id": + "description": |- + The desired usage policy to associate with the instance. github.com/databricks/cli/bundle/config/resources.Job: "budget_policy_id": "description": |- @@ -514,31 +530,19 @@ github.com/databricks/cli/bundle/config/resources.Job: "description": |- A collection of system notification IDs to notify when runs of this job begin or complete. github.com/databricks/cli/bundle/config/resources.MlflowExperiment: - "_": - "description": |- - An experiment and its metadata. "artifact_location": "description": |- - Location where artifacts for the experiment are stored. - "creation_time": - "description": |- - Creation time - "experiment_id": - "description": |- - Unique identifier for the experiment. - "last_update_time": - "description": |- - Last update time - "lifecycle_stage": - "description": |- - Current life cycle stage of the experiment: "active" or "deleted". - Deleted experiments are not returned by APIs. + Location where all artifacts for the experiment are stored. + If not provided, the remote server will select an appropriate default. "name": "description": |- - Human readable name that identifies the experiment. + Experiment name. "tags": "description": |- - Tags: Additional metadata key-value pairs. + A collection of tags to set on the experiment. Maximum tag size and number of tags per request + depends on the storage backend. All storage backends are guaranteed to support tag keys up + to 250 bytes in size and tag values up to 5000 bytes in size. All storage backends are also + guaranteed to support up to 20 tags per request. github.com/databricks/cli/bundle/config/resources.MlflowModel: "description": "description": |- @@ -736,21 +740,48 @@ github.com/databricks/cli/bundle/config/resources.QualityMonitor: Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. github.com/databricks/cli/bundle/config/resources.RegisteredModel: + "aliases": + "description": |- + List of aliases associated with the registered model + "browse_only": + "description": |- + Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request. 
"catalog_name": "description": |- The name of the catalog where the schema and the registered model reside "comment": "description": |- The comment attached to the registered model + "created_at": + "description": |- + Creation timestamp of the registered model in milliseconds since the Unix epoch + "created_by": + "description": |- + The identifier of the user who created the registered model + "full_name": + "description": |- + The three-level (fully qualified) name of the registered model + "metastore_id": + "description": |- + The unique identifier of the metastore "name": "description": |- The name of the registered model + "owner": + "description": |- + The identifier of the user who owns the registered model "schema_name": "description": |- The name of the schema where the registered model resides "storage_location": "description": |- The storage location on the cloud under which model version data files are stored + "updated_at": + "description": |- + Last-update timestamp of the registered model in milliseconds since the Unix epoch + "updated_by": + "description": |- + The identifier of the user who updated the registered model last time github.com/databricks/cli/bundle/config/resources.Schema: "catalog_name": "description": |- @@ -768,36 +799,39 @@ github.com/databricks/cli/bundle/config/resources.Schema: "description": |- Storage root URL for managed tables within schema. github.com/databricks/cli/bundle/config/resources.SqlWarehouse: + "_": + "description": |- + Creates a new SQL warehouse. "auto_stop_mins": "description": |- - The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before - it is automatically stopped. + The amount of time in minutes that a SQL warehouse must be idle (i.e., no + RUNNING queries) before it is automatically stopped. Supported values: - - Must be >= 0 mins for serverless warehouses - - Must be == 0 or >= 10 mins for non-serverless warehouses - - 0 indicates no autostop. + - Must be == 0 or >= 10 mins + - 0 indicates no autostop. Defaults to 120 mins "channel": "description": |- Channel Details "cluster_size": - "description": | + "description": |- Size of the clusters allocated for this warehouse. - Increasing the size of a spark cluster allows you to run larger queries on it. - If you want to increase the number of concurrent queries, please tune max_num_clusters. + Increasing the size of a spark cluster allows you to run larger queries on + it. If you want to increase the number of concurrent queries, please tune + max_num_clusters. Supported values: - - 2X-Small - - X-Small - - Small - - Medium - - Large - - X-Large - - 2X-Large - - 3X-Large - - 4X-Large + - 2X-Small + - X-Small + - Small + - Medium + - Large + - X-Large + - 2X-Large + - 3X-Large + - 4X-Large "creator_name": "description": |- warehouse creator name @@ -816,22 +850,25 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: This field is deprecated "max_num_clusters": "description": |- - Maximum number of clusters that the autoscaler will create to handle concurrent queries. + Maximum number of clusters that the autoscaler will create to handle + concurrent queries. Supported values: - - Must be >= min_num_clusters - - Must be <= 30. + - Must be >= min_num_clusters + - Must be <= 40. Defaults to min_clusters if unset. "min_num_clusters": "description": |- - Minimum number of available clusters that will be maintained for this SQL warehouse. 
- Increasing this will ensure that a larger number of clusters are always running and therefore may reduce - the cold start time for new queries. This is similar to reserved vs. revocable cores in a resource manager. + Minimum number of available clusters that will be maintained for this SQL + warehouse. Increasing this will ensure that a larger number of clusters are + always running and therefore may reduce the cold start time for new + queries. This is similar to reserved vs. revocable cores in a resource + manager. Supported values: - - Must be > 0 - - Must be <= min(max_num_clusters, 30) + - Must be > 0 + - Must be <= min(max_num_clusters, 30) Defaults to 1 "name": @@ -839,25 +876,26 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: Logical name for the cluster. Supported values: - - Must be unique within an org. - - Must be less than 100 characters. + - Must be unique within an org. + - Must be less than 100 characters. "spot_instance_policy": "description": |- - Configurations whether the warehouse should use spot instances. + Configurations whether the endpoint should use spot instances. "tags": "description": |- A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. Supported values: - - Number of tags < 45. + - Number of tags < 45. "warehouse_type": "description": |- - Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. + Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, + you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: "_": "description": |- - Next field marker: 14 + Next field marker: 18 "data_synchronization_status": "description": |- Synced Table data synchronization status @@ -923,7 +961,9 @@ github.com/databricks/cli/bundle/config/resources.Volume: The storage location on the cloud "volume_type": "description": |- - The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) + The type of the volume. An external volume is located in the specified external location. + A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. + [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) github.com/databricks/databricks-sdk-go/service/apps.AppDeployment: "create_time": "description": |- @@ -1000,6 +1040,7 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResource: "description": "description": |- Description of the App Resource. 
+ "genie_space": {} "job": {} "name": "description": |- @@ -1017,6 +1058,21 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResourceDatabaseDatabase "enum": - |- CAN_CONNECT_AND_CREATE +github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpace: + "name": {} + "permission": {} + "space_id": {} +github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpaceGenieSpacePermission: + "_": + "enum": + - |- + CAN_MANAGE + - |- + CAN_EDIT + - |- + CAN_RUN + - |- + CAN_VIEW github.com/databricks/databricks-sdk-go/service/apps.AppResourceJob: "id": "description": |- @@ -1126,6 +1182,15 @@ github.com/databricks/databricks-sdk-go/service/apps.ApplicationStatus: State of the application. "x-databricks-field-behaviors_output_only": |- true +github.com/databricks/databricks-sdk-go/service/apps.ComputeSize: + "_": + "enum": + - |- + MEDIUM + - |- + LARGE + - |- + LIQUID github.com/databricks/databricks-sdk-go/service/apps.ComputeState: "_": "enum": @@ -1284,15 +1349,32 @@ github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries: "timestamp_col": "description": |- Column for the timestamp. +github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias: + "alias_name": + "description": |- + Name of the alias, e.g. 'champion' or 'latest_stable' + "catalog_name": + "description": |- + The name of the catalog containing the model version + "id": + "description": |- + The unique identifier of the alias + "model_name": + "description": |- + The name of the parent registered model of the model version, relative to parent schema + "schema_name": + "description": |- + The name of the schema containing the model version, relative to parent catalog + "version_num": + "description": |- + Integer version number of the model version to which this alias points. github.com/databricks/databricks-sdk-go/service/catalog.VolumeType: "_": - "description": |- - The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) "enum": - - |- - EXTERNAL - |- MANAGED + - |- + EXTERNAL github.com/databricks/databricks-sdk-go/service/compute.Adlsgen2Info: "_": "description": |- @@ -1755,16 +1837,9 @@ github.com/databricks/databricks-sdk-go/service/compute.Environment: Required. Environment version used by the environment. Each version comes with a specific Python version and a set of Python packages. The version is a string, consisting of an integer. - "jar_dependencies": - "description": |- - Use `java_dependencies` instead. - "deprecation_message": |- - This field is deprecated - "x-databricks-preview": |- - PRIVATE "java_dependencies": "description": |- - List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`. + List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`. "x-databricks-preview": |- PRIVATE github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes: @@ -2016,6 +2091,13 @@ github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState: ACTIVE - |- TRASHED +github.com/databricks/databricks-sdk-go/service/database.CustomTag: + "key": + "description": |- + The key of the custom tag. + "value": + "description": |- + The value of the custom tag. 
github.com/databricks/databricks-sdk-go/service/database.DatabaseInstanceRef: "_": "description": |- @@ -3226,7 +3308,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfigura the last time the trigger fired. The minimum allowed value is 60 seconds. "table_names": "description": |- - A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. "wait_after_last_change_seconds": "description": |- If set, the trigger starts a run only after no table updates have occurred for the specified time @@ -3436,9 +3518,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.TriggerSettings: This field is deprecated "x-databricks-preview": |- PRIVATE - "table_update": - "x-databricks-preview": |- - PRIVATE + "table_update": {} github.com/databricks/databricks-sdk-go/service/jobs.Webhook: "id": {} github.com/databricks/databricks-sdk-go/service/jobs.WebhookNotifications: @@ -4722,8 +4802,6 @@ github.com/databricks/databricks-sdk-go/service/sql.ChannelName: CHANNEL_NAME_CUSTOM github.com/databricks/databricks-sdk-go/service/sql.CreateWarehouseRequestWarehouseType: "_": - "description": |- - Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. "enum": - |- TYPE_UNSPECIFIED @@ -4739,7 +4817,24 @@ github.com/databricks/databricks-sdk-go/service/sql.EndpointTags: github.com/databricks/databricks-sdk-go/service/sql.SpotInstancePolicy: "_": "description": |- - Configurations whether the warehouse should use spot instances. + EndpointSpotInstancePolicy configures whether the endpoint should use spot + instances. + + The breakdown of how the EndpointSpotInstancePolicy converts to per cloud + configurations is: + + +-------+--------------------------------------+--------------------------------+ + | Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED | + +-------+--------------------------------------+--------------------------------+ + | AWS | On Demand Driver with Spot Executors | On Demand Driver and + Executors | | AZURE | On Demand Driver and Executors | On Demand Driver + and Executors | + +-------+--------------------------------------+--------------------------------+ + + While including "spot" in the enum name may limit the future + extensibility of this field because it limits this enum to denoting "spot or + not", this is the field that PM recommends after discussion with customers + per SC-48783. 
"enum": - |- POLICY_UNSPECIFIED diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index c2b9b9c23c..51ed3d0db4 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -5,6 +5,9 @@ github.com/databricks/cli/bundle/config/resources.App: "budget_policy_id": "description": |- PLACEHOLDER + "compute_size": + "description": |- + PLACEHOLDER "compute_status": "description": |- PLACEHOLDER @@ -591,6 +594,10 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehousePermissionLevel: CAN_MONITOR - |- CAN_VIEW +github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: + "lifecycle": + "description": |- + PLACEHOLDER github.com/databricks/cli/bundle/config/resources.Volume: "_": "markdown_description": |- @@ -674,6 +681,9 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResource: "database": "description": |- PLACEHOLDER + "genie_space": + "description": |- + PLACEHOLDER "job": "description": |- PLACEHOLDER @@ -699,6 +709,16 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResourceDatabase: "permission": "description": |- PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpace: + "name": + "description": |- + PLACEHOLDER + "permission": + "description": |- + PLACEHOLDER + "space_id": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/apps.AppResourceJob: "id": "description": |- diff --git a/bundle/internal/validation/generated/enum_fields.go b/bundle/internal/validation/generated/enum_fields.go index 2f24e03a4c..70d0737ae9 100644 --- a/bundle/internal/validation/generated/enum_fields.go +++ b/bundle/internal/validation/generated/enum_fields.go @@ -15,10 +15,12 @@ var EnumFields = map[string][]string{ "resources.apps.*.active_deployment.mode": {"AUTO_SYNC", "SNAPSHOT"}, "resources.apps.*.active_deployment.status.state": {"CANCELLED", "FAILED", "IN_PROGRESS", "SUCCEEDED"}, "resources.apps.*.app_status.state": {"CRASHED", "DEPLOYING", "RUNNING", "UNAVAILABLE"}, + "resources.apps.*.compute_size": {"LARGE", "LIQUID", "MEDIUM"}, "resources.apps.*.compute_status.state": {"ACTIVE", "DELETING", "ERROR", "STARTING", "STOPPED", "STOPPING", "UPDATING"}, "resources.apps.*.pending_deployment.mode": {"AUTO_SYNC", "SNAPSHOT"}, "resources.apps.*.pending_deployment.status.state": {"CANCELLED", "FAILED", "IN_PROGRESS", "SUCCEEDED"}, "resources.apps.*.resources[*].database.permission": {"CAN_CONNECT_AND_CREATE"}, + "resources.apps.*.resources[*].genie_space.permission": {"CAN_EDIT", "CAN_MANAGE", "CAN_RUN", "CAN_VIEW"}, "resources.apps.*.resources[*].job.permission": {"CAN_MANAGE", "CAN_MANAGE_RUN", "CAN_VIEW", "IS_OWNER"}, "resources.apps.*.resources[*].secret.permission": {"MANAGE", "READ", "WRITE"}, "resources.apps.*.resources[*].serving_endpoint.permission": {"CAN_MANAGE", "CAN_QUERY", "CAN_VIEW"}, diff --git a/bundle/internal/validation/generated/required_fields.go b/bundle/internal/validation/generated/required_fields.go index 12a96b7905..9e4fb00a0a 100644 --- a/bundle/internal/validation/generated/required_fields.go +++ b/bundle/internal/validation/generated/required_fields.go @@ -19,6 +19,7 @@ var RequiredFields = map[string][]string{ "resources.apps.*.permissions[*]": {"level"}, "resources.apps.*.resources[*]": {"name"}, "resources.apps.*.resources[*].database": {"database_name", "instance_name", "permission"}, + 
"resources.apps.*.resources[*].genie_space": {"name", "permission", "space_id"}, "resources.apps.*.resources[*].job": {"id", "permission"}, "resources.apps.*.resources[*].secret": {"key", "permission", "scope"}, "resources.apps.*.resources[*].serving_endpoint": {"name", "permission"}, @@ -197,7 +198,6 @@ var RequiredFields = map[string][]string{ "resources.quality_monitors.*.schedule": {"quartz_cron_expression", "timezone_id"}, "resources.quality_monitors.*.time_series": {"granularities", "timestamp_col"}, - "resources.registered_models.*": {"catalog_name", "name", "schema_name"}, "resources.registered_models.*.grants[*]": {"privileges", "principal"}, "resources.schemas.*": {"catalog_name", "name"}, diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index eee0d8782a..681398872c 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -76,6 +76,9 @@ "budget_policy_id": { "$ref": "#/$defs/string" }, + "compute_size": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.ComputeSize" + }, "compute_status": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.ComputeStatus", "doNotSuggest": true @@ -603,34 +606,51 @@ "$ref": "#/$defs/string", "doNotSuggest": true }, + "custom_tags": { + "description": "Custom tags associated with the instance. This field is only included on create and update responses.", + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/database.CustomTag" + }, "effective_capacity": { - "$ref": "#/$defs/string" + "$ref": "#/$defs/string", + "deprecationMessage": "This field is deprecated", + "doNotSuggest": true, + "deprecated": true + }, + "effective_custom_tags": { + "description": "The recorded custom tags associated with the instance.", + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/database.CustomTag", + "doNotSuggest": true }, "effective_enable_pg_native_login": { - "description": "xref AIP-129. `enable_pg_native_login` is owned by the client, while `effective_enable_pg_native_login` is owned by the server.\n`enable_pg_native_login` will only be set in Create/Update response messages if and only if the user provides the field via the request.\n`effective_enable_pg_native_login` on the other hand will always bet set in all response messages (Create/Update/Get/List).", + "description": "Whether the instance has PG native password login enabled.", "$ref": "#/$defs/bool", "doNotSuggest": true }, "effective_enable_readable_secondaries": { - "description": "xref AIP-129. `enable_readable_secondaries` is owned by the client, while `effective_enable_readable_secondaries` is owned by the server.\n`enable_readable_secondaries` will only be set in Create/Update response messages if and only if the user provides the field via the request.\n`effective_enable_readable_secondaries` on the other hand will always bet set in all response messages (Create/Update/Get/List).", + "description": "Whether secondaries serving read-only traffic are enabled. Defaults to false.", "$ref": "#/$defs/bool", "doNotSuggest": true }, "effective_node_count": { - "description": "xref AIP-129. 
`node_count` is owned by the client, while `effective_node_count` is owned by the server.\n`node_count` will only be set in Create/Update response messages if and only if the user provides the field via the request.\n`effective_node_count` on the other hand will always bet set in all response messages (Create/Update/Get/List).", + "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries.", "$ref": "#/$defs/int", "doNotSuggest": true }, "effective_retention_window_in_days": { - "description": "xref AIP-129. `retention_window_in_days` is owned by the client, while `effective_retention_window_in_days` is owned by the server.\n`retention_window_in_days` will only be set in Create/Update response messages if and only if the user provides the field via the request.\n`effective_retention_window_in_days` on the other hand will always bet set in all response messages (Create/Update/Get/List).", + "description": "The retention window for the instance. This is the time window in days\nfor which the historical data is retained.", "$ref": "#/$defs/int", "doNotSuggest": true }, "effective_stopped": { - "description": "xref AIP-129. `stopped` is owned by the client, while `effective_stopped` is owned by the server.\n`stopped` will only be set in Create/Update response messages if and only if the user provides the field via the request.\n`effective_stopped` on the other hand will always bet set in all response messages (Create/Update/Get/List).", + "description": "Whether the instance is stopped.", "$ref": "#/$defs/bool", "doNotSuggest": true }, + "effective_usage_policy_id": { + "description": "The policy that is applied to the instance.", + "$ref": "#/$defs/string", + "doNotSuggest": true + }, "enable_pg_native_login": { "description": "Whether to enable PG native password login on the instance. Defaults to false.", "$ref": "#/$defs/bool" @@ -690,6 +710,10 @@ "description": "An immutable UUID identifier for the instance.", "$ref": "#/$defs/string", "doNotSuggest": true + }, + "usage_policy_id": { + "description": "The desired usage policy to associate with the instance.", + "$ref": "#/$defs/string" } }, "additionalProperties": false, @@ -952,10 +976,9 @@ "oneOf": [ { "type": "object", - "description": "An experiment and its metadata.", "properties": { "artifact_location": { - "description": "Location where artifacts for the experiment are stored.", + "description": "Location where all artifacts for the experiment are stored.\nIf not provided, the remote server will select an appropriate default.", "$ref": "#/$defs/string" }, "lifecycle": { @@ -963,14 +986,14 @@ "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" }, "name": { - "description": "Human readable name that identifies the experiment.", + "description": "Experiment name.", "$ref": "#/$defs/string" }, "permissions": { "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.MlflowExperimentPermission" }, "tags": { - "description": "Tags: Additional metadata key-value pairs.", + "description": "A collection of tags to set on the experiment. Maximum tag size and number of tags per request\ndepends on the storage backend. All storage backends are guaranteed to support tag keys up\nto 250 bytes in size and tag values up to 5000 bytes in size. 
All storage backends are also\nguaranteed to support up to 20 tags per request.", "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/ml.ExperimentTag" } }, @@ -1533,6 +1556,14 @@ { "type": "object", "properties": { + "aliases": { + "description": "List of aliases associated with the registered model", + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias" + }, + "browse_only": { + "description": "Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request.", + "$ref": "#/$defs/bool" + }, "catalog_name": { "description": "The name of the catalog where the schema and the registered model reside", "$ref": "#/$defs/string" @@ -1541,6 +1572,18 @@ "description": "The comment attached to the registered model", "$ref": "#/$defs/string" }, + "created_at": { + "description": "Creation timestamp of the registered model in milliseconds since the Unix epoch", + "$ref": "#/$defs/int64" + }, + "created_by": { + "description": "The identifier of the user who created the registered model", + "$ref": "#/$defs/string" + }, + "full_name": { + "description": "The three-level (fully qualified) name of the registered model", + "$ref": "#/$defs/string" + }, "grants": { "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Grant" }, @@ -1548,10 +1591,18 @@ "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" }, + "metastore_id": { + "description": "The unique identifier of the metastore", + "$ref": "#/$defs/string" + }, "name": { "description": "The name of the registered model", "$ref": "#/$defs/string" }, + "owner": { + "description": "The identifier of the user who owns the registered model", + "$ref": "#/$defs/string" + }, "schema_name": { "description": "The name of the schema where the registered model resides", "$ref": "#/$defs/string" @@ -1559,14 +1610,17 @@ "storage_location": { "description": "The storage location on the cloud under which model version data files are stored", "$ref": "#/$defs/string" + }, + "updated_at": { + "description": "Last-update timestamp of the registered model in milliseconds since the Unix epoch", + "$ref": "#/$defs/int64" + }, + "updated_by": { + "description": "The identifier of the user who updated the registered model last time", + "$ref": "#/$defs/string" } }, "additionalProperties": false, - "required": [ - "catalog_name", - "name", - "schema_name" - ], "markdownDescription": "The registered model resource allows you to define models in Unity Catalog. For information about Unity Catalog [registered models](https://docs.databricks.com/api/workspace/registeredmodels/create), see [link](https://docs.databricks.com/machine-learning/manage-model-lifecycle/index.html)." 
}, { @@ -1760,9 +1814,10 @@ "oneOf": [ { "type": "object", + "description": "Creates a new SQL warehouse.", "properties": { "auto_stop_mins": { - "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before\nit is automatically stopped.\n\nSupported values:\n - Must be \u003e= 0 mins for serverless warehouses\n - Must be == 0 or \u003e= 10 mins for non-serverless warehouses\n - 0 indicates no autostop.\n\nDefaults to 120 mins", + "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no\nRUNNING queries) before it is automatically stopped.\n\nSupported values:\n- Must be == 0 or \u003e= 10 mins\n- 0 indicates no autostop.\n\nDefaults to 120 mins", "$ref": "#/$defs/int" }, "channel": { @@ -1770,7 +1825,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.Channel" }, "cluster_size": { - "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on it.\nIf you want to increase the number of concurrent queries, please tune max_num_clusters.\n\nSupported values:\n - 2X-Small\n - X-Small\n - Small\n - Medium\n - Large\n - X-Large\n - 2X-Large\n - 3X-Large\n - 4X-Large\n", + "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large", "$ref": "#/$defs/string" }, "creator_name": { @@ -1796,15 +1851,15 @@ "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" }, "max_num_clusters": { - "description": "Maximum number of clusters that the autoscaler will create to handle concurrent queries.\n\nSupported values:\n - Must be \u003e= min_num_clusters\n - Must be \u003c= 30.\n\nDefaults to min_clusters if unset.", + "description": "Maximum number of clusters that the autoscaler will create to handle\nconcurrent queries.\n\nSupported values:\n- Must be \u003e= min_num_clusters\n- Must be \u003c= 40.\n\nDefaults to min_clusters if unset.", "$ref": "#/$defs/int" }, "min_num_clusters": { - "description": "Minimum number of available clusters that will be maintained for this SQL warehouse.\nIncreasing this will ensure that a larger number of clusters are always running and therefore may reduce\nthe cold start time for new queries. This is similar to reserved vs. revocable cores in a resource manager.\n\nSupported values:\n - Must be \u003e 0\n - Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", + "description": "Minimum number of available clusters that will be maintained for this SQL\nwarehouse. Increasing this will ensure that a larger number of clusters are\nalways running and therefore may reduce the cold start time for new\nqueries. This is similar to reserved vs. 
revocable cores in a resource\nmanager.\n\nSupported values:\n- Must be \u003e 0\n- Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", "$ref": "#/$defs/int" }, "name": { - "description": "Logical name for the cluster.\n\nSupported values:\n - Must be unique within an org.\n - Must be less than 100 characters.", + "description": "Logical name for the cluster.\n\nSupported values:\n- Must be unique within an org.\n- Must be less than 100 characters.", "$ref": "#/$defs/string" }, "permissions": { @@ -1814,7 +1869,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.SpotInstancePolicy" }, "tags": { - "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated\nwith this SQL warehouse.\n\nSupported values:\n - Number of tags \u003c 45.", + "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated\nwith this SQL warehouse.\n\nSupported values:\n- Number of tags \u003c 45.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.EndpointTags" }, "warehouse_type": { @@ -1879,7 +1934,7 @@ "oneOf": [ { "type": "object", - "description": "Next field marker: 14", + "description": "Next field marker: 18", "properties": { "data_synchronization_status": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTableStatus", @@ -2877,6 +2932,9 @@ "description": "Description of the App Resource.", "$ref": "#/$defs/string" }, + "genie_space": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpace" + }, "job": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.AppResourceJob" }, @@ -2950,6 +3008,51 @@ } ] }, + "apps.AppResourceGenieSpace": { + "oneOf": [ + { + "type": "object", + "properties": { + "name": { + "$ref": "#/$defs/string" + }, + "permission": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpaceGenieSpacePermission" + }, + "space_id": { + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "name", + "permission", + "space_id" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "apps.AppResourceGenieSpaceGenieSpacePermission": { + "oneOf": [ + { + "type": "string", + "enum": [ + "CAN_MANAGE", + "CAN_EDIT", + "CAN_RUN", + "CAN_VIEW" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "apps.AppResourceJob": { "oneOf": [ { @@ -3212,6 +3315,22 @@ } ] }, + "apps.ComputeSize": { + "oneOf": [ + { + "type": "string", + "enum": [ + "MEDIUM", + "LARGE", + "LIQUID" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "apps.ComputeState": { "oneOf": [ { @@ -3526,14 +3645,51 @@ } ] }, + "catalog.RegisteredModelAlias": { + "oneOf": [ + { + "type": "object", + "properties": { + "alias_name": { + "description": "Name of the alias, e.g. 
'champion' or 'latest_stable'", + "$ref": "#/$defs/string" + }, + "catalog_name": { + "description": "The name of the catalog containing the model version", + "$ref": "#/$defs/string" + }, + "id": { + "description": "The unique identifier of the alias", + "$ref": "#/$defs/string" + }, + "model_name": { + "description": "The name of the parent registered model of the model version, relative to parent schema", + "$ref": "#/$defs/string" + }, + "schema_name": { + "description": "The name of the schema containing the model version, relative to parent catalog", + "$ref": "#/$defs/string" + }, + "version_num": { + "description": "Integer version number of the model version to which this alias points.", + "$ref": "#/$defs/int" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "catalog.VolumeType": { "oneOf": [ { "type": "string", - "description": "The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external)", "enum": [ - "EXTERNAL", - "MANAGED" + "MANAGED", + "EXTERNAL" ] }, { @@ -4015,16 +4171,8 @@ "description": "Required. Environment version used by the environment.\nEach version comes with a specific Python version and a set of Python packages.\nThe version is a string, consisting of an integer.", "$ref": "#/$defs/string" }, - "jar_dependencies": { - "description": "Use `java_dependencies` instead.", - "$ref": "#/$defs/slice/string", - "x-databricks-preview": "PRIVATE", - "deprecationMessage": "This field is deprecated", - "doNotSuggest": true, - "deprecated": true - }, "java_dependencies": { - "description": "List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`.", + "description": "List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`.", "$ref": "#/$defs/slice/string", "x-databricks-preview": "PRIVATE", "doNotSuggest": true @@ -4489,6 +4637,28 @@ } ] }, + "database.CustomTag": { + "oneOf": [ + { + "type": "object", + "properties": { + "key": { + "description": "The key of the custom tag.", + "$ref": "#/$defs/string" + }, + "value": { + "description": "The value of the custom tag.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "database.DatabaseInstanceRef": { "oneOf": [ { @@ -6546,7 +6716,7 @@ "$ref": "#/$defs/int" }, "table_names": { - "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "description": "A list of tables to monitor for changes. 
The table name must be in the format `catalog_name.schema_name.table_name`.", "$ref": "#/$defs/slice/string" }, "wait_after_last_change_seconds": { @@ -6865,9 +7035,7 @@ "deprecated": true }, "table_update": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfiguration", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfiguration" } }, "additionalProperties": false @@ -9127,7 +9295,6 @@ "oneOf": [ { "type": "string", - "description": "Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.", "enum": [ "TYPE_UNSPECIFIED", "CLASSIC", @@ -9181,7 +9348,7 @@ "oneOf": [ { "type": "string", - "description": "Configurations whether the warehouse should use spot instances.", + "description": "EndpointSpotInstancePolicy configures whether the endpoint should use spot\ninstances.\n\nThe breakdown of how the EndpointSpotInstancePolicy converts to per cloud\nconfigurations is:\n\n+-------+--------------------------------------+--------------------------------+\n| Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED |\n+-------+--------------------------------------+--------------------------------+\n| AWS | On Demand Driver with Spot Executors | On Demand Driver and\nExecutors | | AZURE | On Demand Driver and Executors | On Demand Driver\nand Executors |\n+-------+--------------------------------------+--------------------------------+\n\nWhile including \"spot\" in the enum name may limit the future\nextensibility of this field because it limits this enum to denoting \"spot or\nnot\", this is the field that PM recommends after discussion with customers\nper SC-48783.", "enum": [ "POLICY_UNSPECIFIED", "COST_OPTIMIZED", @@ -9935,6 +10102,20 @@ } ] }, + "catalog.RegisteredModelAlias": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "compute.InitScriptInfo": { "oneOf": [ { @@ -9963,6 +10144,20 @@ } ] }, + "database.CustomTag": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.CustomTag" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "database.DatabaseInstanceRef": { "oneOf": [ { diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index d5f27f2b8f..077151954f 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -3,6 +3,7 @@ package account import ( + "github.com/databricks/cli/libs/cmdgroup" "github.com/spf13/cobra" account_access_control "github.com/databricks/cli/cmd/account/access-control" @@ -38,10 +39,6 @@ import ( workspace_assignment "github.com/databricks/cli/cmd/account/workspace-assignment" workspace_network_configuration "github.com/databricks/cli/cmd/account/workspace-network-configuration" workspaces "github.com/databricks/cli/cmd/account/workspaces" - - account_groups "github.com/databricks/cli/cmd/account/groups" - account_service_principals "github.com/databricks/cli/cmd/account/service-principals" - account_users "github.com/databricks/cli/cmd/account/users" ) func New() *cobra.Command { @@ -84,14 +81,12 @@ 
cmd.AddCommand(account_iam_v2.New()) cmd.AddCommand(budgets.New()) - cmd.AddCommand(account_groups.New()) - cmd.AddCommand(account_service_principals.New()) - cmd.AddCommand(account_users.New()) - - // Register all groups with the parent command. - groups := Groups() - for i := range groups { - cmd.AddGroup(&groups[i]) + // Register command groups, filtering out empty groups or groups with only hidden commands. + allGroups := Groups() + allCommands := cmd.Commands() + filteredGroups := cmdgroup.FilterGroups(allGroups, allCommands) + for i := range filteredGroups { + cmd.AddGroup(&filteredGroups[i]) } return cmd diff --git a/cmd/account/credentials/credentials.go b/cmd/account/credentials/credentials.go index a0873e3a0f..65d5e6bfef 100755 --- a/cmd/account/credentials/credentials.go +++ b/cmd/account/credentials/credentials.go @@ -151,35 +151,23 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No CREDENTIALS_ID argument specified. Loading names for Credentials drop-down." - names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Credentials drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks Account API credential configuration ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks account api credential configuration id") - } deleteReq.CredentialsId = args[0] - err = a.Credentials.Delete(ctx, deleteReq) + response, err := a.Credentials.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -216,32 +204,20 @@ func newGet() *cobra.Command { specified by ID. Arguments: - CREDENTIALS_ID: Databricks Account API credential configuration ID` + CREDENTIALS_ID: Credential configuration ID` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No CREDENTIALS_ID argument specified. Loading names for Credentials drop-down." - names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Credentials drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks Account API credential configuration ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks account api credential configuration id") - } getReq.CredentialsId = args[0] response, err := a.Credentials.Get(ctx, getReq) @@ -275,11 +251,11 @@ func newList() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "list" - cmd.Short = `Get all credential configurations.` - cmd.Long = `Get all credential configurations. + cmd.Short = `List credential configuration.` + cmd.Long = `List credential configuration. - Gets all Databricks credential configurations associated with an account - specified by ID.` + List Databricks credential configuration objects for an account, specified by + ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/encryption-keys/encryption-keys.go b/cmd/account/encryption-keys/encryption-keys.go index 23a733903d..5645dec927 100755 --- a/cmd/account/encryption-keys/encryption-keys.go +++ b/cmd/account/encryption-keys/encryption-keys.go @@ -179,11 +179,11 @@ func newDelete() *cobra.Command { deleteReq.CustomerManagedKeyId = args[0] - err = a.EncryptionKeys.Delete(ctx, deleteReq) + response, err := a.EncryptionKeys.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -280,22 +280,10 @@ func newList() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "list" - cmd.Short = `Get all encryption key configurations.` - cmd.Long = `Get all encryption key configurations. - - Gets all customer-managed key configuration objects for an account. If the key - is specified as a workspace's managed services customer-managed key, - Databricks uses the key to encrypt the workspace's notebooks and secrets in - the control plane, in addition to Databricks SQL queries and query history. If - the key is specified as a workspace's storage customer-managed key, the key is - used to encrypt the workspace's root S3 bucket and optionally can encrypt - cluster EBS volumes data in the data plane. - - **Important**: Customer-managed keys are supported only for some deployment - types, subscription types, and AWS regions. + cmd.Short = `List encryption key configuration.` + cmd.Long = `List encryption key configuration. - This operation is available only if your account is on the E2 version of the - platform.` + Lists Databricks customer-managed key configurations for an account.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/metastore-assignments/metastore-assignments.go b/cmd/account/metastore-assignments/metastore-assignments.go index 9c45a876e8..86fcc37120 100755 --- a/cmd/account/metastore-assignments/metastore-assignments.go +++ b/cmd/account/metastore-assignments/metastore-assignments.go @@ -103,11 +103,11 @@ func newCreate() *cobra.Command { } createReq.MetastoreId = args[1] - err = a.MetastoreAssignments.Create(ctx, createReq) + response, err := a.MetastoreAssignments.Create(ctx, createReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. 
@@ -165,11 +165,11 @@ func newDelete() *cobra.Command { } deleteReq.MetastoreId = args[1] - err = a.MetastoreAssignments.Delete(ctx, deleteReq) + response, err := a.MetastoreAssignments.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -203,7 +203,7 @@ func newGet() *cobra.Command { cmd.Long = `Gets the metastore assignment for a workspace. Gets the metastore assignment, if any, for the workspace specified by ID. If - the workspace is assigned a metastore, the mappig will be returned. If no + the workspace is assigned a metastore, the mapping will be returned. If no metastore is assigned to the workspace, the assignment will not be found and a 404 returned. @@ -320,8 +320,8 @@ func newUpdate() *cobra.Command { // TODO: complex arg: metastore_assignment cmd.Use = "update WORKSPACE_ID METASTORE_ID" - cmd.Short = `Updates a metastore assignment to a workspaces.` - cmd.Long = `Updates a metastore assignment to a workspaces. + cmd.Short = `Updates a metastore assignment to a workspace.` + cmd.Long = `Updates a metastore assignment to a workspace. Updates an assignment to a metastore for a workspace. Currently, only the default catalog may be updated. @@ -360,11 +360,11 @@ func newUpdate() *cobra.Command { } updateReq.MetastoreId = args[1] - err = a.MetastoreAssignments.Update(ctx, updateReq) + response, err := a.MetastoreAssignments.Update(ctx, updateReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/metastores/metastores.go b/cmd/account/metastores/metastores.go index 6fa3ee645f..fe3aa6a6a1 100755 --- a/cmd/account/metastores/metastores.go +++ b/cmd/account/metastores/metastores.go @@ -63,8 +63,8 @@ func newCreate() *cobra.Command { // TODO: complex arg: metastore_info cmd.Use = "create" - cmd.Short = `Create metastore.` - cmd.Long = `Create metastore. + cmd.Short = `Create Metastore.` + cmd.Long = `Create Metastore. Creates a Unity Catalog metastore.` @@ -151,11 +151,11 @@ func newDelete() *cobra.Command { deleteReq.MetastoreId = args[0] - err = a.Metastores.Delete(ctx, deleteReq) + response, err := a.Metastores.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/account/networks/networks.go b/cmd/account/networks/networks.go index 55b46e89a5..fc4c4bf586 100755 --- a/cmd/account/networks/networks.go +++ b/cmd/account/networks/networks.go @@ -3,8 +3,6 @@ package networks import ( - "fmt" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" @@ -62,33 +60,24 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: complex arg: gcp_network_info + cmd.Flags().StringVar(&createReq.NetworkName, "network-name", createReq.NetworkName, `The human-readable name of the network configuration.`) // TODO: array: security_group_ids // TODO: array: subnet_ids // TODO: complex arg: vpc_endpoints - cmd.Flags().StringVar(&createReq.VpcId, "vpc-id", createReq.VpcId, `The ID of the VPC associated with this network.`) + cmd.Flags().StringVar(&createReq.VpcId, "vpc-id", createReq.VpcId, `The ID of the VPC associated with this network configuration.`) - cmd.Use = "create NETWORK_NAME" + cmd.Use = "create" cmd.Short = `Create network configuration.` cmd.Long = `Create network configuration. Creates a Databricks network configuration that represents a VPC and its resources. The VPC will be used for new Databricks clusters. This requires a - pre-existing VPC and subnets. - - Arguments: - NETWORK_NAME: The human-readable name of the network configuration.` + pre-existing VPC and subnets.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'network_name' in your JSON input") - } - return nil - } - check := root.ExactArgs(1) + check := root.ExactArgs(0) return check(cmd, args) } @@ -109,9 +98,6 @@ func newCreate() *cobra.Command { } } } - if !cmd.Flags().Changed("json") { - createReq.NetworkName = args[0] - } response, err := a.Networks.Create(ctx, createReq) if err != nil { @@ -162,35 +148,23 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No NETWORK_ID argument specified. Loading names for Networks drop-down." - names, err := a.Networks.NetworkNetworkNameToNetworkIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Networks drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks Account API network configuration ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks account api network configuration id") - } deleteReq.NetworkId = args[0] - err = a.Networks.Delete(ctx, deleteReq) + response, err := a.Networks.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response)
@@ -231,28 +205,16 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No NETWORK_ID argument specified. Loading names for Networks drop-down." - names, err := a.Networks.NetworkNetworkNameToNetworkIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Networks drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks Account API network configuration ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks account api network configuration id") - } getReq.NetworkId = args[0] response, err := a.Networks.Get(ctx, getReq) @@ -286,14 +248,10 @@ func newList() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "list" - cmd.Short = `Get all network configurations.` - cmd.Long = `Get all network configurations. - - Gets a list of all Databricks network configurations for an account, specified - by ID. + cmd.Short = `List network configurations.` + cmd.Long = `List network configurations. - This operation is available only if your account is on the E2 version of the - platform.` + Lists Databricks network configurations for an account.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index 486bf96ece..c054739604 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -3,8 +3,6 @@ package private_access import ( - "fmt" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" @@ -62,45 +60,24 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: allowed_vpc_endpoint_ids - cmd.Flags().Var(&createReq.PrivateAccessLevel, "private-access-level", `Supported values: [ACCOUNT, ENDPOINT]`) + cmd.Flags().Var(&createReq.PrivateAccessLevel, "private-access-level", `The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. Supported values: [ACCOUNT, ENDPOINT]`) + cmd.Flags().StringVar(&createReq.PrivateAccessSettingsName, "private-access-settings-name", createReq.PrivateAccessSettingsName, `The human-readable name of the private access settings object.`) cmd.Flags().BoolVar(&createReq.PublicAccessEnabled, "public-access-enabled", createReq.PublicAccessEnabled, `Determines if the workspace can be accessed over public internet.`) + cmd.Flags().StringVar(&createReq.Region, "region", createReq.Region, `The AWS region for workspaces attached to this private access settings object.`) - cmd.Use = "create PRIVATE_ACCESS_SETTINGS_NAME REGION" + cmd.Use = "create" cmd.Short = `Create private access settings.` cmd.Long = `Create private access settings. - Creates a private access settings object, which specifies how your workspace - is accessed over [AWS PrivateLink]. 
To use AWS PrivateLink, a workspace must - have a private access settings object referenced by ID in the workspace's - private_access_settings_id property. - - You can share one private access settings with multiple workspaces in a single - account. However, private access settings are specific to AWS regions, so only - workspaces in the same AWS region can use a given private access settings - object. - - Before configuring PrivateLink, read the [Databricks article about - PrivateLink]. - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - - Arguments: - PRIVATE_ACCESS_SETTINGS_NAME: The human-readable name of the private access settings object. - REGION: The cloud region for workspaces associated with this private access - settings object.` + Creates a private access settings configuration, which represents network + access restrictions for workspace resources. Private access settings configure + whether workspaces can be accessed from the public internet or only from + private endpoints.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'private_access_settings_name', 'region' in your JSON input") - } - return nil - } - check := root.ExactArgs(2) + check := root.ExactArgs(0) return check(cmd, args) } @@ -121,12 +98,6 @@ func newCreate() *cobra.Command { } } } - if !cmd.Flags().Changed("json") { - createReq.PrivateAccessSettingsName = args[0] - } - if !cmd.Flags().Changed("json") { - createReq.Region = args[1] - } response, err := a.PrivateAccess.Create(ctx, createReq) if err != nil { @@ -162,52 +133,31 @@ func newDelete() *cobra.Command { var deleteReq provisioning.DeletePrivateAccesRequest cmd.Use = "delete PRIVATE_ACCESS_SETTINGS_ID" - cmd.Short = `Delete a private access settings object.` - cmd.Long = `Delete a private access settings object. - - Deletes a private access settings object, which determines how your workspace - is accessed over [AWS PrivateLink]. + cmd.Short = `Delete private access settings.` + cmd.Long = `Delete private access settings. - Before configuring PrivateLink, read the [Databricks article about - PrivateLink].", - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - - Arguments: - PRIVATE_ACCESS_SETTINGS_ID: Databricks Account API private access settings ID.` + Deletes a Databricks private access settings configuration, both specified by + ID.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No PRIVATE_ACCESS_SETTINGS_ID argument specified. Loading names for Private Access drop-down." - names, err := a.PrivateAccess.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Private Access drop-down. 
Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks Account API private access settings ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks account api private access settings id") - } deleteReq.PrivateAccessSettingsId = args[0] - err = a.PrivateAccess.Delete(ctx, deleteReq) + response, err := a.PrivateAccess.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -237,45 +187,23 @@ func newGet() *cobra.Command { var getReq provisioning.GetPrivateAccesRequest cmd.Use = "get PRIVATE_ACCESS_SETTINGS_ID" - cmd.Short = `Get a private access settings object.` - cmd.Long = `Get a private access settings object. + cmd.Short = `Get private access settings.` + cmd.Long = `Get private access settings. - Gets a private access settings object, which specifies how your workspace is - accessed over [AWS PrivateLink]. - - Before configuring PrivateLink, read the [Databricks article about - PrivateLink].", - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - - Arguments: - PRIVATE_ACCESS_SETTINGS_ID: Databricks Account API private access settings ID.` + Gets a Databricks private access settings configuration, both specified by ID.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No PRIVATE_ACCESS_SETTINGS_ID argument specified. Loading names for Private Access drop-down." - names, err := a.PrivateAccess.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Private Access drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks Account API private access settings ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks account api private access settings id") - } getReq.PrivateAccessSettingsId = args[0] response, err := a.PrivateAccess.Get(ctx, getReq) @@ -309,11 +237,10 @@ func newList() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "list" - cmd.Short = `Get all private access settings objects.` - cmd.Long = `Get all private access settings objects. + cmd.Short = `List private access settings.` + cmd.Long = `List private access settings. 
- Gets a list of all private access settings objects for an account, specified - by ID.` + Lists Databricks private access settings for an account.` cmd.Annotations = make(map[string]string) @@ -353,57 +280,42 @@ func newReplace() *cobra.Command { cmd := &cobra.Command{} var replaceReq provisioning.ReplacePrivateAccessSettingsRequest + replaceReq.CustomerFacingPrivateAccessSettings = provisioning.PrivateAccessSettings{} var replaceJson flags.JsonFlag cmd.Flags().Var(&replaceJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: allowed_vpc_endpoint_ids - cmd.Flags().Var(&replaceReq.PrivateAccessLevel, "private-access-level", `Supported values: [ACCOUNT, ENDPOINT]`) - cmd.Flags().BoolVar(&replaceReq.PublicAccessEnabled, "public-access-enabled", replaceReq.PublicAccessEnabled, `Determines if the workspace can be accessed over public internet.`) - - cmd.Use = "replace PRIVATE_ACCESS_SETTINGS_ID PRIVATE_ACCESS_SETTINGS_NAME REGION" - cmd.Short = `Replace private access settings.` - cmd.Long = `Replace private access settings. + cmd.Flags().Var(&replaceReq.CustomerFacingPrivateAccessSettings.PrivateAccessLevel, "private-access-level", `The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. Supported values: [ACCOUNT, ENDPOINT]`) + cmd.Flags().StringVar(&replaceReq.CustomerFacingPrivateAccessSettings.PrivateAccessSettingsName, "private-access-settings-name", replaceReq.CustomerFacingPrivateAccessSettings.PrivateAccessSettingsName, `The human-readable name of the private access settings object.`) + cmd.Flags().BoolVar(&replaceReq.CustomerFacingPrivateAccessSettings.PublicAccessEnabled, "public-access-enabled", replaceReq.CustomerFacingPrivateAccessSettings.PublicAccessEnabled, `Determines if the workspace can be accessed over public internet.`) + cmd.Flags().StringVar(&replaceReq.CustomerFacingPrivateAccessSettings.Region, "region", replaceReq.CustomerFacingPrivateAccessSettings.Region, `The AWS region for workspaces attached to this private access settings object.`) + + cmd.Use = "replace PRIVATE_ACCESS_SETTINGS_ID" + cmd.Short = `Update private access settings.` + cmd.Long = `Update private access settings. Updates an existing private access settings object, which specifies how your - workspace is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a + workspace is accessed over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access settings object referenced by ID in the - workspace's private_access_settings_id property. - - This operation completely overwrites your existing private access settings - object attached to your workspaces. All workspaces attached to the private - access settings are affected by any change. If public_access_enabled, - private_access_level, or allowed_vpc_endpoint_ids are updated, effects of - these changes might take several minutes to propagate to the workspace API. - - You can share one private access settings object with multiple workspaces in a - single account. However, private access settings are specific to AWS regions, - so only workspaces in the same AWS region can use a given private access - settings object. - - Before configuring PrivateLink, read the [Databricks article about - PrivateLink]. 
- - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + workspace's private_access_settings_id property. This operation completely + overwrites your existing private access settings object attached to your + workspaces. All workspaces attached to the private access settings are + affected by any change. If public_access_enabled, private_access_level, or + allowed_vpc_endpoint_ids are updated, effects of these changes might take + several minutes to propagate to the workspace API. You can share one private + access settings object with multiple workspaces in a single account. However, + private access settings are specific to AWS regions, so only workspaces in the + same AWS region can use a given private access settings object. Before + configuring PrivateLink, read the Databricks article about PrivateLink. Arguments: - PRIVATE_ACCESS_SETTINGS_ID: Databricks Account API private access settings ID. - PRIVATE_ACCESS_SETTINGS_NAME: The human-readable name of the private access settings object. - REGION: The cloud region for workspaces associated with this private access - settings object.` + PRIVATE_ACCESS_SETTINGS_ID: Databricks private access settings ID.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(1)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, provide only PRIVATE_ACCESS_SETTINGS_ID as positional arguments. Provide 'private_access_settings_name', 'region' in your JSON input") - } - return nil - } - check := root.ExactArgs(3) + check := root.ExactArgs(1) return check(cmd, args) } @@ -413,7 +325,7 @@ func newReplace() *cobra.Command { a := cmdctx.AccountClient(ctx) if cmd.Flags().Changed("json") { - diags := replaceJson.Unmarshal(&replaceReq) + diags := replaceJson.Unmarshal(&replaceReq.CustomerFacingPrivateAccessSettings) if diags.HasError() { return diags.Error() } @@ -425,18 +337,12 @@ func newReplace() *cobra.Command { } } replaceReq.PrivateAccessSettingsId = args[0] - if !cmd.Flags().Changed("json") { - replaceReq.PrivateAccessSettingsName = args[1] - } - if !cmd.Flags().Changed("json") { - replaceReq.Region = args[2] - } - err = a.PrivateAccess.Replace(ctx, replaceReq) + response, err := a.PrivateAccess.Replace(ctx, replaceReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/storage-credentials/storage-credentials.go b/cmd/account/storage-credentials/storage-credentials.go index c1b5d01ef3..c2229d378d 100755 --- a/cmd/account/storage-credentials/storage-credentials.go +++ b/cmd/account/storage-credentials/storage-credentials.go @@ -60,18 +60,18 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: complex arg: credential_info + cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Optional, default false.`) cmd.Use = "create METASTORE_ID" cmd.Short = `Create a storage credential.` cmd.Long = `Create a storage credential. Creates a new storage credential. 
The request object is specific to the cloud: + - **AwsIamRole** for AWS credentials - **AzureServicePrincipal** for Azure + credentials - **GcpServiceAccountKey** for GCP credentials - * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure - credentials * **GcpServiceAcountKey** for GCP credentials. - - The caller must be a metastore admin and have the - **CREATE_STORAGE_CREDENTIAL** privilege on the metastore. + The caller must be a metastore admin and have the CREATE_STORAGE_CREDENTIAL + privilege on the metastore. Arguments: METASTORE_ID: Unity Catalog metastore ID` @@ -163,11 +163,11 @@ func newDelete() *cobra.Command { deleteReq.MetastoreId = args[0] deleteReq.StorageCredentialName = args[1] - err = a.StorageCredentials.Delete(ctx, deleteReq) + response, err := a.StorageCredentials.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -206,7 +206,7 @@ func newGet() *cobra.Command { Arguments: METASTORE_ID: Unity Catalog metastore ID - STORAGE_CREDENTIAL_NAME: Name of the storage credential.` + STORAGE_CREDENTIAL_NAME: Required. Name of the storage credential.` cmd.Annotations = make(map[string]string) @@ -314,13 +314,14 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: complex arg: credential_info + cmd.Flags().BoolVar(&updateReq.SkipValidation, "skip-validation", updateReq.SkipValidation, `Optional.`) cmd.Use = "update METASTORE_ID STORAGE_CREDENTIAL_NAME" cmd.Short = `Updates a storage credential.` cmd.Long = `Updates a storage credential. Updates a storage credential on the metastore. The caller must be the owner of - the storage credential. If the caller is a metastore admin, only the __owner__ + the storage credential. If the caller is a metastore admin, only the **owner** credential can be changed. Arguments: diff --git a/cmd/account/storage/storage.go b/cmd/account/storage/storage.go index eaca50e904..96f4cf9e6b 100755 --- a/cmd/account/storage/storage.go +++ b/cmd/account/storage/storage.go @@ -65,20 +65,13 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + cmd.Flags().StringVar(&createReq.RoleArn, "role-arn", createReq.RoleArn, `Optional IAM role that is used to access the workspace catalog which is created during workspace creation for UC by Default.`) + cmd.Use = "create" - cmd.Short = `Create new storage configuration.` - cmd.Long = `Create new storage configuration. - - Creates new storage configuration for an account, specified by ID. Uploads a - storage configuration object that represents the root AWS S3 bucket in your - account. Databricks stores related workspace assets including DBFS, cluster - logs, and job results. For the AWS S3 bucket, you need to configure the - required bucket policy. - - For information about how to create a new workspace with this API, see [Create - a new workspace using the Account API] + cmd.Short = `Create a storage configuration.` + cmd.Long = `Create a storage configuration. 
- [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html` + Creates a Databricks storage configuration for an account.` cmd.Annotations = make(map[string]string) @@ -136,46 +129,31 @@ func newDelete() *cobra.Command { var deleteReq provisioning.DeleteStorageRequest cmd.Use = "delete STORAGE_CONFIGURATION_ID" - cmd.Short = `Delete storage configuration.` - cmd.Long = `Delete storage configuration. + cmd.Short = `Delete a storage configuration.` + cmd.Long = `Delete a storage configuration. Deletes a Databricks storage configuration. You cannot delete a storage - configuration that is associated with any workspace. - - Arguments: - STORAGE_CONFIGURATION_ID: Databricks Account API storage configuration ID.` + configuration that is associated with any workspace.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No STORAGE_CONFIGURATION_ID argument specified. Loading names for Storage drop-down." - names, err := a.Storage.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Storage drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks Account API storage configuration ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks account api storage configuration id") - } deleteReq.StorageConfigurationId = args[0] - err = a.Storage.Delete(ctx, deleteReq) + response, err := a.Storage.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -205,38 +183,23 @@ func newGet() *cobra.Command { var getReq provisioning.GetStorageRequest cmd.Use = "get STORAGE_CONFIGURATION_ID" - cmd.Short = `Get storage configuration.` - cmd.Long = `Get storage configuration. + cmd.Short = `Get a storage configuration.` + cmd.Long = `Get a storage configuration. - Gets a Databricks storage configuration for an account, both specified by ID. - - Arguments: - STORAGE_CONFIGURATION_ID: Databricks Account API storage configuration ID.` + Gets a Databricks storage configuration for an account, both specified by ID.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No STORAGE_CONFIGURATION_ID argument specified. Loading names for Storage drop-down." - names, err := a.Storage.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Storage drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks Account API storage configuration ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks account api storage configuration id") - } getReq.StorageConfigurationId = args[0] response, err := a.Storage.Get(ctx, getReq) @@ -270,11 +233,10 @@ func newList() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "list" - cmd.Short = `Get all storage configurations.` - cmd.Long = `Get all storage configurations. + cmd.Short = `List storage configurations.` + cmd.Long = `List storage configurations. - Gets a list of all Databricks storage configurations for your account, - specified by ID. + Lists Databricks storage configurations for an account, specified by ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/vpc-endpoints/vpc-endpoints.go b/cmd/account/vpc-endpoints/vpc-endpoints.go index fdd1e506f1..69a771f588 100755 --- a/cmd/account/vpc-endpoints/vpc-endpoints.go +++ b/cmd/account/vpc-endpoints/vpc-endpoints.go @@ -3,8 +3,6 @@ package vpc_endpoints import ( - "fmt" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" @@ -62,11 +60,12 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.AwsVpcEndpointId, "aws-vpc-endpoint-id", createReq.AwsVpcEndpointId, `The ID of the VPC endpoint object in AWS.`) // TODO: complex arg: gcp_vpc_endpoint_info - cmd.Flags().StringVar(&createReq.Region, "region", createReq.Region, `The AWS region in which this VPC endpoint object exists.`) + cmd.Flags().StringVar(&createReq.Region, "region", createReq.Region, `The region in which this VPC endpoint object exists.`) + cmd.Flags().StringVar(&createReq.VpcEndpointName, "vpc-endpoint-name", createReq.VpcEndpointName, `The human-readable name of the VPC endpoint configuration.`) - cmd.Use = "create VPC_ENDPOINT_NAME" - cmd.Short = `Create VPC endpoint configuration.` - cmd.Long = `Create VPC endpoint configuration. + cmd.Use = "create" + cmd.Short = `Create a VPC endpoint configuration.` + cmd.Long = `Create a VPC endpoint configuration. Creates a VPC endpoint configuration, which represents a [VPC endpoint] object in AWS used to communicate privately with Databricks over [AWS PrivateLink]. @@ -80,22 +79,12 @@ func newCreate() *cobra.Command { [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html - [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html - - Arguments: - VPC_ENDPOINT_NAME: The human-readable name of the storage configuration.` + [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required.
Provide 'vpc_endpoint_name' in your JSON input") - } - return nil - } - check := root.ExactArgs(1) + check := root.ExactArgs(0) return check(cmd, args) } @@ -116,9 +105,6 @@ func newCreate() *cobra.Command { } } } - if !cmd.Flags().Changed("json") { - createReq.VpcEndpointName = args[0] - } response, err := a.VpcEndpoints.Create(ctx, createReq) if err != nil { @@ -154,53 +140,31 @@ func newDelete() *cobra.Command { var deleteReq provisioning.DeleteVpcEndpointRequest cmd.Use = "delete VPC_ENDPOINT_ID" - cmd.Short = `Delete VPC endpoint configuration.` - cmd.Long = `Delete VPC endpoint configuration. - - Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] - that can communicate privately with Databricks over [AWS PrivateLink]. - - Before configuring PrivateLink, read the [Databricks article about - PrivateLink]. + cmd.Short = `Delete a VPC endpoint configuration.` + cmd.Long = `Delete a VPC endpoint configuration. - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - - Arguments: - VPC_ENDPOINT_ID: Databricks VPC endpoint ID.` + Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC + endpoint configuration that is associated with any workspace.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No VPC_ENDPOINT_ID argument specified. Loading names for Vpc Endpoints drop-down." - names, err := a.VpcEndpoints.VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Vpc Endpoints drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks VPC endpoint ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks vpc endpoint id") - } deleteReq.VpcEndpointId = args[0] - err = a.VpcEndpoints.Delete(ctx, deleteReq) + response, err := a.VpcEndpoints.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -244,28 +208,16 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No VPC_ENDPOINT_ID argument specified. Loading names for Vpc Endpoints drop-down." - names, err := a.VpcEndpoints.VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Vpc Endpoints drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Databricks VPC endpoint ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have databricks vpc endpoint id") - } getReq.VpcEndpointId = args[0] response, err := a.VpcEndpoints.Get(ctx, getReq) @@ -299,15 +251,10 @@ func newList() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "list" - cmd.Short = `Get all VPC endpoint configurations.` - cmd.Long = `Get all VPC endpoint configurations. - - Gets a list of all VPC endpoints for an account, specified by ID. - - Before configuring PrivateLink, read the [Databricks article about - PrivateLink]. + cmd.Short = `List VPC endpoint configurations.` + cmd.Long = `List VPC endpoint configurations. - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html` + Lists Databricks VPC endpoint configurations for an account.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 97641f3af4..5fc50b7385 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -75,18 +75,18 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.AwsRegion, "aws-region", createReq.AwsRegion, `The AWS region of the workspace's data plane.`) - cmd.Flags().StringVar(&createReq.Cloud, "cloud", createReq.Cloud, `The cloud provider which the workspace uses.`) + cmd.Flags().StringVar(&createReq.AwsRegion, "aws-region", createReq.AwsRegion, ``) + cmd.Flags().StringVar(&createReq.Cloud, "cloud", createReq.Cloud, `The cloud name.`) // TODO: complex arg: cloud_resource_container + cmd.Flags().Var(&createReq.ComputeMode, "compute-mode", `If the compute mode is SERVERLESS, a serverless workspace is created that comes pre-configured with serverless compute and default storage, providing a fully-managed, enterprise-ready SaaS experience. 
Supported values: [HYBRID, SERVERLESS]`) cmd.Flags().StringVar(&createReq.CredentialsId, "credentials-id", createReq.CredentialsId, `ID of the workspace's credential configuration object.`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().StringVar(&createReq.DeploymentName, "deployment-name", createReq.DeploymentName, `The deployment name defines part of the subdomain for the workspace.`) // TODO: complex arg: gcp_managed_network_config // TODO: complex arg: gke_config - cmd.Flags().BoolVar(&createReq.IsNoPublicIpEnabled, "is-no-public-ip-enabled", createReq.IsNoPublicIpEnabled, `Whether no public IP is enabled for the workspace.`) - cmd.Flags().StringVar(&createReq.Location, "location", createReq.Location, `The Google Cloud region of the workspace data plane in your Google account.`) + cmd.Flags().StringVar(&createReq.Location, "location", createReq.Location, `The Google Cloud region of the workspace data plane in your Google account (for example, us-east4).`) cmd.Flags().StringVar(&createReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", createReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) - cmd.Flags().StringVar(&createReq.NetworkId, "network-id", createReq.NetworkId, ``) + cmd.Flags().StringVar(&createReq.NetworkId, "network-id", createReq.NetworkId, `The ID of the workspace's network configuration object.`) cmd.Flags().Var(&createReq.PricingTier, "pricing-tier", `Supported values: [ COMMUNITY_EDITION, DEDICATED, @@ -96,37 +96,55 @@ func newCreate() *cobra.Command { UNKNOWN, ]`) cmd.Flags().StringVar(&createReq.PrivateAccessSettingsId, "private-access-settings-id", createReq.PrivateAccessSettingsId, `ID of the workspace's private access settings object.`) - cmd.Flags().StringVar(&createReq.StorageConfigurationId, "storage-configuration-id", createReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`) + cmd.Flags().StringVar(&createReq.StorageConfigurationId, "storage-configuration-id", createReq.StorageConfigurationId, `ID of the workspace's storage configuration object.`) cmd.Flags().StringVar(&createReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", createReq.StorageCustomerManagedKeyId, `The ID of the workspace's storage encryption key configuration object.`) + cmd.Flags().StringVar(&createReq.WorkspaceName, "workspace-name", createReq.WorkspaceName, `The human-readable name of the workspace.`) - cmd.Use = "create WORKSPACE_NAME" - cmd.Short = `Create a new workspace.` - cmd.Long = `Create a new workspace. + cmd.Use = "create" + cmd.Short = `Create a workspace.` + cmd.Long = `Create a workspace. - Creates a new workspace. + Creates a new workspace using a credential configuration and a storage + configuration, an optional network configuration (if using a customer-managed + VPC), an optional managed services key configuration (if using + customer-managed keys for managed services), and an optional storage key + configuration (if using customer-managed keys for storage). The key + configurations used for managed services and storage encryption can be the + same or different. - **Important**: This operation is asynchronous. A response with HTTP status - code 200 means the request has been accepted and is in progress, but does not - mean that the workspace deployed successfully and is running. The initial - workspace status is typically PROVISIONING. 
Use the workspace ID - (workspace_id) field in the response to identify the new workspace and make - repeated GET requests with the workspace ID and check its status. The - workspace becomes available when the status changes to RUNNING. - - Arguments: - WORKSPACE_NAME: The workspace's human-readable name.` + Important: This operation is asynchronous. A response with HTTP status code + 200 means the request has been accepted and is in progress, but does not mean + that the workspace deployed successfully and is running. The initial workspace + status is typically PROVISIONING. Use the workspace ID (workspace_id) field in + the response to identify the new workspace and make repeated GET requests with + the workspace ID and check its status. The workspace becomes available when + the status changes to RUNNING. + + You can share one customer-managed VPC with multiple workspaces in a single + account. It is not required to create a new VPC for each workspace. However, + you cannot reuse subnets or Security Groups between workspaces. If you plan to + share one VPC with multiple workspaces, make sure you size your VPC and + subnets accordingly. Because a Databricks Account API network configuration + encapsulates this information, you cannot reuse a Databricks Account API + network configuration across workspaces. + + For information about how to create a new workspace with this API including + error handling, see [Create a new workspace using the Account API]. + + Important: Customer-managed VPCs, PrivateLink, and customer-managed keys are + supported on a limited set of deployment and subscription types. If you have + questions about availability, contact your Databricks representative. + + This operation is available only if your account is on the E2 version of the + platform or on a select custom plan that allows multiple workspaces per + account. + + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'workspace_name' in your JSON input") - } - return nil - } - check := root.ExactArgs(1) + check := root.ExactArgs(0) return check(cmd, args) } @@ -147,9 +165,6 @@ func newCreate() *cobra.Command { } } } - if !cmd.Flags().Changed("json") { - createReq.WorkspaceName = args[0] - } wait, err := a.Workspaces.Create(ctx, createReq) if err != nil { @@ -200,52 +215,30 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a workspace.` cmd.Long = `Delete a workspace. - Terminates and deletes a Databricks workspace. From an API perspective, - deletion is immediate. However, it might take a few minutes for all workspaces - resources to be deleted, depending on the size and number of workspace - resources. - - This operation is available only if your account is on the E2 version of the - platform or on a select custom plan that allows multiple workspaces per - account. 
- - Arguments: - WORKSPACE_ID: Workspace ID.` + Deletes a Databricks workspace, both specified by ID.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Workspaces drop-down." - names, err := a.Workspaces.WorkspaceWorkspaceNameToWorkspaceIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Workspaces drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Workspace ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have workspace id") - } _, err = fmt.Sscan(args[0], &deleteReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) } - err = a.Workspaces.Delete(ctx, deleteReq) + response, err := a.Workspaces.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -282,44 +275,24 @@ func newGet() *cobra.Command { In the response, the workspace_status field indicates the current status. After initial workspace creation (which is asynchronous), make repeated GET requests with the workspace ID and check its status. The workspace becomes - available when the status changes to RUNNING. - - For information about how to create a new workspace with this API **including - error handling**, see [Create a new workspace using the Account API]. + available when the status changes to RUNNING. For information about how to + create a new workspace with this API **including error handling**, see [Create + a new workspace using the Account API]. - This operation is available only if your account is on the E2 version of the - platform or on a select custom plan that allows multiple workspaces per - account. - - [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html - - Arguments: - WORKSPACE_ID: Workspace ID.` + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Workspaces drop-down." - names, err := a.Workspaces.WorkspaceWorkspaceNameToWorkspaceIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Workspaces drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Workspace ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have workspace id") - } _, err = fmt.Sscan(args[0], &getReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) @@ -356,14 +329,10 @@ func newList() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "list" - cmd.Short = `Get all workspaces.` - cmd.Long = `Get all workspaces. + cmd.Short = `List workspaces.` + cmd.Long = `List workspaces. - Gets a list of all workspaces associated with an account, specified by ID. - - This operation is available only if your account is on the E2 version of the - platform or on a select custom plan that allows multiple workspaces per - account.` + Lists Databricks workspaces for an account.` cmd.Annotations = make(map[string]string) @@ -403,6 +372,7 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq provisioning.UpdateWorkspaceRequest + updateReq.CustomerFacingWorkspace = provisioning.Workspace{} var updateJson flags.JsonFlag var updateSkipWait bool @@ -413,151 +383,57 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateReq.AwsRegion, "aws-region", updateReq.AwsRegion, `The AWS region of the workspace's data plane (for example, us-west-2).`) - cmd.Flags().StringVar(&updateReq.CredentialsId, "credentials-id", updateReq.CredentialsId, `ID of the workspace's credential configuration object.`) + cmd.Flags().StringVar(&updateReq.UpdateMask, "update-mask", updateReq.UpdateMask, `The field mask must be a single string, with multiple fields separated by commas (no spaces).`) + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.AwsRegion, "aws-region", updateReq.CustomerFacingWorkspace.AwsRegion, ``) + // TODO: complex arg: azure_workspace_info + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.Cloud, "cloud", updateReq.CustomerFacingWorkspace.Cloud, `The cloud name.`) + // TODO: complex arg: cloud_resource_container + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.CredentialsId, "credentials-id", updateReq.CustomerFacingWorkspace.CredentialsId, `ID of the workspace's credential configuration object.`) // TODO: map via StringToStringVar: custom_tags - cmd.Flags().StringVar(&updateReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) - cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, ``) - cmd.Flags().StringVar(&updateReq.NetworkId, "network-id", updateReq.NetworkId, `The ID of the workspace's network configuration object.`) - cmd.Flags().StringVar(&updateReq.PrivateAccessSettingsId, "private-access-settings-id", updateReq.PrivateAccessSettingsId, `The ID of the workspace's private access settings configuration object.`) - cmd.Flags().StringVar(&updateReq.StorageConfigurationId, "storage-configuration-id", updateReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`) - cmd.Flags().StringVar(&updateReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", updateReq.StorageCustomerManagedKeyId, `The ID of the key configuration object for workspace storage.`) + 
cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.DeploymentName, "deployment-name", updateReq.CustomerFacingWorkspace.DeploymentName, ``) + cmd.Flags().Var(&updateReq.CustomerFacingWorkspace.ExpectedWorkspaceStatus, "expected-workspace-status", `A client owned field used to indicate the workspace status that the client expects to be in. Supported values: [ + BANNED, + CANCELLING, + FAILED, + NOT_PROVISIONED, + PROVISIONING, + RUNNING, +]`) + // TODO: complex arg: gcp_managed_network_config + // TODO: complex arg: gke_config + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.Location, "location", updateReq.CustomerFacingWorkspace.Location, `The Google Cloud region of the workspace data plane in your Google account (for example, us-east4).`) + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.CustomerFacingWorkspace.ManagedServicesCustomerManagedKeyId, `ID of the key configuration for encrypting managed services.`) + // TODO: complex arg: network + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.CustomerFacingWorkspace.NetworkConnectivityConfigId, `The object ID of network connectivity config.`) + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.NetworkId, "network-id", updateReq.CustomerFacingWorkspace.NetworkId, `If this workspace is BYO VPC, then the network_id will be populated.`) + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.PrivateAccessSettingsId, "private-access-settings-id", updateReq.CustomerFacingWorkspace.PrivateAccessSettingsId, `ID of the workspace's private access settings object.`) + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.StorageConfigurationId, "storage-configuration-id", updateReq.CustomerFacingWorkspace.StorageConfigurationId, `ID of the workspace's storage configuration object.`) + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", updateReq.CustomerFacingWorkspace.StorageCustomerManagedKeyId, `ID of the key configuration for encrypting workspace storage.`) + cmd.Flags().StringVar(&updateReq.CustomerFacingWorkspace.WorkspaceName, "workspace-name", updateReq.CustomerFacingWorkspace.WorkspaceName, `The human-readable name of the workspace.`) cmd.Use = "update WORKSPACE_ID" - cmd.Short = `Update workspace configuration.` - cmd.Long = `Update workspace configuration. - - Updates a workspace configuration for either a running workspace or a failed - workspace. The elements that can be updated varies between these two use - cases. - - ### Update a failed workspace You can update a Databricks workspace - configuration for failed workspace deployment for some fields, but not all - fields. For a failed workspace, this request supports updates to the following - fields only: - Credential configuration ID - Storage configuration ID - - Network configuration ID. Used only to add or change a network configuration - for a customer-managed VPC. For a failed workspace only, you can convert a - workspace with Databricks-managed VPC to use a customer-managed VPC by adding - this ID. You cannot downgrade a workspace with a customer-managed VPC to be a - Databricks-managed VPC. You can update the network configuration for a failed - or running workspace to add PrivateLink support, though you must also add a - private access settings object. 
- Key configuration ID for managed services - (control plane storage, such as notebook source and Databricks SQL queries). - Used only if you use customer-managed keys for managed services. - Key - configuration ID for workspace storage (root S3 bucket and, optionally, EBS - volumes). Used only if you use customer-managed keys for workspace storage. - **Important**: If the workspace was ever in the running state, even if briefly - before becoming a failed workspace, you cannot add a new key configuration ID - for workspace storage. - Private access settings ID to add PrivateLink - support. You can add or update the private access settings ID to upgrade a - workspace to add support for front-end, back-end, or both types of - connectivity. You cannot remove (downgrade) any existing front-end or back-end - PrivateLink support on a workspace. - Custom tags. Given you provide an empty - custom tags, the update would not be applied. - Network connectivity - configuration ID to add serverless stable IP support. You can add or update - the network connectivity configuration ID to ensure the workspace uses the - same set of stable IP CIDR blocks to access your resources. You cannot remove - a network connectivity configuration from the workspace once attached, you can - only switch to another one. - - After calling the PATCH operation to update the workspace configuration, - make repeated GET requests with the workspace ID and check the workspace - status. The workspace is successful if the status changes to RUNNING. - - For information about how to create a new workspace with this API **including - error handling**, see [Create a new workspace using the Account API]. + cmd.Short = `Update a workspace.` + cmd.Long = `Update a workspace. - ### Update a running workspace You can update a Databricks workspace - configuration for running workspaces for some fields, but not all fields. For - a running workspace, this request supports updating the following fields only: - - Credential configuration ID - Network configuration ID. Used only if you - already use a customer-managed VPC. You cannot convert a running workspace - from a Databricks-managed VPC to a customer-managed VPC. You can use a network - configuration update in this API for a failed or running workspace to add - support for PrivateLink, although you also need to add a private access - settings object. - Key configuration ID for managed services (control plane - storage, such as notebook source and Databricks SQL queries). Databricks does - not directly encrypt the data with the customer-managed key (CMK). Databricks - uses both the CMK and the Databricks managed key (DMK) that is unique to your - workspace to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to - encrypt your workspace's managed services persisted data. If the workspace - does not already have a CMK for managed services, adding this ID enables - managed services encryption for new or updated data. Existing managed services - data that existed before adding the key remains not encrypted with the DEK - until it is modified. If the workspace already has customer-managed keys for - managed services, this request rotates (changes) the CMK keys and the DEK is - re-encrypted with the DMK and the new CMK. - Key configuration ID for - workspace storage (root S3 bucket and, optionally, EBS volumes). You can set - this only if the workspace does not already have a customer-managed key - configuration for workspace storage. 
- Private access settings ID to add - PrivateLink support. You can add or update the private access settings ID to - upgrade a workspace to add support for front-end, back-end, or both types of - connectivity. You cannot remove (downgrade) any existing front-end or back-end - PrivateLink support on a workspace. - Custom tags. Given you provide an empty - custom tags, the update would not be applied. - Network connectivity - configuration ID to add serverless stable IP support. You can add or update - the network connectivity configuration ID to ensure the workspace uses the - same set of stable IP CIDR blocks to access your resources. You cannot remove - a network connectivity configuration from the workspace once attached, you can - only switch to another one. - - **Important**: To update a running workspace, your workspace must have no - running compute resources that run in your workspace's VPC in the Classic data - plane. For example, stop all all-purpose clusters, job clusters, pools with - running clusters, and Classic SQL warehouses. If you do not terminate all - cluster instances in the workspace before calling this API, the request will - fail. - - ### Wait until changes take effect. After calling the PATCH operation to - update the workspace configuration, make repeated GET requests with the - workspace ID and check the workspace status and the status of the fields. * - For workspaces with a Databricks-managed VPC, the workspace status becomes - PROVISIONING temporarily (typically under 20 minutes). If the workspace - update is successful, the workspace status changes to RUNNING. Note that you - can also check the workspace status in the [Account Console]. However, you - cannot use or create clusters for another 20 minutes after that status change. - This results in a total of up to 40 minutes in which you cannot create - clusters. If you create or use clusters before this time interval elapses, - clusters do not launch successfully, fail, or could cause other unexpected - behavior. * For workspaces with a customer-managed VPC, the workspace status - stays at status RUNNING and the VPC change happens immediately. A change to - the storage customer-managed key configuration ID might take a few minutes to - update, so continue to check the workspace until you observe that it has been - updated. If the update fails, the workspace might revert silently to its - original configuration. After the workspace has been updated, you cannot use - or create clusters for another 20 minutes. If you create or use clusters - before this time interval elapses, clusters do not launch successfully, fail, - or could cause other unexpected behavior. - - If you update the _storage_ customer-managed key configurations, it takes 20 - minutes for the changes to fully take effect. During the 20 minute wait, it is - important that you stop all REST API calls to the DBFS API. If you are - modifying _only the managed services key configuration_, you can omit the 20 - minute wait. - - **Important**: Customer-managed keys and customer-managed VPCs are supported - by only some deployment types and subscription types. If you have questions - about availability, contact your Databricks representative. - - This operation is available only if your account is on the E2 version of the - platform or on a select custom plan that allows multiple workspaces per - account. 
- - [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html - [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + Updates a workspace. Arguments: - WORKSPACE_ID: Workspace ID.` + WORKSPACE_ID: A unique integer ID for the workspace` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := cmdctx.AccountClient(ctx) if cmd.Flags().Changed("json") { - diags := updateJson.Unmarshal(&updateReq) + diags := updateJson.Unmarshal(&updateReq.CustomerFacingWorkspace) if diags.HasError() { return diags.Error() } @@ -568,23 +444,6 @@ func newUpdate() *cobra.Command { } } } - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Workspaces drop-down." - names, err := a.Workspaces.WorkspaceWorkspaceNameToWorkspaceIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Workspaces drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Workspace ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have workspace id") - } _, err = fmt.Sscan(args[0], &updateReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) @@ -595,7 +454,7 @@ func newUpdate() *cobra.Command { return err } if updateSkipWait { - return nil + return cmdio.Render(ctx, wait.Response) } spinner := cmdio.Spinner(ctx) info, err := wait.OnProgress(func(i *provisioning.Workspace) { diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 6cf510f227..a3b18a0b08 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -34,12 +34,14 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateUpdate()) cmd.AddCommand(newDelete()) cmd.AddCommand(newDeploy()) cmd.AddCommand(newGet()) cmd.AddCommand(newGetDeployment()) cmd.AddCommand(newGetPermissionLevels()) cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetUpdate()) cmd.AddCommand(newList()) cmd.AddCommand(newListDeployments()) cmd.AddCommand(newSetPermissions()) @@ -84,6 +86,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: active_deployment // TODO: complex arg: app_status cmd.Flags().StringVar(&createReq.App.BudgetPolicyId, "budget-policy-id", createReq.App.BudgetPolicyId, ``) + cmd.Flags().Var(&createReq.App.ComputeSize, "compute-size", `Supported values: [LARGE, LIQUID, MEDIUM]`) // TODO: complex arg: compute_status cmd.Flags().StringVar(&createReq.App.Description, "description", createReq.App.Description, `The description of the app.`) // TODO: array: effective_user_api_scopes @@ -174,6 +177,127 @@ func newCreate() *cobra.Command { return cmd } +// start create-update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
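Both the workspaces `update` path above and the apps `create-update` command below follow the SDK's long-running-operation pattern: start the call, stream progress via `OnProgress`, and block in `GetWithTimeout`; with `--no-wait`, the commands now render `wait.Response` directly instead of exiting silently. A minimal sketch of that pattern against the Go SDK, assuming an app named `my-app` and an update mask of `description` purely for illustration:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/apps"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Start the asynchronous update; app name and mask are illustrative.
	wait, err := w.Apps.CreateUpdate(ctx, apps.AsyncUpdateAppRequest{
		AppName:    "my-app",
		UpdateMask: "description",
	})
	if err != nil {
		panic(err)
	}

	// Stream progress messages, then block until a terminal state or
	// timeout, mirroring the spinner loop in the generated command.
	info, err := wait.OnProgress(func(u *apps.AppUpdate) {
		if u.Status != nil {
			fmt.Println("current status:", u.Status.State)
		}
	}).GetWithTimeout(20 * time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Printf("update finished: %+v\n", info)
}
```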
+var createUpdateOverrides []func( + *cobra.Command, + *apps.AsyncUpdateAppRequest, +) + +func newCreateUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var createUpdateReq apps.AsyncUpdateAppRequest + var createUpdateJson flags.JsonFlag + + var createUpdateSkipWait bool + var createUpdateTimeout time.Duration + + cmd.Flags().BoolVar(&createUpdateSkipWait, "no-wait", createUpdateSkipWait, `do not wait to reach SUCCEEDED state`) + cmd.Flags().DurationVar(&createUpdateTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach SUCCEEDED state`) + + cmd.Flags().Var(&createUpdateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: app + + cmd.Use = "create-update APP_NAME UPDATE_MASK" + cmd.Short = `Create an app update.` + cmd.Long = `Create an app update. + + Creates an app update and starts the update process. The update process is + asynchronous and the status of the update can be checked with the GetAppUpdate + method. + + Arguments: + APP_NAME: + UPDATE_MASK: The field mask must be a single string, with multiple fields separated by + commas (no spaces). The field path is relative to the resource object, + using a dot (.) to navigate sub-fields (e.g., author.given_name). + Specification of elements in sequence or map fields is not allowed, as + only the entire collection field can be specified. Field names must + exactly match the resource field names. + + A field mask of * indicates full replacement. It’s recommended to + always explicitly list the fields being updated and avoid using * + wildcards, as it can lead to unintended results if the API changes in the + future.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only APP_NAME as positional arguments. Provide 'update_mask' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createUpdateJson.Unmarshal(&createUpdateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createUpdateReq.AppName = args[0] + if !cmd.Flags().Changed("json") { + createUpdateReq.UpdateMask = args[1] + } + + wait, err := w.Apps.CreateUpdate(ctx, createUpdateReq) + if err != nil { + return err + } + if createUpdateSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *apps.AppUpdate) { + if i.Status == nil { + return + } + status := i.Status.State + statusMessage := fmt.Sprintf("current status: %s", status) + if i.Status != nil { + statusMessage = i.Status.Message + } + spinner <- statusMessage + }).GetWithTimeout(createUpdateTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range createUpdateOverrides { + fn(cmd, &createUpdateReq) + } + + return cmd +} + // start delete command // Slice with functions to override default command behavior. @@ -561,6 +685,62 @@ func newGetPermissions() *cobra.Command { return cmd } +// start get-update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getUpdateOverrides []func( + *cobra.Command, + *apps.GetAppUpdateRequest, +) + +func newGetUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var getUpdateReq apps.GetAppUpdateRequest + + cmd.Use = "get-update APP_NAME" + cmd.Short = `Get an app update.` + cmd.Long = `Get an app update. + + Gets the status of an app update. + + Arguments: + APP_NAME: The name of the app.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getUpdateReq.AppName = args[0] + + response, err := w.Apps.GetUpdate(ctx, getUpdateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getUpdateOverrides { + fn(cmd, &getUpdateReq) + } + + return cmd +} + // start list command // Slice with functions to override default command behavior. @@ -926,6 +1106,7 @@ func newUpdate() *cobra.Command { // TODO: complex arg: active_deployment // TODO: complex arg: app_status cmd.Flags().StringVar(&updateReq.App.BudgetPolicyId, "budget-policy-id", updateReq.App.BudgetPolicyId, ``) + cmd.Flags().Var(&updateReq.App.ComputeSize, "compute-size", `Supported values: [LARGE, LIQUID, MEDIUM]`) // TODO: complex arg: compute_status cmd.Flags().StringVar(&updateReq.App.Description, "description", updateReq.App.Description, `The description of the app.`) // TODO: array: effective_user_api_scopes diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index a7e442f7dc..f6234fd1e0 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -274,6 +274,7 @@ func newList() *cobra.Command { var listReq catalog.ListCatalogsRequest cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include catalogs in the response for which the principal can only access selective metadata for.`) + cmd.Flags().BoolVar(&listReq.IncludeUnbound, "include-unbound", listReq.IncludeUnbound, `Whether to include catalogs not bound to the workspace.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of catalogs to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) @@ -285,7 +286,16 @@ func newList() *cobra.Command { admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the caller (or for which the caller has the **USE_CATALOG** privilege) will be retrieved. There is no guarantee of a specific ordering of the elements in the - array.` + array. 
+ + NOTE: we recommend using max_results=0 to use the paginated version of this + API. Unpaginated calls will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may + contain zero results while still providing a next_page_token. Clients must + continue reading pages until next_page_token is absent, which is the only + indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 8be495af25..14d8901f09 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -30,6 +30,7 @@ import ( current_user "github.com/databricks/cli/cmd/workspace/current-user" dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets" dashboards "github.com/databricks/cli/cmd/workspace/dashboards" + data_quality "github.com/databricks/cli/cmd/workspace/data-quality" data_sources "github.com/databricks/cli/cmd/workspace/data-sources" database "github.com/databricks/cli/cmd/workspace/database" entity_tag_assignments "github.com/databricks/cli/cmd/workspace/entity-tag-assignments" @@ -152,6 +153,7 @@ func All() []*cobra.Command { out = append(out, current_user.New()) out = append(out, dashboard_widgets.New()) out = append(out, dashboards.New()) + out = append(out, data_quality.New()) out = append(out, data_sources.New()) out = append(out, database.New()) out = append(out, entity_tag_assignments.New()) diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index bae12cd8ef..71ec58f7c2 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -283,7 +283,16 @@ func newList() *cobra.Command { cmd.Short = `List connections.` cmd.Long = `List connections. - List all connections.` + List all connections. + + NOTE: we recommend using max_results=0 to use the paginated version of this + API. Unpaginated calls will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may + contain zero results while still providing a next_page_token. Clients must + continue reading pages until next_page_token is absent, which is the only + indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/credentials/credentials.go b/cmd/workspace/credentials/credentials.go index 96b042b9b1..2f7504ed23 100755 --- a/cmd/workspace/credentials/credentials.go +++ b/cmd/workspace/credentials/credentials.go @@ -364,6 +364,7 @@ func newListCredentials() *cobra.Command { var listCredentialsReq catalog.ListCredentialsRequest + cmd.Flags().BoolVar(&listCredentialsReq.IncludeUnbound, "include-unbound", listCredentialsReq.IncludeUnbound, `Whether to include credentials not bound to the workspace.`) cmd.Flags().IntVar(&listCredentialsReq.MaxResults, "max-results", listCredentialsReq.MaxResults, `Maximum number of credentials to return.`) cmd.Flags().StringVar(&listCredentialsReq.PageToken, "page-token", listCredentialsReq.PageToken, `Opaque token to retrieve the next page of results.`) cmd.Flags().Var(&listCredentialsReq.Purpose, "purpose", `Return only credentials for the specified purpose. 
Supported values: [SERVICE, STORAGE]`) diff --git a/cmd/workspace/data-quality/data-quality.go b/cmd/workspace/data-quality/data-quality.go new file mode 100755 index 0000000000..2d25620380 --- /dev/null +++ b/cmd/workspace/data-quality/data-quality.go @@ -0,0 +1,895 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package data_quality + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/dataquality" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "data-quality", + Short: `Manage the data quality of Unity Catalog objects (currently supports schema and table).`, + Long: `Manage the data quality of Unity Catalog objects (currently supports schema + and table).`, + GroupID: "dataquality", + Annotations: map[string]string{ + "package": "dataquality", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newCancelRefresh()) + cmd.AddCommand(newCreateMonitor()) + cmd.AddCommand(newCreateRefresh()) + cmd.AddCommand(newDeleteMonitor()) + cmd.AddCommand(newDeleteRefresh()) + cmd.AddCommand(newGetMonitor()) + cmd.AddCommand(newGetRefresh()) + cmd.AddCommand(newListMonitor()) + cmd.AddCommand(newListRefresh()) + cmd.AddCommand(newUpdateMonitor()) + cmd.AddCommand(newUpdateRefresh()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start cancel-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cancelRefreshOverrides []func( + *cobra.Command, + *dataquality.CancelRefreshRequest, +) + +func newCancelRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var cancelRefreshReq dataquality.CancelRefreshRequest + + cmd.Use = "cancel-refresh OBJECT_TYPE OBJECT_ID REFRESH_ID" + cmd.Short = `Cancel a refresh.` + cmd.Long = `Cancel a refresh. + + Cancels a data quality monitor refresh. Currently only supported for the + table object_type. + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, schema id. + REFRESH_ID: Unique id of the refresh operation.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + cancelRefreshReq.ObjectType = args[0] + cancelRefreshReq.ObjectId = args[1] + _, err = fmt.Sscan(args[2], &cancelRefreshReq.RefreshId) + if err != nil { + return fmt.Errorf("invalid REFRESH_ID: %s", args[2]) + } + + response, err := w.DataQuality.CancelRefresh(ctx, cancelRefreshReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable.
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range cancelRefreshOverrides { + fn(cmd, &cancelRefreshReq) + } + + return cmd +} + +// start create-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createMonitorOverrides []func( + *cobra.Command, + *dataquality.CreateMonitorRequest, +) + +func newCreateMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var createMonitorReq dataquality.CreateMonitorRequest + createMonitorReq.Monitor = dataquality.Monitor{} + var createMonitorJson flags.JsonFlag + + cmd.Flags().Var(&createMonitorJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: anomaly_detection_config + // TODO: complex arg: data_profiling_config + + cmd.Use = "create-monitor OBJECT_TYPE OBJECT_ID" + cmd.Short = `Create a monitor.` + cmd.Long = `Create a monitor. + + Create a data quality monitor on a Unity Catalog object. The caller must + provide either anomaly_detection_config for a schema monitor or + data_profiling_config for a table monitor. + + For the table object_type, the caller must either: 1. be an owner of the + table's parent catalog, have **USE_SCHEMA** on the table's parent schema, and + have **SELECT** access on the table 2. have **USE_CATALOG** on the table's + parent catalog, be an owner of the table's parent schema, and have **SELECT** + access on the table. 3. have the following permissions: - **USE_CATALOG** on + the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be + an owner of the table. + + Workspace assets, such as the dashboard, will be created in the workspace + where this call was made. + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'object_type', 'object_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createMonitorJson.Unmarshal(&createMonitorReq.Monitor) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createMonitorReq.Monitor.ObjectType = args[0] + } + if !cmd.Flags().Changed("json") { + createMonitorReq.Monitor.ObjectId = args[1] + } + + response, err := w.DataQuality.CreateMonitor(ctx, createMonitorReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
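As the RunE body above shows, `create-monitor` writes the positional OBJECT_TYPE and OBJECT_ID into the nested `Monitor` message rather than the request root. A minimal SDK-level sketch of the same call, with a placeholder table UUID and the required profiling config elided to a comment (its concrete Go type lives in the generated `dataquality` models):

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dataquality"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Placeholder UUID; a real call also sets data_profiling_config
	// (table monitors) or anomaly_detection_config (schema monitors),
	// per the create-monitor contract above.
	resp, err := w.DataQuality.CreateMonitor(ctx, dataquality.CreateMonitorRequest{
		Monitor: dataquality.Monitor{
			ObjectType: "table",
			ObjectId:   "00000000-0000-0000-0000-000000000000",
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("created monitor: %+v\n", resp)
}
```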
+ for _, fn := range createMonitorOverrides { + fn(cmd, &createMonitorReq) + } + + return cmd +} + +// start create-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createRefreshOverrides []func( + *cobra.Command, + *dataquality.CreateRefreshRequest, +) + +func newCreateRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var createRefreshReq dataquality.CreateRefreshRequest + createRefreshReq.Refresh = dataquality.Refresh{} + var createRefreshJson flags.JsonFlag + + cmd.Flags().Var(&createRefreshJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create-refresh OBJECT_TYPE OBJECT_ID" + cmd.Short = `Create a refresh.` + cmd.Long = `Create a refresh. + + Creates a refresh. Currently only supported for the table object_type. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, table id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createRefreshJson.Unmarshal(&createRefreshReq.Refresh) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createRefreshReq.ObjectType = args[0] + createRefreshReq.ObjectId = args[1] + + response, err := w.DataQuality.CreateRefresh(ctx, createRefreshReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createRefreshOverrides { + fn(cmd, &createRefreshReq) + } + + return cmd +} + +// start delete-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteMonitorOverrides []func( + *cobra.Command, + *dataquality.DeleteMonitorRequest, +) + +func newDeleteMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var deleteMonitorReq dataquality.DeleteMonitorRequest + + cmd.Use = "delete-monitor OBJECT_TYPE OBJECT_ID" + cmd.Short = `Delete a monitor.` + cmd.Long = `Delete a monitor. + + Delete a data quality monitor on Unity Catalog object. + + For the table object_type, the caller must either: 1. be an owner of the + table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog + and be an owner of the table's parent schema 3. have the following + permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** + on the table's parent schema - be an owner of the table.
+ + Note that the metric tables and dashboard will not be deleted as part of this + call; those assets must be manually cleaned up (if desired). + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteMonitorReq.ObjectType = args[0] + deleteMonitorReq.ObjectId = args[1] + + err = w.DataQuality.DeleteMonitor(ctx, deleteMonitorReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteMonitorOverrides { + fn(cmd, &deleteMonitorReq) + } + + return cmd +} + +// start delete-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteRefreshOverrides []func( + *cobra.Command, + *dataquality.DeleteRefreshRequest, +) + +func newDeleteRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var deleteRefreshReq dataquality.DeleteRefreshRequest + + cmd.Use = "delete-refresh OBJECT_TYPE OBJECT_ID REFRESH_ID" + cmd.Short = `Delete a refresh.` + cmd.Long = `Delete a refresh. + + (Unimplemented) Delete a refresh + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, schema id. + REFRESH_ID: Unique id of the refresh operation.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteRefreshReq.ObjectType = args[0] + deleteRefreshReq.ObjectId = args[1] + _, err = fmt.Sscan(args[2], &deleteRefreshReq.RefreshId) + if err != nil { + return fmt.Errorf("invalid REFRESH_ID: %s", args[2]) + } + + err = w.DataQuality.DeleteRefresh(ctx, deleteRefreshReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteRefreshOverrides { + fn(cmd, &deleteRefreshReq) + } + + return cmd +} + +// start get-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getMonitorOverrides []func( + *cobra.Command, + *dataquality.GetMonitorRequest, +) + +func newGetMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var getMonitorReq dataquality.GetMonitorRequest + + cmd.Use = "get-monitor OBJECT_TYPE OBJECT_ID" + cmd.Short = `Read a monitor.` + cmd.Long = `Read a monitor. + + Read a data quality monitor on Unity Catalog object. 
+ + For the table object_type, the caller must either: 1. be an owner of the + table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog + and be an owner of the table's parent schema. 3. have the following + permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** + on the table's parent schema - **SELECT** privilege on the table. + + The returned information includes configuration values, as well as information + on assets created by the monitor. Some information (e.g., dashboard) may be + filtered out if the caller is in a different workspace than where the monitor + was created. + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getMonitorReq.ObjectType = args[0] + getMonitorReq.ObjectId = args[1] + + response, err := w.DataQuality.GetMonitor(ctx, getMonitorReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getMonitorOverrides { + fn(cmd, &getMonitorReq) + } + + return cmd +} + +// start get-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getRefreshOverrides []func( + *cobra.Command, + *dataquality.GetRefreshRequest, +) + +func newGetRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var getRefreshReq dataquality.GetRefreshRequest + + cmd.Use = "get-refresh OBJECT_TYPE OBJECT_ID REFRESH_ID" + cmd.Short = `Get a refresh.` + cmd.Long = `Get a refresh. + + Get data quality monitor refresh. + + For the table object_type, the caller must either: 1. be an owner of the + table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog + and be an owner of the table's parent schema 3. have the following + permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** + on the table's parent schema - **SELECT** privilege on the table. + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, schema id. 
+ REFRESH_ID: Unique id of the refresh operation.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getRefreshReq.ObjectType = args[0] + getRefreshReq.ObjectId = args[1] + _, err = fmt.Sscan(args[2], &getRefreshReq.RefreshId) + if err != nil { + return fmt.Errorf("invalid REFRESH_ID: %s", args[2]) + } + + response, err := w.DataQuality.GetRefresh(ctx, getRefreshReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getRefreshOverrides { + fn(cmd, &getRefreshReq) + } + + return cmd +} + +// start list-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listMonitorOverrides []func( + *cobra.Command, + *dataquality.ListMonitorRequest, +) + +func newListMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var listMonitorReq dataquality.ListMonitorRequest + + cmd.Flags().IntVar(&listMonitorReq.PageSize, "page-size", listMonitorReq.PageSize, ``) + cmd.Flags().StringVar(&listMonitorReq.PageToken, "page-token", listMonitorReq.PageToken, ``) + + cmd.Use = "list-monitor" + cmd.Short = `List monitors.` + cmd.Long = `List monitors. + + (Unimplemented) List data quality monitors.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response := w.DataQuality.ListMonitor(ctx, listMonitorReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listMonitorOverrides { + fn(cmd, &listMonitorReq) + } + + return cmd +} + +// start list-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listRefreshOverrides []func( + *cobra.Command, + *dataquality.ListRefreshRequest, +) + +func newListRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var listRefreshReq dataquality.ListRefreshRequest + + cmd.Flags().IntVar(&listRefreshReq.PageSize, "page-size", listRefreshReq.PageSize, ``) + cmd.Flags().StringVar(&listRefreshReq.PageToken, "page-token", listRefreshReq.PageToken, ``) + + cmd.Use = "list-refresh OBJECT_TYPE OBJECT_ID" + cmd.Short = `List refreshes.` + cmd.Long = `List refreshes. + + List data quality monitor refreshes. + + For the table object_type, the caller must either: 1. be an owner of the + table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog + and be an owner of the table's parent schema 3. 
have the following + permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** + on the table's parent schema - **SELECT** privilege on the table. + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + listRefreshReq.ObjectType = args[0] + listRefreshReq.ObjectId = args[1] + + response := w.DataQuality.ListRefresh(ctx, listRefreshReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listRefreshOverrides { + fn(cmd, &listRefreshReq) + } + + return cmd +} + +// start update-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateMonitorOverrides []func( + *cobra.Command, + *dataquality.UpdateMonitorRequest, +) + +func newUpdateMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var updateMonitorReq dataquality.UpdateMonitorRequest + updateMonitorReq.Monitor = dataquality.Monitor{} + var updateMonitorJson flags.JsonFlag + + cmd.Flags().Var(&updateMonitorJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: anomaly_detection_config + // TODO: complex arg: data_profiling_config + + cmd.Use = "update-monitor OBJECT_TYPE OBJECT_ID UPDATE_MASK OBJECT_TYPE OBJECT_ID" + cmd.Short = `Update a monitor.` + cmd.Long = `Update a monitor. + + Update a data quality monitor on Unity Catalog object. + + For the table object_type, the caller must either: 1. be an owner of the + table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog + and be an owner of the table's parent schema 3. have the following + permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** + on the table's parent schema - be an owner of the table. + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, schema id. + UPDATE_MASK: The field mask to specify which fields to update as a comma-separated + list. Example value: + data_profiling_config.custom_metrics,data_profiling_config.schedule.quartz_cron_expression + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(3)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only OBJECT_TYPE, OBJECT_ID, UPDATE_MASK as positional arguments.
Provide 'object_type', 'object_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(5) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateMonitorJson.Unmarshal(&updateMonitorReq.Monitor) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateMonitorReq.ObjectType = args[0] + updateMonitorReq.ObjectId = args[1] + updateMonitorReq.UpdateMask = args[2] + if !cmd.Flags().Changed("json") { + updateMonitorReq.Monitor.ObjectType = args[3] + } + if !cmd.Flags().Changed("json") { + updateMonitorReq.Monitor.ObjectId = args[4] + } + + response, err := w.DataQuality.UpdateMonitor(ctx, updateMonitorReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateMonitorOverrides { + fn(cmd, &updateMonitorReq) + } + + return cmd +} + +// start update-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateRefreshOverrides []func( + *cobra.Command, + *dataquality.UpdateRefreshRequest, +) + +func newUpdateRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var updateRefreshReq dataquality.UpdateRefreshRequest + updateRefreshReq.Refresh = dataquality.Refresh{} + var updateRefreshJson flags.JsonFlag + + cmd.Flags().Var(&updateRefreshJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update-refresh OBJECT_TYPE OBJECT_ID REFRESH_ID UPDATE_MASK OBJECT_TYPE OBJECT_ID" + cmd.Short = `Update a refresh.` + cmd.Long = `Update a refresh. + + (Unimplemented) Update a refresh + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, schema id. + REFRESH_ID: Unique id of the refresh operation. + UPDATE_MASK: The field mask to specify which fields to update. + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema or + table. + OBJECT_ID: The UUID of the request object. For example, table id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(4)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only OBJECT_TYPE, OBJECT_ID, REFRESH_ID, UPDATE_MASK as positional arguments.
Provide 'object_type', 'object_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(6) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateRefreshJson.Unmarshal(&updateRefreshReq.Refresh) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateRefreshReq.ObjectType = args[0] + updateRefreshReq.ObjectId = args[1] + _, err = fmt.Sscan(args[2], &updateRefreshReq.RefreshId) + if err != nil { + return fmt.Errorf("invalid REFRESH_ID: %s", args[2]) + } + updateRefreshReq.UpdateMask = args[3] + if !cmd.Flags().Changed("json") { + updateRefreshReq.Refresh.ObjectType = args[4] + } + if !cmd.Flags().Changed("json") { + updateRefreshReq.Refresh.ObjectId = args[5] + } + + response, err := w.DataQuality.UpdateRefresh(ctx, updateRefreshReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateRefreshOverrides { + fn(cmd, &updateRefreshReq) + } + + return cmd +} + +// end service DataQuality diff --git a/cmd/workspace/database/database.go b/cmd/workspace/database/database.go index 6ce0723659..89246ea2d7 100755 --- a/cmd/workspace/database/database.go +++ b/cmd/workspace/database/database.go @@ -179,12 +179,15 @@ func newCreateDatabaseInstance() *cobra.Command { cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", createDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) // TODO: array: child_instance_refs + // TODO: array: custom_tags + // TODO: array: effective_custom_tags cmd.Flags().BoolVar(&createDatabaseInstanceReq.DatabaseInstance.EnablePgNativeLogin, "enable-pg-native-login", createDatabaseInstanceReq.DatabaseInstance.EnablePgNativeLogin, `Whether to enable PG native password login on the instance.`) cmd.Flags().BoolVar(&createDatabaseInstanceReq.DatabaseInstance.EnableReadableSecondaries, "enable-readable-secondaries", createDatabaseInstanceReq.DatabaseInstance.EnableReadableSecondaries, `Whether to enable secondaries to serve read-only traffic.`) cmd.Flags().IntVar(&createDatabaseInstanceReq.DatabaseInstance.NodeCount, "node-count", createDatabaseInstanceReq.DatabaseInstance.NodeCount, `The number of nodes in the instance, composed of 1 primary and 0 or more secondaries.`) // TODO: complex arg: parent_instance_ref cmd.Flags().IntVar(&createDatabaseInstanceReq.DatabaseInstance.RetentionWindowInDays, "retention-window-in-days", createDatabaseInstanceReq.DatabaseInstance.RetentionWindowInDays, `The retention window for the instance.`) cmd.Flags().BoolVar(&createDatabaseInstanceReq.DatabaseInstance.Stopped, "stopped", createDatabaseInstanceReq.DatabaseInstance.Stopped, `Whether to stop the instance.`) + cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.UsagePolicyId, "usage-policy-id", createDatabaseInstanceReq.DatabaseInstance.UsagePolicyId, `The desired usage policy to associate with the instance.`) cmd.Use = "create-database-instance NAME" cmd.Short = `Create a Database Instance.` @@ -278,14 +281,21 @@ func 
newCreateDatabaseInstanceRole() *cobra.Command { cmd.Flags().Var(&createDatabaseInstanceRoleJson, "json", `either inline JSON string or @path/to/file.json with request body`) + cmd.Flags().StringVar(&createDatabaseInstanceRoleReq.DatabaseInstanceName, "database-instance-name", createDatabaseInstanceRoleReq.DatabaseInstanceName, ``) // TODO: complex arg: attributes + // TODO: complex arg: effective_attributes cmd.Flags().Var(&createDatabaseInstanceRoleReq.DatabaseInstanceRole.IdentityType, "identity-type", `The type of the role. Supported values: [GROUP, PG_ONLY, SERVICE_PRINCIPAL, USER]`) + cmd.Flags().StringVar(&createDatabaseInstanceRoleReq.DatabaseInstanceRole.InstanceName, "instance-name", createDatabaseInstanceRoleReq.DatabaseInstanceRole.InstanceName, ``) cmd.Flags().Var(&createDatabaseInstanceRoleReq.DatabaseInstanceRole.MembershipRole, "membership-role", `An enum value for a standard role that this role is a member of. Supported values: [DATABRICKS_SUPERUSER]`) - cmd.Flags().StringVar(&createDatabaseInstanceRoleReq.DatabaseInstanceRole.Name, "name", createDatabaseInstanceRoleReq.DatabaseInstanceRole.Name, `The name of the role.`) - cmd.Use = "create-database-instance-role INSTANCE_NAME" + cmd.Use = "create-database-instance-role INSTANCE_NAME NAME" cmd.Short = `Create a role for a Database Instance.` - cmd.Long = `Create a role for a Database Instance.` + cmd.Long = `Create a role for a Database Instance. + + Arguments: + INSTANCE_NAME: + NAME: The name of the role. This is the unique identifier for the role in an + instance.` // This command is being previewed; hide from help output. cmd.Hidden = true @@ -293,7 +303,14 @@ func newCreateDatabaseInstanceRole() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only INSTANCE_NAME as positional arguments. 
Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) return check(cmd, args) } @@ -315,6 +332,9 @@ func newCreateDatabaseInstanceRole() *cobra.Command { } } createDatabaseInstanceRoleReq.InstanceName = args[0] + if !cmd.Flags().Changed("json") { + createDatabaseInstanceRoleReq.DatabaseInstanceRole.Name = args[1] + } response, err := w.Database.CreateDatabaseInstanceRole(ctx, createDatabaseInstanceRoleReq) if err != nil { @@ -1500,12 +1520,15 @@ func newUpdateDatabaseInstance() *cobra.Command { cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", updateDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) // TODO: array: child_instance_refs + // TODO: array: custom_tags + // TODO: array: effective_custom_tags cmd.Flags().BoolVar(&updateDatabaseInstanceReq.DatabaseInstance.EnablePgNativeLogin, "enable-pg-native-login", updateDatabaseInstanceReq.DatabaseInstance.EnablePgNativeLogin, `Whether to enable PG native password login on the instance.`) cmd.Flags().BoolVar(&updateDatabaseInstanceReq.DatabaseInstance.EnableReadableSecondaries, "enable-readable-secondaries", updateDatabaseInstanceReq.DatabaseInstance.EnableReadableSecondaries, `Whether to enable secondaries to serve read-only traffic.`) cmd.Flags().IntVar(&updateDatabaseInstanceReq.DatabaseInstance.NodeCount, "node-count", updateDatabaseInstanceReq.DatabaseInstance.NodeCount, `The number of nodes in the instance, composed of 1 primary and 0 or more secondaries.`) // TODO: complex arg: parent_instance_ref cmd.Flags().IntVar(&updateDatabaseInstanceReq.DatabaseInstance.RetentionWindowInDays, "retention-window-in-days", updateDatabaseInstanceReq.DatabaseInstance.RetentionWindowInDays, `The retention window for the instance.`) cmd.Flags().BoolVar(&updateDatabaseInstanceReq.DatabaseInstance.Stopped, "stopped", updateDatabaseInstanceReq.DatabaseInstance.Stopped, `Whether to stop the instance.`) + cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.UsagePolicyId, "usage-policy-id", updateDatabaseInstanceReq.DatabaseInstance.UsagePolicyId, `The desired usage policy to associate with the instance.`) cmd.Use = "update-database-instance NAME UPDATE_MASK" cmd.Short = `Update a Database Instance.` diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index a0db1aeaa4..14dffa1769 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -288,6 +288,7 @@ func newList() *cobra.Command { var listReq catalog.ListExternalLocationsRequest cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include external locations in the response for which the principal can only access selective metadata for.`) + cmd.Flags().BoolVar(&listReq.IncludeUnbound, "include-unbound", listReq.IncludeUnbound, `Whether to include external locations not bound to the workspace.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of external locations to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) @@ -298,7 +299,16 @@ func newList() *cobra.Command { Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. 
The caller must be a metastore admin, the owner of the external location, or a user that has some privilege on the external location. There is - no guarantee of a specific ordering of the elements in the array.` + no guarantee of a specific ordering of the elements in the array. + + NOTE: we recommend using max_results=0 to use the paginated version of this + API. Unpaginated calls will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may + contain zero results while still providing a next_page_token. Clients must + continue reading pages until next_page_token is absent, which is the only + indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/external-metadata/external-metadata.go b/cmd/workspace/external-metadata/external-metadata.go index fc9c2c56f8..d11a36387c 100755 --- a/cmd/workspace/external-metadata/external-metadata.go +++ b/cmd/workspace/external-metadata/external-metadata.go @@ -105,6 +105,7 @@ func newCreateExternalMetadata() *cobra.Command { SAP, SERVICENOW, SNOWFLAKE, + STREAM_NATIVE, TABLEAU, TERADATA, WORKDAY, @@ -407,6 +408,7 @@ func newUpdateExternalMetadata() *cobra.Command { SAP, SERVICENOW, SNOWFLAKE, + STREAM_NATIVE, TABLEAU, TERADATA, WORKDAY, diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index 057a43547e..bd104fedb2 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -148,7 +148,7 @@ func newDelete() *cobra.Command { Arguments: NAME: The fully-qualified name of the function (of the form - __catalog_name__.__schema_name__.__function__name__).` + __catalog_name__.__schema_name__.__function__name__) .` cmd.Annotations = make(map[string]string) @@ -165,14 +165,14 @@ func newDelete() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Functions drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__)") + id, err := cmdio.Select(ctx, names, "The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__) ") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have the fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__)") + return fmt.Errorf("expected to have the fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__) ") } deleteReq.Name = args[0] @@ -301,6 +301,15 @@ func newList() *cobra.Command { functions for which either the user has the **EXECUTE** privilege or the user is the owner. There is no guarantee of a specific ordering of the elements in the array. + + NOTE: we recommend using max_results=0 to use the paginated version of this + API. Unpaginated calls will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may + contain zero results while still providing a next_page_token. Clients must + continue reading pages until next_page_token is absent, which is the only + indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. Arguments: CATALOG_NAME: Name of parent catalog for functions of interest. 
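The pagination note repeated across these list endpoints (catalogs, connections, external locations, functions, metastores) matters for SDK callers too: a page may be empty yet still carry a `next_page_token`, so termination must key off the iterator rather than result counts. A sketch using the Go SDK's listing iterator for the functions endpoint, with illustrative catalog and schema names:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// MaxResults 0 opts into the paginated path, as the note recommends
	// (depending on SDK serialization, ForceSendFields may be needed to
	// send an explicit zero).
	it := w.Functions.List(ctx, catalog.ListFunctionsRequest{
		CatalogName: "main",    // illustrative
		SchemaName:  "default", // illustrative
		MaxResults:  0,
	})

	// The iterator keeps fetching pages, including empty ones, until
	// next_page_token is absent.
	for it.HasNext(ctx) {
		fn, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Println(fn.FullName)
	}
}
```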
@@ -354,7 +363,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of function.`) + cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of the function.`) cmd.Use = "update NAME" cmd.Short = `Update a function.` diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index 16977d7643..fd16bc3744 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -66,7 +66,8 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.GitUsername, "git-username", createReq.GitUsername, `The username or email provided with your Git provider account, depending on which provider you are using.`) + cmd.Flags().StringVar(&createReq.GitEmail, "git-email", createReq.GitEmail, `The authenticating email associated with your Git provider user account.`) + cmd.Flags().StringVar(&createReq.GitUsername, "git-username", createReq.GitUsername, `The username provided with your Git provider account and associated with the credential.`) cmd.Flags().BoolVar(&createReq.IsDefaultForProvider, "is-default-for-provider", createReq.IsDefaultForProvider, `if the credential is the default for the given provider.`) cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `the name of the git credential, used for identification and ease of lookup.`) cmd.Flags().StringVar(&createReq.PersonalAccessToken, "personal-access-token", createReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`) @@ -339,7 +340,8 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateReq.GitUsername, "git-username", updateReq.GitUsername, `The username or email provided with your Git provider account, depending on which provider you are using.`) + cmd.Flags().StringVar(&updateReq.GitEmail, "git-email", updateReq.GitEmail, `The authenticating email associated with your Git provider user account.`) + cmd.Flags().StringVar(&updateReq.GitUsername, "git-username", updateReq.GitUsername, `The username provided with your Git provider account and associated with the credential.`) cmd.Flags().BoolVar(&updateReq.IsDefaultForProvider, "is-default-for-provider", updateReq.IsDefaultForProvider, `if the credential is the default for the given provider.`) cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `the name of the git credential, used for identification and ease of lookup.`) cmd.Flags().StringVar(&updateReq.PersonalAccessToken, "personal-access-token", updateReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`) diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index dd426a722d..2ca23080ea 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -289,12 +289,44 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: array: access_control_list + cmd.Flags().StringVar(&createReq.BudgetPolicyId, "budget-policy-id", 
createReq.BudgetPolicyId, `The id of the user specified budget policy to use for this job.`) + // TODO: complex arg: continuous + // TODO: complex arg: deployment + cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `An optional description for the job.`) + cmd.Flags().Var(&createReq.EditMode, "edit-mode", `Edit mode of the job. Supported values: [EDITABLE, UI_LOCKED]`) + // TODO: complex arg: email_notifications + // TODO: array: environments + cmd.Flags().Var(&createReq.Format, "format", `Used to tell what is the format of the job. Supported values: [MULTI_TASK, SINGLE_TASK]`) + // TODO: complex arg: git_source + // TODO: complex arg: health + // TODO: array: job_clusters + cmd.Flags().IntVar(&createReq.MaxConcurrentRuns, "max-concurrent-runs", createReq.MaxConcurrentRuns, `An optional maximum allowed number of concurrent runs of the job.`) + cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `An optional name for the job.`) + // TODO: complex arg: notification_settings + // TODO: array: parameters + cmd.Flags().Var(&createReq.PerformanceTarget, "performance-target", `The performance mode on a serverless job. Supported values: [PERFORMANCE_OPTIMIZED, STANDARD]`) + // TODO: complex arg: queue + // TODO: complex arg: run_as + // TODO: complex arg: schedule + // TODO: map via StringToStringVar: tags + // TODO: array: tasks + cmd.Flags().IntVar(&createReq.TimeoutSeconds, "timeout-seconds", createReq.TimeoutSeconds, `An optional timeout applied to each run of this job.`) + // TODO: complex arg: trigger + cmd.Flags().StringVar(&createReq.UsagePolicyId, "usage-policy-id", createReq.UsagePolicyId, `The id of the user specified usage policy to use for this job.`) + // TODO: complex arg: webhook_notifications + cmd.Use = "create" cmd.Short = `Create a new job.` cmd.Long = `Create a new job.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -311,8 +343,6 @@ func newCreate() *cobra.Command { return err } } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } response, err := w.Jobs.Create(ctx, createReq) diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index bd6a372f91..7a3740f718 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -421,7 +421,16 @@ func newList() *cobra.Command { Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin to retrieve this info. There is no guarantee of a - specific ordering of the elements in the array.` + specific ordering of the elements in the array. + + NOTE: we recommend using max_results=0 to use the paginated version of this + API. Unpaginated calls will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may + contain zero results while still providing a next_page_token. Clients must + continue reading pages until next_page_token is absent, which is the only + indication that the end of results has been reached. 
This behavior follows + Google AIP-158 guidelines.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index 2d47df8da6..77f2302fea 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -338,7 +338,23 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: array: aliases + cmd.Flags().StringVar(&updateReq.CatalogName, "catalog-name", updateReq.CatalogName, `The name of the catalog containing the model version.`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the model version.`) + cmd.Flags().Int64Var(&updateReq.CreatedAt, "created-at", updateReq.CreatedAt, ``) + cmd.Flags().StringVar(&updateReq.CreatedBy, "created-by", updateReq.CreatedBy, `The identifier of the user who created the model version.`) + cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `The unique identifier of the model version.`) + cmd.Flags().StringVar(&updateReq.MetastoreId, "metastore-id", updateReq.MetastoreId, `The unique identifier of the metastore containing the model version.`) + cmd.Flags().StringVar(&updateReq.ModelName, "model-name", updateReq.ModelName, `The name of the parent registered model of the model version, relative to parent schema.`) + // TODO: complex arg: model_version_dependencies + cmd.Flags().StringVar(&updateReq.RunId, "run-id", updateReq.RunId, `MLflow run ID used when creating the model version, if source was generated by an experiment run stored in an MLflow tracking server.`) + cmd.Flags().IntVar(&updateReq.RunWorkspaceId, "run-workspace-id", updateReq.RunWorkspaceId, `ID of the Databricks workspace containing the MLflow run that generated this model version, if applicable.`) + cmd.Flags().StringVar(&updateReq.SchemaName, "schema-name", updateReq.SchemaName, `The name of the schema containing the model version, relative to parent catalog.`) + cmd.Flags().StringVar(&updateReq.Source, "source", updateReq.Source, `URI indicating the location of the source artifacts (files) for the model version.`) + cmd.Flags().Var(&updateReq.Status, "status", `Current status of the model version. Supported values: [FAILED_REGISTRATION, MODEL_VERSION_STATUS_UNKNOWN, PENDING_REGISTRATION, READY]`) + cmd.Flags().StringVar(&updateReq.StorageLocation, "storage-location", updateReq.StorageLocation, `The storage location on the cloud under which model version data files are stored.`) + cmd.Flags().Int64Var(&updateReq.UpdatedAt, "updated-at", updateReq.UpdatedAt, ``) + cmd.Flags().StringVar(&updateReq.UpdatedBy, "updated-by", updateReq.UpdatedBy, `The identifier of the user who updated the model version last time.`) cmd.Use = "update FULL_NAME VERSION" cmd.Short = `Update a Model Version.` diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index 1178565d7a..413d5a2712 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -95,8 +95,8 @@ func newGet() *cobra.Command { Arguments: REQUEST_OBJECT_TYPE: The type of the request object. 
Can be one of the following: alerts, alertsv2, authorization, clusters, cluster-policies, dashboards, - dbsql-dashboards, directories, experiments, files, instance-pools, jobs, - notebooks, pipelines, queries, registered-models, repos, + dbsql-dashboards, directories, experiments, files, genie, instance-pools, + jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: The id of the request object.` @@ -157,8 +157,8 @@ func newGetPermissionLevels() *cobra.Command { Arguments: REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, alertsv2, authorization, clusters, cluster-policies, dashboards, - dbsql-dashboards, directories, experiments, files, instance-pools, jobs, - notebooks, pipelines, queries, registered-models, repos, + dbsql-dashboards, directories, experiments, files, genie, instance-pools, + jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: ` @@ -226,8 +226,8 @@ func newSet() *cobra.Command { Arguments: REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, alertsv2, authorization, clusters, cluster-policies, dashboards, - dbsql-dashboards, directories, experiments, files, instance-pools, jobs, - notebooks, pipelines, queries, registered-models, repos, + dbsql-dashboards, directories, experiments, files, genie, instance-pools, + jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: The id of the request object.` @@ -306,8 +306,8 @@ func newUpdate() *cobra.Command { Arguments: REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, alertsv2, authorization, clusters, cluster-policies, dashboards, - dbsql-dashboards, directories, experiments, files, instance-pools, jobs, - notebooks, pipelines, queries, registered-models, repos, + dbsql-dashboards, directories, experiments, files, genie, instance-pools, + jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. 
REQUEST_OBJECT_ID: The id of the request object.` diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index e3e7e667c6..e054630331 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -84,6 +84,37 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + cmd.Flags().BoolVar(&createReq.AllowDuplicateNames, "allow-duplicate-names", createReq.AllowDuplicateNames, `If false, deployment will fail if name conflicts with that of another pipeline.`) + cmd.Flags().StringVar(&createReq.BudgetPolicyId, "budget-policy-id", createReq.BudgetPolicyId, `Budget policy of this pipeline.`) + cmd.Flags().StringVar(&createReq.Catalog, "catalog", createReq.Catalog, `A catalog in Unity Catalog to publish data from this pipeline to.`) + cmd.Flags().StringVar(&createReq.Channel, "channel", createReq.Channel, `DLT Release Channel that specifies which version to use.`) + // TODO: array: clusters + // TODO: map via StringToStringVar: configuration + cmd.Flags().BoolVar(&createReq.Continuous, "continuous", createReq.Continuous, `Whether the pipeline is continuous or triggered.`) + // TODO: complex arg: deployment + cmd.Flags().BoolVar(&createReq.Development, "development", createReq.Development, `Whether the pipeline is in Development mode.`) + cmd.Flags().BoolVar(&createReq.DryRun, "dry-run", createReq.DryRun, ``) + cmd.Flags().StringVar(&createReq.Edition, "edition", createReq.Edition, `Pipeline product edition.`) + // TODO: complex arg: environment + // TODO: complex arg: event_log + // TODO: complex arg: filters + // TODO: complex arg: gateway_definition + cmd.Flags().StringVar(&createReq.Id, "id", createReq.Id, `Unique identifier for this pipeline.`) + // TODO: complex arg: ingestion_definition + // TODO: array: libraries + cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Friendly identifier for this pipeline.`) + // TODO: array: notifications + cmd.Flags().BoolVar(&createReq.Photon, "photon", createReq.Photon, `Whether Photon is enabled for this pipeline.`) + // TODO: complex arg: restart_window + cmd.Flags().StringVar(&createReq.RootPath, "root-path", createReq.RootPath, `Root path for this pipeline.`) + // TODO: complex arg: run_as + cmd.Flags().StringVar(&createReq.Schema, "schema", createReq.Schema, `The default schema (database) where tables are read from or published to.`) + cmd.Flags().BoolVar(&createReq.Serverless, "serverless", createReq.Serverless, `Whether serverless compute is enabled for this pipeline.`) + cmd.Flags().StringVar(&createReq.Storage, "storage", createReq.Storage, `DBFS root directory for storing checkpoints and tables.`) + // TODO: map via StringToStringVar: tags + cmd.Flags().StringVar(&createReq.Target, "target", createReq.Target, `Target schema (database) to add tables in this pipeline to.`) + // TODO: complex arg: trigger + cmd.Use = "create" cmd.Short = `Create a pipeline.` cmd.Long = `Create a pipeline. 
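
The jobs and pipelines create hunks share one generated pattern: every scalar request field is bound to a flag whose default is the field's current value, positional arguments are pinned to zero, and the old "please provide command input in JSON format" error is dropped, so a request can now be populated from flags, from a --json body, or both. Below is a minimal cobra sketch of that binding, not the actual generated code; CreateFoo, its fields, and newCreateSketch are hypothetical stand-ins for request types such as jobs.CreateJob or pipelines.CreatePipeline.

package sketch

import (
	"encoding/json"
	"fmt"

	"github.com/spf13/cobra"
)

// CreateFoo stands in for a generated request struct.
type CreateFoo struct {
	Name           string `json:"name"`
	TimeoutSeconds int    `json:"timeout_seconds"`
}

func newCreateSketch() *cobra.Command {
	var req CreateFoo
	var jsonBody string

	cmd := &cobra.Command{Use: "create", Args: cobra.ExactArgs(0)}
	cmd.Flags().StringVar(&jsonBody, "json", "", "inline JSON request body")
	// One flag per scalar field, defaulting to the field's current value,
	// mirroring the StringVar/IntVar/BoolVar lines added in the hunks above.
	cmd.Flags().StringVar(&req.Name, "name", req.Name, "optional name")
	cmd.Flags().IntVar(&req.TimeoutSeconds, "timeout-seconds", req.TimeoutSeconds, "optional timeout in seconds")

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		// As in the generated commands, a --json body, when provided,
		// is unmarshalled into the same request struct at run time.
		if cmd.Flags().Changed("json") {
			if err := json.Unmarshal([]byte(jsonBody), &req); err != nil {
				return err
			}
		}
		fmt.Printf("%+v\n", req) // placeholder for the actual Create API call
		return nil
	}
	return cmd
}
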
@@ -93,6 +124,11 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -109,8 +145,6 @@ func newCreate() *cobra.Command { return err } } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } response, err := w.Pipelines.Create(ctx, createReq) diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index f98ce48fdc..06dbb39233 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -47,9 +47,9 @@ func New() *cobra.Command { update permissions on the registered model, users must be owners of the registered model. - Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. - tagging, grants) that specify a securable type, use "FUNCTION" as the - securable type.`, + Note: The securable type for models is FUNCTION. When using REST APIs (e.g. + tagging, grants) that specify a securable type, use FUNCTION as the securable + type.`, GroupID: "catalog", Annotations: map[string]string{ "package": "catalog", @@ -91,10 +91,22 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: array: aliases + cmd.Flags().BoolVar(&createReq.BrowseOnly, "browse-only", createReq.BrowseOnly, `Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request.`) + cmd.Flags().StringVar(&createReq.CatalogName, "catalog-name", createReq.CatalogName, `The name of the catalog where the schema and the registered model reside.`) cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `The comment attached to the registered model.`) + cmd.Flags().Int64Var(&createReq.CreatedAt, "created-at", createReq.CreatedAt, `Creation timestamp of the registered model in milliseconds since the Unix epoch.`) + cmd.Flags().StringVar(&createReq.CreatedBy, "created-by", createReq.CreatedBy, `The identifier of the user who created the registered model.`) + cmd.Flags().StringVar(&createReq.FullName, "full-name", createReq.FullName, `The three-level (fully qualified) name of the registered model.`) + cmd.Flags().StringVar(&createReq.MetastoreId, "metastore-id", createReq.MetastoreId, `The unique identifier of the metastore.`) + cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `The name of the registered model.`) + cmd.Flags().StringVar(&createReq.Owner, "owner", createReq.Owner, `The identifier of the user who owns the registered model.`) + cmd.Flags().StringVar(&createReq.SchemaName, "schema-name", createReq.SchemaName, `The name of the schema where the registered model resides.`) cmd.Flags().StringVar(&createReq.StorageLocation, "storage-location", createReq.StorageLocation, `The storage location on the cloud under which model version data files are stored.`) + cmd.Flags().Int64Var(&createReq.UpdatedAt, "updated-at", createReq.UpdatedAt, `Last-update timestamp of the registered model in milliseconds since the Unix epoch.`) + cmd.Flags().StringVar(&createReq.UpdatedBy, "updated-by", createReq.UpdatedBy, `The identifier of the user who updated 
the registered model last time.`) - cmd.Use = "create CATALOG_NAME SCHEMA_NAME NAME" + cmd.Use = "create" cmd.Short = `Create a Registered Model.` cmd.Long = `Create a Registered Model. @@ -109,24 +121,12 @@ func newCreate() *cobra.Command { parent catalog and schema, or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - The caller must have the **CREATE MODEL** or **CREATE FUNCTION** privilege on the parent - schema. - - Arguments: - CATALOG_NAME: The name of the catalog where the schema and the registered model reside - SCHEMA_NAME: The name of the schema where the registered model resides - NAME: The name of the registered model` + schema.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'catalog_name', 'schema_name', 'name' in your JSON input") - } - return nil - } - check := root.ExactArgs(3) + check := root.ExactArgs(0) return check(cmd, args) } @@ -147,15 +147,6 @@ func newCreate() *cobra.Command { } } } - if !cmd.Flags().Changed("json") { - createReq.CatalogName = args[0] - } - if !cmd.Flags().Changed("json") { - createReq.SchemaName = args[1] - } - if !cmd.Flags().Changed("json") { - createReq.Name = args[2] - } response, err := w.RegisteredModels.Create(ctx, createReq) if err != nil { @@ -483,7 +474,7 @@ func newSetAlias() *cobra.Command { schema. Arguments: - FULL_NAME: Full name of the registered model + FULL_NAME: The three-level (fully qualified) name of the registered model ALIAS: The name of the alias VERSION_NUM: The version number of the model version to which the alias points` @@ -563,9 +554,20 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: array: aliases + cmd.Flags().BoolVar(&updateReq.BrowseOnly, "browse-only", updateReq.BrowseOnly, `Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request.`) + cmd.Flags().StringVar(&updateReq.CatalogName, "catalog-name", updateReq.CatalogName, `The name of the catalog where the schema and the registered model reside.`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the registered model.`) + cmd.Flags().Int64Var(&updateReq.CreatedAt, "created-at", updateReq.CreatedAt, `Creation timestamp of the registered model in milliseconds since the Unix epoch.`) + cmd.Flags().StringVar(&updateReq.CreatedBy, "created-by", updateReq.CreatedBy, `The identifier of the user who created the registered model.`) + cmd.Flags().StringVar(&updateReq.MetastoreId, "metastore-id", updateReq.MetastoreId, `The unique identifier of the metastore.`) + cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the registered model.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the registered model.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the registered model.`) + cmd.Flags().StringVar(&updateReq.SchemaName, "schema-name", updateReq.SchemaName, `The name of the schema where the registered model resides.`) + cmd.Flags().StringVar(&updateReq.StorageLocation, "storage-location", 
updateReq.StorageLocation, `The storage location on the cloud under which model version data files are stored.`) + cmd.Flags().Int64Var(&updateReq.UpdatedAt, "updated-at", updateReq.UpdatedAt, `Last-update timestamp of the registered model in milliseconds since the Unix epoch.`) + cmd.Flags().StringVar(&updateReq.UpdatedBy, "updated-by", updateReq.UpdatedBy, `The identifier of the user who updated the registered model last time.`) cmd.Use = "update FULL_NAME" cmd.Short = `Update a Registered Model.` diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index 0c73eb5ea4..7757127c33 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -284,6 +284,15 @@ func newList() *cobra.Command { catalog will be retrieved. Otherwise, only schemas owned by the caller (or for which the caller has the **USE_SCHEMA** privilege) will be retrieved. There is no guarantee of a specific ordering of the elements in the array. + + NOTE: we recommend using max_results=0 to use the paginated version of this + API. Unpaginated calls will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may + contain zero results while still providing a next_page_token. Clients must + continue reading pages until next_page_token is absent, which is the only + indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. Arguments: CATALOG_NAME: Parent catalog for schemas of interest.` diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 287f9d4575..3a7ceefd7d 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -61,6 +61,7 @@ func New() *cobra.Command { cmd.AddCommand(newQuery()) cmd.AddCommand(newSetPermissions()) cmd.AddCommand(newUpdateConfig()) + cmd.AddCommand(newUpdateNotifications()) cmd.AddCommand(newUpdatePermissions()) cmd.AddCommand(newUpdateProvisionedThroughputEndpointConfig()) @@ -1324,6 +1325,80 @@ func newUpdateConfig() *cobra.Command { return cmd } +// start update-notifications command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateNotificationsOverrides []func( + *cobra.Command, + *serving.UpdateInferenceEndpointNotifications, +) + +func newUpdateNotifications() *cobra.Command { + cmd := &cobra.Command{} + + var updateNotificationsReq serving.UpdateInferenceEndpointNotifications + var updateNotificationsJson flags.JsonFlag + + cmd.Flags().Var(&updateNotificationsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: email_notifications + + cmd.Use = "update-notifications NAME" + cmd.Short = `Update the email and webhook notification settings for an endpoint.` + cmd.Long = `Update the email and webhook notification settings for an endpoint. + + Updates the email and webhook notification settings for an endpoint. + + Arguments: + NAME: The name of the serving endpoint whose notifications are being updated. 
+ This field is required.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateNotificationsJson.Unmarshal(&updateNotificationsReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateNotificationsReq.Name = args[0] + + response, err := w.ServingEndpoints.UpdateNotifications(ctx, updateNotificationsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateNotificationsOverrides { + fn(cmd, &updateNotificationsReq) + } + + return cmd +} + // start update-permissions command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 483a0155e6..ac8c5cb6c3 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -281,6 +281,7 @@ func newList() *cobra.Command { var listReq catalog.ListStorageCredentialsRequest + cmd.Flags().BoolVar(&listReq.IncludeUnbound, "include-unbound", listReq.IncludeUnbound, `Whether to include credentials not bound to the workspace.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of storage credentials to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) @@ -292,7 +293,16 @@ func newList() *cobra.Command { The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore admin, retrieval of credentials is unrestricted. There is no guarantee of a specific ordering of - the elements in the array.` + the elements in the array. + + NOTE: we recommend using max_results=0 to use the paginated version of this + API. Unpaginated calls will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may + contain zero results while still providing a next_page_token. Clients must + continue reading pages until next_page_token is absent, which is the only + indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/system-schemas/system-schemas.go b/cmd/workspace/system-schemas/system-schemas.go index e88ee60489..f0f21b203a 100755 --- a/cmd/workspace/system-schemas/system-schemas.go +++ b/cmd/workspace/system-schemas/system-schemas.go @@ -200,6 +200,15 @@ func newList() *cobra.Command { Gets an array of system schemas for a metastore. The caller must be an account admin or a metastore admin. + + NOTE: we recommend using max_results=0 to use the paginated version of this + API. Unpaginated calls will be deprecated soon. 
+ + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may + contain zero results while still providing a next_page_token. Clients must + continue reading pages until next_page_token is absent, which is the only + indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. Arguments: METASTORE_ID: The ID for the metastore in which the system schema resides.` diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index 5ae07f0db3..af6db516f2 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -450,6 +450,15 @@ func newList() *cobra.Command { be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. There is no guarantee of a specific ordering of the elements in the array. + + NOTE: we recommend using max_results=0 to use the paginated version of this + API. Unpaginated calls will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may + contain zero results while still providing a next_page_token. Clients must + continue reading pages until next_page_token is absent, which is the only + indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. Arguments: CATALOG_NAME: Name of parent catalog for tables of interest. diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index ce76a94fc5..006e30d400 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -98,7 +98,12 @@ func newCreate() *cobra.Command { CATALOG_NAME: The name of the catalog where the schema and the volume are SCHEMA_NAME: The name of the schema where the volume is NAME: The name of the volume - VOLUME_TYPE: + VOLUME_TYPE: The type of the volume. An external volume is located in the specified + external location. A managed volume is located in the default location + which is specified by the parent schema, or the parent catalog, or the + Metastore. [Learn more] + + [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external Supported values: [EXTERNAL, MANAGED]` cmd.Annotations = make(map[string]string) @@ -267,7 +272,7 @@ func newList() *cobra.Command { The returned volumes are filtered based on the privileges of the calling user. For example, the metastore admin is able to list all the volumes. A regular user needs to be the owner or have the **READ VOLUME** privilege on the volume - to recieve the volumes in the response. For the latter case, the caller must + to receive the volumes in the response. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. 
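
The PAGINATION BEHAVIOR note repeated across the metastores, schemas, storage-credentials, system-schemas, and tables list commands pins down a small but easy-to-miss contract: a page may be empty yet still carry a next_page_token, and only the token's absence ends iteration. A minimal Go sketch of a conforming client loop follows, with listPage as a hypothetical stand-in for any of these list calls:

// listAll drains a paginated list endpoint per the AIP-158 contract quoted
// above. listPage stands in for a generated List call that returns one page
// of items plus the next_page_token (empty string when the token is absent).
func listAll(listPage func(pageToken string) (items []string, nextPageToken string, err error)) ([]string, error) {
	var all []string
	token := ""
	for {
		items, next, err := listPage(token)
		if err != nil {
			return nil, err
		}
		all = append(all, items...) // a page may legitimately hold zero results
		if next == "" {
			return all, nil // next_page_token absent: the only end-of-results signal
		}
		token = next
	}
}
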
diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index 439d988074..966b886444 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -88,9 +88,9 @@ func newCreate() *cobra.Command { cmd.Flags().IntVar(&createReq.MaxNumClusters, "max-num-clusters", createReq.MaxNumClusters, `Maximum number of clusters that the autoscaler will create to handle concurrent queries.`) cmd.Flags().IntVar(&createReq.MinNumClusters, "min-num-clusters", createReq.MinNumClusters, `Minimum number of available clusters that will be maintained for this SQL warehouse.`) cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Logical name for the cluster.`) - cmd.Flags().Var(&createReq.SpotInstancePolicy, "spot-instance-policy", `Supported values: [COST_OPTIMIZED, POLICY_UNSPECIFIED, RELIABILITY_OPTIMIZED]`) + cmd.Flags().Var(&createReq.SpotInstancePolicy, "spot-instance-policy", `Configurations whether the endpoint should use spot instances. Supported values: [COST_OPTIMIZED, POLICY_UNSPECIFIED, RELIABILITY_OPTIMIZED]`) // TODO: complex arg: tags - cmd.Flags().Var(&createReq.WarehouseType, "warehouse-type", `Supported values: [CLASSIC, PRO, TYPE_UNSPECIFIED]`) + cmd.Flags().Var(&createReq.WarehouseType, "warehouse-type", `Warehouse type: PRO or CLASSIC. Supported values: [CLASSIC, PRO, TYPE_UNSPECIFIED]`) cmd.Use = "create" cmd.Short = `Create a warehouse.` @@ -262,9 +262,9 @@ func newEdit() *cobra.Command { cmd.Flags().IntVar(&editReq.MaxNumClusters, "max-num-clusters", editReq.MaxNumClusters, `Maximum number of clusters that the autoscaler will create to handle concurrent queries.`) cmd.Flags().IntVar(&editReq.MinNumClusters, "min-num-clusters", editReq.MinNumClusters, `Minimum number of available clusters that will be maintained for this SQL warehouse.`) cmd.Flags().StringVar(&editReq.Name, "name", editReq.Name, `Logical name for the cluster.`) - cmd.Flags().Var(&editReq.SpotInstancePolicy, "spot-instance-policy", `Supported values: [COST_OPTIMIZED, POLICY_UNSPECIFIED, RELIABILITY_OPTIMIZED]`) + cmd.Flags().Var(&editReq.SpotInstancePolicy, "spot-instance-policy", `Configurations whether the endpoint should use spot instances. Supported values: [COST_OPTIMIZED, POLICY_UNSPECIFIED, RELIABILITY_OPTIMIZED]`) // TODO: complex arg: tags - cmd.Flags().Var(&editReq.WarehouseType, "warehouse-type", `Supported values: [CLASSIC, PRO, TYPE_UNSPECIFIED]`) + cmd.Flags().Var(&editReq.WarehouseType, "warehouse-type", `Warehouse type: PRO or CLASSIC. 
Supported values: [CLASSIC, PRO, TYPE_UNSPECIFIED]`) cmd.Use = "edit ID" cmd.Short = `Update a warehouse.` @@ -613,7 +613,9 @@ func newList() *cobra.Command { var listReq sql.ListWarehousesRequest - cmd.Flags().IntVar(&listReq.RunAsUserId, "run-as-user-id", listReq.RunAsUserId, `Service Principal which will be used to fetch the list of warehouses.`) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The max number of warehouses to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token, received from a previous ListWarehouses call.`) + cmd.Flags().IntVar(&listReq.RunAsUserId, "run-as-user-id", listReq.RunAsUserId, `Service Principal which will be used to fetch the list of endpoints.`) cmd.Use = "list" cmd.Short = `List warehouses.` @@ -756,10 +758,11 @@ func newSetWorkspaceWarehouseConfig() *cobra.Command { // TODO: complex arg: channel // TODO: complex arg: config_param // TODO: array: data_access_config + cmd.Flags().BoolVar(&setWorkspaceWarehouseConfigReq.EnableServerlessCompute, "enable-serverless-compute", setWorkspaceWarehouseConfigReq.EnableServerlessCompute, `Enable Serverless compute for SQL warehouses.`) // TODO: array: enabled_warehouse_types // TODO: complex arg: global_param cmd.Flags().StringVar(&setWorkspaceWarehouseConfigReq.GoogleServiceAccount, "google-service-account", setWorkspaceWarehouseConfigReq.GoogleServiceAccount, `GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage.`) - cmd.Flags().StringVar(&setWorkspaceWarehouseConfigReq.InstanceProfileArn, "instance-profile-arn", setWorkspaceWarehouseConfigReq.InstanceProfileArn, `AWS Only: Instance profile used to pass IAM role to the cluster.`) + cmd.Flags().StringVar(&setWorkspaceWarehouseConfigReq.InstanceProfileArn, "instance-profile-arn", setWorkspaceWarehouseConfigReq.InstanceProfileArn, `AWS Only: The instance profile used to pass an IAM role to the SQL warehouses.`) cmd.Flags().Var(&setWorkspaceWarehouseConfigReq.SecurityPolicy, "security-policy", `Security policy for warehouses. Supported values: [DATA_ACCESS_CONTROL, NONE, PASSTHROUGH]`) // TODO: complex arg: sql_configuration_parameters diff --git a/experimental/python/databricks/bundles/jobs/_models/condition.py b/experimental/python/databricks/bundles/jobs/_models/condition.py index d1b3566d5d..d7c1b25bc7 100644 --- a/experimental/python/databricks/bundles/jobs/_models/condition.py +++ b/experimental/python/databricks/bundles/jobs/_models/condition.py @@ -3,10 +3,6 @@ class Condition(Enum): - """ - :meta private: [EXPERIMENTAL] - """ - ANY_UPDATED = "ANY_UPDATED" ALL_UPDATED = "ALL_UPDATED" diff --git a/experimental/python/databricks/bundles/jobs/_models/environment.py b/experimental/python/databricks/bundles/jobs/_models/environment.py index e2b88cb481..1521fffeda 100644 --- a/experimental/python/databricks/bundles/jobs/_models/environment.py +++ b/experimental/python/databricks/bundles/jobs/_models/environment.py @@ -37,7 +37,7 @@ class Environment: """ :meta private: [EXPERIMENTAL] - List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`. + List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`. """ @classmethod @@ -72,7 +72,7 @@ class EnvironmentDict(TypedDict, total=False): """ :meta private: [EXPERIMENTAL] - List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`. 
+ List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`. """ diff --git a/experimental/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py b/experimental/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py index c89b0c4011..c824a72499 100644 --- a/experimental/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py +++ b/experimental/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py @@ -12,9 +12,7 @@ @dataclass(kw_only=True) class TableUpdateTriggerConfiguration: - """ - :meta private: [EXPERIMENTAL] - """ + """""" condition: VariableOrOptional[Condition] = None """ @@ -29,7 +27,7 @@ class TableUpdateTriggerConfiguration: table_names: VariableOrList[str] = field(default_factory=list) """ - A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. """ wait_after_last_change_seconds: VariableOrOptional[int] = None @@ -63,7 +61,7 @@ class TableUpdateTriggerConfigurationDict(TypedDict, total=False): table_names: VariableOrList[str] """ - A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. """ wait_after_last_change_seconds: VariableOrOptional[int] diff --git a/experimental/python/databricks/bundles/jobs/_models/trigger_settings.py b/experimental/python/databricks/bundles/jobs/_models/trigger_settings.py index 4608c98c82..18cccd14dd 100644 --- a/experimental/python/databricks/bundles/jobs/_models/trigger_settings.py +++ b/experimental/python/databricks/bundles/jobs/_models/trigger_settings.py @@ -42,9 +42,6 @@ class TriggerSettings: """ table_update: VariableOrOptional[TableUpdateTriggerConfiguration] = None - """ - :meta private: [EXPERIMENTAL] - """ @classmethod def from_dict(cls, value: "TriggerSettingsDict") -> "Self": @@ -73,9 +70,6 @@ class TriggerSettingsDict(TypedDict, total=False): """ table_update: VariableOrOptional[TableUpdateTriggerConfigurationParam] - """ - :meta private: [EXPERIMENTAL] - """ TriggerSettingsParam = TriggerSettingsDict | TriggerSettings diff --git a/experimental/python/databricks/bundles/volumes/_models/volume_type.py b/experimental/python/databricks/bundles/volumes/_models/volume_type.py index 1b9bcd1089..5c96db8fde 100644 --- a/experimental/python/databricks/bundles/volumes/_models/volume_type.py +++ b/experimental/python/databricks/bundles/volumes/_models/volume_type.py @@ -3,12 +3,8 @@ class VolumeType(Enum): - """ - The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. 
[Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) - """ - - EXTERNAL = "EXTERNAL" MANAGED = "MANAGED" + EXTERNAL = "EXTERNAL" -VolumeTypeParam = Literal["EXTERNAL", "MANAGED"] | VolumeType +VolumeTypeParam = Literal["MANAGED", "EXTERNAL"] | VolumeType diff --git a/go.mod b/go.mod index aaad41c802..3fc6492749 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/BurntSushi/toml v1.5.0 // MIT github.com/Masterminds/semver/v3 v3.4.0 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.85.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.86.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/gorilla/mux v1.8.1 // BSD 3-Clause diff --git a/go.sum b/go.sum index 0b9ae76cf5..582c62ea5b 100644 --- a/go.sum +++ b/go.sum @@ -29,8 +29,8 @@ github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/databricks/databricks-sdk-go v0.85.0 h1:oDCioucFiIP3ioVqWkxFvN5jB+s4kxYvsFZYww/y4VI= -github.com/databricks/databricks-sdk-go v0.85.0/go.mod h1:hWoHnHbNLjPKiTm5K/7bcIv3J3Pkgo5x9pPzh8K3RVE= +github.com/databricks/databricks-sdk-go v0.86.0 h1:Di1+NBQlfzBMUhY6w6gS2mtmNXIWycowoCsLCGFQPyU= +github.com/databricks/databricks-sdk-go v0.86.0/go.mod h1:hWoHnHbNLjPKiTm5K/7bcIv3J3Pkgo5x9pPzh8K3RVE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/genkit/tagging.py b/internal/genkit/tagging.py index 5504bdd0e7..f2ac65457d 100644 --- a/internal/genkit/tagging.py +++ b/internal/genkit/tagging.py @@ -51,8 +51,7 @@ def commit_and_push(self, message: str): new_tree = self.repo.create_git_tree(self.changed_files, base_tree) parent_commit = self.repo.get_git_commit(head_ref.object.sha) - new_commit = self.repo.create_git_commit( - message=message, tree=new_tree, parents=[parent_commit]) + new_commit = self.repo.create_git_commit(message=message, tree=new_tree, parents=[parent_commit]) # Update branch reference head_ref.edit(new_commit.sha) self.sha = new_commit.sha @@ -70,11 +69,10 @@ def tag(self, tag_name: str, tag_message: str): # The email MUST be the GitHub Apps email. # Otherwise, the tag will not be verified. tagger = InputGitAuthor( - name="Databricks SDK Release Bot", - email="DECO-SDK-Tagging[bot]@users.noreply.github.com") + name="Databricks SDK Release Bot", email="DECO-SDK-Tagging[bot]@users.noreply.github.com" + ) - tag = self.repo.create_git_tag( - tag=tag_name, message=tag_message, object=self.sha, type="commit", tagger=tagger) + tag = self.repo.create_git_tag(tag=tag_name, message=tag_message, object=self.sha, type="commit", tagger=tagger) # Create a Git ref (the actual reference for the tag in the repo) self.repo.create_git_ref(ref=f"refs/tags/{tag_name}", sha=tag.sha) @@ -89,6 +87,7 @@ class Package: :name: The package name. :path: The path to the package relative to the repository root. 
""" + name: str path: str @@ -140,7 +139,7 @@ def get_package_name(package_path: str) -> str: } """ filepath = os.path.join(os.getcwd(), package_path, PACKAGE_FILE_NAME) - with open(filepath, 'r') as file: + with open(filepath, "r") as file: content = json.load(file) if "package" in content: return content["package"] @@ -156,21 +155,21 @@ def update_version_references(tag_info: TagInfo) -> None: # Load version patterns from '.codegen.json' file at the top level of the repository package_file_path = os.path.join(os.getcwd(), CODEGEN_FILE_NAME) - with open(package_file_path, 'r') as file: + with open(package_file_path, "r") as file: package_file = json.load(file) - version = package_file.get('version') + version = package_file.get("version") if not version: - print(f"`version` not found in .codegen.json. Nothing to update.") + print("`version` not found in .codegen.json. Nothing to update.") return # Update the versions for filename, pattern in version.items(): loc = os.path.join(os.getcwd(), tag_info.package.path, filename) - previous_version = re.sub(r'\$VERSION', r"\\d+\\.\\d+\\.\\d+", pattern) - new_version = re.sub(r'\$VERSION', tag_info.version, pattern) + previous_version = re.sub(r"\$VERSION", r"\\d+\\.\\d+\\.\\d+", pattern) + new_version = re.sub(r"\$VERSION", tag_info.version, pattern) - with open(loc, 'r') as file: + with open(loc, "r") as file: content = file.read() # Replace the version in the file content @@ -188,15 +187,15 @@ def clean_next_changelog(package_path: str) -> None: """ file_path = os.path.join(os.getcwd(), package_path, NEXT_CHANGELOG_FILE_NAME) - with open(file_path, 'r') as file: + with open(file_path, "r") as file: content = file.read() # Remove content between ### sections - cleaned_content = re.sub(r'(### [^\n]+\n)(?:.*?\n?)*?(?=###|$)', r'\1', content) + cleaned_content = re.sub(r"(### [^\n]+\n)(?:.*?\n?)*?(?=###|$)", r"\1", content) # Ensure there is exactly one empty line before each section - cleaned_content = re.sub(r'(\n*)(###[^\n]+)', r'\n\n\2', cleaned_content) + cleaned_content = re.sub(r"(\n*)(###[^\n]+)", r"\n\n\2", cleaned_content) # Find the version number - version_match = re.search(r'Release v(\d+)\.(\d+)\.(\d+)', cleaned_content) + version_match = re.search(r"Release v(\d+)\.(\d+)\.(\d+)", cleaned_content) if not version_match: raise Exception("Version not found in the changelog") major, minor, patch = map(int, version_match.groups()) @@ -206,7 +205,7 @@ def clean_next_changelog(package_path: str) -> None: # are more common than patch or major version releases. minor += 1 patch = 0 - new_version = f'Release v{major}.{minor}.{patch}' + new_version = f"Release v{major}.{minor}.{patch}" cleaned_content = cleaned_content.replace(version_match.group(0), new_version) # Update file with cleaned content @@ -220,19 +219,18 @@ def get_previous_tag_info(package: Package) -> Optional[TagInfo]: """ changelog_path = os.path.join(os.getcwd(), package.path, CHANGELOG_FILE_NAME) - with open(changelog_path, 'r') as f: + with open(changelog_path, "r") as f: changelog = f.read() # Extract the latest release section using regex - match = re.search(r"## (\[Release\] )?Release v[\d\.]+.*?(?=\n## (\[Release\] )?Release v|\Z)", - changelog, re.S) + match = re.search(r"## (\[Release\] )?Release v[\d\.]+.*?(?=\n## (\[Release\] )?Release v|\Z)", changelog, re.S) # E.g., for new packages. 
if not match: return None latest_release = match.group(0) - version_match = re.search(r'## (\[Release\] )?Release v(\d+\.\d+\.\d+)', latest_release) + version_match = re.search(r"## (\[Release\] )?Release v(\d+\.\d+\.\d+)", latest_release) if not version_match: raise Exception("Version not found in the changelog") @@ -247,22 +245,22 @@ def get_next_tag_info(package: Package) -> Optional[TagInfo]: """ next_changelog_path = os.path.join(os.getcwd(), package.path, NEXT_CHANGELOG_FILE_NAME) # Read NEXT_CHANGELOG.md - with open(next_changelog_path, 'r') as f: + with open(next_changelog_path, "r") as f: next_changelog = f.read() # Remove "# NEXT CHANGELOG" line - next_changelog = re.sub(r'^# NEXT CHANGELOG(\n+)', '', next_changelog, flags=re.MULTILINE) + next_changelog = re.sub(r"^# NEXT CHANGELOG(\n+)", "", next_changelog, flags=re.MULTILINE) # Remove empty sections - next_changelog = re.sub(r'###[^\n]+\n+(?=##|\Z)', '', next_changelog) + next_changelog = re.sub(r"###[^\n]+\n+(?=##|\Z)", "", next_changelog) # Ensure there is exactly one empty line before each section - next_changelog = re.sub(r'(\n*)(###[^\n]+)', r'\n\n\2', next_changelog) + next_changelog = re.sub(r"(\n*)(###[^\n]+)", r"\n\n\2", next_changelog) - if not re.search(r'###', next_changelog): + if not re.search(r"###", next_changelog): print("All sections are empty. No changes will be made to the changelog.") return None - version_match = re.search(r'## Release v(\d+\.\d+\.\d+)', next_changelog) + version_match = re.search(r"## Release v(\d+\.\d+\.\d+)", next_changelog) if not version_match: raise Exception("Version not found in the changelog") @@ -275,10 +273,9 @@ def write_changelog(tag_info: TagInfo) -> None: Updates the changelog with a new tag info. """ changelog_path = os.path.join(os.getcwd(), tag_info.package.path, CHANGELOG_FILE_NAME) - with open(changelog_path, 'r') as f: + with open(changelog_path, "r") as f: changelog = f.read() - updated_changelog = re.sub(r'(# Version changelog\n\n)', f'\\1{tag_info.content.strip()}\n\n\n', - changelog) + updated_changelog = re.sub(r"(# Version changelog\n\n)", f"\\1{tag_info.content.strip()}\n\n\n", changelog) gh.add_file(changelog_path, updated_changelog) @@ -333,8 +330,7 @@ def is_tag_applied(tag: TagInfo) -> bool: """ try: # Check if the specific tag exists - result = subprocess.check_output( - ['git', 'tag', '--list', tag.tag_name()], stderr=subprocess.PIPE, text=True) + result = subprocess.check_output(["git", "tag", "--list", tag.tag_name()], stderr=subprocess.PIPE, text=True) return result.strip() == tag.tag_name() except subprocess.CalledProcessError as e: # Raise a exception for git command errors @@ -349,10 +345,7 @@ def find_last_tags() -> List[TagInfo]: """ packages = find_packages() - return [ - info for info in (get_previous_tag_info(package) for package in packages) - if info is not None - ] + return [info for info in (get_previous_tag_info(package) for package in packages) if info is not None] def find_pending_tags() -> List[TagInfo]: @@ -379,8 +372,9 @@ def generate_commit_message(tag_infos: List[TagInfo]) -> str: # Sort tag_infos by package name for consistency tag_infos.sort(key=lambda info: info.package.name) - return 'Release\n\n' + '\n\n'.join(f"## {info.package.name}/v{info.version}\n\n{info.content}" - for info in tag_infos) + return "Release\n\n" + "\n\n".join( + f"## {info.package.name}/v{info.version}\n\n{info.content}" for info in tag_infos + ) def push_changes(tag_infos: List[TagInfo]) -> None: @@ -404,25 +398,24 @@ def reset_repository(hash: 
Optional[str] = None) -> None: :param hash: The commit hash to reset to. If None, it resets to HEAD. """ # Fetch the latest changes from the remote repository - subprocess.run(['git', 'fetch']) + subprocess.run(["git", "fetch"]) # Determine the commit hash (default to origin/main if none is provided) - commit_hash = hash or 'origin/main' + commit_hash = hash or "origin/main" # Reset in memory changed files and the commit hash gh.reset(hash) # Construct the Git reset command - command = ['git', 'reset', '--hard', commit_hash] + command = ["git", "reset", "--hard", commit_hash] # Execute the git reset command subprocess.run(command, check=True) -def retry_function(func: Callable[[], List[TagInfo]], - cleanup: Callable[[], None], - max_attempts: int = 5, - delay: int = 5) -> List[TagInfo]: +def retry_function( + func: Callable[[], List[TagInfo]], cleanup: Callable[[], None], max_attempts: int = 5, delay: int = 5 +) -> List[TagInfo]: """ Calls a function call up to `max_attempts` times if an exception occurs. @@ -451,9 +444,7 @@ def update_changelogs(packages: List[Package]) -> List[TagInfo]: """ Updates changelogs and pushes the commits. """ - tag_infos = [ - info for info in (process_package(package) for package in packages) if info is not None - ] + tag_infos = [info for info in (process_package(package) for package in packages) if info is not None] # If any package was changed, push the changes. if tag_infos: push_changes(tag_infos) @@ -479,12 +470,12 @@ def run_command(command: List[str]) -> str: def pull_last_release_commit() -> None: """ - Reset the repository to the last release. + Reset the repository to the last release. Uses commit for last change to .release_metadata.json, since it's only updated on releases. """ commit_hash = subprocess.check_output( - ['git', 'log', '-n', '1', '--format=%H', '--', '.release_metadata.json'], - text=True).strip() + ["git", "log", "-n", "1", "--format=%H", "--", ".release_metadata.json"], text=True + ).strip() # If no commit is found, raise an exception if not commit_hash: @@ -499,15 +490,15 @@ def get_package_from_args() -> Optional[str]: Retrieves an optional package python3 ./tagging.py --package """ - parser = argparse.ArgumentParser(description='Update changelogs and tag the release.') - parser.add_argument('--package', '-p', type=str, help='Tag a single package') + parser = argparse.ArgumentParser(description="Update changelogs and tag the release.") + parser.add_argument("--package", "-p", type=str, help="Tag a single package") args = parser.parse_args() return args.package def init_github(): - token = os.environ['GITHUB_TOKEN'] - repo_name = os.environ['GITHUB_REPOSITORY'] + token = os.environ["GITHUB_TOKEN"] + repo_name = os.environ["GITHUB_REPOSITORY"] g = Github(token) repo = g.get_repo(repo_name) global gh @@ -536,8 +527,7 @@ def process(): # Therefore, we don't support specifying the package until the previously started process has been successfully completed. if pending_tags and package_name: pending_packages = [tag.package.name for tag in pending_tags] - raise Exception( - f"Cannot release package {package_name}. Pending release for {pending_packages}") + raise Exception(f"Cannot release package {package_name}. 
Pending release for {pending_packages}")
 
     if pending_tags:
         print("Found pending tags from previous executions, entering recovery mode.")
@@ -550,8 +540,7 @@ def process():
 
     if package_name:
         packages = [package for package in packages if package.name == package_name]
 
-    pending_tags = retry_function(
-        func=lambda: update_changelogs(packages), cleanup=reset_repository)
+    pending_tags = retry_function(func=lambda: update_changelogs(packages), cleanup=reset_repository)
 
     push_tags(pending_tags)
 
@@ -559,8 +548,7 @@ def validate_git_root():
     """
     Validate that the script is run from the root of the repository.
     """
-    repo_root = subprocess.check_output(["git", "rev-parse",
-                                         "--show-toplevel"]).strip().decode("utf-8")
+    repo_root = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).strip().decode("utf-8")
     current_dir = subprocess.check_output(["pwd"]).strip().decode("utf-8")
     if repo_root != current_dir:
         raise Exception("Please run this script from the root of the repository.")

From 13104aaa66ea027ee1152fa0271b576c24cdab65 Mon Sep 17 00:00:00 2001
From: Anton Nekipelov <226657+anton-107@users.noreply.github.com>
Date: Wed, 15 Oct 2025 15:45:42 +0200
Subject: [PATCH 2/7] update NEXT_CHANGELOG.md

---
 NEXT_CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md
index fbef54be25..c86294f73e 100644
--- a/NEXT_CHANGELOG.md
+++ b/NEXT_CHANGELOG.md
@@ -9,6 +9,7 @@
 * Add the `--configure-serverless` flag to `databricks auth login` to configure Databricks Connect to use serverless.
 
 ### Dependency updates
+* Upgrade Go SDK to 0.86.0 ([#3769](https://github.com/databricks/cli/pull/3769))
 
 ### Bundles
 * Updated the internal lakeflow-pipelines template to use an "src" layout ([#3671](https://github.com/databricks/cli/pull/3671)).

From b45fdec7f0a72a792a3b08f7851cf6679fbbd667 Mon Sep 17 00:00:00 2001
From: Anton Nekipelov <226657+anton-107@users.noreply.github.com>
Date: Wed, 15 Oct 2025 15:56:39 +0200
Subject: [PATCH 3/7] fix lint issues

---
 bundle/direct/dresources/registered_model.go | 22 ++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/bundle/direct/dresources/registered_model.go b/bundle/direct/dresources/registered_model.go
index cb171a9298..b040488748 100644
--- a/bundle/direct/dresources/registered_model.go
+++ b/bundle/direct/dresources/registered_model.go
@@ -31,6 +31,16 @@ func (*ResourceRegisteredModel) RemapState(model *catalog.RegisteredModelInfo) *
 		SchemaName:      model.SchemaName,
 		StorageLocation: model.StorageLocation,
 		ForceSendFields: filterFields[catalog.CreateRegisteredModelRequest](model.ForceSendFields),
+
+		Aliases:     model.Aliases,
+		BrowseOnly:  model.BrowseOnly,
+		CreatedAt:   model.CreatedAt,
+		CreatedBy:   model.CreatedBy,
+		FullName:    model.FullName,
+		MetastoreId: model.MetastoreId,
+		Owner:       model.Owner,
+		UpdatedAt:   model.UpdatedAt,
+		UpdatedBy:   model.UpdatedBy,
 	}
 }
 
@@ -64,6 +74,18 @@ func (r *ResourceRegisteredModel) DoUpdate(ctx context.Context, id string, confi
 		// Name updates are not supported yet without recreating. Can be added as a follow-up.
 		// Note: TF also does not support changing name without a recreate so the current behavior matches TF.
NewName: "", + + Aliases: config.Aliases, + BrowseOnly: config.BrowseOnly, + CreatedAt: config.CreatedAt, + CreatedBy: config.CreatedBy, + MetastoreId: config.MetastoreId, + UpdatedAt: config.UpdatedAt, + UpdatedBy: config.UpdatedBy, + SchemaName: config.SchemaName, + StorageLocation: config.StorageLocation, + Name: config.Name, + CatalogName: config.CatalogName, } response, err := r.client.RegisteredModels.Update(ctx, updateRequest) From 621ab495bc90f3abc63215dcc1e2834af2caaff4 Mon Sep 17 00:00:00 2001 From: Anton Nekipelov <226657+anton-107@users.noreply.github.com> Date: Wed, 15 Oct 2025 16:09:01 +0200 Subject: [PATCH 4/7] fix test bundle/deployment/bind/registered-model --- acceptance/bundle/deployment/bind/registered-model/script | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/acceptance/bundle/deployment/bind/registered-model/script b/acceptance/bundle/deployment/bind/registered-model/script index eb90a73898..dbd388851c 100644 --- a/acceptance/bundle/deployment/bind/registered-model/script +++ b/acceptance/bundle/deployment/bind/registered-model/script @@ -13,7 +13,8 @@ cat databricks.yml trace $CLI schemas create ${SCHEMA_NAME} ${CATALOG_NAME} | jq '{full_name, catalog_name}' # Create a pre-defined registered model: -MODEL_FULL_NAME=$($CLI registered-models create "${CATALOG_NAME}" "${SCHEMA_NAME}" "${MODEL_NAME}" | jq -r '.full_name') +REGISTERED_MODEL_DEF="{\"name\": \"${MODEL_NAME}\", \"catalog_name\": \"${CATALOG_NAME}\", \"schema_name\": \"${SCHEMA_NAME}\"}" +MODEL_FULL_NAME=$($CLI registered-models create --json "${REGISTERED_MODEL_DEF}" | jq -r '.full_name') cleanup() { trace $CLI registered-models delete "${MODEL_FULL_NAME}" From 08f80cdf28fdfb07f99e6d60898049dbf786bc3a Mon Sep 17 00:00:00 2001 From: Anton Nekipelov <226657+anton-107@users.noreply.github.com> Date: Wed, 15 Oct 2025 16:13:47 +0200 Subject: [PATCH 5/7] make test-update --- acceptance/bundle/refschema/out.fields.txt | 47 ++++++++++++------- .../run_as/allowed/regular_user/output.txt | 16 ------- .../allowed/service_principal/output.txt | 16 ------- .../bundle/run_as/pipelines_legacy/output.txt | 8 ---- .../empty_resources/empty_dict/output.txt | 12 ----- .../empty_resources/with_grants/output.txt | 12 ----- .../with_permissions/output.txt | 12 ----- .../cmd/account/account-help/output.txt | 3 -- acceptance/cmd/workspace/apps/output.txt | 9 ++-- .../update-database-instance/output.txt | 1 + 10 files changed, 37 insertions(+), 99 deletions(-) diff --git a/acceptance/bundle/refschema/out.fields.txt b/acceptance/bundle/refschema/out.fields.txt index f2a85bdd9f..c8c88c8271 100644 --- a/acceptance/bundle/refschema/out.fields.txt +++ b/acceptance/bundle/refschema/out.fields.txt @@ -71,6 +71,7 @@ resources.apps.*.app_status *apps.ApplicationStatus ALL resources.apps.*.app_status.message string ALL resources.apps.*.app_status.state apps.ApplicationState ALL resources.apps.*.budget_policy_id string ALL +resources.apps.*.compute_size apps.ComputeSize ALL resources.apps.*.compute_status *apps.ComputeStatus ALL resources.apps.*.compute_status.message string ALL resources.apps.*.compute_status.state apps.ComputeState ALL @@ -113,6 +114,10 @@ resources.apps.*.resources[*].database.database_name string ALL resources.apps.*.resources[*].database.instance_name string ALL resources.apps.*.resources[*].database.permission apps.AppResourceDatabaseDatabasePermission ALL resources.apps.*.resources[*].description string ALL +resources.apps.*.resources[*].genie_space *apps.AppResourceGenieSpace ALL 
+resources.apps.*.resources[*].genie_space.name string ALL +resources.apps.*.resources[*].genie_space.permission apps.AppResourceGenieSpaceGenieSpacePermission ALL +resources.apps.*.resources[*].genie_space.space_id string ALL resources.apps.*.resources[*].job *apps.AppResourceJob ALL resources.apps.*.resources[*].job.id string ALL resources.apps.*.resources[*].job.permission apps.AppResourceJobJobPermission ALL @@ -416,12 +421,21 @@ resources.database_instances.*.child_instance_refs[*].name string ALL resources.database_instances.*.child_instance_refs[*].uid string ALL resources.database_instances.*.creation_time string ALL resources.database_instances.*.creator string ALL +resources.database_instances.*.custom_tags []database.CustomTag ALL +resources.database_instances.*.custom_tags[*] database.CustomTag ALL +resources.database_instances.*.custom_tags[*].key string ALL +resources.database_instances.*.custom_tags[*].value string ALL resources.database_instances.*.effective_capacity string ALL +resources.database_instances.*.effective_custom_tags []database.CustomTag ALL +resources.database_instances.*.effective_custom_tags[*] database.CustomTag ALL +resources.database_instances.*.effective_custom_tags[*].key string ALL +resources.database_instances.*.effective_custom_tags[*].value string ALL resources.database_instances.*.effective_enable_pg_native_login bool ALL resources.database_instances.*.effective_enable_readable_secondaries bool ALL resources.database_instances.*.effective_node_count int ALL resources.database_instances.*.effective_retention_window_in_days int ALL resources.database_instances.*.effective_stopped bool ALL +resources.database_instances.*.effective_usage_policy_id string ALL resources.database_instances.*.enable_pg_native_login bool ALL resources.database_instances.*.enable_readable_secondaries bool ALL resources.database_instances.*.id string INPUT @@ -450,6 +464,7 @@ resources.database_instances.*.state database.DatabaseInstanceState ALL resources.database_instances.*.stopped bool ALL resources.database_instances.*.uid string ALL resources.database_instances.*.url string INPUT +resources.database_instances.*.usage_policy_id string ALL resources.experiments.*.artifact_location string ALL resources.experiments.*.creation_time int64 REMOTE resources.experiments.*.experiment_id string REMOTE @@ -504,8 +519,6 @@ resources.jobs.*.environments[*].spec.client string INPUT STATE resources.jobs.*.environments[*].spec.dependencies []string INPUT STATE resources.jobs.*.environments[*].spec.dependencies[*] string INPUT STATE resources.jobs.*.environments[*].spec.environment_version string INPUT STATE -resources.jobs.*.environments[*].spec.jar_dependencies []string INPUT STATE -resources.jobs.*.environments[*].spec.jar_dependencies[*] string INPUT STATE resources.jobs.*.environments[*].spec.java_dependencies []string INPUT STATE resources.jobs.*.environments[*].spec.java_dependencies[*] string INPUT STATE resources.jobs.*.format jobs.Format INPUT STATE @@ -695,8 +708,6 @@ resources.jobs.*.settings.environments[*].spec.client string REMOTE resources.jobs.*.settings.environments[*].spec.dependencies []string REMOTE resources.jobs.*.settings.environments[*].spec.dependencies[*] string REMOTE resources.jobs.*.settings.environments[*].spec.environment_version string REMOTE -resources.jobs.*.settings.environments[*].spec.jar_dependencies []string REMOTE -resources.jobs.*.settings.environments[*].spec.jar_dependencies[*] string REMOTE 
resources.jobs.*.settings.environments[*].spec.java_dependencies []string REMOTE resources.jobs.*.settings.environments[*].spec.java_dependencies[*] string REMOTE resources.jobs.*.settings.format jobs.Format REMOTE @@ -2827,16 +2838,20 @@ resources.pipelines.*.trigger.cron.quartz_cron_schedule string INPUT STATE resources.pipelines.*.trigger.cron.timezone_id string INPUT STATE resources.pipelines.*.trigger.manual *pipelines.ManualTrigger INPUT STATE resources.pipelines.*.url string INPUT -resources.registered_models.*.aliases []catalog.RegisteredModelAlias REMOTE -resources.registered_models.*.aliases[*] catalog.RegisteredModelAlias REMOTE -resources.registered_models.*.aliases[*].alias_name string REMOTE -resources.registered_models.*.aliases[*].version_num int REMOTE -resources.registered_models.*.browse_only bool REMOTE +resources.registered_models.*.aliases []catalog.RegisteredModelAlias ALL +resources.registered_models.*.aliases[*] catalog.RegisteredModelAlias ALL +resources.registered_models.*.aliases[*].alias_name string ALL +resources.registered_models.*.aliases[*].catalog_name string ALL +resources.registered_models.*.aliases[*].id string ALL +resources.registered_models.*.aliases[*].model_name string ALL +resources.registered_models.*.aliases[*].schema_name string ALL +resources.registered_models.*.aliases[*].version_num int ALL +resources.registered_models.*.browse_only bool ALL resources.registered_models.*.catalog_name string ALL resources.registered_models.*.comment string ALL -resources.registered_models.*.created_at int64 REMOTE -resources.registered_models.*.created_by string REMOTE -resources.registered_models.*.full_name string REMOTE +resources.registered_models.*.created_at int64 ALL +resources.registered_models.*.created_by string ALL +resources.registered_models.*.full_name string ALL resources.registered_models.*.grants []resources.Grant INPUT resources.registered_models.*.grants[*] resources.Grant INPUT resources.registered_models.*.grants[*].principal string INPUT @@ -2845,14 +2860,14 @@ resources.registered_models.*.grants[*].privileges[*] string INPUT resources.registered_models.*.id string INPUT resources.registered_models.*.lifecycle resources.Lifecycle INPUT resources.registered_models.*.lifecycle.prevent_destroy bool INPUT -resources.registered_models.*.metastore_id string REMOTE +resources.registered_models.*.metastore_id string ALL resources.registered_models.*.modified_status string INPUT resources.registered_models.*.name string ALL -resources.registered_models.*.owner string REMOTE +resources.registered_models.*.owner string ALL resources.registered_models.*.schema_name string ALL resources.registered_models.*.storage_location string ALL -resources.registered_models.*.updated_at int64 REMOTE -resources.registered_models.*.updated_by string REMOTE +resources.registered_models.*.updated_at int64 ALL +resources.registered_models.*.updated_by string ALL resources.registered_models.*.url string INPUT resources.schemas.*.browse_only bool REMOTE resources.schemas.*.catalog_name string ALL diff --git a/acceptance/bundle/run_as/allowed/regular_user/output.txt b/acceptance/bundle/run_as/allowed/regular_user/output.txt index f220bdd80a..a326e67081 100644 --- a/acceptance/bundle/run_as/allowed/regular_user/output.txt +++ b/acceptance/bundle/run_as/allowed/regular_user/output.txt @@ -1,13 +1,5 @@ >>> [CLI] bundle validate -o json -Warning: required field "catalog_name" is not set - at resources.registered_models.model_two - in databricks.yml:50:7 - -Warning: 
required field "schema_name" is not set - at resources.registered_models.model_two - in databricks.yml:50:7 - >>> jq .run_as { @@ -46,14 +38,6 @@ Warning: required field "schema_name" is not set } >>> [CLI] bundle validate -o json -t development -Warning: required field "catalog_name" is not set - at resources.registered_models.model_two - in databricks.yml:50:7 - -Warning: required field "schema_name" is not set - at resources.registered_models.model_two - in databricks.yml:50:7 - >>> jq .run_as { diff --git a/acceptance/bundle/run_as/allowed/service_principal/output.txt b/acceptance/bundle/run_as/allowed/service_principal/output.txt index f220bdd80a..a326e67081 100644 --- a/acceptance/bundle/run_as/allowed/service_principal/output.txt +++ b/acceptance/bundle/run_as/allowed/service_principal/output.txt @@ -1,13 +1,5 @@ >>> [CLI] bundle validate -o json -Warning: required field "catalog_name" is not set - at resources.registered_models.model_two - in databricks.yml:50:7 - -Warning: required field "schema_name" is not set - at resources.registered_models.model_two - in databricks.yml:50:7 - >>> jq .run_as { @@ -46,14 +38,6 @@ Warning: required field "schema_name" is not set } >>> [CLI] bundle validate -o json -t development -Warning: required field "catalog_name" is not set - at resources.registered_models.model_two - in databricks.yml:50:7 - -Warning: required field "schema_name" is not set - at resources.registered_models.model_two - in databricks.yml:50:7 - >>> jq .run_as { diff --git a/acceptance/bundle/run_as/pipelines_legacy/output.txt b/acceptance/bundle/run_as/pipelines_legacy/output.txt index 966e0f3594..3984f8260f 100644 --- a/acceptance/bundle/run_as/pipelines_legacy/output.txt +++ b/acceptance/bundle/run_as/pipelines_legacy/output.txt @@ -4,14 +4,6 @@ Warning: You are using the legacy mode of run_as. 
The support for this mode is e at experimental.use_legacy_run_as in databricks.yml:8:22 -Warning: required field "catalog_name" is not set - at resources.registered_models.model_two - in databricks.yml:59:7 - -Warning: required field "schema_name" is not set - at resources.registered_models.model_two - in databricks.yml:59:7 - >>> jq .run_as { diff --git a/acceptance/bundle/validate/empty_resources/empty_dict/output.txt b/acceptance/bundle/validate/empty_resources/empty_dict/output.txt index d73aff0d1b..7a7f784785 100644 --- a/acceptance/bundle/validate/empty_resources/empty_dict/output.txt +++ b/acceptance/bundle/validate/empty_resources/empty_dict/output.txt @@ -53,18 +53,6 @@ Warning: required field "name" is not set } === resources.registered_models.rname === -Warning: required field "catalog_name" is not set - at resources.registered_models.rname - in databricks.yml:6:12 - -Warning: required field "name" is not set - at resources.registered_models.rname - in databricks.yml:6:12 - -Warning: required field "schema_name" is not set - at resources.registered_models.rname - in databricks.yml:6:12 - { "registered_models": { "rname": {} diff --git a/acceptance/bundle/validate/empty_resources/with_grants/output.txt b/acceptance/bundle/validate/empty_resources/with_grants/output.txt index 09cef7a319..faddbc13dc 100644 --- a/acceptance/bundle/validate/empty_resources/with_grants/output.txt +++ b/acceptance/bundle/validate/empty_resources/with_grants/output.txt @@ -69,18 +69,6 @@ Warning: unknown field: grants } === resources.registered_models.rname === -Warning: required field "catalog_name" is not set - at resources.registered_models.rname - in databricks.yml:7:7 - -Warning: required field "name" is not set - at resources.registered_models.rname - in databricks.yml:7:7 - -Warning: required field "schema_name" is not set - at resources.registered_models.rname - in databricks.yml:7:7 - { "registered_models": { "rname": { diff --git a/acceptance/bundle/validate/empty_resources/with_permissions/output.txt b/acceptance/bundle/validate/empty_resources/with_permissions/output.txt index 990fcea778..75f3816a17 100644 --- a/acceptance/bundle/validate/empty_resources/with_permissions/output.txt +++ b/acceptance/bundle/validate/empty_resources/with_permissions/output.txt @@ -57,18 +57,6 @@ Warning: unknown field: permissions at resources.registered_models.rname in databricks.yml:7:7 -Warning: required field "catalog_name" is not set - at resources.registered_models.rname - in databricks.yml:7:7 - -Warning: required field "name" is not set - at resources.registered_models.rname - in databricks.yml:7:7 - -Warning: required field "schema_name" is not set - at resources.registered_models.rname - in databricks.yml:7:7 - { "registered_models": { "rname": {} diff --git a/acceptance/cmd/account/account-help/output.txt b/acceptance/cmd/account/account-help/output.txt index 007525f3e9..87378fab32 100644 --- a/acceptance/cmd/account/account-help/output.txt +++ b/acceptance/cmd/account/account-help/output.txt @@ -7,11 +7,8 @@ Usage: Identity and Access Management access-control These APIs manage access rules on resources in an account. - groups Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. groups-v2 Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. 
- service-principals Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. service-principals-v2 Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. - users User identities recognized by Databricks and represented by email addresses. users-v2 User identities recognized by Databricks and represented by email addresses. workspace-assignment The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. diff --git a/acceptance/cmd/workspace/apps/output.txt b/acceptance/cmd/workspace/apps/output.txt index 4e17741346..775a200604 100644 --- a/acceptance/cmd/workspace/apps/output.txt +++ b/acceptance/cmd/workspace/apps/output.txt @@ -63,10 +63,11 @@ Usage: databricks apps update NAME [flags] Flags: - --budget-policy-id string - --description string The description of the app. - -h, --help help for update - --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --budget-policy-id string + --compute-size ComputeSize Supported values: [LARGE, LIQUID, MEDIUM] + --description string The description of the app. + -h, --help help for update + --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) Global Flags: --debug enable debug logging diff --git a/acceptance/cmd/workspace/database/update-database-instance/output.txt b/acceptance/cmd/workspace/database/update-database-instance/output.txt index 9be04cd49d..c634d3fffe 100644 --- a/acceptance/cmd/workspace/database/update-database-instance/output.txt +++ b/acceptance/cmd/workspace/database/update-database-instance/output.txt @@ -14,6 +14,7 @@ Flags: --node-count int The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. --retention-window-in-days int The retention window for the instance. --stopped Whether to stop the instance. + --usage-policy-id string The desired usage policy to associate with the instance. 
Global Flags: --debug enable debug logging From e15ac529238083bbc89c1bf9a398bf916f18788b Mon Sep 17 00:00:00 2001 From: Anton Nekipelov <226657+anton-107@users.noreply.github.com> Date: Wed, 15 Oct 2025 16:25:28 +0200 Subject: [PATCH 6/7] fix dresources/all_test.go --- bundle/direct/dresources/all_test.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/bundle/direct/dresources/all_test.go b/bundle/direct/dresources/all_test.go index 78b0729569..f89ec54069 100644 --- a/bundle/direct/dresources/all_test.go +++ b/bundle/direct/dresources/all_test.go @@ -184,7 +184,17 @@ func testCRUD(t *testing.T, group string, adapter *Adapter, client *databricks.W if remoteStateFromUpdate != nil { remappedStateFromUpdate, err := adapter.RemapState(remoteStateFromUpdate) require.NoError(t, err) - require.Equal(t, remappedState, remappedStateFromUpdate) + changes, err := structdiff.GetStructDiff(remappedState, remappedStateFromUpdate) + require.NoError(t, err) + // Filter out timestamp fields that are expected to differ in value + var relevantChanges []structdiff.Change + for _, change := range changes { + fieldName := change.Path.String() + if fieldName != "updated_at" { + relevantChanges = append(relevantChanges, change) + } + } + require.Empty(t, relevantChanges, "unexpected differences found: %v", relevantChanges) } remoteStateFromWaitUpdate, err := adapter.WaitAfterUpdate(ctx, newState) From 088ab0506068b236540442f835e5e00bf2d68c1e Mon Sep 17 00:00:00 2001 From: Anton Nekipelov <226657+anton-107@users.noreply.github.com> Date: Wed, 15 Oct 2025 16:27:04 +0200 Subject: [PATCH 7/7] fix TestNoDuplicatedAnnotations --- bundle/internal/schema/annotations_openapi_overrides.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index 51ed3d0db4..72d50bf2ab 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -594,10 +594,6 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehousePermissionLevel: CAN_MONITOR - |- CAN_VIEW -github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: - "lifecycle": - "description": |- - PLACEHOLDER github.com/databricks/cli/bundle/config/resources.Volume: "_": "markdown_description": |-
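Note (illustration only, not part of the series): PATCH 4/7 switches the acceptance script from positional arguments to a --json request body for registered-models create; the change suggests the regenerated command now takes its required fields through the request body rather than positionally. The CLI wraps the Go SDK, so a rough equivalent of the call the script now makes is sketched below. The workspace-client setup and catalog.CreateRegisteredModelRequest are assumptions of this sketch; the field names (name, catalog_name, schema_name) come from the JSON payload in the script itself.

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Same payload the script now passes via --json:
	// {"name": ..., "catalog_name": ..., "schema_name": ...}
	model, err := w.RegisteredModels.Create(ctx, catalog.CreateRegisteredModelRequest{
		Name:        "my_model",
		CatalogName: "main",
		SchemaName:  "default",
	})
	if err != nil {
		panic(err)
	}

	// The script captures this via jq -r '.full_name'.
	fmt.Println(model.FullName)
}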
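Note (illustration only, not part of the series): PATCH 6/7 replaces a strict require.Equal between the remapped states with a structdiff comparison that tolerates expected timestamp drift between the update call and the read-back. The standalone sketch below exercises the same filter. The import path for structdiff and the exact rendering of Change.Path.String() ("updated_at" with no leading separator, as the test's own filter implies) are assumptions here, and modelState is a hypothetical stand-in for a remapped resource state.

package main

import (
	"fmt"

	"github.com/databricks/cli/libs/structdiff"
)

type modelState struct {
	Name      string `json:"name"`
	UpdatedAt int64  `json:"updated_at"`
}

func main() {
	// Two snapshots of the same resource taken moments apart: only the
	// server-maintained timestamp differs.
	before := modelState{Name: "main.default.model", UpdatedAt: 1700000000}
	after := modelState{Name: "main.default.model", UpdatedAt: 1700000400}

	changes, err := structdiff.GetStructDiff(before, after)
	if err != nil {
		panic(err)
	}

	// Keep only differences that are not expected timestamp drift,
	// mirroring the filter in PATCH 6/7.
	var relevant []structdiff.Change
	for _, change := range changes {
		if change.Path.String() != "updated_at" {
			relevant = append(relevant, change)
		}
	}

	fmt.Printf("relevant changes: %d\n", len(relevant)) // expect 0
}

Filtering the diff rather than zeroing the field keeps the failure message useful: any change that survives the filter is reported verbatim by require.Empty in the real test.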