From f2e5648eb5b0bdbb0314c3db3c4d9654e7b565be Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 7 Aug 2025 12:11:23 +0200 Subject: [PATCH 1/5] Update Go SDK to 0.79.0 --- .codegen/_openapi_sha | 2 +- .gitattributes | 4 +- Makefile | 2 +- .../internal/schema/annotations_openapi.yml | 196 ++++++---- .../schema/annotations_openapi_overrides.yml | 13 +- .../validation/generated/enum_fields.go | 8 +- bundle/schema/jsonschema.json | 138 +++++-- .../disable-legacy-features.go | 5 +- cmd/account/private-access/private-access.go | 8 +- cmd/auth/in_memory_test.go | 2 +- .../agent-bricks.go} | 48 +-- cmd/workspace/apps/apps.go | 4 +- .../clean-room-asset-revisions.go | 175 +++++++++ .../clean-room-assets/clean-room-assets.go | 120 +++++- .../clean-room-auto-approval-rules.go | 362 ++++++++++++++++++ cmd/workspace/clean-rooms/clean-rooms.go | 26 +- cmd/workspace/cmd.go | 8 +- cmd/workspace/connections/connections.go | 2 + .../consumer-providers/consumer-providers.go | 6 +- cmd/workspace/dashboards/dashboards.go | 93 ----- cmd/workspace/database/database.go | 27 +- .../disable-legacy-dbfs.go | 5 +- .../external-lineage/external-lineage.go | 4 +- .../external-metadata/external-metadata.go | 4 +- .../quality-monitors/quality-monitors.go | 92 +++-- .../serving-endpoints/serving-endpoints.go | 7 +- .../databricks/bundles/pipelines/__init__.py | 8 + .../pipelines/_models/ingestion_config.py | 5 +- ...fic_config_query_based_connector_config.py | 120 ++++++ .../_models/ingestion_source_type.py | 6 + .../_models/table_specific_config.py | 22 ++ go.mod | 2 +- go.sum | 4 +- 33 files changed, 1218 insertions(+), 310 deletions(-) rename cmd/workspace/{ai-builder/ai-builder.go => agent-bricks/agent-bricks.go} (89%) create mode 100755 cmd/workspace/clean-room-asset-revisions/clean-room-asset-revisions.go create mode 100755 cmd/workspace/clean-room-auto-approval-rules/clean-room-auto-approval-rules.go create mode 100644 experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_table_specific_config_query_based_connector_config.py diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 75afa4ea6d..bdabb99911 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -90fefb5618fdecf7dfdf6be7d56c2213d24ad944 \ No newline at end of file +ff038204ea0f04d1ee43bc4a0221cb6c0a9be5d8 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 598b968e34..61d27416d1 100755 --- a/.gitattributes +++ b/.gitattributes @@ -38,7 +38,7 @@ cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true cmd/account/workspace-network-configuration/workspace-network-configuration.go linguist-generated=true cmd/account/workspaces/workspaces.go linguist-generated=true cmd/workspace/access-control/access-control.go linguist-generated=true -cmd/workspace/ai-builder/ai-builder.go linguist-generated=true +cmd/workspace/agent-bricks/agent-bricks.go linguist-generated=true cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go linguist-generated=true cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go linguist-generated=true cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true @@ -48,7 +48,9 @@ cmd/workspace/apps/apps.go linguist-generated=true cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true cmd/workspace/catalogs/catalogs.go 
linguist-generated=true +cmd/workspace/clean-room-asset-revisions/clean-room-asset-revisions.go linguist-generated=true cmd/workspace/clean-room-assets/clean-room-assets.go linguist-generated=true +cmd/workspace/clean-room-auto-approval-rules/clean-room-auto-approval-rules.go linguist-generated=true cmd/workspace/clean-room-task-runs/clean-room-task-runs.go linguist-generated=true cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true diff --git a/Makefile b/Makefile index 9e0949b055..7037157a80 100644 --- a/Makefile +++ b/Makefile @@ -117,7 +117,7 @@ GENKIT_BINARY := $(UNIVERSE_DIR)/bazel-bin/openapi/genkit/genkit_/genkit generate: @echo "Checking out universe at SHA: $$(cat .codegen/_openapi_sha)" - cd $(UNIVERSE_DIR) && git checkout $$(cat $(PWD)/.codegen/_openapi_sha) + cd $(UNIVERSE_DIR) && git fetch origin master && git checkout $$(cat $(PWD)/.codegen/_openapi_sha) @echo "Building genkit..." cd $(UNIVERSE_DIR) && bazel build //openapi/genkit @echo "Generating CLI code..." diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index 6729b875df..be0ec30923 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -5,7 +5,10 @@ github.com/databricks/cli/bundle/config/resources.App: The active deployment of the app. A deployment is considered active when it has been deployed to the app compute. "app_status": {} - "budget_policy_id": {} + "budget_policy_id": + "description": |- + TODO: Deprecate this field after serverless entitlements are released to all prod stages + and the new usage_policy_id is properly populated and used. "compute_status": {} "create_time": "description": |- @@ -20,7 +23,10 @@ github.com/databricks/cli/bundle/config/resources.App: "description": "description": |- The description of the app. - "effective_budget_policy_id": {} + "effective_budget_policy_id": + "description": |- + TODO: Deprecate this field after serverless entitlements are released to all prod stages + and the new usage_policy_id is properly populated and used. "effective_user_api_scopes": "description": |- The effective api scopes granted to the user access token. @@ -392,6 +398,9 @@ github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint: "description": |- The core config of the serving endpoint. "description": {} + "email_notifications": + "description": |- + Email notification settings. "name": "description": |- The name of the serving endpoint. This field is required and must be unique across a Databricks workspace. @@ -517,41 +526,44 @@ github.com/databricks/cli/bundle/config/resources.Pipeline: github.com/databricks/cli/bundle/config/resources.QualityMonitor: "assets_dir": "description": |- - The directory to store monitoring assets (e.g. dashboard, metric tables). + [Create:REQ Update:IGN] Field for specifying the absolute path to a custom directory to store data-monitoring + assets. Normally prepopulated to a default user location via UI and Python APIs. "baseline_table_name": - "description": | - Name of the baseline table from which drift metrics are computed from. - Columns in the monitored table should also be present in the baseline table. + "description": |- + [Create:OPT Update:OPT] Baseline table name. + Baseline data is used to compute drift from the data in the monitored `table_name`. + The baseline table and the monitored table shall have the same schema. 
"custom_metrics": - "description": | - Custom metrics to compute on the monitored table. These can be aggregate metrics, derived - metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time - windows). + "description": |- + [Create:OPT Update:OPT] Custom metrics. "data_classification_config": "description": |- - The data classification config for the monitor. + [Create:OPT Update:OPT] Data classification related config. "x-databricks-preview": |- PRIVATE - "inference_log": + "inference_log": {} + "latest_monitor_failure_msg": "description": |- - Configuration for monitoring inference logs. + [Create:ERR Update:IGN] The latest error message for a monitor failure. "notifications": "description": |- - The notification settings for the monitor. + [Create:OPT Update:OPT] Field for specifying notification settings. "output_schema_name": "description": |- - Schema where output metric tables are created. + [Create:REQ Update:REQ] Schema where output tables are created. Needs to be in 2-level format {catalog}.{schema} "schedule": "description": |- - The schedule for automatically updating and refreshing metric tables. + [Create:OPT Update:OPT] The monitor schedule. "skip_builtin_dashboard": "description": |- Whether to skip creating a default dashboard summarizing data quality metrics. "slicing_exprs": - "description": | - List of column expressions to slice data with for targeted analysis. The data is grouped by + "description": |- + [Create:OPT Update:OPT] List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its - complements. For high-cardinality columns, only the top 100 unique values by frequency will + complements. For example `slicing_exprs=[“col_1”, “col_2 > 10”]` will generate the following + slices: two slices for `col_2 > 10` (True and False), and one slice per unique value in + `col1`. For high-cardinality columns, only the top 100 unique values by frequency will generate slices. "snapshot": "description": |- @@ -560,7 +572,7 @@ github.com/databricks/cli/bundle/config/resources.QualityMonitor: "description": |- Configuration for monitoring time series tables. "warehouse_id": - "description": | + "description": |- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. github.com/databricks/cli/bundle/config/resources.RegisteredModel: @@ -914,76 +926,72 @@ github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedule: "description": |- Read only field that indicates whether a schedule is paused or not. "quartz_cron_expression": - "description": | + "description": |- The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html). "timezone_id": - "description": | - The timezone id (e.g., ``"PST"``) in which to evaluate the quartz expression. + "description": |- + The timezone id (e.g., ``PST``) in which to evaluate the quartz expression. github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus: "_": "description": |- - Read only field that indicates whether a schedule is paused or not. + Source link: https://src.dev.databricks.com/databricks/universe/-/blob/elastic-spark-common/api/messages/schedule.proto + Monitoring workflow schedule pause status. 
"enum": + - |- + UNSPECIFIED - |- UNPAUSED - |- PAUSED github.com/databricks/databricks-sdk-go/service/catalog.MonitorDataClassificationConfig: "_": - "x-databricks-preview": |- - PRIVATE + "description": |- + Data classification related configuration. "enabled": "description": |- - Whether data classification is enabled. + Whether to enable data classification. github.com/databricks/databricks-sdk-go/service/catalog.MonitorDestination: "email_addresses": "description": |- The list of email addresses to send the notification to. A maximum of 5 email addresses is supported. github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLog: "granularities": - "description": | - Granularities for aggregating data into time windows based on their timestamp. Currently the following static - granularities are supported: - {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``" week(s)"``, ``"1 month"``, ``"1 year"``}. + "description": |- + List of granularities to use when aggregating data into time windows based on their timestamp. "label_col": "description": |- - Optional column that contains the ground truth for the prediction. + Column for the label. "model_id_col": - "description": | - Column that contains the id of the model generating the predictions. Metrics will be computed per model id by - default, and also across all model ids. + "description": |- + Column for the model identifier. "prediction_col": "description": |- - Column that contains the output/prediction from the model. + Column for the prediction. "prediction_proba_col": - "description": | - Optional column that contains the prediction probabilities for each class in a classification problem type. - The values in this column should be a map, mapping each class label to the prediction probability for a given - sample. The map should be of PySpark MapType(). + "description": |- + Column for prediction probabilities "problem_type": "description": |- - Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed. + Problem type the model aims to solve. "timestamp_col": - "description": | - Column that contains the timestamps of requests. The column must be one of the following: - - A ``TimestampType`` column - - A column whose values can be converted to timestamps through the pyspark - ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html). + "description": |- + Column for the timestamp. github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType: "_": - "description": |- - Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed. "enum": - |- PROBLEM_TYPE_CLASSIFICATION - |- PROBLEM_TYPE_REGRESSION github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetric: + "_": + "description": |- + Custom metric definition. "definition": "description": |- Jinja template for a SQL expression that specifies how to compute the metric. See [create metric definition](https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition). "input_columns": - "description": | + "description": |- A list of column names in the input table the metric should be computed for. Can use ``":table"`` to indicate that the metric needs information from multiple columns. 
"name": @@ -993,7 +1001,7 @@ github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetric: "description": |- The output type of the custom metric. "type": - "description": | + "description": |- Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. The ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"`` metrics are computed on a single table, whereas the ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across @@ -1003,10 +1011,10 @@ github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetric: - CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType: "_": - "description": | - Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. - The ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"`` metrics - are computed on a single table, whereas the ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across + "description": |- + Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``. + The ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics + are computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across baseline and input table, or across the two consecutive time windows. - CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics @@ -1021,25 +1029,28 @@ github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType: github.com/databricks/databricks-sdk-go/service/catalog.MonitorNotifications: "on_failure": "description": |- - Who to send notifications to on monitor failure. + Destinations to send notifications on failure/timeout. "on_new_classification_tag_detected": "description": |- - Who to send notifications to when new data classification tags are detected. + Destinations to send notifications on new classification tag detected. "x-databricks-preview": |- PRIVATE -github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot: {} +github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot: + "_": + "description": |- + Snapshot analysis configuration github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries: + "_": + "description": |- + Time series analysis configuration. "granularities": - "description": | + "description": |- Granularities for aggregating data into time windows based on their timestamp. Currently the following static granularities are supported: - {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``" week(s)"``, ``"1 month"``, ``"1 year"``}. + {``\"5 minutes\"``, ``\"30 minutes\"``, ``\"1 hour\"``, ``\"1 day\"``, ``\"\u003cn\u003e week(s)\"``, ``\"1 month\"``, ``\"1 year\"``}. "timestamp_col": - "description": | - Column that contains the timestamps of requests. The column must be one of the following: - - A ``TimestampType`` column - - A column whose values can be converted to timestamps through the pyspark - ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html). + "description": |- + Column for the timestamp. 
github.com/databricks/databricks-sdk-go/service/catalog.VolumeType: "_": "description": |- @@ -2953,6 +2964,43 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefin "table_configuration": "description": |- Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline. +? github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig +: "_": + "description": |- + Configurations that are only applicable for query-based ingestion connectors. + "cursor_columns": + "description": |- + The names of the monotonically increasing columns in the source table that are used to enable + the table to be read and ingested incrementally through structured streaming. + The columns are allowed to have repeated values but have to be non-decreasing. + If the source data is merged into the destination (e.g., using SCD Type 1 or Type 2), these + columns will implicitly define the `sequence_by` behavior. You can still explicitly set + `sequence_by` to override this default. + "x-databricks-preview": |- + PRIVATE + "deletion_condition": + "description": |- + Specifies a SQL WHERE condition that specifies that the source row has been deleted. + This is sometimes referred to as "soft-deletes". + For example: "Operation = 'DELETE'" or "is_deleted = true". + This field is orthogonal to `hard_deletion_sync_interval_in_seconds`, + one for soft-deletes and the other for hard-deletes. + See also the hard_deletion_sync_min_interval_in_seconds field for + handling of "hard deletes" where the source rows are physically removed from the table. + "x-databricks-preview": |- + PRIVATE + "hard_deletion_sync_min_interval_in_seconds": + "description": |- + Specifies the minimum interval (in seconds) between snapshots on primary keys + for detecting and synchronizing hard deletions—i.e., rows that have been + physically removed from the source table. + This interval acts as a lower bound. If ingestion runs less frequently than + this value, hard deletion synchronization will align with the actual ingestion + frequency instead of happening more often. + If not set, hard deletion synchronization via snapshots is disabled. + This field is mutable and can be updated without triggering a full snapshot. + "x-databricks-preview": |- + PRIVATE github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: "_": "enum": @@ -2960,6 +3008,10 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: MYSQL - |- POSTGRESQL + - |- + REDSHIFT + - |- + SQLDW - |- SQLSERVER - |- @@ -2986,6 +3038,8 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: DYNAMICS365 - |- CONFLUENCE + - |- + META_MARKETING github.com/databricks/databricks-sdk-go/service/pipelines.ManualTrigger: {} github.com/databricks/databricks-sdk-go/service/pipelines.NotebookLibrary: "path": @@ -3277,6 +3331,11 @@ github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig: "primary_keys": "description": |- The primary key of the table used to apply changes. + "query_based_connector_config": + "description": |- + Configurations that are only applicable for query-based ingestion connectors. + "x-databricks-preview": |- + PRIVATE "salesforce_include_formula_fields": "description": |- If true, formula fields defined in the table are included in the ingestion. 
This setting is only valid for the Salesforce connector @@ -3363,6 +3422,8 @@ github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBeh NONE - |- BLOCK + - |- + MASK github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrails: "input": "description": |- @@ -3575,6 +3636,13 @@ github.com/databricks/databricks-sdk-go/service/serving.DatabricksModelServingCo "description": |- The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model. +github.com/databricks/databricks-sdk-go/service/serving.EmailNotifications: + "on_update_failure": + "description": |- + A list of email addresses to be notified when an endpoint fails to update its configuration or state. + "on_update_success": + "description": |- + A list of email addresses to be notified when an endpoint successfully updates its configuration or state. github.com/databricks/databricks-sdk-go/service/serving.EndpointCoreConfigInput: "auto_capture_config": "description": |- diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index 6adcd46fb6..7eb7b637ce 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -386,6 +386,9 @@ github.com/databricks/cli/bundle/config/resources.QualityMonitor: quartz_cron_expression: 0 0 8 * * ? # Run Every day at 8am timezone_id: UTC ``` + "inference_log": + "description": |- + PLACEHOLDER "table_name": "description": |- PLACEHOLDER @@ -475,6 +478,11 @@ github.com/databricks/cli/bundle/config/resources.SecretScopePermissionLevel: - |- MANAGE github.com/databricks/cli/bundle/config/resources.SqlWarehouse: + "enable_photon": + "description": |- + Configures whether the warehouse should use Photon optimized clusters. + + Defaults to true. "permissions": "description": |- PLACEHOLDER @@ -484,11 +492,6 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: "warehouse_type": "description": |- PLACEHOLDER - "enable_photon": - "description": |- - Configures whether the warehouse should use Photon optimized clusters. - - Defaults to true. 
github.com/databricks/cli/bundle/config/resources.SqlWarehousePermissionLevel: "_": "enum": diff --git a/bundle/internal/validation/generated/enum_fields.go b/bundle/internal/validation/generated/enum_fields.go index 788b8ae96f..a809a32152 100644 --- a/bundle/internal/validation/generated/enum_fields.go +++ b/bundle/internal/validation/generated/enum_fields.go @@ -96,8 +96,8 @@ var EnumFields = map[string][]string{ "resources.jobs.*.trigger.table.condition": {"ALL_UPDATED", "ANY_UPDATED"}, "resources.jobs.*.trigger.table_update.condition": {"ALL_UPDATED", "ANY_UPDATED"}, - "resources.model_serving_endpoints.*.ai_gateway.guardrails.input.pii.behavior": {"BLOCK", "NONE"}, - "resources.model_serving_endpoints.*.ai_gateway.guardrails.output.pii.behavior": {"BLOCK", "NONE"}, + "resources.model_serving_endpoints.*.ai_gateway.guardrails.input.pii.behavior": {"BLOCK", "MASK", "NONE"}, + "resources.model_serving_endpoints.*.ai_gateway.guardrails.output.pii.behavior": {"BLOCK", "MASK", "NONE"}, "resources.model_serving_endpoints.*.ai_gateway.rate_limits[*].key": {"endpoint", "service_principal", "user", "user_group"}, "resources.model_serving_endpoints.*.ai_gateway.rate_limits[*].renewal_period": {"minute"}, "resources.model_serving_endpoints.*.config.served_entities[*].external_model.amazon_bedrock_config.bedrock_provider": {"ai21labs", "amazon", "anthropic", "cohere"}, @@ -116,13 +116,13 @@ var EnumFields = map[string][]string{ "resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.scd_type": {"APPEND_ONLY", "SCD_TYPE_1", "SCD_TYPE_2"}, "resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.scd_type": {"APPEND_ONLY", "SCD_TYPE_1", "SCD_TYPE_2"}, "resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.scd_type": {"APPEND_ONLY", "SCD_TYPE_1", "SCD_TYPE_2"}, - "resources.pipelines.*.ingestion_definition.source_type": {"BIGQUERY", "CONFLUENCE", "DYNAMICS365", "GA4_RAW_DATA", "MANAGED_POSTGRESQL", "MYSQL", "NETSUITE", "ORACLE", "POSTGRESQL", "SALESFORCE", "SERVICENOW", "SHAREPOINT", "SQLSERVER", "TERADATA", "WORKDAY_RAAS"}, + "resources.pipelines.*.ingestion_definition.source_type": {"BIGQUERY", "CONFLUENCE", "DYNAMICS365", "GA4_RAW_DATA", "MANAGED_POSTGRESQL", "META_MARKETING", "MYSQL", "NETSUITE", "ORACLE", "POSTGRESQL", "REDSHIFT", "SALESFORCE", "SERVICENOW", "SHAREPOINT", "SQLDW", "SQLSERVER", "TERADATA", "WORKDAY_RAAS"}, "resources.pipelines.*.ingestion_definition.table_configuration.scd_type": {"APPEND_ONLY", "SCD_TYPE_1", "SCD_TYPE_2"}, "resources.pipelines.*.restart_window.days_of_week[*]": {"FRIDAY", "MONDAY", "SATURDAY", "SUNDAY", "THURSDAY", "TUESDAY", "WEDNESDAY"}, "resources.quality_monitors.*.custom_metrics[*].type": {"CUSTOM_METRIC_TYPE_AGGREGATE", "CUSTOM_METRIC_TYPE_DERIVED", "CUSTOM_METRIC_TYPE_DRIFT"}, "resources.quality_monitors.*.inference_log.problem_type": {"PROBLEM_TYPE_CLASSIFICATION", "PROBLEM_TYPE_REGRESSION"}, - "resources.quality_monitors.*.schedule.pause_status": {"PAUSED", "UNPAUSED"}, + "resources.quality_monitors.*.schedule.pause_status": {"PAUSED", "UNPAUSED", "UNSPECIFIED"}, "resources.secret_scopes.*.backend_type": {"AZURE_KEYVAULT", "DATABRICKS"}, diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 592ffccbfd..2607f35549 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -873,6 +873,10 @@ "description": { "$ref": "#/$defs/string" }, + "email_notifications": { + "description": "Email notification settings.", + "$ref": 
"#/$defs/github.com/databricks/databricks-sdk-go/service/serving.EmailNotifications" + }, "name": { "description": "The name of the serving endpoint. This field is required and must be unique across a Databricks workspace.\nAn endpoint name can consist of alphanumeric characters, dashes, and underscores.", "$ref": "#/$defs/string" @@ -1176,37 +1180,40 @@ "type": "object", "properties": { "assets_dir": { - "description": "The directory to store monitoring assets (e.g. dashboard, metric tables).", + "description": "[Create:REQ Update:IGN] Field for specifying the absolute path to a custom directory to store data-monitoring\nassets. Normally prepopulated to a default user location via UI and Python APIs.", "$ref": "#/$defs/string" }, "baseline_table_name": { - "description": "Name of the baseline table from which drift metrics are computed from.\nColumns in the monitored table should also be present in the baseline table.\n", + "description": "[Create:OPT Update:OPT] Baseline table name.\nBaseline data is used to compute drift from the data in the monitored `table_name`.\nThe baseline table and the monitored table shall have the same schema.", "$ref": "#/$defs/string" }, "custom_metrics": { - "description": "Custom metrics to compute on the monitored table. These can be aggregate metrics, derived\nmetrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time\nwindows).\n", + "description": "[Create:OPT Update:OPT] Custom metrics.", "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetric" }, "data_classification_config": { - "description": "The data classification config for the monitor.", + "description": "[Create:OPT Update:OPT] Data classification related config.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorDataClassificationConfig", "x-databricks-preview": "PRIVATE", "doNotSuggest": true }, "inference_log": { - "description": "Configuration for monitoring inference logs.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLog" }, + "latest_monitor_failure_msg": { + "description": "[Create:ERR Update:IGN] The latest error message for a monitor failure.", + "$ref": "#/$defs/string" + }, "notifications": { - "description": "The notification settings for the monitor.", + "description": "[Create:OPT Update:OPT] Field for specifying notification settings.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorNotifications" }, "output_schema_name": { - "description": "Schema where output metric tables are created.", + "description": "[Create:REQ Update:REQ] Schema where output tables are created. Needs to be in 2-level format {catalog}.{schema}", "$ref": "#/$defs/string" }, "schedule": { - "description": "The schedule for automatically updating and refreshing metric tables.", + "description": "[Create:OPT Update:OPT] The monitor schedule.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedule" }, "skip_builtin_dashboard": { @@ -1214,7 +1221,7 @@ "$ref": "#/$defs/bool" }, "slicing_exprs": { - "description": "List of column expressions to slice data with for targeted analysis. The data is grouped by\neach expression independently, resulting in a separate slice for each predicate and its\ncomplements. 
For high-cardinality columns, only the top 100 unique values by frequency will\ngenerate slices.\n", + "description": "[Create:OPT Update:OPT] List of column expressions to slice data with for targeted analysis. The data is grouped by\neach expression independently, resulting in a separate slice for each predicate and its\ncomplements. For example `slicing_exprs=[“col_1”, “col_2 \u003e 10”]` will generate the following\nslices: two slices for `col_2 \u003e 10` (True and False), and one slice per unique value in\n`col1`. For high-cardinality columns, only the top 100 unique values by frequency will\ngenerate slices.", "$ref": "#/$defs/slice/string" }, "snapshot": { @@ -1229,7 +1236,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries" }, "warehouse_id": { - "description": "Optional argument to specify the warehouse for dashboard creation. If not specified, the first running\nwarehouse will be used.\n", + "description": "Optional argument to specify the warehouse for dashboard creation. If not specified, the first running\nwarehouse will be used.", "$ref": "#/$defs/string" } }, @@ -2802,11 +2809,11 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus" }, "quartz_cron_expression": { - "description": "The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html).\n", + "description": "The expression that determines when to run the monitor. See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html).", "$ref": "#/$defs/string" }, "timezone_id": { - "description": "The timezone id (e.g., ``\"PST\"``) in which to evaluate the quartz expression.\n", + "description": "The timezone id (e.g., ``PST``) in which to evaluate the quartz expression.", "$ref": "#/$defs/string" } }, @@ -2826,8 +2833,9 @@ "oneOf": [ { "type": "string", - "description": "Read only field that indicates whether a schedule is paused or not.", + "description": "Source link: https://src.dev.databricks.com/databricks/universe/-/blob/elastic-spark-common/api/messages/schedule.proto\nMonitoring workflow schedule pause status.", "enum": [ + "UNSPECIFIED", "UNPAUSED", "PAUSED" ] @@ -2842,15 +2850,14 @@ "oneOf": [ { "type": "object", + "description": "Data classification related configuration.", "properties": { "enabled": { - "description": "Whether data classification is enabled.", + "description": "Whether to enable data classification.", "$ref": "#/$defs/bool" } }, - "additionalProperties": false, - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "additionalProperties": false }, { "type": "string", @@ -2886,27 +2893,27 @@ "$ref": "#/$defs/slice/string" }, "label_col": { - "description": "Optional column that contains the ground truth for the prediction.", + "description": "Column for the label.", "$ref": "#/$defs/string" }, "model_id_col": { - "description": "Column that contains the id of the model generating the predictions. 
Metrics will be computed per model id by\ndefault, and also across all model ids.\n", + "description": "Column for the model identifier.", "$ref": "#/$defs/string" }, "prediction_col": { - "description": "Column that contains the output/prediction from the model.", + "description": "Column for the prediction.", "$ref": "#/$defs/string" }, "prediction_proba_col": { - "description": "Optional column that contains the prediction probabilities for each class in a classification problem type.\nThe values in this column should be a map, mapping each class label to the prediction probability for a given\nsample. The map should be of PySpark MapType().\n", + "description": "Column for prediction probabilities", "$ref": "#/$defs/string" }, "problem_type": { - "description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.", + "description": "Problem type the model aims to solve.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType" }, "timestamp_col": { - "description": "Column that contains the timestamps of requests. The column must be one of the following:\n- A ``TimestampType`` column\n- A column whose values can be converted to timestamps through the pyspark\n ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html).\n", + "description": "Column for the timestamp.", "$ref": "#/$defs/string" } }, @@ -2929,7 +2936,6 @@ "oneOf": [ { "type": "string", - "description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.", "enum": [ "PROBLEM_TYPE_CLASSIFICATION", "PROBLEM_TYPE_REGRESSION" @@ -2945,13 +2951,14 @@ "oneOf": [ { "type": "object", + "description": "Custom metric definition.", "properties": { "definition": { "description": "Jinja template for a SQL expression that specifies how to compute the metric. 
See [create metric definition](https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition).", "$ref": "#/$defs/string" }, "input_columns": { - "description": "A list of column names in the input table the metric should be computed for.\nCan use ``\":table\"`` to indicate that the metric needs information from multiple columns.\n", + "description": "A list of column names in the input table the metric should be computed for.\nCan use ``\":table\"`` to indicate that the metric needs information from multiple columns.", "$ref": "#/$defs/slice/string" }, "name": { @@ -2963,7 +2970,7 @@ "$ref": "#/$defs/string" }, "type": { - "description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n", + "description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType" } }, @@ -2986,7 +2993,7 @@ "oneOf": [ { "type": "string", - "description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n", + "description": "Can only be one of ``\\\"CUSTOM_METRIC_TYPE_AGGREGATE\\\"``, ``\\\"CUSTOM_METRIC_TYPE_DERIVED\\\"``, or ``\\\"CUSTOM_METRIC_TYPE_DRIFT\\\"``.\nThe ``\\\"CUSTOM_METRIC_TYPE_AGGREGATE\\\"`` and ``\\\"CUSTOM_METRIC_TYPE_DERIVED\\\"`` metrics\nare computed on a single table, whereas the ``\\\"CUSTOM_METRIC_TYPE_DRIFT\\\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics", "enum": [ "CUSTOM_METRIC_TYPE_AGGREGATE", "CUSTOM_METRIC_TYPE_DERIVED", @@ 
-3005,11 +3012,11 @@ "type": "object", "properties": { "on_failure": { - "description": "Who to send notifications to on monitor failure.", + "description": "Destinations to send notifications on failure/timeout.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorDestination" }, "on_new_classification_tag_detected": { - "description": "Who to send notifications to when new data classification tags are detected.", + "description": "Destinations to send notifications on new classification tag detected.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorDestination", "x-databricks-preview": "PRIVATE", "doNotSuggest": true @@ -3027,6 +3034,7 @@ "oneOf": [ { "type": "object", + "description": "Snapshot analysis configuration", "additionalProperties": false }, { @@ -3039,13 +3047,14 @@ "oneOf": [ { "type": "object", + "description": "Time series analysis configuration.", "properties": { "granularities": { "description": "Granularities for aggregating data into time windows based on their timestamp. Valid values are 5 minutes, 30 minutes, 1 hour, 1 day, n weeks, 1 month, or 1 year.", "$ref": "#/$defs/slice/string" }, "timestamp_col": { - "description": "Column that contains the timestamps of requests. The column must be one of the following:\n- A ``TimestampType`` column\n- A column whose values can be converted to timestamps through the pyspark\n ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html).\n", + "description": "Column for the timestamp.", "$ref": "#/$defs/string" } }, @@ -6235,6 +6244,39 @@ } ] }, + "pipelines.IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig": { + "oneOf": [ + { + "type": "object", + "description": "Configurations that are only applicable for query-based ingestion connectors.", + "properties": { + "cursor_columns": { + "description": "The names of the monotonically increasing columns in the source table that are used to enable\nthe table to be read and ingested incrementally through structured streaming.\nThe columns are allowed to have repeated values but have to be non-decreasing.\nIf the source data is merged into the destination (e.g., using SCD Type 1 or Type 2), these\ncolumns will implicitly define the `sequence_by` behavior. You can still explicitly set\n`sequence_by` to override this default.", + "$ref": "#/$defs/slice/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, + "deletion_condition": { + "description": "Specifies a SQL WHERE condition that specifies that the source row has been deleted.\nThis is sometimes referred to as \"soft-deletes\".\nFor example: \"Operation = 'DELETE'\" or \"is_deleted = true\".\nThis field is orthogonal to `hard_deletion_sync_interval_in_seconds`,\none for soft-deletes and the other for hard-deletes.\nSee also the hard_deletion_sync_min_interval_in_seconds field for\nhandling of \"hard deletes\" where the source rows are physically removed from the table.", + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, + "hard_deletion_sync_min_interval_in_seconds": { + "description": "Specifies the minimum interval (in seconds) between snapshots on primary keys\nfor detecting and synchronizing hard deletions—i.e., rows that have been\nphysically removed from the source table.\nThis interval acts as a lower bound. 
If ingestion runs less frequently than\nthis value, hard deletion synchronization will align with the actual ingestion\nfrequency instead of happening more often.\nIf not set, hard deletion synchronization via snapshots is disabled.\nThis field is mutable and can be updated without triggering a full snapshot.", + "$ref": "#/$defs/int64", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "pipelines.IngestionSourceType": { "oneOf": [ { @@ -6242,6 +6284,8 @@ "enum": [ "MYSQL", "POSTGRESQL", + "REDSHIFT", + "SQLDW", "SQLSERVER", "SALESFORCE", "BIGQUERY", @@ -6254,7 +6298,8 @@ "TERADATA", "SHAREPOINT", "DYNAMICS365", - "CONFLUENCE" + "CONFLUENCE", + "META_MARKETING" ] }, { @@ -6771,6 +6816,12 @@ "description": "The primary key of the table used to apply changes.", "$ref": "#/$defs/slice/string" }, + "query_based_connector_config": { + "description": "Configurations that are only applicable for query-based ingestion connectors.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "salesforce_include_formula_fields": { "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector", "$ref": "#/$defs/bool", @@ -6927,7 +6978,8 @@ "type": "string", "enum": [ "NONE", - "BLOCK" + "BLOCK", + "MASK" ] }, { @@ -7321,6 +7373,28 @@ } ] }, + "serving.EmailNotifications": { + "oneOf": [ + { + "type": "object", + "properties": { + "on_update_failure": { + "description": "A list of email addresses to be notified when an endpoint fails to update its configuration or state.", + "$ref": "#/$defs/slice/string" + }, + "on_update_success": { + "description": "A list of email addresses to be notified when an endpoint successfully updates its configuration or state.", + "$ref": "#/$defs/slice/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "serving.EndpointCoreConfigInput": { "oneOf": [ { diff --git a/cmd/account/disable-legacy-features/disable-legacy-features.go b/cmd/account/disable-legacy-features/disable-legacy-features.go index eaadd7a5f3..063a55c743 100755 --- a/cmd/account/disable-legacy-features/disable-legacy-features.go +++ b/cmd/account/disable-legacy-features/disable-legacy-features.go @@ -26,10 +26,7 @@ func New() *cobra.Command { For newly created workspaces: 1. Disables the use of DBFS root and mounts. 2. Hive Metastore will not be provisioned. 3. Disables the use of ‘No-isolation clusters’. 4. Disables Databricks Runtime versions prior to 13.3LTS.`, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, + RunE: root.ReportUnknownSubcommand, } // Add methods diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index ece5203cdb..486bf96ece 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -50,13 +50,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var createOverrides []func( *cobra.Command, - *provisioning.UpsertPrivateAccessSettingsRequest, + *provisioning.CreatePrivateAccessSettingsRequest, ) func newCreate() *cobra.Command { cmd := &cobra.Command{} - var createReq provisioning.UpsertPrivateAccessSettingsRequest + var createReq provisioning.CreatePrivateAccessSettingsRequest var createJson flags.JsonFlag cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) @@ -346,13 +346,13 @@ func newList() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var replaceOverrides []func( *cobra.Command, - *provisioning.UpsertPrivateAccessSettingsRequest, + *provisioning.ReplacePrivateAccessSettingsRequest, ) func newReplace() *cobra.Command { cmd := &cobra.Command{} - var replaceReq provisioning.UpsertPrivateAccessSettingsRequest + var replaceReq provisioning.ReplacePrivateAccessSettingsRequest var replaceJson flags.JsonFlag cmd.Flags().Var(&replaceJson, "json", `either inline JSON string or @path/to/file.json with request body`) diff --git a/cmd/auth/in_memory_test.go b/cmd/auth/in_memory_test.go index 212b2ed91f..3a6bfd4f33 100644 --- a/cmd/auth/in_memory_test.go +++ b/cmd/auth/in_memory_test.go @@ -13,7 +13,7 @@ type inMemoryTokenCache struct { func (i *inMemoryTokenCache) Lookup(key string) (*oauth2.Token, error) { token, ok := i.Tokens[key] if !ok { - return nil, cache.ErrNotConfigured + return nil, cache.ErrNotFound } return token, nil } diff --git a/cmd/workspace/ai-builder/ai-builder.go b/cmd/workspace/agent-bricks/agent-bricks.go similarity index 89% rename from cmd/workspace/ai-builder/ai-builder.go rename to cmd/workspace/agent-bricks/agent-bricks.go index cd3c77d993..06e61d7434 100755 --- a/cmd/workspace/ai-builder/ai-builder.go +++ b/cmd/workspace/agent-bricks/agent-bricks.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -package ai_builder +package agent_bricks import ( "fmt" @@ -9,7 +9,7 @@ import ( "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/aibuilder" + "github.com/databricks/databricks-sdk-go/service/agentbricks" "github.com/spf13/cobra" ) @@ -19,13 +19,13 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "ai-builder", + Use: "agent-bricks", Short: `The Custom LLMs service manages state and powers the UI for the Custom LLM product.`, Long: `The Custom LLMs service manages state and powers the UI for the Custom LLM product.`, - GroupID: "aibuilder", + GroupID: "agentbricks", Annotations: map[string]string{ - "package": "aibuilder", + "package": "agentbricks", }, // This service is being previewed; hide from help output. @@ -55,13 +55,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var cancelOptimizeOverrides []func( *cobra.Command, - *aibuilder.CancelCustomLlmOptimizationRunRequest, + *agentbricks.CancelCustomLlmOptimizationRunRequest, ) func newCancelOptimize() *cobra.Command { cmd := &cobra.Command{} - var cancelOptimizeReq aibuilder.CancelCustomLlmOptimizationRunRequest + var cancelOptimizeReq agentbricks.CancelCustomLlmOptimizationRunRequest cmd.Use = "cancel-optimize ID" cmd.Short = `Cancel a Custom LLM Optimization Run.` @@ -81,7 +81,7 @@ func newCancelOptimize() *cobra.Command { cancelOptimizeReq.Id = args[0] - err = w.AiBuilder.CancelOptimize(ctx, cancelOptimizeReq) + err = w.AgentBricks.CancelOptimize(ctx, cancelOptimizeReq) if err != nil { return err } @@ -106,13 +106,13 @@ func newCancelOptimize() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var createCustomLlmOverrides []func( *cobra.Command, - *aibuilder.CreateCustomLlmRequest, + *agentbricks.CreateCustomLlmRequest, ) func newCreateCustomLlm() *cobra.Command { cmd := &cobra.Command{} - var createCustomLlmReq aibuilder.CreateCustomLlmRequest + var createCustomLlmReq agentbricks.CreateCustomLlmRequest var createCustomLlmJson flags.JsonFlag cmd.Flags().Var(&createCustomLlmJson, "json", `either inline JSON string or @path/to/file.json with request body`) @@ -167,7 +167,7 @@ func newCreateCustomLlm() *cobra.Command { createCustomLlmReq.Instructions = args[1] } - response, err := w.AiBuilder.CreateCustomLlm(ctx, createCustomLlmReq) + response, err := w.AgentBricks.CreateCustomLlm(ctx, createCustomLlmReq) if err != nil { return err } @@ -192,13 +192,13 @@ func newCreateCustomLlm() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var deleteCustomLlmOverrides []func( *cobra.Command, - *aibuilder.DeleteCustomLlmRequest, + *agentbricks.DeleteCustomLlmRequest, ) func newDeleteCustomLlm() *cobra.Command { cmd := &cobra.Command{} - var deleteCustomLlmReq aibuilder.DeleteCustomLlmRequest + var deleteCustomLlmReq agentbricks.DeleteCustomLlmRequest cmd.Use = "delete-custom-llm ID" cmd.Short = `Delete a Custom LLM.` @@ -221,7 +221,7 @@ func newDeleteCustomLlm() *cobra.Command { deleteCustomLlmReq.Id = args[0] - err = w.AiBuilder.DeleteCustomLlm(ctx, deleteCustomLlmReq) + err = w.AgentBricks.DeleteCustomLlm(ctx, deleteCustomLlmReq) if err != nil { return err } @@ -246,13 +246,13 @@ func newDeleteCustomLlm() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getCustomLlmOverrides []func( *cobra.Command, - *aibuilder.GetCustomLlmRequest, + *agentbricks.GetCustomLlmRequest, ) func newGetCustomLlm() *cobra.Command { cmd := &cobra.Command{} - var getCustomLlmReq aibuilder.GetCustomLlmRequest + var getCustomLlmReq agentbricks.GetCustomLlmRequest cmd.Use = "get-custom-llm ID" cmd.Short = `Get a Custom LLM.` @@ -275,7 +275,7 @@ func newGetCustomLlm() *cobra.Command { getCustomLlmReq.Id = args[0] - response, err := w.AiBuilder.GetCustomLlm(ctx, getCustomLlmReq) + response, err := w.AgentBricks.GetCustomLlm(ctx, getCustomLlmReq) if err != nil { return err } @@ -300,13 +300,13 @@ func newGetCustomLlm() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var startOptimizeOverrides []func( *cobra.Command, - *aibuilder.StartCustomLlmOptimizationRunRequest, + *agentbricks.StartCustomLlmOptimizationRunRequest, ) func newStartOptimize() *cobra.Command { cmd := &cobra.Command{} - var startOptimizeReq aibuilder.StartCustomLlmOptimizationRunRequest + var startOptimizeReq agentbricks.StartCustomLlmOptimizationRunRequest cmd.Use = "start-optimize ID" cmd.Short = `Start a Custom LLM Optimization Run.` @@ -329,7 +329,7 @@ func newStartOptimize() *cobra.Command { startOptimizeReq.Id = args[0] - response, err := w.AiBuilder.StartOptimize(ctx, startOptimizeReq) + response, err := w.AgentBricks.StartOptimize(ctx, startOptimizeReq) if err != nil { return err } @@ -354,13 +354,13 @@ func newStartOptimize() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var updateCustomLlmOverrides []func( *cobra.Command, - *aibuilder.UpdateCustomLlmRequest, + *agentbricks.UpdateCustomLlmRequest, ) func newUpdateCustomLlm() *cobra.Command { cmd := &cobra.Command{} - var updateCustomLlmReq aibuilder.UpdateCustomLlmRequest + var updateCustomLlmReq agentbricks.UpdateCustomLlmRequest var updateCustomLlmJson flags.JsonFlag cmd.Flags().Var(&updateCustomLlmJson, "json", `either inline JSON string or @path/to/file.json with request body`) @@ -400,7 +400,7 @@ func newUpdateCustomLlm() *cobra.Command { } updateCustomLlmReq.Id = args[0] - response, err := w.AiBuilder.UpdateCustomLlm(ctx, updateCustomLlmReq) + response, err := w.AgentBricks.UpdateCustomLlm(ctx, updateCustomLlmReq) if err != nil { return err } @@ -419,4 +419,4 @@ func newUpdateCustomLlm() *cobra.Command { return cmd } -// end service AiBuilder +// end service AgentBricks diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 6cf510f227..12bcb4a50f 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -83,7 +83,7 @@ func newCreate() *cobra.Command { cmd.Flags().BoolVar(&createReq.NoCompute, "no-compute", createReq.NoCompute, `If true, the app will not be started after creation.`) // TODO: complex arg: active_deployment // TODO: complex arg: app_status - cmd.Flags().StringVar(&createReq.App.BudgetPolicyId, "budget-policy-id", createReq.App.BudgetPolicyId, ``) + cmd.Flags().StringVar(&createReq.App.BudgetPolicyId, "budget-policy-id", createReq.App.BudgetPolicyId, `TODO: Deprecate this field after serverless entitlements are released to all prod stages and the new usage_policy_id is properly populated and used.`) // TODO: complex arg: compute_status cmd.Flags().StringVar(&createReq.App.Description, "description", createReq.App.Description, `The description of the app.`) // TODO: array: effective_user_api_scopes @@ -925,7 +925,7 @@ func newUpdate() *cobra.Command { // TODO: complex arg: active_deployment // TODO: complex arg: app_status - cmd.Flags().StringVar(&updateReq.App.BudgetPolicyId, "budget-policy-id", updateReq.App.BudgetPolicyId, ``) + cmd.Flags().StringVar(&updateReq.App.BudgetPolicyId, "budget-policy-id", updateReq.App.BudgetPolicyId, `TODO: Deprecate this field after serverless entitlements are released to all prod stages and the new usage_policy_id is properly populated and used.`) // TODO: complex arg: compute_status cmd.Flags().StringVar(&updateReq.App.Description, "description", updateReq.App.Description, `The description of the app.`) // TODO: array: effective_user_api_scopes diff --git a/cmd/workspace/clean-room-asset-revisions/clean-room-asset-revisions.go 
b/cmd/workspace/clean-room-asset-revisions/clean-room-asset-revisions.go new file mode 100755 index 0000000000..6900d7415f --- /dev/null +++ b/cmd/workspace/clean-room-asset-revisions/clean-room-asset-revisions.go @@ -0,0 +1,175 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_room_asset_revisions + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-room-asset-revisions", + Short: `Clean Room Asset Revisions denote new versions of uploaded assets (e.g.`, + Long: `Clean Room Asset Revisions denote new versions of uploaded assets (e.g. + notebooks) in the clean room.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *cleanrooms.GetCleanRoomAssetRevisionRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq cleanrooms.GetCleanRoomAssetRevisionRequest + + cmd.Use = "get CLEAN_ROOM_NAME ASSET_TYPE NAME ETAG" + cmd.Short = `Get an asset revision.` + cmd.Long = `Get an asset revision. + + Get a specific revision of an asset + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: Asset type. Only NOTEBOOK_FILE is supported. + Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME] + NAME: Name of the asset. + ETAG: Revision etag to fetch. If not provided, the latest revision will be + returned.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(4) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &getReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + getReq.Name = args[2] + getReq.Etag = args[3] + + response, err := w.CleanRoomAssetRevisions.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomAssetRevisionsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomAssetRevisionsRequest + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Maximum number of asset revisions to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on the previous query.`) + + cmd.Use = "list CLEAN_ROOM_NAME ASSET_TYPE NAME" + cmd.Short = `List asset revisions.` + cmd.Long = `List asset revisions. + + List revisions for an asset + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room. + ASSET_TYPE: Asset type. Only NOTEBOOK_FILE is supported. + Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME] + NAME: Name of the asset.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + listReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &listReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + listReq.Name = args[2] + + response := w.CleanRoomAssetRevisions.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service CleanRoomAssetRevisions diff --git a/cmd/workspace/clean-room-assets/clean-room-assets.go b/cmd/workspace/clean-room-assets/clean-room-assets.go index bba557b36f..61a9e190b6 100755 --- a/cmd/workspace/clean-room-assets/clean-room-assets.go +++ b/cmd/workspace/clean-room-assets/clean-room-assets.go @@ -32,6 +32,7 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateCleanRoomAssetReview()) cmd.AddCommand(newDelete()) cmd.AddCommand(newGet()) cmd.AddCommand(newList()) @@ -63,10 +64,9 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().Var(&createReq.Asset.AssetType, "asset-type", `The type of the asset. Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME]`) + cmd.Flags().StringVar(&createReq.Asset.CleanRoomName, "clean-room-name", createReq.Asset.CleanRoomName, `The name of the clean room this asset belongs to.`) // TODO: complex arg: foreign_table // TODO: complex arg: foreign_table_local_details - cmd.Flags().StringVar(&createReq.Asset.Name, "name", createReq.Asset.Name, `A fully qualified name that uniquely identifies the asset within the clean room.`) // TODO: complex arg: notebook // TODO: complex arg: table // TODO: complex arg: table_local_details @@ -74,7 +74,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: view_local_details // TODO: complex arg: volume_local_details - cmd.Use = "create CLEAN_ROOM_NAME" + cmd.Use = "create CLEAN_ROOM_NAME NAME ASSET_TYPE" cmd.Short = `Create an asset.` cmd.Long = `Create an asset. @@ -85,12 +85,29 @@ func newCreate() *cobra.Command { access the asset. 
Typically, you should use a group as the clean room owner. Arguments: - CLEAN_ROOM_NAME: Name of the clean room.` + CLEAN_ROOM_NAME: The name of the clean room this asset belongs to. This field is required + for create operations and populated by the server for responses. + NAME: A fully qualified name that uniquely identifies the asset within the clean + room. This is also the name displayed in the clean room UI. + + For UC securable assets (tables, volumes, etc.), the format is + *shared_catalog*.*shared_schema*.*asset_name* + + For notebooks, the name is the notebook file name. + ASSET_TYPE: The type of the asset. + Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME]` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only CLEAN_ROOM_NAME as positional arguments. Provide 'name', 'asset_type' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) return check(cmd, args) } @@ -112,6 +129,15 @@ func newCreate() *cobra.Command { } } createReq.CleanRoomName = args[0] + if !cmd.Flags().Changed("json") { + createReq.Asset.Name = args[1] + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &createReq.Asset.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[2]) + } + } response, err := w.CleanRoomAssets.Create(ctx, createReq) if err != nil { @@ -132,6 +158,87 @@ func newCreate() *cobra.Command { return cmd } +// start create-clean-room-asset-review command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createCleanRoomAssetReviewOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomAssetReviewRequest, +) + +func newCreateCleanRoomAssetReview() *cobra.Command { + cmd := &cobra.Command{} + + var createCleanRoomAssetReviewReq cleanrooms.CreateCleanRoomAssetReviewRequest + var createCleanRoomAssetReviewJson flags.JsonFlag + + cmd.Flags().Var(&createCleanRoomAssetReviewJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create-clean-room-asset-review CLEAN_ROOM_NAME ASSET_TYPE NAME" + cmd.Short = `Create a review (e.g. approval) for an asset.` + cmd.Long = `Create a review (e.g. approval) for an asset. 
+ + submit an asset review + + Arguments: + CLEAN_ROOM_NAME: Name of the clean room + ASSET_TYPE: can only be NOTEBOOK_FILE for now + Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME] + NAME: Name of the asset` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createCleanRoomAssetReviewJson.Unmarshal(&createCleanRoomAssetReviewReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + createCleanRoomAssetReviewReq.CleanRoomName = args[0] + _, err = fmt.Sscan(args[1], &createCleanRoomAssetReviewReq.AssetType) + if err != nil { + return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) + } + createCleanRoomAssetReviewReq.Name = args[2] + + response, err := w.CleanRoomAssets.CreateCleanRoomAssetReview(ctx, createCleanRoomAssetReviewReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createCleanRoomAssetReviewOverrides { + fn(cmd, &createCleanRoomAssetReviewReq) + } + + return cmd +} + // start delete command // Slice with functions to override default command behavior. @@ -333,10 +440,9 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().Var(&updateReq.Asset.AssetType, "asset-type", `The type of the asset. Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME]`) + cmd.Flags().StringVar(&updateReq.Asset.CleanRoomName, "clean-room-name", updateReq.Asset.CleanRoomName, `The name of the clean room this asset belongs to.`) // TODO: complex arg: foreign_table // TODO: complex arg: foreign_table_local_details - cmd.Flags().StringVar(&updateReq.Asset.Name, "name", updateReq.Asset.Name, `A fully qualified name that uniquely identifies the asset within the clean room.`) // TODO: complex arg: notebook // TODO: complex arg: table // TODO: complex arg: table_local_details diff --git a/cmd/workspace/clean-room-auto-approval-rules/clean-room-auto-approval-rules.go b/cmd/workspace/clean-room-auto-approval-rules/clean-room-auto-approval-rules.go new file mode 100755 index 0000000000..4da81c30b1 --- /dev/null +++ b/cmd/workspace/clean-room-auto-approval-rules/clean-room-auto-approval-rules.go @@ -0,0 +1,362 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package clean_room_auto_approval_rules + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/cleanrooms" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean-room-auto-approval-rules", + Short: `Clean room auto-approval rules automatically create an approval on your behalf when an asset (e.g.`, + Long: `Clean room auto-approval rules automatically create an approval on your behalf + when an asset (e.g. notebook) meeting specific criteria is shared in a clean + room.`, + GroupID: "cleanrooms", + Annotations: map[string]string{ + "package": "cleanrooms", + }, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *cleanrooms.CreateCleanRoomAutoApprovalRuleRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq cleanrooms.CreateCleanRoomAutoApprovalRuleRequest + var createJson flags.JsonFlag + + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create CLEAN_ROOM_NAME" + cmd.Short = `Create an auto-approval rule.` + cmd.Long = `Create an auto-approval rule. + + Create an auto-approval rule + + Arguments: + CLEAN_ROOM_NAME: The name of the clean room this auto-approval rule belongs to.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createJson.Unmarshal(&createReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + createReq.CleanRoomName = args[0] + + response, err := w.CleanRoomAutoApprovalRules.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *cleanrooms.DeleteCleanRoomAutoApprovalRuleRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq cleanrooms.DeleteCleanRoomAutoApprovalRuleRequest + + cmd.Use = "delete CLEAN_ROOM_NAME RULE_ID" + cmd.Short = `Delete an auto-approval rule.` + cmd.Long = `Delete an auto-approval rule. 
+ + Delete a auto-approval rule by rule ID` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteReq.CleanRoomName = args[0] + deleteReq.RuleId = args[1] + + err = w.CleanRoomAutoApprovalRules.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *cleanrooms.GetCleanRoomAutoApprovalRuleRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq cleanrooms.GetCleanRoomAutoApprovalRuleRequest + + cmd.Use = "get CLEAN_ROOM_NAME RULE_ID" + cmd.Short = `Get an auto-approval rule.` + cmd.Long = `Get an auto-approval rule. + + Get a auto-approval rule by rule ID` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getReq.CleanRoomName = args[0] + getReq.RuleId = args[1] + + response, err := w.CleanRoomAutoApprovalRules.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *cleanrooms.ListCleanRoomAutoApprovalRulesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq cleanrooms.ListCleanRoomAutoApprovalRulesRequest + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Maximum number of auto-approval rules to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + + cmd.Use = "list CLEAN_ROOM_NAME" + cmd.Short = `List auto-approval rules.` + cmd.Long = `List auto-approval rules. 
+ + List all auto-approval rules for the caller` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + listReq.CleanRoomName = args[0] + + response := w.CleanRoomAutoApprovalRules.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *cleanrooms.UpdateCleanRoomAutoApprovalRuleRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq cleanrooms.UpdateCleanRoomAutoApprovalRuleRequest + updateReq.AutoApprovalRule = cleanrooms.CleanRoomAutoApprovalRule{} + var updateJson flags.JsonFlag + + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.AutoApprovalRule.AuthorCollaboratorAlias, "author-collaborator-alias", updateReq.AutoApprovalRule.AuthorCollaboratorAlias, ``) + cmd.Flags().Var(&updateReq.AutoApprovalRule.AuthorScope, "author-scope", `Supported values: [ANY_AUTHOR]`) + cmd.Flags().StringVar(&updateReq.AutoApprovalRule.CleanRoomName, "clean-room-name", updateReq.AutoApprovalRule.CleanRoomName, `The name of the clean room this auto-approval rule belongs to.`) + cmd.Flags().StringVar(&updateReq.AutoApprovalRule.RunnerCollaboratorAlias, "runner-collaborator-alias", updateReq.AutoApprovalRule.RunnerCollaboratorAlias, ``) + + cmd.Use = "update CLEAN_ROOM_NAME RULE_ID" + cmd.Short = `Update an auto-approval rule.` + cmd.Long = `Update an auto-approval rule. + + Update a auto-approval rule by rule ID + + Arguments: + CLEAN_ROOM_NAME: The name of the clean room this auto-approval rule belongs to. + RULE_ID: A generated UUID identifying the rule.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq.AutoApprovalRule) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateReq.CleanRoomName = args[0] + updateReq.RuleId = args[1] + + response, err := w.CleanRoomAutoApprovalRules.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CleanRoomAutoApprovalRules diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index 71c003c21f..7b7dd6bdb8 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -3,6 +3,9 @@ package clean_rooms import ( + "fmt" + "time" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" @@ -61,6 +64,12 @@ func newCreate() *cobra.Command { createReq.CleanRoom = cleanrooms.CleanRoom{} var createJson flags.JsonFlag + var createSkipWait bool + var createTimeout time.Duration + + cmd.Flags().BoolVar(&createSkipWait, "no-wait", createSkipWait, `do not wait to reach ACTIVE state`) + cmd.Flags().DurationVar(&createTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach ACTIVE state`) + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createReq.CleanRoom.Comment, "comment", createReq.CleanRoom.Comment, ``) @@ -108,11 +117,24 @@ func newCreate() *cobra.Command { } } - response, err := w.CleanRooms.Create(ctx, createReq) + wait, err := w.CleanRooms.Create(ctx, createReq) if err != nil { return err } - return cmdio.Render(ctx, response) + if createSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *cleanrooms.CleanRoom) { + status := i.Status + statusMessage := fmt.Sprintf("current status: %s", status) + spinner <- statusMessage + }).GetWithTimeout(createTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 3203f3176e..1afa50bafa 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -4,14 +4,16 @@ package workspace import ( access_control "github.com/databricks/cli/cmd/workspace/access-control" - ai_builder "github.com/databricks/cli/cmd/workspace/ai-builder" + agent_bricks "github.com/databricks/cli/cmd/workspace/agent-bricks" alerts "github.com/databricks/cli/cmd/workspace/alerts" alerts_legacy "github.com/databricks/cli/cmd/workspace/alerts-legacy" alerts_v2 "github.com/databricks/cli/cmd/workspace/alerts-v2" apps "github.com/databricks/cli/cmd/workspace/apps" artifact_allowlists "github.com/databricks/cli/cmd/workspace/artifact-allowlists" catalogs "github.com/databricks/cli/cmd/workspace/catalogs" + clean_room_asset_revisions "github.com/databricks/cli/cmd/workspace/clean-room-asset-revisions" clean_room_assets "github.com/databricks/cli/cmd/workspace/clean-room-assets" + clean_room_auto_approval_rules "github.com/databricks/cli/cmd/workspace/clean-room-auto-approval-rules" clean_room_task_runs "github.com/databricks/cli/cmd/workspace/clean-room-task-runs" clean_rooms "github.com/databricks/cli/cmd/workspace/clean-rooms" cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies" @@ -111,14 +113,16 @@ func All() []*cobra.Command { var out []*cobra.Command out = append(out, access_control.New()) - out = append(out, ai_builder.New()) + out = append(out, agent_bricks.New()) out = append(out, alerts.New()) out = append(out, alerts_legacy.New()) out = append(out, alerts_v2.New()) out = append(out, apps.New()) out = append(out, artifact_allowlists.New()) out = append(out, catalogs.New()) + out = append(out, clean_room_asset_revisions.New()) out = append(out, clean_room_assets.New()) + out = append(out, clean_room_auto_approval_rules.New()) out = append(out, clean_room_task_runs.New()) out = append(out, clean_rooms.New()) out = append(out, cluster_policies.New()) diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index bae12cd8ef..761f278c29 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -72,6 +72,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) + // TODO: complex arg: environment_settings // TODO: map via StringToStringVar: properties cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `If the connection is read only.`) @@ -330,6 +331,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: complex arg: environment_settings cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the connection.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of the connection.`) diff --git a/cmd/workspace/consumer-providers/consumer-providers.go b/cmd/workspace/consumer-providers/consumer-providers.go index 4c61c4ab85..2965644ba6 100755 --- a/cmd/workspace/consumer-providers/consumer-providers.go +++ b/cmd/workspace/consumer-providers/consumer-providers.go @@ -126,7 +126,7 @@ func newGet() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No ID argument specified. 
Loading names for Consumer Providers drop-down." - names, err := w.ConsumerProviders.ProviderInfoNameToIdMap(ctx, marketplace.ListProvidersRequest{}) + names, err := w.ConsumerProviders.ProviderInfoNameToIdMap(ctx, marketplace.ListConsumerProvidersRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Consumer Providers drop-down. Please manually specify required arguments. Original error: %w", err) @@ -167,13 +167,13 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, - *marketplace.ListProvidersRequest, + *marketplace.ListConsumerProvidersRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} - var listReq marketplace.ListProvidersRequest + var listReq marketplace.ListConsumerProvidersRequest cmd.Flags().BoolVar(&listReq.IsFeatured, "is-featured", listReq.IsFeatured, ``) cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index ce01dae8cd..4a5d6dd224 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -3,8 +3,6 @@ package dashboards import ( - "fmt" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" @@ -35,7 +33,6 @@ func New() *cobra.Command { } // Add methods - cmd.AddCommand(newCreate()) cmd.AddCommand(newDelete()) cmd.AddCommand(newGet()) cmd.AddCommand(newList()) @@ -50,96 +47,6 @@ func New() *cobra.Command { return cmd } -// start create command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var createOverrides []func( - *cobra.Command, - *sql.DashboardPostContent, -) - -func newCreate() *cobra.Command { - cmd := &cobra.Command{} - - var createReq sql.DashboardPostContent - var createJson flags.JsonFlag - - cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().BoolVar(&createReq.DashboardFiltersEnabled, "dashboard-filters-enabled", createReq.DashboardFiltersEnabled, `Indicates whether the dashboard filters are enabled.`) - cmd.Flags().BoolVar(&createReq.IsFavorite, "is-favorite", createReq.IsFavorite, `Indicates whether this dashboard object should appear in the current user's favorites list.`) - cmd.Flags().StringVar(&createReq.Parent, "parent", createReq.Parent, `The identifier of the workspace folder containing the object.`) - cmd.Flags().Var(&createReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. Supported values: [owner, viewer]`) - // TODO: array: tags - - cmd.Use = "create NAME" - cmd.Short = `Create a dashboard object.` - cmd.Long = `Create a dashboard object. - - Creates a new dashboard object. Only the name parameter is required in the - POST request JSON body. Other fields can be included when duplicating - dashboards with this API. 
Databricks does not recommend designing dashboards - exclusively using this API.', - - Arguments: - NAME: The title of this dashboard that appears in list views and at the top of - the dashboard page.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") - } - return nil - } - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := createJson.Unmarshal(&createReq) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } - if !cmd.Flags().Changed("json") { - createReq.Name = args[0] - } - - response, err := w.Dashboards.Create(ctx, createReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range createOverrides { - fn(cmd, &createReq) - } - - return cmd -} - // start delete command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/database/database.go b/cmd/workspace/database/database.go index 8b4bbe8305..4d07bce20e 100755 --- a/cmd/workspace/database/database.go +++ b/cmd/workspace/database/database.go @@ -4,6 +4,7 @@ package database import ( "fmt" + "time" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdctx" @@ -164,6 +165,12 @@ func newCreateDatabaseInstance() *cobra.Command { createDatabaseInstanceReq.DatabaseInstance = database.DatabaseInstance{} var createDatabaseInstanceJson flags.JsonFlag + var createDatabaseInstanceSkipWait bool + var createDatabaseInstanceTimeout time.Duration + + cmd.Flags().BoolVar(&createDatabaseInstanceSkipWait, "no-wait", createDatabaseInstanceSkipWait, `do not wait to reach AVAILABLE state`) + cmd.Flags().DurationVar(&createDatabaseInstanceTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach AVAILABLE state`) + cmd.Flags().Var(&createDatabaseInstanceJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", createDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) @@ -216,11 +223,24 @@ func newCreateDatabaseInstance() *cobra.Command { createDatabaseInstanceReq.DatabaseInstance.Name = args[0] } - response, err := w.Database.CreateDatabaseInstance(ctx, createDatabaseInstanceReq) + wait, err := w.Database.CreateDatabaseInstance(ctx, createDatabaseInstanceReq) if err != nil { return err } - return cmdio.Render(ctx, response) + if createDatabaseInstanceSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *database.DatabaseInstance) { + status := i.State + statusMessage := fmt.Sprintf("current status: %s", status) + spinner <- statusMessage + }).GetWithTimeout(createDatabaseInstanceTimeout) + close(spinner) + if err != nil { + 
return err + } + return cmdio.Render(ctx, info) } // Disable completions since they are not applicable. @@ -1271,7 +1291,8 @@ func newUpdateDatabaseInstance() *cobra.Command { Arguments: NAME: The name of the instance. This is the unique identifier for the instance. - UPDATE_MASK: The list of fields to update.` + UPDATE_MASK: The list of fields to update. This field is not yet supported, and is + ignored by the server.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go b/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go index b748619f44..fd1d3fb902 100755 --- a/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go +++ b/cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go @@ -30,10 +30,7 @@ func New() *cobra.Command { are imposed on Databricks Runtime versions. This setting can take up to 20 minutes to take effect and requires a manual restart of all-purpose compute clusters and SQL warehouses.`, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, + RunE: root.ReportUnknownSubcommand, } // Add methods diff --git a/cmd/workspace/external-lineage/external-lineage.go b/cmd/workspace/external-lineage/external-lineage.go index 5ed6468681..4516bea724 100755 --- a/cmd/workspace/external-lineage/external-lineage.go +++ b/cmd/workspace/external-lineage/external-lineage.go @@ -226,8 +226,8 @@ func newListExternalLineageRelationships() *cobra.Command { cmd.Flags().Var(&listExternalLineageRelationshipsJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().IntVar(&listExternalLineageRelationshipsReq.PageSize, "page-size", listExternalLineageRelationshipsReq.PageSize, ``) - cmd.Flags().StringVar(&listExternalLineageRelationshipsReq.PageToken, "page-token", listExternalLineageRelationshipsReq.PageToken, ``) + cmd.Flags().IntVar(&listExternalLineageRelationshipsReq.PageSize, "page-size", listExternalLineageRelationshipsReq.PageSize, `Specifies the maximum number of external lineage relationships to return in a single response.`) + cmd.Flags().StringVar(&listExternalLineageRelationshipsReq.PageToken, "page-token", listExternalLineageRelationshipsReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) cmd.Use = "list-external-lineage-relationships" cmd.Short = `List external lineage relationships.` diff --git a/cmd/workspace/external-metadata/external-metadata.go b/cmd/workspace/external-metadata/external-metadata.go index 601ea1894d..1d4ffaa9ef 100755 --- a/cmd/workspace/external-metadata/external-metadata.go +++ b/cmd/workspace/external-metadata/external-metadata.go @@ -298,8 +298,8 @@ func newListExternalMetadata() *cobra.Command { var listExternalMetadataReq catalog.ListExternalMetadataRequest - cmd.Flags().IntVar(&listExternalMetadataReq.PageSize, "page-size", listExternalMetadataReq.PageSize, ``) - cmd.Flags().StringVar(&listExternalMetadataReq.PageToken, "page-token", listExternalMetadataReq.PageToken, ``) + cmd.Flags().IntVar(&listExternalMetadataReq.PageSize, "page-size", listExternalMetadataReq.PageSize, `Specifies the maximum number of external metadata objects to return in a single response.`) + cmd.Flags().StringVar(&listExternalMetadataReq.PageToken, "page-token", listExternalMetadataReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) cmd.Use = "list-external-metadata" cmd.Short = `List external metadata objects.` diff --git 
a/cmd/workspace/quality-monitors/quality-monitors.go b/cmd/workspace/quality-monitors/quality-monitors.go index 740fe228ef..80a07aaf59 100755 --- a/cmd/workspace/quality-monitors/quality-monitors.go +++ b/cmd/workspace/quality-monitors/quality-monitors.go @@ -23,12 +23,11 @@ func New() *cobra.Command { Short: `A monitor computes and monitors data or model quality metrics for a table over time.`, Long: `A monitor computes and monitors data or model quality metrics for a table over time. It generates metrics tables and a dashboard that you can use to monitor - table health and set alerts. - - Most write operations require the user to be the owner of the table (or its - parent schema or parent catalog). Viewing the dashboard, computed metrics, or - monitor configuration only requires the user to have **SELECT** privileges on - the table (along with **USE_SCHEMA** and **USE_CATALOG**).`, + table health and set alerts. Most write operations require the user to be the + owner of the table (or its parent schema or parent catalog). Viewing the + dashboard, computed metrics, or monitor configuration only requires the user + to have **SELECT** privileges on the table (along with **USE_SCHEMA** and + **USE_CATALOG**).`, GroupID: "catalog", Annotations: map[string]string{ "package": "catalog", @@ -73,20 +72,12 @@ func newCancelRefresh() *cobra.Command { cmd.Short = `Cancel refresh.` cmd.Long = `Cancel refresh. - Cancel an active monitor refresh for the given refresh ID. - - The caller must either: 1. be an owner of the table's parent catalog 2. have - **USE_CATALOG** on the table's parent catalog and be an owner of the table's - parent schema 3. have the following permissions: - **USE_CATALOG** on the - table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an - owner of the table - - Additionally, the call must be made from the workspace where the monitor was - created. + Cancels an already-initiated refresh job. Arguments: - TABLE_NAME: Full name of the table. - REFRESH_ID: ID of the refresh.` + TABLE_NAME: UC table name in format catalog.schema.table_name. table_name is case + insensitive and spaces are disallowed. + REFRESH_ID: ` // This command is being previewed; hide from help output. 
cmd.Hidden = true @@ -104,7 +95,10 @@ func newCancelRefresh() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) cancelRefreshReq.TableName = args[0] - cancelRefreshReq.RefreshId = args[1] + _, err = fmt.Sscan(args[1], &cancelRefreshReq.RefreshId) + if err != nil { + return fmt.Errorf("invalid REFRESH_ID: %s", args[1]) + } err = w.QualityMonitors.CancelRefresh(ctx, cancelRefreshReq) if err != nil { @@ -142,10 +136,11 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.BaselineTableName, "baseline-table-name", createReq.BaselineTableName, `Name of the baseline table from which drift metrics are computed from.`) + cmd.Flags().StringVar(&createReq.BaselineTableName, "baseline-table-name", createReq.BaselineTableName, `[Create:OPT Update:OPT] Baseline table name.`) // TODO: array: custom_metrics // TODO: complex arg: data_classification_config // TODO: complex arg: inference_log + cmd.Flags().StringVar(&createReq.LatestMonitorFailureMsg, "latest-monitor-failure-msg", createReq.LatestMonitorFailureMsg, `[Create:ERR Update:IGN] The latest error message for a monitor failure.`) // TODO: complex arg: notifications // TODO: complex arg: schedule cmd.Flags().BoolVar(&createReq.SkipBuiltinDashboard, "skip-builtin-dashboard", createReq.SkipBuiltinDashboard, `Whether to skip creating a default dashboard summarizing data quality metrics.`) @@ -154,7 +149,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: time_series cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `Optional argument to specify the warehouse for dashboard creation.`) - cmd.Use = "create TABLE_NAME ASSETS_DIR OUTPUT_SCHEMA_NAME" + cmd.Use = "create TABLE_NAME OUTPUT_SCHEMA_NAME ASSETS_DIR" cmd.Short = `Create a table monitor.` cmd.Long = `Create a table monitor. @@ -171,9 +166,13 @@ func newCreate() *cobra.Command { where this call was made. Arguments: - TABLE_NAME: Full name of the table. - ASSETS_DIR: The directory to store monitoring assets (e.g. dashboard, metric tables). - OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.` + TABLE_NAME: UC table name in format catalog.schema.table_name. This field + corresponds to the {full_table_name_arg} arg in the endpoint path. + OUTPUT_SCHEMA_NAME: [Create:REQ Update:REQ] Schema where output tables are created. Needs to + be in 2-level format {catalog}.{schema} + ASSETS_DIR: [Create:REQ Update:IGN] Field for specifying the absolute path to a custom + directory to store data-monitoring assets. Normally prepopulated to a + default user location via UI and Python APIs.` cmd.Annotations = make(map[string]string) @@ -181,7 +180,7 @@ func newCreate() *cobra.Command { if cmd.Flags().Changed("json") { err := root.ExactArgs(1)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, provide only TABLE_NAME as positional arguments. Provide 'assets_dir', 'output_schema_name' in your JSON input") + return fmt.Errorf("when --json flag is specified, provide only TABLE_NAME as positional arguments. 
Provide 'output_schema_name', 'assets_dir' in your JSON input") } return nil } @@ -208,10 +207,10 @@ func newCreate() *cobra.Command { } createReq.TableName = args[0] if !cmd.Flags().Changed("json") { - createReq.AssetsDir = args[1] + createReq.OutputSchemaName = args[1] } if !cmd.Flags().Changed("json") { - createReq.OutputSchemaName = args[2] + createReq.AssetsDir = args[2] } response, err := w.QualityMonitors.Create(ctx, createReq) @@ -266,7 +265,8 @@ func newDelete() *cobra.Command { call; those assets must be manually cleaned up (if desired). Arguments: - TABLE_NAME: Full name of the table.` + TABLE_NAME: UC table name in format catalog.schema.table_name. This field + corresponds to the {full_table_name_arg} arg in the endpoint path.` cmd.Annotations = make(map[string]string) @@ -282,11 +282,11 @@ func newDelete() *cobra.Command { deleteReq.TableName = args[0] - err = w.QualityMonitors.Delete(ctx, deleteReq) + response, err := w.QualityMonitors.Delete(ctx, deleteReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -333,7 +333,8 @@ func newGet() *cobra.Command { was created. Arguments: - TABLE_NAME: Full name of the table.` + TABLE_NAME: UC table name in format catalog.schema.table_name. This field + corresponds to the {full_table_name_arg} arg in the endpoint path.` cmd.Annotations = make(map[string]string) @@ -414,7 +415,10 @@ func newGetRefresh() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) getRefreshReq.TableName = args[0] - getRefreshReq.RefreshId = args[1] + _, err = fmt.Sscan(args[1], &getRefreshReq.RefreshId) + if err != nil { + return fmt.Errorf("invalid REFRESH_ID: %s", args[1]) + } response, err := w.QualityMonitors.GetRefresh(ctx, getRefreshReq) if err != nil { @@ -466,7 +470,8 @@ func newListRefreshes() *cobra.Command { created. Arguments: - TABLE_NAME: Full name of the table.` + TABLE_NAME: UC table name in format catalog.schema.table_name. table_name is case + insensitive and spaces are disallowed.` cmd.Annotations = make(map[string]string) @@ -537,7 +542,8 @@ func newRegenerateDashboard() *cobra.Command { the monitor was created. Arguments: - TABLE_NAME: Full name of the table.` + TABLE_NAME: UC table name in format catalog.schema.table_name. This field + corresponds to the {full_table_name_arg} arg in the endpoint path.` // This command is being previewed; hide from help output. cmd.Hidden = true @@ -602,8 +608,8 @@ func newRunRefresh() *cobra.Command { var runRefreshReq catalog.RunRefreshRequest cmd.Use = "run-refresh TABLE_NAME" - cmd.Short = `Queue a metric refresh for a monitor.` - cmd.Long = `Queue a metric refresh for a monitor. + cmd.Short = `Run refresh.` + cmd.Long = `Run refresh. Queues a metric refresh on the monitor for the specified table. The refresh will execute in the background. @@ -618,7 +624,8 @@ func newRunRefresh() *cobra.Command { created. Arguments: - TABLE_NAME: Full name of the table.` + TABLE_NAME: UC table name in format catalog.schema.table_name. 
table_name is case + insensitive and spaces are disallowed.` cmd.Annotations = make(map[string]string) @@ -670,11 +677,12 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateReq.BaselineTableName, "baseline-table-name", updateReq.BaselineTableName, `Name of the baseline table from which drift metrics are computed from.`) + cmd.Flags().StringVar(&updateReq.BaselineTableName, "baseline-table-name", updateReq.BaselineTableName, `[Create:OPT Update:OPT] Baseline table name.`) // TODO: array: custom_metrics - cmd.Flags().StringVar(&updateReq.DashboardId, "dashboard-id", updateReq.DashboardId, `Id of dashboard that visualizes the computed metrics.`) + cmd.Flags().StringVar(&updateReq.DashboardId, "dashboard-id", updateReq.DashboardId, `[Create:ERR Update:OPT] Id of dashboard that visualizes the computed metrics.`) // TODO: complex arg: data_classification_config // TODO: complex arg: inference_log + cmd.Flags().StringVar(&updateReq.LatestMonitorFailureMsg, "latest-monitor-failure-msg", updateReq.LatestMonitorFailureMsg, `[Create:ERR Update:IGN] The latest error message for a monitor failure.`) // TODO: complex arg: notifications // TODO: complex arg: schedule // TODO: array: slicing_exprs @@ -700,8 +708,10 @@ func newUpdate() *cobra.Command { updated. Arguments: - TABLE_NAME: Full name of the table. - OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.` + TABLE_NAME: UC table name in format catalog.schema.table_name. This field + corresponds to the {full_table_name_arg} arg in the endpoint path. + OUTPUT_SCHEMA_NAME: [Create:REQ Update:REQ] Schema where output tables are created. Needs to + be in 2-level format {catalog}.{schema}` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 997ec86bb8..667413bb4d 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -159,6 +159,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.BudgetPolicyId, "budget-policy-id", createReq.BudgetPolicyId, `The budget policy to be applied to the serving endpoint.`) // TODO: complex arg: config cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, ``) + // TODO: complex arg: email_notifications // TODO: array: rate_limits cmd.Flags().BoolVar(&createReq.RouteOptimized, "route-optimized", createReq.RouteOptimized, `Enable route optimization for the serving endpoint.`) // TODO: array: tags @@ -264,6 +265,7 @@ func newCreateProvisionedThroughputEndpoint() *cobra.Command { // TODO: complex arg: ai_gateway cmd.Flags().StringVar(&createProvisionedThroughputEndpointReq.BudgetPolicyId, "budget-policy-id", createProvisionedThroughputEndpointReq.BudgetPolicyId, `The budget policy associated with the endpoint.`) + // TODO: complex arg: email_notifications // TODO: array: tags cmd.Use = "create-provisioned-throughput-endpoint" @@ -1095,9 +1097,12 @@ func newQuery() *cobra.Command { cmd.Use = "query NAME" cmd.Short = `Query a serving endpoint.` cmd.Long = `Query a serving endpoint. + + Query a serving endpoint Arguments: - NAME: The name of the serving endpoint. This field is required.` + NAME: The name of the serving endpoint. 
This field is required and is provided + via the path parameter.` cmd.Annotations = make(map[string]string) diff --git a/experimental/python/databricks/bundles/pipelines/__init__.py b/experimental/python/databricks/bundles/pipelines/__init__.py index f34f79dcb4..12cd530cac 100644 --- a/experimental/python/databricks/bundles/pipelines/__init__.py +++ b/experimental/python/databricks/bundles/pipelines/__init__.py @@ -48,6 +48,9 @@ "IngestionPipelineDefinition", "IngestionPipelineDefinitionDict", "IngestionPipelineDefinitionParam", + "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig", + "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigDict", + "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigParam", "IngestionSourceType", "IngestionSourceTypeParam", "InitScriptInfo", @@ -242,6 +245,11 @@ IngestionPipelineDefinitionDict, IngestionPipelineDefinitionParam, ) +from databricks.bundles.pipelines._models.ingestion_pipeline_definition_table_specific_config_query_based_connector_config import ( + IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig, + IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigDict, + IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigParam, +) from databricks.bundles.pipelines._models.ingestion_source_type import ( IngestionSourceType, IngestionSourceTypeParam, diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py index 988227c43e..c452222df9 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py @@ -4,10 +4,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrOptional -from databricks.bundles.pipelines._models.report_spec import ( - ReportSpec, - ReportSpecParam, -) +from databricks.bundles.pipelines._models.report_spec import ReportSpec, ReportSpecParam from databricks.bundles.pipelines._models.schema_spec import SchemaSpec, SchemaSpecParam from databricks.bundles.pipelines._models.table_spec import TableSpec, TableSpecParam diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_table_specific_config_query_based_connector_config.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_table_specific_config_query_based_connector_config.py new file mode 100644 index 0000000000..50f3cc1f57 --- /dev/null +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_table_specific_config_query_based_connector_config.py @@ -0,0 +1,120 @@ +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrList, VariableOrOptional + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig: + """ + :meta private: [EXPERIMENTAL] + + Configurations that are only applicable for query-based ingestion connectors. 
+ """ + + cursor_columns: VariableOrList[str] = field(default_factory=list) + """ + :meta private: [EXPERIMENTAL] + + The names of the monotonically increasing columns in the source table that are used to enable + the table to be read and ingested incrementally through structured streaming. + The columns are allowed to have repeated values but have to be non-decreasing. + If the source data is merged into the destination (e.g., using SCD Type 1 or Type 2), these + columns will implicitly define the `sequence_by` behavior. You can still explicitly set + `sequence_by` to override this default. + """ + + deletion_condition: VariableOrOptional[str] = None + """ + :meta private: [EXPERIMENTAL] + + Specifies a SQL WHERE condition that specifies that the source row has been deleted. + This is sometimes referred to as "soft-deletes". + For example: "Operation = 'DELETE'" or "is_deleted = true". + This field is orthogonal to `hard_deletion_sync_interval_in_seconds`, + one for soft-deletes and the other for hard-deletes. + See also the hard_deletion_sync_min_interval_in_seconds field for + handling of "hard deletes" where the source rows are physically removed from the table. + """ + + hard_deletion_sync_min_interval_in_seconds: VariableOrOptional[int] = None + """ + :meta private: [EXPERIMENTAL] + + Specifies the minimum interval (in seconds) between snapshots on primary keys + for detecting and synchronizing hard deletions—i.e., rows that have been + physically removed from the source table. + This interval acts as a lower bound. If ingestion runs less frequently than + this value, hard deletion synchronization will align with the actual ingestion + frequency instead of happening more often. + If not set, hard deletion synchronization via snapshots is disabled. + This field is mutable and can be updated without triggering a full snapshot. + """ + + @classmethod + def from_dict( + cls, + value: "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigDict", + ) -> "Self": + return _transform(cls, value) + + def as_dict( + self, + ) -> "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigDict": + return _transform_to_json_value(self) # type:ignore + + +class IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigDict( + TypedDict, total=False +): + """""" + + cursor_columns: VariableOrList[str] + """ + :meta private: [EXPERIMENTAL] + + The names of the monotonically increasing columns in the source table that are used to enable + the table to be read and ingested incrementally through structured streaming. + The columns are allowed to have repeated values but have to be non-decreasing. + If the source data is merged into the destination (e.g., using SCD Type 1 or Type 2), these + columns will implicitly define the `sequence_by` behavior. You can still explicitly set + `sequence_by` to override this default. + """ + + deletion_condition: VariableOrOptional[str] + """ + :meta private: [EXPERIMENTAL] + + Specifies a SQL WHERE condition that specifies that the source row has been deleted. + This is sometimes referred to as "soft-deletes". + For example: "Operation = 'DELETE'" or "is_deleted = true". + This field is orthogonal to `hard_deletion_sync_interval_in_seconds`, + one for soft-deletes and the other for hard-deletes. + See also the hard_deletion_sync_min_interval_in_seconds field for + handling of "hard deletes" where the source rows are physically removed from the table. 
+ """ + + hard_deletion_sync_min_interval_in_seconds: VariableOrOptional[int] + """ + :meta private: [EXPERIMENTAL] + + Specifies the minimum interval (in seconds) between snapshots on primary keys + for detecting and synchronizing hard deletions—i.e., rows that have been + physically removed from the source table. + This interval acts as a lower bound. If ingestion runs less frequently than + this value, hard deletion synchronization will align with the actual ingestion + frequency instead of happening more often. + If not set, hard deletion synchronization via snapshots is disabled. + This field is mutable and can be updated without triggering a full snapshot. + """ + + +IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigParam = ( + IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigDict + | IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig +) diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py index d33ba2493e..cb8ceca59e 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py @@ -5,6 +5,8 @@ class IngestionSourceType(Enum): MYSQL = "MYSQL" POSTGRESQL = "POSTGRESQL" + REDSHIFT = "REDSHIFT" + SQLDW = "SQLDW" SQLSERVER = "SQLSERVER" SALESFORCE = "SALESFORCE" BIGQUERY = "BIGQUERY" @@ -18,12 +20,15 @@ class IngestionSourceType(Enum): SHAREPOINT = "SHAREPOINT" DYNAMICS365 = "DYNAMICS365" CONFLUENCE = "CONFLUENCE" + META_MARKETING = "META_MARKETING" IngestionSourceTypeParam = ( Literal[ "MYSQL", "POSTGRESQL", + "REDSHIFT", + "SQLDW", "SQLSERVER", "SALESFORCE", "BIGQUERY", @@ -37,6 +42,7 @@ class IngestionSourceType(Enum): "SHAREPOINT", "DYNAMICS365", "CONFLUENCE", + "META_MARKETING", ] | IngestionSourceType ) diff --git a/experimental/python/databricks/bundles/pipelines/_models/table_specific_config.py b/experimental/python/databricks/bundles/pipelines/_models/table_specific_config.py index c354ee74ae..ca39efaa13 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/table_specific_config.py +++ b/experimental/python/databricks/bundles/pipelines/_models/table_specific_config.py @@ -4,6 +4,10 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrList, VariableOrOptional +from databricks.bundles.pipelines._models.ingestion_pipeline_definition_table_specific_config_query_based_connector_config import ( + IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig, + IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigParam, +) from databricks.bundles.pipelines._models.table_specific_config_scd_type import ( TableSpecificConfigScdType, TableSpecificConfigScdTypeParam, @@ -39,6 +43,15 @@ class TableSpecificConfig: The primary key of the table used to apply changes. """ + query_based_connector_config: VariableOrOptional[ + IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig + ] = None + """ + :meta private: [EXPERIMENTAL] + + Configurations that are only applicable for query-based ingestion connectors. 
+ """ + salesforce_include_formula_fields: VariableOrOptional[bool] = None """ :meta private: [EXPERIMENTAL] @@ -91,6 +104,15 @@ class TableSpecificConfigDict(TypedDict, total=False): The primary key of the table used to apply changes. """ + query_based_connector_config: VariableOrOptional[ + IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigParam + ] + """ + :meta private: [EXPERIMENTAL] + + Configurations that are only applicable for query-based ingestion connectors. + """ + salesforce_include_formula_fields: VariableOrOptional[bool] """ :meta private: [EXPERIMENTAL] diff --git a/go.mod b/go.mod index d2f2e53307..f98d8d2c62 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/BurntSushi/toml v1.5.0 // MIT github.com/Masterminds/semver/v3 v3.4.0 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.75.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.79.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/gorilla/mux v1.8.1 // BSD 3-Clause diff --git a/go.sum b/go.sum index 89dd163f6d..2387094969 100644 --- a/go.sum +++ b/go.sum @@ -33,8 +33,8 @@ github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/databricks/databricks-sdk-go v0.75.0 h1:BIRSPmUNtkSqAywFPOIsy2Oq+C9xc+X6TAGGYpKXuBo= -github.com/databricks/databricks-sdk-go v0.75.0/go.mod h1:xBtjeP9nq+6MgTewZW1EcbRkD7aDY9gZvcRPcwPhZjw= +github.com/databricks/databricks-sdk-go v0.79.0 h1:lIgI7nlFe7wRuKjuvmiKP7wfmsdKOuefykPtTNAfaq0= +github.com/databricks/databricks-sdk-go v0.79.0/go.mod h1:xBtjeP9nq+6MgTewZW1EcbRkD7aDY9gZvcRPcwPhZjw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 8c2e5ac08ec3280eac19edb65572be819a0f2491 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 7 Aug 2025 12:12:53 +0200 Subject: [PATCH 2/5] changelog --- NEXT_CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 495d603964..962db53951 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -5,6 +5,7 @@ ### Notable Changes ### Dependency updates +* Update Go SDK to 0.79.0 ([#3376](https://github.com/databricks/cli/pull/3376)) ### CLI From 67b57c6f1b199c8c3fe998cfd9c9af3217fce981 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 7 Aug 2025 12:40:19 +0200 Subject: [PATCH 3/5] fixed help test --- acceptance/help/output.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/acceptance/help/output.txt b/acceptance/help/output.txt index c299f3aff2..2033f7cc50 100644 --- a/acceptance/help/output.txt +++ b/acceptance/help/output.txt @@ -122,7 +122,9 @@ Apps apps Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. Clean Rooms + clean-room-asset-revisions Clean Room Asset Revisions denote new versions of uploaded assets (e.g. 
clean-room-assets Clean room assets are data and code objects — Tables, volumes, and notebooks that are shared with the clean room. + clean-room-auto-approval-rules Clean room auto-approval rules automatically create an approval on your behalf when an asset (e.g. clean-room-task-runs Clean room task runs are the executions of notebooks in a clean room. clean-rooms A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other's data. From 595803435abea2b33730ebc3de9b5a5540851eeb Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 7 Aug 2025 12:53:33 +0200 Subject: [PATCH 4/5] fixed apps test output --- acceptance/cmd/workspace/apps/output.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/acceptance/cmd/workspace/apps/output.txt b/acceptance/cmd/workspace/apps/output.txt index 4e17741346..5d7723b7f1 100644 --- a/acceptance/cmd/workspace/apps/output.txt +++ b/acceptance/cmd/workspace/apps/output.txt @@ -63,7 +63,7 @@ Usage: databricks apps update NAME [flags] Flags: - --budget-policy-id string + --budget-policy-id string TODO: Deprecate this field after serverless entitlements are released to all prod stages and the new usage_policy_id is properly populated and used. --description string The description of the app. -h, --help help for update --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) From 8cb30769c2f4db96ae90912638d8ea40d3ae8b5b Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 7 Aug 2025 13:07:04 +0200 Subject: [PATCH 5/5] fixed bind output --- acceptance/bundle/deployment/bind/quality-monitor/output.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/acceptance/bundle/deployment/bind/quality-monitor/output.txt b/acceptance/bundle/deployment/bind/quality-monitor/output.txt index 9e06742c5d..e6551c31c8 100644 --- a/acceptance/bundle/deployment/bind/quality-monitor/output.txt +++ b/acceptance/bundle/deployment/bind/quality-monitor/output.txt @@ -3,7 +3,7 @@ { "assets_dir":"/Users/user/databricks_lakehouse_monitoring", "drift_metrics_table_name":"", - "monitor_version":"", + "monitor_version":0, "output_schema_name":"catalog.schema", "profile_metrics_table_name":"", "snapshot": {},
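
For reference, a minimal sketch of how the query-based connector settings introduced in this patch might be used from the experimental Python bundles API. The class and field names come from the generated models above; the import path and the availability of from_dict on TableSpecificConfig are assumptions based on the generated-model pattern and the updated pipelines/__init__.py, not something this diff confirms.

    # Hypothetical usage sketch; the import location is assumed.
    from databricks.bundles.pipelines import (
        IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig,
        TableSpecificConfig,
    )

    # Incremental, query-based ingestion: read new rows via a cursor column,
    # treat rows matching the condition as soft-deletes, and reconcile hard
    # deletes from the source at most once per hour.
    table_config = TableSpecificConfig(
        query_based_connector_config=IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig(
            cursor_columns=["updated_at"],
            deletion_condition="Operation = 'DELETE'",
            hard_deletion_sync_min_interval_in_seconds=3600,
        ),
    )

    # The plain-dict form accepted by the *Param aliases should work as well,
    # assuming TableSpecificConfig exposes the same from_dict helper as the
    # other generated models.
    same_config = TableSpecificConfig.from_dict(
        {
            "query_based_connector_config": {
                "cursor_columns": ["updated_at"],
                "deletion_condition": "Operation = 'DELETE'",
                "hard_deletion_sync_min_interval_in_seconds": 3600,
            }
        }
    )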