From c3da88e107aa5a617d782e1ab7ba234967ca34b2 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Wed, 26 Nov 2025 00:54:27 +0100 Subject: [PATCH 01/18] Enable alerts back again --- .../bundle/deployment/bind/alert/alert.json | 24 +++++- .../deployment/bind/alert/out.test.toml | 2 +- .../bundle/deployment/bind/alert/test.toml | 2 +- .../alerts/basic/databricks.yml.tmpl | 2 +- .../resources/alerts/basic/out.test.toml | 2 +- .../bundle/resources/alerts/basic/output.txt | 85 ++++++++++++++++--- .../bundle/resources/alerts/basic/script | 4 +- .../bundle/resources/alerts/basic/test.toml | 2 +- .../mutator/resourcemutator/apply_presets.go | 12 +-- .../resourcemutator/apply_target_mode_test.go | 14 +-- .../config/mutator/resourcemutator/run_as.go | 18 ++-- .../mutator/resourcemutator/run_as_test.go | 2 +- bundle/config/resources.go | 32 +++---- bundle/config/resources_test.go | 12 +-- bundle/deploy/resource_path_mkdir.go | 43 +++++----- bundle/direct/dresources/all.go | 1 + bundle/statemgmt/state_load_test.go | 72 ++++++++-------- libs/testserver/alerts.go | 18 ++++ libs/testserver/handlers.go | 2 +- 19 files changed, 226 insertions(+), 123 deletions(-) diff --git a/acceptance/bundle/deployment/bind/alert/alert.json b/acceptance/bundle/deployment/bind/alert/alert.json index 6e7406230c..7649f73890 100644 --- a/acceptance/bundle/deployment/bind/alert/alert.json +++ b/acceptance/bundle/deployment/bind/alert/alert.json @@ -3,5 +3,27 @@ "query_text": "SELECT 1", "warehouse_id": "0123-456789-warehouse0", "custom_summary": "Test Alert Summary", - "custom_description": "Test Alert Description" + "custom_description": "Test Alert Description", + "evaluation": { + "comparison_operator": "EQUAL", + "notification": { + "notify_on_ok": false, + "retrigger_seconds": 1 + }, + "source": { + "aggregation": "MAX", + "display": "1", + "name": "1" + }, + "threshold": { + "value": { + "double_value": 1 + } + } + }, + "schedule": { + "pause_status": "UNPAUSED", + "quartz_cron_schedule": "0 0 * * * ?", + "timezone_id": "UTC" + } } diff --git a/acceptance/bundle/deployment/bind/alert/out.test.toml b/acceptance/bundle/deployment/bind/alert/out.test.toml index f9eb74f070..90061dedb1 100644 --- a/acceptance/bundle/deployment/bind/alert/out.test.toml +++ b/acceptance/bundle/deployment/bind/alert/out.test.toml @@ -1,4 +1,4 @@ -Local = false +Local = true Cloud = false [EnvMatrix] diff --git a/acceptance/bundle/deployment/bind/alert/test.toml b/acceptance/bundle/deployment/bind/alert/test.toml index 13cff7b7f4..5a92cae2e8 100644 --- a/acceptance/bundle/deployment/bind/alert/test.toml +++ b/acceptance/bundle/deployment/bind/alert/test.toml @@ -1,4 +1,4 @@ Cloud = false -Local = false # Enable when releasing support for alerts. +Local = true BundleConfigTarget = "databricks.yml" diff --git a/acceptance/bundle/resources/alerts/basic/databricks.yml.tmpl b/acceptance/bundle/resources/alerts/basic/databricks.yml.tmpl index 022e43a81e..2651e6c97d 100644 --- a/acceptance/bundle/resources/alerts/basic/databricks.yml.tmpl +++ b/acceptance/bundle/resources/alerts/basic/databricks.yml.tmpl @@ -27,4 +27,4 @@ resources: pause_status: "UNPAUSED" quartz_cron_schedule: "44 19 */1 * * ?" 
timezone_id: "Europe/Amsterdam" - warehouse_id: "dd43ee29fedd958d" + warehouse_id: $TEST_DEFAULT_WAREHOUSE_ID diff --git a/acceptance/bundle/resources/alerts/basic/out.test.toml b/acceptance/bundle/resources/alerts/basic/out.test.toml index d560f1de04..01ed6822af 100644 --- a/acceptance/bundle/resources/alerts/basic/out.test.toml +++ b/acceptance/bundle/resources/alerts/basic/out.test.toml @@ -1,5 +1,5 @@ Local = true -Cloud = false +Cloud = true [EnvMatrix] DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct"] diff --git a/acceptance/bundle/resources/alerts/basic/output.txt b/acceptance/bundle/resources/alerts/basic/output.txt index e978b67eff..49039ec74a 100644 --- a/acceptance/bundle/resources/alerts/basic/output.txt +++ b/acceptance/bundle/resources/alerts/basic/output.txt @@ -1,22 +1,81 @@ >>> [CLI] bundle deploy -Warning: unknown field: alerts - at resources - in databricks.yml:5:3 - Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/alerts-basic-[UNIQUE_NAME]/default/files... Deploying resources... +Updating deployment state... Deployment complete! -Warning: unknown field: alerts - at resources - in databricks.yml:5:3 +>>> [CLI] alerts-v2 get-alert [ALERT_ID] +{ + "display_name": "My alert", + "lifecycle_state": "ACTIVE", + "custom_summary": "My alert", + "evaluation": { + "comparison_operator": "EQUAL", + "notification": { + "notify_on_ok": false, + "retrigger_seconds": 1 + }, + "source": { + "aggregation": "MAX", + "display": "1", + "name": "1" + }, + "threshold": { + "value": { + "double_value": 2 + } + } + }, + "query_text": "select 2", + "schedule": { + "pause_status": "UNPAUSED", + "quartz_cron_schedule": "44 19 */1 * * ?", + "timezone_id": "Europe/Amsterdam" + }, + "warehouse_id": "[TEST_DEFAULT_WAREHOUSE_ID]" +} + +>>> [CLI] permissions get alertsv2 [ALERT_ID] +{ + "access_control_list": [ + { + "all_permissions": [ + { + "inherited": false, + "permission_level": "CAN_RUN" + } + ], + "group_name": "users" + } + ], + "object_id": "/alertsv2/[ALERT_ID]", + "object_type": "alertv2" +} + +>>> [CLI] bundle destroy --auto-approve +The following resources will be deleted: + delete alert myalert + +All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/alerts-basic-[UNIQUE_NAME]/default ->>> [CLI] alerts-v2 get-alert null -Warning: unknown field: alerts - at resources - in databricks.yml:5:3 +Deleting files... +Destroy complete! 
-Error: Resource sql.AlertV2 not found: null +>>> [CLI] alerts-v2 get-alert [ALERT_ID] +{ + "display_name": "My alert", + "lifecycle_state": "DELETED" +} -Exit code: 1 +>>> [CLI] bundle summary +Name: alerts-basic-[UNIQUE_NAME] +Target: default +Workspace: + User: [USERNAME] + Path: /Workspace/Users/[USERNAME]/.bundle/alerts-basic-[UNIQUE_NAME]/default +Resources: + Alerts: + myalert: + Name: My alert + URL: (not deployed) diff --git a/acceptance/bundle/resources/alerts/basic/script b/acceptance/bundle/resources/alerts/basic/script index 19e05a31a4..aaf1796eaf 100644 --- a/acceptance/bundle/resources/alerts/basic/script +++ b/acceptance/bundle/resources/alerts/basic/script @@ -4,7 +4,9 @@ trace $CLI bundle deploy alert_id=$($CLI bundle summary --output json | jq -r '.resources.alerts.myalert.id') -trace $CLI alerts-v2 get-alert $alert_id | jq '{display_name, lifecycle_state}' +echo "$alert_id:ALERT_ID" >> ACC_REPLS + +trace $CLI alerts-v2 get-alert $alert_id | jq '{display_name, lifecycle_state, custom_summary, evaluation, query_text, schedule, warehouse_id}' trace $CLI permissions get alertsv2 $alert_id | jq '{access_control_list: [.access_control_list[] | select(any(.all_permissions[]; .permission_level == "CAN_RUN"))], object_id, object_type}' diff --git a/acceptance/bundle/resources/alerts/basic/test.toml b/acceptance/bundle/resources/alerts/basic/test.toml index 5de630bdf8..8f2cd86d82 100644 --- a/acceptance/bundle/resources/alerts/basic/test.toml +++ b/acceptance/bundle/resources/alerts/basic/test.toml @@ -1,4 +1,4 @@ Local = true -Cloud = false +Cloud = true RecordRequests = false Ignore = [".databricks"] diff --git a/bundle/config/mutator/resourcemutator/apply_presets.go b/bundle/config/mutator/resourcemutator/apply_presets.go index 6d1eb16d72..1311f3660f 100644 --- a/bundle/config/mutator/resourcemutator/apply_presets.go +++ b/bundle/config/mutator/resourcemutator/apply_presets.go @@ -240,12 +240,12 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos // Apps: No presets // Alerts: Prefix - // for _, a := range r.Alerts { - // if a == nil { - // continue - // } - // a.DisplayName = prefix + a.DisplayName - // } + for _, a := range r.Alerts { + if a == nil { + continue + } + a.DisplayName = prefix + a.DisplayName + } // SQL Warehouses: Prefix, Tags for _, w := range r.SqlWarehouses { diff --git a/bundle/config/mutator/resourcemutator/apply_target_mode_test.go b/bundle/config/mutator/resourcemutator/apply_target_mode_test.go index afce55f98e..de3aa992af 100644 --- a/bundle/config/mutator/resourcemutator/apply_target_mode_test.go +++ b/bundle/config/mutator/resourcemutator/apply_target_mode_test.go @@ -190,13 +190,13 @@ func mockBundle(mode config.Mode) *bundle.Bundle { }, }, }, - // Alerts: map[string]*resources.Alert{ - // "alert1": { - // AlertV2: sql.AlertV2{ - // DisplayName: "alert1", - // }, - // }, - // }, + Alerts: map[string]*resources.Alert{ + "alert1": { + AlertV2: sql.AlertV2{ + DisplayName: "alert1", + }, + }, + }, }, }, SyncRoot: vfs.MustNew("/Users/lennart.kats@databricks.com"), diff --git a/bundle/config/mutator/resourcemutator/run_as.go b/bundle/config/mutator/resourcemutator/run_as.go index 499f9ae293..8433db9ed8 100644 --- a/bundle/config/mutator/resourcemutator/run_as.go +++ b/bundle/config/mutator/resourcemutator/run_as.go @@ -110,15 +110,15 @@ func validateRunAs(b *bundle.Bundle) diag.Diagnostics { )) } - // // Alerts do not support run_as in the API. 
- // if len(b.Config.Resources.Alerts) > 0 { - // diags = diags.Extend(reportRunAsNotSupported( - // "alerts", - // b.Config.GetLocation("resources.alerts"), - // b.Config.Workspace.CurrentUser.UserName, - // identity, - // )) - // } + // Alerts do not support run_as in the API. + if len(b.Config.Resources.Alerts) > 0 { + diags = diags.Extend(reportRunAsNotSupported( + "alerts", + b.Config.GetLocation("resources.alerts"), + b.Config.Workspace.CurrentUser.UserName, + identity, + )) + } // Apps do not support run_as in the API. if len(b.Config.Resources.Apps) > 0 { diff --git a/bundle/config/mutator/resourcemutator/run_as_test.go b/bundle/config/mutator/resourcemutator/run_as_test.go index 93fdb6e457..7500374adc 100644 --- a/bundle/config/mutator/resourcemutator/run_as_test.go +++ b/bundle/config/mutator/resourcemutator/run_as_test.go @@ -32,7 +32,7 @@ func allResourceTypes(t *testing.T) []string { // the dyn library gives us the correct list of all resources supported. Please // also update this check when adding a new resource require.Equal(t, []string{ - // "alerts", + "alerts", "apps", "clusters", "dashboards", diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 228bf16704..4cb5da53ce 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -25,11 +25,11 @@ type Resources struct { Dashboards map[string]*resources.Dashboard `json:"dashboards,omitempty"` Apps map[string]*resources.App `json:"apps,omitempty"` SecretScopes map[string]*resources.SecretScope `json:"secret_scopes,omitempty"` - // Alerts map[string]*resources.Alert `json:"alerts,omitempty"` - SqlWarehouses map[string]*resources.SqlWarehouse `json:"sql_warehouses,omitempty"` - DatabaseInstances map[string]*resources.DatabaseInstance `json:"database_instances,omitempty"` - DatabaseCatalogs map[string]*resources.DatabaseCatalog `json:"database_catalogs,omitempty"` - SyncedDatabaseTables map[string]*resources.SyncedDatabaseTable `json:"synced_database_tables,omitempty"` + Alerts map[string]*resources.Alert `json:"alerts,omitempty"` + SqlWarehouses map[string]*resources.SqlWarehouse `json:"sql_warehouses,omitempty"` + DatabaseInstances map[string]*resources.DatabaseInstance `json:"database_instances,omitempty"` + DatabaseCatalogs map[string]*resources.DatabaseCatalog `json:"database_catalogs,omitempty"` + SyncedDatabaseTables map[string]*resources.SyncedDatabaseTable `json:"synced_database_tables,omitempty"` } type ConfigResource interface { @@ -92,7 +92,7 @@ func (r *Resources) AllResources() []ResourceGroup { collectResourceMap(descriptions["dashboards"], r.Dashboards), collectResourceMap(descriptions["volumes"], r.Volumes), collectResourceMap(descriptions["apps"], r.Apps), - // collectResourceMap(descriptions["alerts"], r.Alerts), + collectResourceMap(descriptions["alerts"], r.Alerts), collectResourceMap(descriptions["secret_scopes"], r.SecretScopes), collectResourceMap(descriptions["sql_warehouses"], r.SqlWarehouses), collectResourceMap(descriptions["database_instances"], r.DatabaseInstances), @@ -175,11 +175,11 @@ func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) } } - // for k := range r.Alerts { - // if k == key { - // found = append(found, r.Alerts[k]) - // } - // } + for k := range r.Alerts { + if k == key { + found = append(found, r.Alerts[k]) + } + } for k := range r.SqlWarehouses { if k == key { @@ -236,10 +236,10 @@ func SupportedResources() map[string]resources.ResourceDescription { "volumes": (&resources.Volume{}).ResourceDescription(), "apps": 
(&resources.App{}).ResourceDescription(), "secret_scopes": (&resources.SecretScope{}).ResourceDescription(), - // "alerts": (&resources.Alert{}).ResourceDescription(), - "sql_warehouses": (&resources.SqlWarehouse{}).ResourceDescription(), - "database_instances": (&resources.DatabaseInstance{}).ResourceDescription(), - "database_catalogs": (&resources.DatabaseCatalog{}).ResourceDescription(), - "synced_database_tables": (&resources.SyncedDatabaseTable{}).ResourceDescription(), + "alerts": (&resources.Alert{}).ResourceDescription(), + "sql_warehouses": (&resources.SqlWarehouse{}).ResourceDescription(), + "database_instances": (&resources.DatabaseInstance{}).ResourceDescription(), + "database_catalogs": (&resources.DatabaseCatalog{}).ResourceDescription(), + "synced_database_tables": (&resources.SyncedDatabaseTable{}).ResourceDescription(), } } diff --git a/bundle/config/resources_test.go b/bundle/config/resources_test.go index 4d0b99f5e9..6bc61196e2 100644 --- a/bundle/config/resources_test.go +++ b/bundle/config/resources_test.go @@ -160,11 +160,11 @@ func TestResourcesBindSupport(t *testing.T) { App: apps.App{}, }, }, - // Alerts: map[string]*resources.Alert{ - // "my_alert": { - // AlertV2: sql.AlertV2{}, - // }, - // }, + Alerts: map[string]*resources.Alert{ + "my_alert": { + AlertV2: sql.AlertV2{}, + }, + }, QualityMonitors: map[string]*resources.QualityMonitor{ "my_quality_monitor": { CreateMonitor: catalog.CreateMonitor{}, @@ -214,7 +214,7 @@ func TestResourcesBindSupport(t *testing.T) { m.GetMockLakeviewAPI().EXPECT().Get(mock.Anything, mock.Anything).Return(nil, nil) m.GetMockVolumesAPI().EXPECT().Read(mock.Anything, mock.Anything).Return(nil, nil) m.GetMockAppsAPI().EXPECT().GetByName(mock.Anything, mock.Anything).Return(nil, nil) - // m.GetMockAlertsV2API().EXPECT().GetAlertById(mock.Anything, mock.Anything).Return(nil, nil) + m.GetMockAlertsV2API().EXPECT().GetAlertById(mock.Anything, mock.Anything).Return(nil, nil) m.GetMockQualityMonitorsAPI().EXPECT().Get(mock.Anything, mock.Anything).Return(nil, nil) m.GetMockServingEndpointsAPI().EXPECT().Get(mock.Anything, mock.Anything).Return(nil, nil) m.GetMockSecretsAPI().EXPECT().ListScopesAll(mock.Anything).Return([]workspace.SecretScope{ diff --git a/bundle/deploy/resource_path_mkdir.go b/bundle/deploy/resource_path_mkdir.go index a59028d5c2..9a1610becb 100644 --- a/bundle/deploy/resource_path_mkdir.go +++ b/bundle/deploy/resource_path_mkdir.go @@ -2,9 +2,11 @@ package deploy import ( "context" + "errors" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/apierr" ) func ResourcePathMkdir() bundle.Mutator { @@ -18,25 +20,24 @@ func (m *resourcePathMkdir) Name() string { } func (m *resourcePathMkdir) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - // // Only dashboards and alerts need ${workspace.resource_path} to exist. - // // - // // We'll defer to TF to create resource_path for for dashboards. - // // ref: https://github.com/databricks/terraform-provider-databricks/blob/d84bc6f28d8aa0fd77d110a41f745970299142b1/dashboards/resource_dashboard.go#L83. - // // - // // For alerts we proactively create it here because the TF implementation - // // is autogenerated and cannot be easily customized. - // if len(b.Config.Resources.Alerts) == 0 { - // return nil - // } - - // w := b.WorkspaceClient() - - // // Optimisitcally create the resource path. If it already exists ignore the error. 
- // err := w.Workspace.MkdirsByPath(ctx, b.Config.Workspace.ResourcePath)
- // var aerr *apierr.APIError
- // if errors.As(err, &aerr) && aerr.ErrorCode == "RESOURCE_ALREADY_EXISTS" {
- // return nil
- // }
- // return diag.FromErr(err)
- return nil
+ // Only dashboards and alerts need ${workspace.resource_path} to exist.
+ //
+ // We'll defer to TF to create resource_path for dashboards.
+ // ref: https://github.com/databricks/terraform-provider-databricks/blob/d84bc6f28d8aa0fd77d110a41f745970299142b1/dashboards/resource_dashboard.go#L83.
+ //
+ // For alerts we proactively create it here because the TF implementation
+ // is autogenerated and cannot be easily customized.
+ if len(b.Config.Resources.Alerts) == 0 {
+ return nil
+ }
+
+ w := b.WorkspaceClient()
+
+ // Optimistically create the resource path. If it already exists, ignore the error.
+ err := w.Workspace.MkdirsByPath(ctx, b.Config.Workspace.ResourcePath)
+ var aerr *apierr.APIError
+ if errors.As(err, &aerr) && aerr.ErrorCode == "RESOURCE_ALREADY_EXISTS" {
+ return nil
+ }
+ return diag.FromErr(err)
 }
diff --git a/bundle/direct/dresources/all.go b/bundle/direct/dresources/all.go
index 5fb8e95a58..745ebfacf3 100644
--- a/bundle/direct/dresources/all.go
+++ b/bundle/direct/dresources/all.go
@@ -28,6 +28,7 @@ var SupportedResources = map[string]any{
 "jobs.permissions": (*ResourcePermissions)(nil),
 "pipelines.permissions": (*ResourcePermissions)(nil),
 "apps.permissions": (*ResourcePermissions)(nil),
+ "alerts.permissions": (*ResourcePermissions)(nil),
 "clusters.permissions": (*ResourcePermissions)(nil),
 "database_instances.permissions": (*ResourcePermissions)(nil),
 "experiments.permissions": (*ResourcePermissions)(nil),
diff --git a/bundle/statemgmt/state_load_test.go b/bundle/statemgmt/state_load_test.go
index 434e91028e..a9394f19e9 100644
--- a/bundle/statemgmt/state_load_test.go
+++ b/bundle/statemgmt/state_load_test.go
@@ -77,9 +77,9 @@ func TestStateToBundleEmptyLocalResources(t *testing.T) {
 "synced_database_tables": map[string]ResourceState{
 "test_synced_database_table": {ID: "1"},
 },
- // "alerts": map[string]ResourceState{
- // "test_alert": {ID: "1"},
- // },
+ "alerts": map[string]ResourceState{
+ "test_alert": {ID: "1"},
+ },
 }
 err := StateToBundle(context.Background(), state, &config)
 assert.NoError(t, err)
@@ -130,8 +130,8 @@ func TestStateToBundleEmptyLocalResources(t *testing.T) {
 assert.Equal(t, "1", config.Resources.DatabaseInstances["test_database_instance"].ID)
 assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.DatabaseInstances["test_database_instance"].ModifiedStatus)
- // assert.Equal(t, "1", config.Resources.Alerts["test_alert"].ID)
- // assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Alerts["test_alert"].ModifiedStatus)
+ assert.Equal(t, "1", config.Resources.Alerts["test_alert"].ID)
+ assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Alerts["test_alert"].ModifiedStatus)
 AssertFullResourceCoverage(t, &config)
 }
@@ -258,13 +258,13 @@ func TestStateToBundleEmptyRemoteResources(t *testing.T) {
 },
 },
 },
- // Alerts: map[string]*resources.Alert{
- // "test_alert": {
- // AlertV2: sql.AlertV2{
- // DisplayName: "test_alert",
- // },
- // },
- // },
+ Alerts: map[string]*resources.Alert{
+ "test_alert": {
+ AlertV2: sql.AlertV2{
+ DisplayName: "test_alert",
+ },
+ },
+ },
 },
 }
@@ -322,8 +322,8 @@ func TestStateToBundleEmptyRemoteResources(t *testing.T) {
 assert.Equal(t, "", config.Resources.SyncedDatabaseTables["test_synced_database_table"].ID)
 assert.Equal(t,
resources.ModifiedStatusCreated, config.Resources.SyncedDatabaseTables["test_synced_database_table"].ModifiedStatus) - // assert.Equal(t, "", config.Resources.Alerts["test_alert"].ID) - // assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Alerts["test_alert"].ModifiedStatus) + assert.Equal(t, "", config.Resources.Alerts["test_alert"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Alerts["test_alert"].ModifiedStatus) AssertFullResourceCoverage(t, &config) } @@ -535,18 +535,18 @@ func TestStateToBundleModifiedResources(t *testing.T) { }, }, }, - // Alerts: map[string]*resources.Alert{ - // "test_alert": { - // AlertV2: sql.AlertV2{ - // DisplayName: "test_alert", - // }, - // }, - // "test_alert_new": { - // AlertV2: sql.AlertV2{ - // DisplayName: "test_alert_new", - // }, - // }, - // }, + Alerts: map[string]*resources.Alert{ + "test_alert": { + AlertV2: sql.AlertV2{ + DisplayName: "test_alert", + }, + }, + "test_alert_new": { + AlertV2: sql.AlertV2{ + DisplayName: "test_alert_new", + }, + }, + }, }, } state := ExportedResourcesMap{ @@ -610,10 +610,10 @@ func TestStateToBundleModifiedResources(t *testing.T) { "test_database_instance": {ID: "1"}, "test_database_instance_old": {ID: "2"}, }, - // "alerts": map[string]ResourceState{ - // "test_alert": {ID: "1"}, - // "test_alert_old": {ID: "2"}, - // }, + "alerts": map[string]ResourceState{ + "test_alert": {ID: "1"}, + "test_alert_old": {ID: "2"}, + }, } err := StateToBundle(context.Background(), state, &config) assert.NoError(t, err) @@ -725,12 +725,12 @@ func TestStateToBundleModifiedResources(t *testing.T) { assert.Equal(t, "", config.Resources.DatabaseInstances["test_database_instance_new"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.DatabaseInstances["test_database_instance_new"].ModifiedStatus) - // assert.Equal(t, "1", config.Resources.Alerts["test_alert"].ID) - // assert.Equal(t, "", config.Resources.Alerts["test_alert"].ModifiedStatus) - // assert.Equal(t, "2", config.Resources.Alerts["test_alert_old"].ID) - // assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Alerts["test_alert_old"].ModifiedStatus) - // assert.Equal(t, "", config.Resources.Alerts["test_alert_new"].ID) - // assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Alerts["test_alert_new"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.Alerts["test_alert"].ID) + assert.Equal(t, "", config.Resources.Alerts["test_alert"].ModifiedStatus) + assert.Equal(t, "2", config.Resources.Alerts["test_alert_old"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Alerts["test_alert_old"].ModifiedStatus) + assert.Equal(t, "", config.Resources.Alerts["test_alert_new"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Alerts["test_alert_new"].ModifiedStatus) AssertFullResourceCoverage(t, &config) } diff --git a/libs/testserver/alerts.go b/libs/testserver/alerts.go index 7efd4be648..a40090f1e7 100644 --- a/libs/testserver/alerts.go +++ b/libs/testserver/alerts.go @@ -40,3 +40,21 @@ func (s *FakeWorkspace) AlertsUpsert(req Request, alertId string) Response { Body: alert, } } + +func (s *FakeWorkspace) AlertsDelete(alertId string) Response { + defer s.LockUnlock()() + + alert, ok := s.Alerts[alertId] + if !ok { + return Response{ + StatusCode: 404, + } + } + + alert.LifecycleState = sql.AlertLifecycleStateDeleted + s.Alerts[alertId] = alert + + return Response{ + StatusCode: 200, + } +} diff --git a/libs/testserver/handlers.go 
b/libs/testserver/handlers.go index 56ff7616d4..dbb59a048b 100644 --- a/libs/testserver/handlers.go +++ b/libs/testserver/handlers.go @@ -458,7 +458,7 @@ func AddDefaultHandlers(server *Server) { }) server.Handle("DELETE", "/api/2.0/alerts/{id}", func(req Request) any { - return MapDelete(req.Workspace, req.Workspace.Alerts, req.Vars["id"]) + return req.Workspace.AlertsDelete(req.Vars["id"]) }) // Secrets ACLs: From 7d7ac333cd4592769a38b04f76ee928400f8cc17 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Wed, 26 Nov 2025 01:13:34 +0100 Subject: [PATCH 02/18] generate schema --- bundle/internal/schema/annotations.yml | 71 +++++ bundle/schema/jsonschema.json | 365 +++++++++++++++++++++++++ 2 files changed, 436 insertions(+) diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index 85ab74e11d..4a8cd4a713 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -154,6 +154,9 @@ github.com/databricks/cli/bundle/config.Python: If enabled, Python code will execute within this environment. If disabled, it defaults to using the Python interpreter available in the current shell. github.com/databricks/cli/bundle/config.Resources: + "alerts": + "description": |- + PLACEHOLDER "apps": "description": |- The app resource defines a Databricks app. @@ -442,6 +445,74 @@ github.com/databricks/cli/bundle/config.Workspace: "state_path": "description": |- The workspace state path +github.com/databricks/cli/bundle/config/resources.Alert: + "create_time": + "description": |- + PLACEHOLDER + "custom_description": + "description": |- + PLACEHOLDER + "custom_summary": + "description": |- + PLACEHOLDER + "display_name": + "description": |- + PLACEHOLDER + "effective_run_as": + "description": |- + PLACEHOLDER + "evaluation": + "description": |- + PLACEHOLDER + "id": + "description": |- + PLACEHOLDER + "lifecycle": + "description": |- + PLACEHOLDER + "lifecycle_state": + "description": |- + PLACEHOLDER + "owner_user_name": + "description": |- + PLACEHOLDER + "parent_path": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER + "query_text": + "description": |- + PLACEHOLDER + "run_as": + "description": |- + PLACEHOLDER + "run_as_user_name": + "description": |- + PLACEHOLDER + "schedule": + "description": |- + PLACEHOLDER + "update_time": + "description": |- + PLACEHOLDER + "warehouse_id": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.AlertPermission: + "group_name": + "description": |- + PLACEHOLDER + "level": + "description": |- + PLACEHOLDER + "service_principal_name": + "description": |- + PLACEHOLDER + "user_name": + "description": |- + PLACEHOLDER github.com/databricks/cli/bundle/config/resources.AppPermission: "group_name": "description": |- diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 9c892a15ba..50dce23a79 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -59,6 +59,113 @@ "cli": { "bundle": { "config": { + "resources.Alert": { + "oneOf": [ + { + "type": "object", + "properties": { + "create_time": { + "$ref": "#/$defs/string" + }, + "custom_description": { + "$ref": "#/$defs/string" + }, + "custom_summary": { + "$ref": "#/$defs/string" + }, + "display_name": { + "$ref": "#/$defs/string" + }, + "effective_run_as": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2RunAs" + }, + "evaluation": { + "$ref": 
"#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2Evaluation" + }, + "id": { + "$ref": "#/$defs/string" + }, + "lifecycle": { + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" + }, + "lifecycle_state": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertLifecycleState" + }, + "owner_user_name": { + "$ref": "#/$defs/string" + }, + "parent_path": { + "$ref": "#/$defs/string" + }, + "permissions": { + "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.AlertPermission" + }, + "query_text": { + "$ref": "#/$defs/string" + }, + "run_as": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2RunAs" + }, + "run_as_user_name": { + "$ref": "#/$defs/string" + }, + "schedule": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.CronSchedule" + }, + "update_time": { + "$ref": "#/$defs/string" + }, + "warehouse_id": { + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "display_name", + "evaluation", + "query_text", + "schedule", + "warehouse_id" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "resources.AlertPermission": { + "oneOf": [ + { + "type": "object", + "properties": { + "group_name": { + "$ref": "#/$defs/string" + }, + "level": { + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.AlertPermissionLevel" + }, + "service_principal_name": { + "$ref": "#/$defs/string" + }, + "user_name": { + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "level" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "resources.AlertPermissionLevel": { + "type": "string" + }, "resources.App": { "oneOf": [ { @@ -2332,6 +2439,9 @@ { "type": "object", "properties": { + "alerts": { + "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Alert" + }, "apps": { "description": "The app resource defines a Databricks app.", "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.App", @@ -8862,6 +8972,186 @@ } ] }, + "sql.Aggregation": { + "type": "string" + }, + "sql.AlertEvaluationState": { + "type": "string" + }, + "sql.AlertLifecycleState": { + "type": "string" + }, + "sql.AlertV2Evaluation": { + "oneOf": [ + { + "type": "object", + "properties": { + "comparison_operator": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.ComparisonOperator" + }, + "empty_result_state": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertEvaluationState" + }, + "last_evaluated_at": { + "$ref": "#/$defs/string" + }, + "notification": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2Notification" + }, + "source": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2OperandColumn" + }, + "state": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertEvaluationState" + }, + "threshold": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2Operand" + } + }, + "additionalProperties": false, + "required": [ + "comparison_operator", + "source" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "sql.AlertV2Notification": { + "oneOf": [ + { + "type": "object", + "properties": { + "notify_on_ok": { + "$ref": 
"#/$defs/bool" + }, + "retrigger_seconds": { + "$ref": "#/$defs/int" + }, + "subscriptions": { + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/sql.AlertV2Subscription" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "sql.AlertV2Operand": { + "oneOf": [ + { + "type": "object", + "properties": { + "column": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2OperandColumn" + }, + "value": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2OperandValue" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "sql.AlertV2OperandColumn": { + "oneOf": [ + { + "type": "object", + "properties": { + "aggregation": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.Aggregation" + }, + "display": { + "$ref": "#/$defs/string" + }, + "name": { + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "name" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "sql.AlertV2OperandValue": { + "oneOf": [ + { + "type": "object", + "properties": { + "bool_value": { + "$ref": "#/$defs/bool" + }, + "double_value": { + "$ref": "#/$defs/float64" + }, + "string_value": { + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "sql.AlertV2RunAs": { + "oneOf": [ + { + "type": "object", + "properties": { + "service_principal_name": { + "$ref": "#/$defs/string" + }, + "user_name": { + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "sql.AlertV2Subscription": { + "oneOf": [ + { + "type": "object", + "properties": { + "destination_id": { + "$ref": "#/$defs/string" + }, + "user_email": { + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "sql.Channel": { "oneOf": [ { @@ -8900,6 +9190,9 @@ } ] }, + "sql.ComparisonOperator": { + "type": "string" + }, "sql.CreateWarehouseRequestWarehouseType": { "oneOf": [ { @@ -8916,6 +9209,33 @@ } ] }, + "sql.CronSchedule": { + "oneOf": [ + { + "type": "object", + "properties": { + "pause_status": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.SchedulePauseStatus" + }, + "quartz_cron_schedule": { + "$ref": "#/$defs/string" + }, + "timezone_id": { + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "quartz_cron_schedule", + "timezone_id" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "sql.EndpointTagPair": { "oneOf": [ { @@ -8953,6 +9273,9 @@ } ] }, + "sql.SchedulePauseStatus": { + "type": "string" + }, "sql.SpotInstancePolicy": { "oneOf": [ { @@ -9078,6 +9401,20 @@ "cli": { "bundle": { "config": { + "resources.Alert": { + "oneOf": [ + { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Alert" + } + }, + { + "type": "string", + "pattern": 
"\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "resources.App": { "oneOf": [ { @@ -9426,6 +9763,20 @@ "cli": { "bundle": { "config": { + "resources.AlertPermission": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.AlertPermission" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "resources.AppPermission": { "oneOf": [ { @@ -10131,6 +10482,20 @@ } ] }, + "sql.AlertV2Subscription": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2Subscription" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "sql.EndpointTagPair": { "oneOf": [ { From 6745d55ca4383f13f47ff553cf32ffe7f49d25b8 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Wed, 26 Nov 2025 01:14:24 +0100 Subject: [PATCH 03/18] add nextchangelog entry --- NEXT_CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 6942156caa..67a64b2472 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -9,5 +9,6 @@ ### Dependency updates ### Bundles +* Add support for alerts to DABs ([#4004](https://github.com/databricks/cli/pull/4004)) ### API Changes From 0ced9502ffc06b1021e13dc42f9e7abd13f79354 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Mon, 1 Dec 2025 02:59:17 +0100 Subject: [PATCH 04/18] update test --- .../alerts/basic/databricks.yml.tmpl | 4 +- .../bundle/resources/alerts/basic/output.txt | 23 ++++---- .../bundle/resources/alerts/basic/script | 6 +- bundle/direct/dresources/all_test.go | 57 ++++++++++++++++++- libs/testserver/permissions.go | 24 ++++++++ 5 files changed, 98 insertions(+), 16 deletions(-) diff --git a/acceptance/bundle/resources/alerts/basic/databricks.yml.tmpl b/acceptance/bundle/resources/alerts/basic/databricks.yml.tmpl index 2651e6c97d..36f1183b85 100644 --- a/acceptance/bundle/resources/alerts/basic/databricks.yml.tmpl +++ b/acceptance/bundle/resources/alerts/basic/databricks.yml.tmpl @@ -5,8 +5,8 @@ resources: alerts: myalert: permissions: - - level: CAN_RUN - group_name: users + - level: CAN_MANAGE + user_name: deco-test-user@databricks.com custom_summary: "My alert" display_name: "My alert" diff --git a/acceptance/bundle/resources/alerts/basic/output.txt b/acceptance/bundle/resources/alerts/basic/output.txt index 49039ec74a..7ca1ad4eb9 100644 --- a/acceptance/bundle/resources/alerts/basic/output.txt +++ b/acceptance/bundle/resources/alerts/basic/output.txt @@ -36,26 +36,25 @@ Deployment complete! 
"warehouse_id": "[TEST_DEFAULT_WAREHOUSE_ID]" } +=== assert that permissions are applied >>> [CLI] permissions get alertsv2 [ALERT_ID] { - "access_control_list": [ + "user_name": "deco-test-user@databricks.com", + "all_permissions": [ { - "all_permissions": [ - { - "inherited": false, - "permission_level": "CAN_RUN" - } - ], - "group_name": "users" + "inherited": false, + "permission_level": "CAN_MANAGE" } - ], - "object_id": "/alertsv2/[ALERT_ID]", - "object_type": "alertv2" + ] } +=== assert that no permanent drift happens +>>> [CLI] bundle plan +Plan: 0 to add, 0 to change, 0 to delete, 2 unchanged + >>> [CLI] bundle destroy --auto-approve The following resources will be deleted: - delete alert myalert + delete resources.alerts.myalert All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/alerts-basic-[UNIQUE_NAME]/default diff --git a/acceptance/bundle/resources/alerts/basic/script b/acceptance/bundle/resources/alerts/basic/script index aaf1796eaf..16f6feeb8b 100644 --- a/acceptance/bundle/resources/alerts/basic/script +++ b/acceptance/bundle/resources/alerts/basic/script @@ -8,7 +8,11 @@ echo "$alert_id:ALERT_ID" >> ACC_REPLS trace $CLI alerts-v2 get-alert $alert_id | jq '{display_name, lifecycle_state, custom_summary, evaluation, query_text, schedule, warehouse_id}' -trace $CLI permissions get alertsv2 $alert_id | jq '{access_control_list: [.access_control_list[] | select(any(.all_permissions[]; .permission_level == "CAN_RUN"))], object_id, object_type}' +title "assert that permissions are applied" +trace $CLI permissions get alertsv2 $alert_id | jq ".access_control_list.[]" -c | grep 'deco-test-user@databricks.com' | jq '{user_name, all_permissions}' + +title "assert that no permanent drift happens" +trace $CLI bundle plan trace $CLI bundle destroy --auto-approve diff --git a/bundle/direct/dresources/all_test.go b/bundle/direct/dresources/all_test.go index b9c115768d..fc6e56dd94 100644 --- a/bundle/direct/dresources/all_test.go +++ b/bundle/direct/dresources/all_test.go @@ -26,6 +26,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/databricks/databricks-sdk-go/service/serving" + "github.com/databricks/databricks-sdk-go/service/sql" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -143,6 +144,29 @@ var testConfig map[string]any = map[string]any{ }, }, }, + + "alerts": &resources.Alert{ + AlertV2: sql.AlertV2{ + DisplayName: "my-alert", + QueryText: "SELECT 1", + WarehouseId: "test-warehouse-id", + Schedule: sql.CronSchedule{ + QuartzCronSchedule: "0 0 12 * * ?", + TimezoneId: "UTC", + }, + Evaluation: sql.AlertV2Evaluation{ + ComparisonOperator: sql.ComparisonOperatorGreaterThan, + Source: sql.AlertV2OperandColumn{ + Name: "column1", + }, + Threshold: &sql.AlertV2Operand{ + Column: &sql.AlertV2OperandColumn{ + Name: "column2", + }, + }, + }, + }, + }, } type prepareWorkspace func(client *databricks.WorkspaceClient) (any, error) @@ -356,6 +380,37 @@ var testDeps = map[string]prepareWorkspace{ }, nil }, + "alerts.permissions": func(client *databricks.WorkspaceClient) (any, error) { + resp, err := client.AlertsV2.CreateAlert(context.Background(), sql.CreateAlertV2Request{ + Alert: sql.AlertV2{ + DisplayName: "alert-permissions", + QueryText: "SELECT 1", + WarehouseId: "test-warehouse-id", + Schedule: sql.CronSchedule{ + QuartzCronSchedule: "0 0 12 * * ?", + TimezoneId: "UTC", + }, + Evaluation: sql.AlertV2Evaluation{ + 
ComparisonOperator: sql.ComparisonOperatorGreaterThan, + Source: sql.AlertV2OperandColumn{ + Name: "column1", + }, + }, + }, + }) + if err != nil { + return nil, err + } + + return &PermissionsState{ + ObjectID: "/alerts/" + resp.Id, + Permissions: []iam.AccessControlRequest{{ + PermissionLevel: "CAN_MANAGE", + UserName: "user@example.com", + }}, + }, nil + }, + "schemas.grants": func(client *databricks.WorkspaceClient) (any, error) { return &GrantsState{ SecurableType: "schema", @@ -532,7 +587,7 @@ func testCRUD(t *testing.T, group string, adapter *Adapter, client *databricks.W }, remote, false) require.NoError(t, err) - deleteIsNoop := strings.HasSuffix(group, "permissions") || strings.HasSuffix(group, "grants") + deleteIsNoop := strings.HasSuffix(group, "permissions") || strings.HasSuffix(group, "grants") || group == "alerts" remoteAfterDelete, err := adapter.DoRead(ctx, createdID) if deleteIsNoop { diff --git a/libs/testserver/permissions.go b/libs/testserver/permissions.go index 722b7bdc00..12483429af 100644 --- a/libs/testserver/permissions.go +++ b/libs/testserver/permissions.go @@ -206,6 +206,30 @@ func (s *FakeWorkspace) SetPermissions(req Request) any { }) } + // Add default ACLs for alertsv2 to match cloud environment + if requestObjectType == "alertsv2" { + existingPermissions.AccessControlList = append(existingPermissions.AccessControlList, iam.AccessControlResponse{ + AllPermissions: []iam.Permission{ + { + Inherited: true, + InheritedFromObject: []string{"/directories/4454031293888593"}, + PermissionLevel: "CAN_MANAGE", + }, + }, + UserName: "shreyas.goenka@databricks.com", + DisplayName: "shreyas.goenka@databricks.com", + }, iam.AccessControlResponse{ + AllPermissions: []iam.Permission{ + { + Inherited: true, + InheritedFromObject: []string{"/directories/"}, + PermissionLevel: "CAN_MANAGE", + }, + }, + GroupName: "admins", + }) + } + // Validate job ownership requirements if requestObjectType == "jobs" { hasOwner := false From f9b0c5ca136c1aa2f987b0cae7528013c75da5b1 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Mon, 1 Dec 2025 04:33:37 +0100 Subject: [PATCH 05/18] fix TestAll --- bundle/config/resources/permission.go | 1 - bundle/direct/dresources/alert.go | 11 ++++++++++- bundle/direct/dresources/all_test.go | 4 ++-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/bundle/config/resources/permission.go b/bundle/config/resources/permission.go index 1515c3ba51..d7c3f6fca9 100644 --- a/bundle/config/resources/permission.go +++ b/bundle/config/resources/permission.go @@ -148,7 +148,6 @@ type SqlWarehousePermission struct { } // GetAPIRequestObjectType is used by direct to construct a request to permissions API: -// Untested, since we don't have alerts // https://github.com/databricks/terraform-provider-databricks/blob/430902d/permissions/permission_definitions.go#L775C24-L775C32 func (p AlertPermission) GetAPIRequestObjectType() string { return "/alertsv2/" } func (p AppPermission) GetAPIRequestObjectType() string { return "/apps/" } diff --git a/bundle/direct/dresources/alert.go b/bundle/direct/dresources/alert.go index 4a08219571..b699a53bf7 100644 --- a/bundle/direct/dresources/alert.go +++ b/bundle/direct/dresources/alert.go @@ -24,7 +24,16 @@ func (*ResourceAlert) PrepareState(input *resources.Alert) *sql.AlertV2 { // DoRead reads the alert by id. 
func (r *ResourceAlert) DoRead(ctx context.Context, id string) (*sql.AlertV2, error) {
- return r.client.AlertsV2.GetAlertById(ctx, id)
+ alert, err := r.client.AlertsV2.GetAlertById(ctx, id)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the alert is already marked as trashed, return a 404 on DoRead.
+ if alert.LifecycleState == sql.AlertLifecycleStateDeleted {
+ return nil, databricks.ErrResourceDoesNotExist
+ }
+ return alert, nil
 }
 // DoCreate creates the alert and returns its id.
diff --git a/bundle/direct/dresources/all_test.go b/bundle/direct/dresources/all_test.go
index fc6e56dd94..f68b6a177b 100644
--- a/bundle/direct/dresources/all_test.go
+++ b/bundle/direct/dresources/all_test.go
@@ -403,7 +403,7 @@ var testDeps = map[string]prepareWorkspace{
 }
 return &PermissionsState{
- ObjectID: "/alerts/" + resp.Id,
+ ObjectID: "/alertsv2/" + resp.Id,
 Permissions: []iam.AccessControlRequest{{
 PermissionLevel: "CAN_MANAGE",
 UserName: "user@example.com",
@@ -587,7 +587,7 @@ func testCRUD(t *testing.T, group string, adapter *Adapter, client *databricks.W
 }, remote, false)
 require.NoError(t, err)
- deleteIsNoop := strings.HasSuffix(group, "permissions") || strings.HasSuffix(group, "grants") || group == "alerts"
+ deleteIsNoop := strings.HasSuffix(group, "permissions") || strings.HasSuffix(group, "grants")
 remoteAfterDelete, err := adapter.DoRead(ctx, createdID)
 if deleteIsNoop {

From 53b4d3c7f3f1ef3316689ead3e785968a776338d Mon Sep 17 00:00:00 2001
From: Shreyas Goenka
Date: Mon, 1 Dec 2025 05:46:07 +0100
Subject: [PATCH 06/18] update
---
 .../alert/{alert.json => alert.json.tmpl} | 4 +-
 .../deployment/bind/alert/databricks.yml | 8 --
 .../deployment/bind/alert/databricks.yml.tmpl | 19 +++
 .../deployment/bind/alert/out.test.toml | 4 +-
 .../bundle/deployment/bind/alert/output.txt | 14 +-
 .../bundle/deployment/bind/alert/script | 4 +
 .../bundle/deployment/bind/alert/test.toml | 9 +-
 acceptance/bundle/refschema/out.fields.txt | 7 +
 .../bundle/summary/modified_status/output.txt | 121 +++---------------
 .../empty_resources/empty_def/output.txt | 12 +-
 .../empty_resources/empty_dict/output.txt | 12 +-
 .../validate/empty_resources/null/output.txt | 12 +-
 .../empty_resources/with_grants/output.txt | 14 +-
 .../with_permissions/output.txt | 12 +-
 .../validation/generated/enum_fields.go | 8 ++
 .../validation/generated/required_fields.go | 7 +
 16 files changed, 124 insertions(+), 143 deletions(-)
 rename acceptance/bundle/deployment/bind/alert/{alert.json => alert.json.tmpl} (87%)
 delete mode 100644 acceptance/bundle/deployment/bind/alert/databricks.yml
 create mode 100644 acceptance/bundle/deployment/bind/alert/databricks.yml.tmpl

diff --git a/acceptance/bundle/deployment/bind/alert/alert.json b/acceptance/bundle/deployment/bind/alert/alert.json.tmpl
similarity index 87%
rename from acceptance/bundle/deployment/bind/alert/alert.json
rename to acceptance/bundle/deployment/bind/alert/alert.json.tmpl
index 6e7406230c..e65d68f847 100644
--- a/acceptance/bundle/deployment/bind/alert/alert.json
+++ b/acceptance/bundle/deployment/bind/alert/alert.json.tmpl
@@ -1,7 +1,7 @@
 {
- "display_name": "Test Alert",
+ "display_name": "test-alert-$UNIQUE_NAME",
 "query_text": "SELECT 1",
- "warehouse_id": "0123-456789-warehouse0",
+ "warehouse_id": "$TEST_DEFAULT_WAREHOUSE_ID",
 "custom_summary": "Test Alert Summary",
 "custom_description": "Test Alert Description",
 "evaluation": {
diff --git a/acceptance/bundle/deployment/bind/alert/databricks.yml b/acceptance/bundle/deployment/bind/alert/databricks.yml
deleted file mode
100644 index a23cc7ce18..0000000000 --- a/acceptance/bundle/deployment/bind/alert/databricks.yml +++ /dev/null @@ -1,8 +0,0 @@ -resources: - alerts: - my_alert: - display_name: test-alert - query_text: "SELECT 1" - warehouse_id: "test-sql-warehouse" - custom_summary: "test-alert-summary" - custom_description: "test-alert-description" diff --git a/acceptance/bundle/deployment/bind/alert/databricks.yml.tmpl b/acceptance/bundle/deployment/bind/alert/databricks.yml.tmpl new file mode 100644 index 0000000000..05e5165e97 --- /dev/null +++ b/acceptance/bundle/deployment/bind/alert/databricks.yml.tmpl @@ -0,0 +1,19 @@ +bundle: + name: test-bind-alert-$UNIQUE_NAME + +resources: + alerts: + my_alert: + display_name: test-alert + evaluation: + comparison_operator: EQUAL + source: + name: "1" + threshold: + value: + double_value: 2 + query_text: select 2 + schedule: + quartz_cron_schedule: "44 19 */1 * * ?" + timezone_id: Europe/Amsterdam + warehouse_id: aaaaaaaaaaaaaaaa diff --git a/acceptance/bundle/deployment/bind/alert/out.test.toml b/acceptance/bundle/deployment/bind/alert/out.test.toml index 90061dedb1..3cdb920b67 100644 --- a/acceptance/bundle/deployment/bind/alert/out.test.toml +++ b/acceptance/bundle/deployment/bind/alert/out.test.toml @@ -1,5 +1,5 @@ -Local = true -Cloud = false +Local = false +Cloud = true [EnvMatrix] DATABRICKS_BUNDLE_ENGINE = ["terraform"] diff --git a/acceptance/bundle/deployment/bind/alert/output.txt b/acceptance/bundle/deployment/bind/alert/output.txt index d37176b247..27949d29d1 100644 --- a/acceptance/bundle/deployment/bind/alert/output.txt +++ b/acceptance/bundle/deployment/bind/alert/output.txt @@ -1,32 +1,32 @@ >>> [CLI] alerts-v2 create-alert --json @alert.json ->>> [CLI] bundle deployment bind my_alert [UUID] --auto-approve +>>> [CLI] bundle deployment bind my_alert [ALERT_ID] --auto-approve Updating deployment state... -Successfully bound alert with an id '[UUID]' +Successfully bound alert with an id '[ALERT_ID]' Run 'bundle deploy' to deploy changes to your workspace >>> [CLI] bundle summary -Name: test-bundle-$UNIQUE_NAME +Name: test-bind-alert-[UNIQUE_NAME] Target: default Workspace: User: [USERNAME] - Path: /Workspace/Users/[USERNAME]/.bundle/test-bundle-$UNIQUE_NAME/default + Path: /Workspace/Users/[USERNAME]/.bundle/test-bind-alert-[UNIQUE_NAME]/default Resources: Alerts: my_alert: Name: test-alert - URL: [DATABRICKS_URL]/sql/alerts-v2/[UUID]?o=[NUMID] + URL: [DATABRICKS_URL]/sql/alerts-v2/[ALERT_ID] >>> [CLI] bundle deployment unbind my_alert Updating deployment state... 
>>> [CLI] bundle summary -Name: test-bundle-$UNIQUE_NAME +Name: test-bind-alert-[UNIQUE_NAME] Target: default Workspace: User: [USERNAME] - Path: /Workspace/Users/[USERNAME]/.bundle/test-bundle-$UNIQUE_NAME/default + Path: /Workspace/Users/[USERNAME]/.bundle/test-bind-alert-[UNIQUE_NAME]/default Resources: Alerts: my_alert: diff --git a/acceptance/bundle/deployment/bind/alert/script b/acceptance/bundle/deployment/bind/alert/script index 024721873b..88e12f15c3 100644 --- a/acceptance/bundle/deployment/bind/alert/script +++ b/acceptance/bundle/deployment/bind/alert/script @@ -1,4 +1,8 @@ +envsubst < databricks.yml.tmpl > databricks.yml +envsubst < alert.json.tmpl > alert.json + alert_id=$(trace $CLI alerts-v2 create-alert --json @alert.json | jq -r '.id') +echo "$alert_id:ALERT_ID" >> ACC_REPLS trace $CLI bundle deployment bind my_alert $alert_id --auto-approve trace $CLI bundle summary diff --git a/acceptance/bundle/deployment/bind/alert/test.toml b/acceptance/bundle/deployment/bind/alert/test.toml index 5a92cae2e8..a69c533ecd 100644 --- a/acceptance/bundle/deployment/bind/alert/test.toml +++ b/acceptance/bundle/deployment/bind/alert/test.toml @@ -1,4 +1,9 @@ -Cloud = false -Local = true +Cloud = true +Local = false BundleConfigTarget = "databricks.yml" + +Ignore = [ + "databricks.yml", + "alert.json", +] diff --git a/acceptance/bundle/refschema/out.fields.txt b/acceptance/bundle/refschema/out.fields.txt index 8271361883..6ee3d24784 100644 --- a/acceptance/bundle/refschema/out.fields.txt +++ b/acceptance/bundle/refschema/out.fields.txt @@ -55,6 +55,13 @@ resources.alerts.*.schedule.timezone_id string ALL resources.alerts.*.update_time string ALL resources.alerts.*.url string INPUT resources.alerts.*.warehouse_id string ALL +resources.alerts.*.permissions.object_id string ALL +resources.alerts.*.permissions.permissions []iam.AccessControlRequest ALL +resources.alerts.*.permissions.permissions[*] iam.AccessControlRequest ALL +resources.alerts.*.permissions.permissions[*].group_name string ALL +resources.alerts.*.permissions.permissions[*].permission_level iam.PermissionLevel ALL +resources.alerts.*.permissions.permissions[*].service_principal_name string ALL +resources.alerts.*.permissions.permissions[*].user_name string ALL resources.apps.*.active_deployment *apps.AppDeployment ALL resources.apps.*.active_deployment.create_time string ALL resources.apps.*.active_deployment.creator string ALL diff --git a/acceptance/bundle/summary/modified_status/output.txt b/acceptance/bundle/summary/modified_status/output.txt index 974a3fe13c..a542866140 100644 --- a/acceptance/bundle/summary/modified_status/output.txt +++ b/acceptance/bundle/summary/modified_status/output.txt @@ -1,11 +1,18 @@ === Initial view of resources without id and modified_status=created >>> [CLI] bundle summary -o json -Warning: unknown field: alerts - at resources - in databricks.yml:4:3 - { + "alerts": { + "my_alert": { + "custom_description": "test-alert-description", + "custom_summary": "test-alert-summary", + "display_name": "test-alert", + "modified_status": "created", + "parent_path": "/Workspace/Users/[USERNAME]/.bundle/test-bundle/default/resources", + "query_text": "SELECT 1", + "warehouse_id": "test-sql-warehouse" + } + }, "pipelines": { "my_pipeline": { "channel": "CURRENT", @@ -48,109 +55,23 @@ Warning: unknown field: alerts } >>> [CLI] bundle deploy -Warning: unknown field: alerts - at resources - in databricks.yml:4:3 - Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/test-bundle/default/files... 
-Deploying resources... -Updating deployment state... -Deployment complete! +Error: exit status 1 -=== Post-deployment view of resources with id and without modified_status ->>> [CLI] bundle summary -o json -Warning: unknown field: alerts - at resources - in databricks.yml:4:3 +Error: Missing required argument -{ - "pipelines": { - "my_pipeline": { - "channel": "CURRENT", - "deployment": { - "kind": "BUNDLE", - "metadata_file_path": "/Workspace/Users/[USERNAME]/.bundle/test-bundle/default/state/metadata.json" - }, - "edition": "ADVANCED", - "id": "[UUID]", - "libraries": [ - { - "file": { - "path": "/Workspace/Users/[USERNAME]/.bundle/test-bundle/default/files/foo.py" - } - } - ], - "name": "test-pipeline", - "url": "[DATABRICKS_URL]/pipelines/[UUID]?o=[NUMID]" - } - }, - "schemas": { - "my_schema": { - "catalog_name": "main", - "comment": "COMMENT1", - "id": "main.test-schema", - "name": "test-schema", - "url": "[DATABRICKS_URL]/explore/data/main/test-schema?o=[NUMID]" - } - }, - "sql_warehouses": { - "my_sql_warehouse": { - "auto_stop_mins": 120, - "cluster_size": "X-Large", - "enable_photon": true, - "enable_serverless_compute": true, - "id": "[UUID]", - "max_num_clusters": 1, - "name": "test-sql-warehouse", - "spot_instance_policy": "COST_OPTIMIZED", - "url": "[DATABRICKS_URL]/sql/warehouses/[UUID]?o=[NUMID]" - } - } -} + on bundle.tf.json line 22, in resource.databricks_alert_v2.my_alert: + 22: } -=== Expecting all resources to have modified_status=deleted ->>> [CLI] bundle summary -o json -{ - "pipelines": { - "my_pipeline": { - "id": "[UUID]", - "modified_status": "deleted", - "url": "[DATABRICKS_URL]/pipelines/[UUID]?o=[NUMID]" - } - }, - "schemas": { - "my_schema": { - "id": "main.test-schema", - "modified_status": "deleted", - "url": "[DATABRICKS_URL]/explore/data/main/test-schema?o=[NUMID]" - } - }, - "sql_warehouses": { - "my_sql_warehouse": { - "id": "[UUID]", - "modified_status": "deleted", - "url": "[DATABRICKS_URL]/sql/warehouses/[UUID]?o=[NUMID]" - } - } -} +The argument "evaluation" is required, but no definition was found. ->>> [CLI] bundle destroy --auto-approve -The following resources will be deleted: - delete resources.pipelines.my_pipeline - delete resources.schemas.my_schema - delete resources.sql_warehouses.my_sql_warehouse +Error: Missing required argument -This action will result in the deletion of the following UC schemas. Any underlying data may be lost: - delete resources.schemas.my_schema + on bundle.tf.json line 22, in resource.databricks_alert_v2.my_alert: + 22: } -This action will result in the deletion of the following Lakeflow Declarative Pipelines along with the -Streaming Tables (STs) and Materialized Views (MVs) managed by them: - delete resources.pipelines.my_pipeline +The argument "schedule" is required, but no definition was found. -All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/test-bundle/default -Deleting files... -Destroy complete! 
->>> [CLI] bundle summary -o json -{} +Exit code: 1 diff --git a/acceptance/bundle/validate/empty_resources/empty_def/output.txt b/acceptance/bundle/validate/empty_resources/empty_def/output.txt index 85b27f9899..5e4ac03100 100644 --- a/acceptance/bundle/validate/empty_resources/empty_def/output.txt +++ b/acceptance/bundle/validate/empty_resources/empty_def/output.txt @@ -143,8 +143,12 @@ Error: secret_scope rname is not defined } === resources.alerts.rname === -Warning: unknown field: alerts - at resources - in databricks.yml:5:3 +Error: alert rname is not defined + at resources.alerts.rname + in databricks.yml:6:11 -{} +{ + "alerts": { + "rname": null + } +} diff --git a/acceptance/bundle/validate/empty_resources/empty_dict/output.txt b/acceptance/bundle/validate/empty_resources/empty_dict/output.txt index f2692be0f6..d978b0914c 100644 --- a/acceptance/bundle/validate/empty_resources/empty_dict/output.txt +++ b/acceptance/bundle/validate/empty_resources/empty_dict/output.txt @@ -187,8 +187,10 @@ Warning: required field "name" is not set } === resources.alerts.rname === -Warning: unknown field: alerts - at resources - in databricks.yml:5:3 - -{} +{ + "alerts": { + "rname": { + "parent_path": "/Workspace/Users/[USERNAME]/.bundle/BUNDLE/default/resources" + } + } +} diff --git a/acceptance/bundle/validate/empty_resources/null/output.txt b/acceptance/bundle/validate/empty_resources/null/output.txt index cedc5af0d3..1c64bbb717 100644 --- a/acceptance/bundle/validate/empty_resources/null/output.txt +++ b/acceptance/bundle/validate/empty_resources/null/output.txt @@ -143,8 +143,12 @@ Error: secret_scope rname is not defined } === resources.alerts.rname === -Warning: unknown field: alerts - at resources - in databricks.yml:5:3 +Error: alert rname is not defined + at resources.alerts.rname + in databricks.yml:6:12 -{} +{ + "alerts": { + "rname": null + } +} diff --git a/acceptance/bundle/validate/empty_resources/with_grants/output.txt b/acceptance/bundle/validate/empty_resources/with_grants/output.txt index 4bd690c2e2..de989a6f00 100644 --- a/acceptance/bundle/validate/empty_resources/with_grants/output.txt +++ b/acceptance/bundle/validate/empty_resources/with_grants/output.txt @@ -232,8 +232,14 @@ Warning: required field "name" is not set } === resources.alerts.rname === -Warning: unknown field: alerts - at resources - in databricks.yml:5:3 +Warning: unknown field: grants + at resources.alerts.rname + in databricks.yml:7:7 -{} +{ + "alerts": { + "rname": { + "parent_path": "/Workspace/Users/[USERNAME]/.bundle/BUNDLE/default/resources" + } + } +} diff --git a/acceptance/bundle/validate/empty_resources/with_permissions/output.txt b/acceptance/bundle/validate/empty_resources/with_permissions/output.txt index 48ddbb156d..5a0520c379 100644 --- a/acceptance/bundle/validate/empty_resources/with_permissions/output.txt +++ b/acceptance/bundle/validate/empty_resources/with_permissions/output.txt @@ -203,8 +203,10 @@ Warning: required field "name" is not set } === resources.alerts.rname === -Warning: unknown field: alerts - at resources - in databricks.yml:5:3 - -{} +{ + "alerts": { + "rname": { + "parent_path": "/Workspace/Users/[USERNAME]/.bundle/BUNDLE/default/resources" + } + } +} diff --git a/bundle/internal/validation/generated/enum_fields.go b/bundle/internal/validation/generated/enum_fields.go index c8b13327fc..09b60e0eab 100644 --- a/bundle/internal/validation/generated/enum_fields.go +++ b/bundle/internal/validation/generated/enum_fields.go @@ -8,6 +8,14 @@ var EnumFields = map[string][]string{ 
"artifacts.*.executable": {"bash", "sh", "cmd"}, "artifacts.*.type": {"whl", "jar"}, + "resources.alerts.*.evaluation.comparison_operator": {"EQUAL", "GREATER_THAN", "GREATER_THAN_OR_EQUAL", "IS_NOT_NULL", "IS_NULL", "LESS_THAN", "LESS_THAN_OR_EQUAL", "NOT_EQUAL"}, + "resources.alerts.*.evaluation.empty_result_state": {"ERROR", "OK", "TRIGGERED", "UNKNOWN"}, + "resources.alerts.*.evaluation.source.aggregation": {"AVG", "COUNT", "COUNT_DISTINCT", "MAX", "MEDIAN", "MIN", "STDDEV", "SUM"}, + "resources.alerts.*.evaluation.state": {"ERROR", "OK", "TRIGGERED", "UNKNOWN"}, + "resources.alerts.*.evaluation.threshold.column.aggregation": {"AVG", "COUNT", "COUNT_DISTINCT", "MAX", "MEDIAN", "MIN", "STDDEV", "SUM"}, + "resources.alerts.*.lifecycle_state": {"ACTIVE", "DELETED"}, + "resources.alerts.*.schedule.pause_status": {"PAUSED", "UNPAUSED"}, + "resources.apps.*.active_deployment.mode": {"AUTO_SYNC", "SNAPSHOT"}, "resources.apps.*.active_deployment.status.state": {"CANCELLED", "FAILED", "IN_PROGRESS", "SUCCEEDED"}, "resources.apps.*.app_status.state": {"CRASHED", "DEPLOYING", "RUNNING", "UNAVAILABLE"}, diff --git a/bundle/internal/validation/generated/required_fields.go b/bundle/internal/validation/generated/required_fields.go index 432dba0a7d..c73614cb33 100644 --- a/bundle/internal/validation/generated/required_fields.go +++ b/bundle/internal/validation/generated/required_fields.go @@ -11,6 +11,13 @@ var RequiredFields = map[string][]string{ "permissions[*]": {"level"}, + "resources.alerts.*": {"display_name", "evaluation", "query_text", "schedule", "warehouse_id"}, + "resources.alerts.*.evaluation": {"comparison_operator", "source"}, + "resources.alerts.*.evaluation.source": {"name"}, + "resources.alerts.*.evaluation.threshold.column": {"name"}, + "resources.alerts.*.permissions[*]": {"level"}, + "resources.alerts.*.schedule": {"quartz_cron_schedule", "timezone_id"}, + "resources.apps.*": {"name", "source_code_path"}, "resources.apps.*.permissions[*]": {"level"}, "resources.apps.*.resources[*]": {"name"}, From 7de37dbe4dedf66bec67013b5102381b6a44e3d9 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Mon, 1 Dec 2025 05:50:44 +0100 Subject: [PATCH 07/18] pass acc tests --- .../bundle/summary/modified_status/output.txt | 16 +++++++++++++++ .../empty_resources/empty_dict/output.txt | 20 +++++++++++++++++++ .../empty_resources/with_grants/output.txt | 20 +++++++++++++++++++ .../with_permissions/output.txt | 20 +++++++++++++++++++ 4 files changed, 76 insertions(+) diff --git a/acceptance/bundle/summary/modified_status/output.txt b/acceptance/bundle/summary/modified_status/output.txt index a542866140..1ac7dce61c 100644 --- a/acceptance/bundle/summary/modified_status/output.txt +++ b/acceptance/bundle/summary/modified_status/output.txt @@ -1,6 +1,14 @@ === Initial view of resources without id and modified_status=created >>> [CLI] bundle summary -o json +Warning: required field "evaluation" is not set + at resources.alerts.my_alert + in databricks.yml:6:7 + +Warning: required field "schedule" is not set + at resources.alerts.my_alert + in databricks.yml:6:7 + { "alerts": { "my_alert": { @@ -55,6 +63,14 @@ } >>> [CLI] bundle deploy +Warning: required field "evaluation" is not set + at resources.alerts.my_alert + in databricks.yml:6:7 + +Warning: required field "schedule" is not set + at resources.alerts.my_alert + in databricks.yml:6:7 + Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/test-bundle/default/files... 
Error: exit status 1 diff --git a/acceptance/bundle/validate/empty_resources/empty_dict/output.txt b/acceptance/bundle/validate/empty_resources/empty_dict/output.txt index d978b0914c..bc0635fe8c 100644 --- a/acceptance/bundle/validate/empty_resources/empty_dict/output.txt +++ b/acceptance/bundle/validate/empty_resources/empty_dict/output.txt @@ -187,6 +187,26 @@ Warning: required field "name" is not set } === resources.alerts.rname === +Warning: required field "display_name" is not set + at resources.alerts.rname + in databricks.yml:6:12 + +Warning: required field "evaluation" is not set + at resources.alerts.rname + in databricks.yml:6:12 + +Warning: required field "query_text" is not set + at resources.alerts.rname + in databricks.yml:6:12 + +Warning: required field "schedule" is not set + at resources.alerts.rname + in databricks.yml:6:12 + +Warning: required field "warehouse_id" is not set + at resources.alerts.rname + in databricks.yml:6:12 + { "alerts": { "rname": { diff --git a/acceptance/bundle/validate/empty_resources/with_grants/output.txt b/acceptance/bundle/validate/empty_resources/with_grants/output.txt index de989a6f00..221a4c10a7 100644 --- a/acceptance/bundle/validate/empty_resources/with_grants/output.txt +++ b/acceptance/bundle/validate/empty_resources/with_grants/output.txt @@ -236,6 +236,26 @@ Warning: unknown field: grants at resources.alerts.rname in databricks.yml:7:7 +Warning: required field "display_name" is not set + at resources.alerts.rname + in databricks.yml:7:7 + +Warning: required field "evaluation" is not set + at resources.alerts.rname + in databricks.yml:7:7 + +Warning: required field "query_text" is not set + at resources.alerts.rname + in databricks.yml:7:7 + +Warning: required field "schedule" is not set + at resources.alerts.rname + in databricks.yml:7:7 + +Warning: required field "warehouse_id" is not set + at resources.alerts.rname + in databricks.yml:7:7 + { "alerts": { "rname": { diff --git a/acceptance/bundle/validate/empty_resources/with_permissions/output.txt b/acceptance/bundle/validate/empty_resources/with_permissions/output.txt index 5a0520c379..87adda9e5e 100644 --- a/acceptance/bundle/validate/empty_resources/with_permissions/output.txt +++ b/acceptance/bundle/validate/empty_resources/with_permissions/output.txt @@ -203,6 +203,26 @@ Warning: required field "name" is not set } === resources.alerts.rname === +Warning: required field "display_name" is not set + at resources.alerts.rname + in databricks.yml:7:7 + +Warning: required field "evaluation" is not set + at resources.alerts.rname + in databricks.yml:7:7 + +Warning: required field "query_text" is not set + at resources.alerts.rname + in databricks.yml:7:7 + +Warning: required field "schedule" is not set + at resources.alerts.rname + in databricks.yml:7:7 + +Warning: required field "warehouse_id" is not set + at resources.alerts.rname + in databricks.yml:7:7 + { "alerts": { "rname": { From 53b4d3c7f3f1ef3316689ead3e785968a776338d Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Mon, 1 Dec 2025 12:21:56 +0100 Subject: [PATCH 08/18] update python codegen --- .../internal/schema/annotations_openapi.yml | 520 +++++++++++------- .../schema/annotations_openapi_overrides.yml | 120 ++++ bundle/schema/jsonschema.json | 292 +++++----- .../jobs/_models/clean_rooms_notebook_task.py | 5 +- .../bundles/jobs/_models/condition.py | 4 + .../bundles/jobs/_models/continuous.py | 4 + .../bundles/jobs/_models/environment.py | 10 - .../bundles/jobs/_models/spark_jar_task.py | 8 +- 
.../bundles/jobs/_models/spark_submit_task.py | 4 +- .../table_update_trigger_configuration.py | 8 +- .../databricks/bundles/jobs/_models/task.py | 24 +- .../bundles/jobs/_models/task_retry_mode.py | 2 + .../bundles/jobs/_models/trigger_settings.py | 6 + .../bundles/pipelines/_models/day_of_week.py | 2 +- .../_models/ingestion_pipeline_definition.py | 14 - ...ne_definition_workday_report_parameters.py | 40 +- ...rkday_report_parameters_query_key_value.py | 26 +- .../bundles/pipelines/_models/pipeline.py | 10 + .../bundles/pipelines/_models/run_as.py | 2 + .../_models/table_specific_config.py | 10 - .../bundles/volumes/_models/volume_type.py | 8 +- 21 files changed, 653 insertions(+), 466 deletions(-) diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index a2704cf7f5..850b97eb50 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -1,4 +1,71 @@ # This file is auto-generated. DO NOT EDIT. +github.com/databricks/cli/bundle/config/resources.Alert: + "create_time": + "description": |- + The timestamp indicating when the alert was created. + "x-databricks-field-behaviors_output_only": |- + true + "custom_description": + "description": |- + Custom description for the alert. support mustache template. + "custom_summary": + "description": |- + Custom summary for the alert. support mustache template. + "display_name": + "description": |- + The display name of the alert. + "effective_run_as": + "description": |- + The actual identity that will be used to execute the alert. + This is an output-only field that shows the resolved run-as identity after applying + permissions and defaults. + "x-databricks-field-behaviors_output_only": |- + true + "evaluation": {} + "id": + "description": |- + UUID identifying the alert. + "x-databricks-field-behaviors_output_only": |- + true + "lifecycle_state": + "description": |- + Indicates whether the query is trashed. + "x-databricks-field-behaviors_output_only": |- + true + "owner_user_name": + "description": |- + The owner's username. This field is set to "Unavailable" if the user has been deleted. + "x-databricks-field-behaviors_output_only": |- + true + "parent_path": + "description": |- + The workspace path of the folder containing the alert. Can only be set on create, and cannot be updated. + "query_text": + "description": |- + Text of the query to be run. + "run_as": + "description": |- + Specifies the identity that will be used to run the alert. + This field allows you to configure alerts to run as a specific user or service principal. + - For user identity: Set `user_name` to the email of an active workspace user. Users can only set this to their own email. + - For service principal: Set `service_principal_name` to the application ID. Requires the `servicePrincipal/user` role. + If not specified, the alert will run as the request user. + "run_as_user_name": + "description": |- + The run as username or application ID of service principal. + On Create and Update, this field can be set to application ID of an active service principal. Setting this field requires the servicePrincipal/user role. + Deprecated: Use `run_as` field instead. This field will be removed in a future release. + "deprecation_message": |- + This field is deprecated + "schedule": {} + "update_time": + "description": |- + The timestamp indicating when the alert was updated. 
+ "x-databricks-field-behaviors_output_only": |- + true + "warehouse_id": + "description": |- + ID of the SQL warehouse attached to the alert. github.com/databricks/cli/bundle/config/resources.App: "active_deployment": "description": |- @@ -10,7 +77,6 @@ github.com/databricks/cli/bundle/config/resources.App: "x-databricks-field-behaviors_output_only": |- true "budget_policy_id": {} - "compute_size": {} "compute_status": "x-databricks-field-behaviors_output_only": |- true @@ -323,43 +389,50 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "custom_tags": "description": |- Custom tags associated with the instance. This field is only included on create and update responses. - "effective_capacity": - "description": |- - Deprecated. The sku of the instance; this field will always match the value of capacity. - "deprecation_message": |- - This field is deprecated - "x-databricks-field-behaviors_output_only": |- - true + "x-databricks-preview": |- + PRIVATE "effective_custom_tags": "description": |- The recorded custom tags associated with the instance. "x-databricks-field-behaviors_output_only": |- true + "x-databricks-preview": |- + PRIVATE "effective_enable_pg_native_login": "description": |- - Whether the instance has PG native password login enabled. + xref AIP-129. `enable_pg_native_login` is owned by the client, while `effective_enable_pg_native_login` is owned by the server. + `enable_pg_native_login` will only be set in Create/Update response messages if and only if the user provides the field via the request. + `effective_enable_pg_native_login` on the other hand will always bet set in all response messages (Create/Update/Get/List). "x-databricks-field-behaviors_output_only": |- true + "x-databricks-preview": |- + PRIVATE "effective_enable_readable_secondaries": "description": |- - Whether secondaries serving read-only traffic are enabled. Defaults to false. + xref AIP-129. `enable_readable_secondaries` is owned by the client, while `effective_enable_readable_secondaries` is owned by the server. + `enable_readable_secondaries` will only be set in Create/Update response messages if and only if the user provides the field via the request. + `effective_enable_readable_secondaries` on the other hand will always bet set in all response messages (Create/Update/Get/List). "x-databricks-field-behaviors_output_only": |- true "effective_node_count": "description": |- - The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to - 1 primary and 0 secondaries. + xref AIP-129. `node_count` is owned by the client, while `effective_node_count` is owned by the server. + `node_count` will only be set in Create/Update response messages if and only if the user provides the field via the request. + `effective_node_count` on the other hand will always bet set in all response messages (Create/Update/Get/List). "x-databricks-field-behaviors_output_only": |- true "effective_retention_window_in_days": "description": |- - The retention window for the instance. This is the time window in days - for which the historical data is retained. + xref AIP-129. `retention_window_in_days` is owned by the client, while `effective_retention_window_in_days` is owned by the server. + `retention_window_in_days` will only be set in Create/Update response messages if and only if the user provides the field via the request. + `effective_retention_window_in_days` on the other hand will always bet set in all response messages (Create/Update/Get/List). 
"x-databricks-field-behaviors_output_only": |- true "effective_stopped": "description": |- - Whether the instance is stopped. + xref AIP-129. `stopped` is owned by the client, while `effective_stopped` is owned by the server. + `stopped` will only be set in Create/Update response messages if and only if the user provides the field via the request. + `effective_stopped` on the other hand will always bet set in all response messages (Create/Update/Get/List). "x-databricks-field-behaviors_output_only": |- true "effective_usage_policy_id": @@ -367,9 +440,13 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: The policy that is applied to the instance. "x-databricks-field-behaviors_output_only": |- true + "x-databricks-preview": |- + PRIVATE "enable_pg_native_login": "description": |- - Whether to enable PG native password login on the instance. Defaults to false. + Whether the instance has PG native password login enabled. Defaults to true. + "x-databricks-preview": |- + PRIVATE "enable_readable_secondaries": "description": |- Whether to enable secondaries to serve read-only traffic. Defaults to false. @@ -379,7 +456,7 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "node_count": "description": |- The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to - 1 primary and 0 secondaries. This field is input only, see effective_node_count for the output. + 1 primary and 0 secondaries. "parent_instance_ref": "description": |- The ref of the parent instance. This is only available if the instance is @@ -414,7 +491,7 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: true "stopped": "description": |- - Whether to stop the instance. An input only param, see effective_stopped for the output. + Whether the instance is stopped. "uid": "description": |- An immutable UUID identifier for the instance. @@ -423,6 +500,8 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "usage_policy_id": "description": |- The desired usage policy to associate with the instance. + "x-databricks-preview": |- + PRIVATE github.com/databricks/cli/bundle/config/resources.Job: "budget_policy_id": "description": |- @@ -589,6 +668,8 @@ github.com/databricks/cli/bundle/config/resources.Pipeline: "budget_policy_id": "description": |- Budget policy of this pipeline. + "x-databricks-preview": |- + PRIVATE "catalog": "description": |- A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog. @@ -661,6 +742,8 @@ github.com/databricks/cli/bundle/config/resources.Pipeline: Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline. Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown. + "x-databricks-preview": |- + PRIVATE "schema": "description": |- The default schema (database) where tables are read from or published to. @@ -743,48 +826,21 @@ github.com/databricks/cli/bundle/config/resources.QualityMonitor: Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. 
github.com/databricks/cli/bundle/config/resources.RegisteredModel: - "aliases": - "description": |- - List of aliases associated with the registered model - "browse_only": - "description": |- - Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request. "catalog_name": "description": |- The name of the catalog where the schema and the registered model reside "comment": "description": |- The comment attached to the registered model - "created_at": - "description": |- - Creation timestamp of the registered model in milliseconds since the Unix epoch - "created_by": - "description": |- - The identifier of the user who created the registered model - "full_name": - "description": |- - The three-level (fully qualified) name of the registered model - "metastore_id": - "description": |- - The unique identifier of the metastore "name": "description": |- The name of the registered model - "owner": - "description": |- - The identifier of the user who owns the registered model "schema_name": "description": |- The name of the schema where the registered model resides "storage_location": "description": |- The storage location on the cloud under which model version data files are stored - "updated_at": - "description": |- - Last-update timestamp of the registered model in milliseconds since the Unix epoch - "updated_by": - "description": |- - The identifier of the user who updated the registered model last time github.com/databricks/cli/bundle/config/resources.Schema: "catalog_name": "description": |- @@ -802,39 +858,36 @@ github.com/databricks/cli/bundle/config/resources.Schema: "description": |- Storage root URL for managed tables within schema. github.com/databricks/cli/bundle/config/resources.SqlWarehouse: - "_": - "description": |- - Creates a new SQL warehouse. "auto_stop_mins": "description": |- - The amount of time in minutes that a SQL warehouse must be idle (i.e., no - RUNNING queries) before it is automatically stopped. + The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before + it is automatically stopped. Supported values: - - Must be == 0 or >= 10 mins - - 0 indicates no autostop. + - Must be >= 0 mins for serverless warehouses + - Must be == 0 or >= 10 mins for non-serverless warehouses + - 0 indicates no autostop. Defaults to 120 mins "channel": "description": |- Channel Details "cluster_size": - "description": |- + "description": | Size of the clusters allocated for this warehouse. - Increasing the size of a spark cluster allows you to run larger queries on - it. If you want to increase the number of concurrent queries, please tune - max_num_clusters. + Increasing the size of a spark cluster allows you to run larger queries on it. + If you want to increase the number of concurrent queries, please tune max_num_clusters. Supported values: - - 2X-Small - - X-Small - - Small - - Medium - - Large - - X-Large - - 2X-Large - - 3X-Large - - 4X-Large + - 2X-Small + - X-Small + - Small + - Medium + - Large + - X-Large + - 2X-Large + - 3X-Large + - 4X-Large "creator_name": "description": |- warehouse creator name @@ -853,25 +906,22 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: This field is deprecated "max_num_clusters": "description": |- - Maximum number of clusters that the autoscaler will create to handle - concurrent queries. + Maximum number of clusters that the autoscaler will create to handle concurrent queries. 
Supported values: - - Must be >= min_num_clusters - - Must be <= 40. + - Must be >= min_num_clusters + - Must be <= 30. Defaults to min_clusters if unset. "min_num_clusters": "description": |- - Minimum number of available clusters that will be maintained for this SQL - warehouse. Increasing this will ensure that a larger number of clusters are - always running and therefore may reduce the cold start time for new - queries. This is similar to reserved vs. revocable cores in a resource - manager. + Minimum number of available clusters that will be maintained for this SQL warehouse. + Increasing this will ensure that a larger number of clusters are always running and therefore may reduce + the cold start time for new queries. This is similar to reserved vs. revocable cores in a resource manager. Supported values: - - Must be > 0 - - Must be <= min(max_num_clusters, 30) + - Must be > 0 + - Must be <= min(max_num_clusters, 30) Defaults to 1 "name": @@ -879,26 +929,25 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: Logical name for the cluster. Supported values: - - Must be unique within an org. - - Must be less than 100 characters. + - Must be unique within an org. + - Must be less than 100 characters. "spot_instance_policy": "description": |- - Configurations whether the endpoint should use spot instances. + Configurations whether the warehouse should use spot instances. "tags": "description": |- A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. Supported values: - - Number of tags < 45. + - Number of tags < 45. "warehouse_type": "description": |- - Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, - you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. + Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: "_": "description": |- - Next field marker: 18 + Next field marker: 14 "data_synchronization_status": "description": |- Synced Table data synchronization status @@ -964,9 +1013,7 @@ github.com/databricks/cli/bundle/config/resources.Volume: The storage location on the cloud "volume_type": "description": |- - The type of the volume. An external volume is located in the specified external location. - A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. - [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) + The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) github.com/databricks/databricks-sdk-go/service/apps.AppDeployment: "create_time": "description": |- @@ -1043,7 +1090,6 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResource: "description": "description": |- Description of the App Resource. 
- "genie_space": {} "job": {} "name": "description": |- @@ -1061,21 +1107,6 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResourceDatabaseDatabase "enum": - |- CAN_CONNECT_AND_CREATE -github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpace: - "name": {} - "permission": {} - "space_id": {} -github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpaceGenieSpacePermission: - "_": - "enum": - - |- - CAN_MANAGE - - |- - CAN_EDIT - - |- - CAN_RUN - - |- - CAN_VIEW github.com/databricks/databricks-sdk-go/service/apps.AppResourceJob: "id": "description": |- @@ -1185,13 +1216,6 @@ github.com/databricks/databricks-sdk-go/service/apps.ApplicationStatus: State of the application. "x-databricks-field-behaviors_output_only": |- true -github.com/databricks/databricks-sdk-go/service/apps.ComputeSize: - "_": - "enum": - - |- - MEDIUM - - |- - LARGE github.com/databricks/databricks-sdk-go/service/apps.ComputeState: "_": "enum": @@ -1351,31 +1375,24 @@ github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries: "description": |- Column for the timestamp. github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias: + "_": + "description": |- + Registered model alias. "alias_name": "description": |- Name of the alias, e.g. 'champion' or 'latest_stable' - "catalog_name": - "description": |- - The name of the catalog containing the model version - "id": - "description": |- - The unique identifier of the alias - "model_name": - "description": |- - The name of the parent registered model of the model version, relative to parent schema - "schema_name": - "description": |- - The name of the schema containing the model version, relative to parent catalog "version_num": "description": |- Integer version number of the model version to which this alias points. github.com/databricks/databricks-sdk-go/service/catalog.VolumeType: "_": + "description": |- + The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) "enum": - - |- - MANAGED - |- EXTERNAL + - |- + MANAGED github.com/databricks/databricks-sdk-go/service/compute.Adlsgen2Info: "_": "description": |- @@ -1838,11 +1855,6 @@ github.com/databricks/databricks-sdk-go/service/compute.Environment: Required. Environment version used by the environment. Each version comes with a specific Python version and a set of Python packages. The version is a string, consisting of an integer. - "java_dependencies": - "description": |- - List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`. - "x-databricks-preview": |- - PRIVATE github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes: "_": "description": |- @@ -2121,6 +2133,9 @@ github.com/databricks/databricks-sdk-go/service/database.DatabaseInstanceRef: Output: Only populated if provided as input to create a child instance. "effective_lsn": "description": |- + xref AIP-129. `lsn` is owned by the client, while `effective_lsn` is owned by the server. + `lsn` will only be set in Create/Update response messages if and only if the user provides the field via the request. + `effective_lsn` on the other hand will always bet set in all response messages (Create/Update/Get/List). 
For a parent ref instance, this is the LSN on the parent instance from which the instance was created. For a child ref instance, this is the LSN on the instance from which the child instance @@ -2463,10 +2478,6 @@ github.com/databricks/databricks-sdk-go/service/jobs.AuthenticationMethod: - |- PAT github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask: - "_": - "description": |- - Clean Rooms notebook task for V1 Clean Room service (GA). - Replaces the deprecated CleanRoomNotebookTask (defined above) which was for V0 service. "clean_room_name": "description": |- The clean room that the notebook belongs to. @@ -2537,6 +2548,8 @@ github.com/databricks/databricks-sdk-go/service/jobs.Continuous: "task_retry_mode": "description": |- Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER. + "x-databricks-preview": |- + PRIVATE github.com/databricks/databricks-sdk-go/service/jobs.CronSchedule: "pause_status": "description": |- @@ -3065,8 +3078,6 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: "dbt_commands": "description": |- An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]` - - ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. "deprecation_message": |- This field is deprecated "x-databricks-preview": |- @@ -3079,7 +3090,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: jar_params cannot be specified in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. "deprecation_message": |- This field is deprecated "x-databricks-preview": |- @@ -3099,7 +3110,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: notebook_params cannot be specified in conjunction with jar_params. - ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. "deprecation_message": |- @@ -3121,7 +3132,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. Important @@ -3138,7 +3149,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: parameters specified in job setting. 
The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. + Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs Important @@ -3151,8 +3162,6 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: "sql_params": "description": |- A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom parameters. - - ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. "deprecation_message": |- This field is deprecated "x-databricks-preview": |- @@ -3174,9 +3183,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.Source: github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask: "jar_uri": "description": |- - Deprecated since 04/2016. For classic compute, provide a `jar` through the `libraries` field instead. For serverless compute, provide a `jar` though the `java_dependencies` field inside the `environments` list. - - See the examples of classic and serverless compute usage at the top of the page. + Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create. "deprecation_message": |- This field is deprecated "main_class_name": @@ -3319,7 +3326,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfigura the last time the trigger fired. The minimum allowed value is 60 seconds. "table_names": "description": |- - A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. "wait_after_last_change_seconds": "description": |- If set, the trigger starts a run only after no table updates have occurred for the specified time @@ -3328,7 +3335,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfigura github.com/databricks/databricks-sdk-go/service/jobs.Task: "clean_rooms_notebook_task": "description": |- - The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook + The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook when the `clean_rooms_notebook_task` field is present. "condition_task": "description": |- @@ -3442,9 +3449,15 @@ github.com/databricks/databricks-sdk-go/service/jobs.Task: The task runs a Python file when the `spark_python_task` field is present. "spark_submit_task": "description": |- - (Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit). - "deprecation_message": |- - This field is deprecated + (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute. + + In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. 
Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. + + `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters. + + By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage. + + The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths. "sql_task": "description": |- The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present. @@ -3522,7 +3535,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.TriggerSettings: "periodic": "description": |- Periodic trigger settings. - "table_update": {} + "table_update": + "x-databricks-preview": |- + PRIVATE github.com/databricks/databricks-sdk-go/service/jobs.Webhook: "id": {} github.com/databricks/databricks-sdk-go/service/jobs.WebhookNotifications: @@ -3570,7 +3585,7 @@ github.com/databricks/databricks-sdk-go/service/pipelines.CronTrigger: github.com/databricks/databricks-sdk-go/service/pipelines.DayOfWeek: "_": "description": |- - Days of week in which the window is allowed to happen. + Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). If not specified all days of the week will be used. "enum": - |- @@ -3656,13 +3671,6 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefin "ingestion_gateway_id": "description": |- Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server. - "netsuite_jar_path": - "description": |- - Netsuite only configuration. When the field is set for a netsuite connector, - the jar stored in the field will be validated and added to the classpath of - pipeline's cluster. - "x-databricks-preview": |- - PRIVATE "objects": "description": |- Required. Settings specifying tables to replicate and the destination for the replicated tables. @@ -3718,40 +3726,6 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefin This field is mutable and can be updated without triggering a full snapshot. "x-databricks-preview": |- PRIVATE -github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionWorkdayReportParameters: - "incremental": - "description": |- - (Optional) Marks the report as incremental. - This field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now - controlled by the `parameters` field. - "deprecation_message": |- - This field is deprecated - "parameters": - "description": |- - Parameters for the Workday report. Each key represents the parameter name (e.g., "start_date", "end_date"), - and the corresponding value is a SQL-like expression used to compute the parameter value at runtime. - Example: - { - "start_date": "{ coalesce(current_offset(), date(\"2025-02-01\")) }", - "end_date": "{ current_date() - INTERVAL 1 DAY }" - } - "report_parameters": - "description": |- - (Optional) Additional custom parameters for Workday Report - This field is deprecated and should not be used. Use `parameters` instead. 
- "deprecation_message": |- - This field is deprecated -github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue: - "key": - "description": |- - Key for the report parameter, can be a column name or other metadata - "value": - "description": |- - Value for the report parameter. - Possible values it can take are these sql functions: - 1. coalesce(current_offset(), date("YYYY-MM-DD")) -> if current_offset() is null, then the passed date, else current_offset() - 2. current_date() - 3. date_sub(current_date(), x) -> subtract x (some non-negative integer) days from current date github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: "_": "enum": @@ -3759,6 +3733,10 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: MYSQL - |- POSTGRESQL + - |- + REDSHIFT + - |- + SQLDW - |- SQLSERVER - |- @@ -3783,6 +3761,10 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: SHAREPOINT - |- DYNAMICS365 + - |- + CONFLUENCE + - |- + META_MARKETING github.com/databricks/databricks-sdk-go/service/pipelines.ManualTrigger: {} github.com/databricks/databricks-sdk-go/service/pipelines.NotebookLibrary: "path": @@ -4135,11 +4117,6 @@ github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig: "sequence_by": "description": |- The column names specifying the logical order of events in the source data. Delta Live Tables uses this sequencing to handle change events that arrive out of order. - "workday_report_parameters": - "description": |- - (Optional) Additional custom parameters for Workday Report - "x-databricks-preview": |- - PRIVATE github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType: "_": "description": |- @@ -4777,6 +4754,97 @@ github.com/databricks/databricks-sdk-go/service/serving.TrafficConfig: "routes": "description": |- The list of routes that define traffic to each served entity. +github.com/databricks/databricks-sdk-go/service/sql.Aggregation: + "_": + "enum": + - |- + SUM + - |- + COUNT + - |- + COUNT_DISTINCT + - |- + AVG + - |- + MEDIAN + - |- + MIN + - |- + MAX + - |- + STDDEV +github.com/databricks/databricks-sdk-go/service/sql.AlertEvaluationState: + "_": + "description": |- + UNSPECIFIED - default unspecify value for proto enum, do not use it in the code + UNKNOWN - alert not yet evaluated + TRIGGERED - alert is triggered + OK - alert is not triggered + ERROR - alert evaluation failed + "enum": + - |- + UNKNOWN + - |- + TRIGGERED + - |- + OK + - |- + ERROR +github.com/databricks/databricks-sdk-go/service/sql.AlertV2Evaluation: + "comparison_operator": + "description": |- + Operator used for comparison in alert evaluation. + "empty_result_state": + "description": |- + Alert state if result is empty. + "last_evaluated_at": + "description": |- + Timestamp of the last evaluation. + "x-databricks-field-behaviors_output_only": |- + true + "notification": + "description": |- + User or Notification Destination to notify when alert is triggered. + "source": + "description": |- + Source column from result to use to evaluate alert + "state": + "description": |- + Latest state of alert evaluation. + "x-databricks-field-behaviors_output_only": |- + true + "threshold": + "description": |- + Threshold to user for alert evaluation, can be a column or a value. 
+github.com/databricks/databricks-sdk-go/service/sql.AlertV2Notification: + "notify_on_ok": + "description": |- + Whether to notify alert subscribers when alert returns back to normal. + "retrigger_seconds": + "description": |- + Number of seconds an alert must wait after being triggered to rearm itself. After rearming, it can be triggered again. If 0 or not specified, the alert will not be triggered again. + "subscriptions": {} +github.com/databricks/databricks-sdk-go/service/sql.AlertV2Operand: + "column": {} + "value": {} +github.com/databricks/databricks-sdk-go/service/sql.AlertV2OperandColumn: + "aggregation": {} + "display": {} + "name": {} +github.com/databricks/databricks-sdk-go/service/sql.AlertV2OperandValue: + "bool_value": {} + "double_value": {} + "string_value": {} +github.com/databricks/databricks-sdk-go/service/sql.AlertV2RunAs: + "service_principal_name": + "description": |- + Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role. + "user_name": + "description": |- + The email of an active workspace user. Can only set this field to their own email. +github.com/databricks/databricks-sdk-go/service/sql.AlertV2Subscription: + "destination_id": {} + "user_email": {} github.com/databricks/databricks-sdk-go/service/sql.Channel: "_": "description": |- @@ -4794,8 +4862,29 @@ github.com/databricks/databricks-sdk-go/service/sql.ChannelName: CHANNEL_NAME_PREVIOUS - |- CHANNEL_NAME_CUSTOM +github.com/databricks/databricks-sdk-go/service/sql.ComparisonOperator: + "_": + "enum": + - |- + LESS_THAN + - |- + GREATER_THAN + - |- + EQUAL + - |- + NOT_EQUAL + - |- + GREATER_THAN_OR_EQUAL + - |- + LESS_THAN_OR_EQUAL + - |- + IS_NULL + - |- + IS_NOT_NULL github.com/databricks/databricks-sdk-go/service/sql.CreateWarehouseRequestWarehouseType: "_": + "description": |- + Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. "enum": - |- TYPE_UNSPECIFIED @@ -4803,32 +4892,35 @@ github.com/databricks/databricks-sdk-go/service/sql.CreateWarehouseRequestWareho CLASSIC - |- PRO +github.com/databricks/databricks-sdk-go/service/sql.CronSchedule: + "pause_status": + "description": |- + Indicate whether this schedule is paused or not. + "quartz_cron_schedule": + "description": |- + A cron expression using quartz syntax that specifies the schedule for this pipeline. + Should use the quartz format described here: http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html + "timezone_id": + "description": |- + A Java timezone id. The schedule will be resolved using this timezone. + This will be combined with the quartz_cron_schedule to determine the schedule. + See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details. github.com/databricks/databricks-sdk-go/service/sql.EndpointTagPair: "key": {} "value": {} github.com/databricks/databricks-sdk-go/service/sql.EndpointTags: "custom_tags": {} +github.com/databricks/databricks-sdk-go/service/sql.SchedulePauseStatus: + "_": + "enum": + - |- + UNPAUSED + - |- + PAUSED github.com/databricks/databricks-sdk-go/service/sql.SpotInstancePolicy: "_": "description": |- - EndpointSpotInstancePolicy configures whether the endpoint should use spot - instances. 
- - The breakdown of how the EndpointSpotInstancePolicy converts to per cloud - configurations is: - - +-------+--------------------------------------+--------------------------------+ - | Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED | - +-------+--------------------------------------+--------------------------------+ - | AWS | On Demand Driver with Spot Executors | On Demand Driver and - Executors | | AZURE | On Demand Driver and Executors | On Demand Driver - and Executors | - +-------+--------------------------------------+--------------------------------+ - - While including "spot" in the enum name may limit the the future - extensibility of this field because it limits this enum to denoting "spot or - not", this is the field that PM recommends after discussion with customers - per SC-48783. + Configurations whether the warehouse should use spot instances. "enum": - |- POLICY_UNSPECIFIED diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index 72d50bf2ab..714005587a 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -1,3 +1,27 @@ +github.com/databricks/cli/bundle/config/resources.AlertPermissionLevel: + "_": + "enum": + - |- + CAN_EDIT + - |- + CAN_MANAGE + - |- + CAN_READ + - |- + CAN_RUN +github.com/databricks/cli/bundle/config/resources.Alert: + "evaluation": + "description": |- + PLACEHOLDER + "lifecycle": + "description": |- + PLACEHOLDER + "permissions": + "description": |- + PLACEHOLDER + "schedule": + "description": |- + PLACEHOLDER github.com/databricks/cli/bundle/config/resources.App: "app_status": "description": |- @@ -205,6 +229,9 @@ github.com/databricks/cli/bundle/config/resources.DatabaseCatalog: "description": |- PLACEHOLDER github.com/databricks/cli/bundle/config/resources.DatabaseInstance: + "effective_capacity": + "description": |- + PLACEHOLDER "lifecycle": "description": |- Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. @@ -465,12 +492,39 @@ github.com/databricks/cli/bundle/config/resources.RegisteredModel: - EXECUTE principal: account users ``` + "aliases": + "description": |- + PLACEHOLDER + "browse_only": + "description": |- + PLACEHOLDER + "created_at": + "description": |- + PLACEHOLDER + "created_by": + "description": |- + PLACEHOLDER + "full_name": + "description": |- + PLACEHOLDER "grants": "description": |- PLACEHOLDER "lifecycle": "description": |- Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. 
+ "metastore_id": + "description": |- + PLACEHOLDER + "owner": + "description": |- + PLACEHOLDER + "updated_at": + "description": |- + PLACEHOLDER + "updated_by": + "description": |- + PLACEHOLDER github.com/databricks/cli/bundle/config/resources.Schema: "_": "markdown_description": |- @@ -594,6 +648,10 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehousePermissionLevel: CAN_MONITOR - |- CAN_VIEW +github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: + "lifecycle": + "description": |- + PLACEHOLDER github.com/databricks/cli/bundle/config/resources.Volume: "_": "markdown_description": |- @@ -776,6 +834,19 @@ github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries: "granularities": "description": |- Granularities for aggregating data into time windows based on their timestamp. Valid values are 5 minutes, 30 minutes, 1 hour, 1 day, n weeks, 1 month, or 1 year. +github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias: + "catalog_name": + "description": |- + PLACEHOLDER + "id": + "description": |- + PLACEHOLDER + "model_name": + "description": |- + PLACEHOLDER + "schema_name": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/compute.AwsAttributes: "availability": "description": |- @@ -811,6 +882,9 @@ github.com/databricks/databricks-sdk-go/service/compute.Environment: "dependencies": "description": |- List of pip dependencies, as supported by the version of pip in this environment. + "java_dependencies": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/compute.GcpAttributes: "availability": "description": |- @@ -905,6 +979,10 @@ github.com/databricks/databricks-sdk-go/service/pipelines.CronTrigger: "timezone_id": "description": |- PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinition: + "netsuite_jar_path": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/pipelines.PipelineLibrary: "whl": "deprecation_message": |- @@ -916,6 +994,10 @@ github.com/databricks/databricks-sdk-go/service/pipelines.PipelineTrigger: "manual": "description": |- PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig: + "workday_report_parameters": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/serving.Route: "served_entity_name": "description": |- @@ -931,6 +1013,44 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput: "model_version": "description": |- PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/sql.AlertV2Notification: + "subscriptions": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/sql.AlertV2Operand: + "column": + "description": |- + PLACEHOLDER + "value": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/sql.AlertV2OperandColumn: + "aggregation": + "description": |- + PLACEHOLDER + "display": + "description": |- + PLACEHOLDER + "name": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/sql.AlertV2OperandValue: + "bool_value": + "description": |- + PLACEHOLDER + "double_value": + "description": |- + PLACEHOLDER + "string_value": + "description": |- + PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/sql.AlertV2Subscription: + "destination_id": + "description": |- + PLACEHOLDER + "user_email": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/sql.Channel: 
"dbsql_version": "description": |- diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 50dce23a79..9f273c7882 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -64,9 +64,6 @@ { "type": "object", "properties": { - "create_time": { - "$ref": "#/$defs/string" - }, "custom_description": { "$ref": "#/$defs/string" }, @@ -76,24 +73,12 @@ "display_name": { "$ref": "#/$defs/string" }, - "effective_run_as": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2RunAs" - }, "evaluation": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2Evaluation" }, - "id": { - "$ref": "#/$defs/string" - }, "lifecycle": { "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" }, - "lifecycle_state": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertLifecycleState" - }, - "owner_user_name": { - "$ref": "#/$defs/string" - }, "parent_path": { "$ref": "#/$defs/string" }, @@ -107,14 +92,13 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2RunAs" }, "run_as_user_name": { - "$ref": "#/$defs/string" + "$ref": "#/$defs/string", + "deprecationMessage": "This field is deprecated", + "deprecated": true }, "schedule": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.CronSchedule" }, - "update_time": { - "$ref": "#/$defs/string" - }, "warehouse_id": { "$ref": "#/$defs/string" } @@ -164,7 +148,21 @@ ] }, "resources.AlertPermissionLevel": { - "type": "string" + "oneOf": [ + { + "type": "string", + "enum": [ + "CAN_EDIT", + "CAN_MANAGE", + "CAN_READ", + "CAN_RUN" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "resources.App": { "oneOf": [ @@ -614,11 +612,18 @@ }, "custom_tags": { "description": "Custom tags associated with the instance. This field is only included on create and update responses.", - "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/database.CustomTag" + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/database.CustomTag", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, + "effective_capacity": { + "$ref": "#/$defs/string" }, "enable_pg_native_login": { - "description": "Whether to enable PG native password login on the instance. Defaults to false.", - "$ref": "#/$defs/bool" + "description": "Whether the instance has PG native password login enabled. Defaults to true.", + "$ref": "#/$defs/bool", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true }, "enable_readable_secondaries": { "description": "Whether to enable secondaries to serve read-only traffic. Defaults to false.", @@ -633,7 +638,7 @@ "$ref": "#/$defs/string" }, "node_count": { - "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries. This field is input only, see effective_node_count for the output.", + "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries.", "$ref": "#/$defs/int" }, "parent_instance_ref": { @@ -648,12 +653,14 @@ "$ref": "#/$defs/int" }, "stopped": { - "description": "Whether to stop the instance. 
An input only param, see effective_stopped for the output.", + "description": "Whether the instance is stopped.", "$ref": "#/$defs/bool" }, "usage_policy_id": { "description": "The desired usage policy to associate with the instance.", - "$ref": "#/$defs/string" + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true } }, "additionalProperties": false, @@ -1229,7 +1236,9 @@ }, "budget_policy_id": { "description": "Budget policy of this pipeline.", - "$ref": "#/$defs/string" + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true }, "catalog": { "description": "A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog.", @@ -1319,7 +1328,9 @@ "$ref": "#/$defs/string" }, "run_as": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RunAs" + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RunAs", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true }, "schema": { "description": "The default schema (database) where tables are read from or published to.", @@ -1501,11 +1512,9 @@ "type": "object", "properties": { "aliases": { - "description": "List of aliases associated with the registered model", "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias" }, "browse_only": { - "description": "Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request.", "$ref": "#/$defs/bool" }, "catalog_name": { @@ -1517,15 +1526,12 @@ "$ref": "#/$defs/string" }, "created_at": { - "description": "Creation timestamp of the registered model in milliseconds since the Unix epoch", "$ref": "#/$defs/int64" }, "created_by": { - "description": "The identifier of the user who created the registered model", "$ref": "#/$defs/string" }, "full_name": { - "description": "The three-level (fully qualified) name of the registered model", "$ref": "#/$defs/string" }, "grants": { @@ -1536,7 +1542,6 @@ "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" }, "metastore_id": { - "description": "The unique identifier of the metastore", "$ref": "#/$defs/string" }, "name": { @@ -1544,7 +1549,6 @@ "$ref": "#/$defs/string" }, "owner": { - "description": "The identifier of the user who owns the registered model", "$ref": "#/$defs/string" }, "schema_name": { @@ -1556,11 +1560,9 @@ "$ref": "#/$defs/string" }, "updated_at": { - "description": "Last-update timestamp of the registered model in milliseconds since the Unix epoch", "$ref": "#/$defs/int64" }, "updated_by": { - "description": "The identifier of the user who updated the registered model last time", "$ref": "#/$defs/string" } }, @@ -1758,10 +1760,9 @@ "oneOf": [ { "type": "object", - "description": "Creates a new SQL warehouse.", "properties": { "auto_stop_mins": { - "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no\nRUNNING queries) before it is automatically stopped.\n\nSupported values:\n- Must be == 0 or \u003e= 10 mins\n- 0 indicates no autostop.\n\nDefaults to 120 mins", + "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before\nit is automatically stopped.\n\nSupported 
values:\n - Must be \u003e= 0 mins for serverless warehouses\n - Must be == 0 or \u003e= 10 mins for non-serverless warehouses\n - 0 indicates no autostop.\n\nDefaults to 120 mins", "$ref": "#/$defs/int" }, "channel": { @@ -1769,7 +1770,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.Channel" }, "cluster_size": { - "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large", + "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on it.\nIf you want to increase the number of concurrent queries, please tune max_num_clusters.\n\nSupported values:\n - 2X-Small\n - X-Small\n - Small\n - Medium\n - Large\n - X-Large\n - 2X-Large\n - 3X-Large\n - 4X-Large\n", "$ref": "#/$defs/string" }, "creator_name": { @@ -1795,15 +1796,15 @@ "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" }, "max_num_clusters": { - "description": "Maximum number of clusters that the autoscaler will create to handle\nconcurrent queries.\n\nSupported values:\n- Must be \u003e= min_num_clusters\n- Must be \u003c= 40.\n\nDefaults to min_clusters if unset.", + "description": "Maximum number of clusters that the autoscaler will create to handle concurrent queries.\n\nSupported values:\n - Must be \u003e= min_num_clusters\n - Must be \u003c= 30.\n\nDefaults to min_clusters if unset.", "$ref": "#/$defs/int" }, "min_num_clusters": { - "description": "Minimum number of available clusters that will be maintained for this SQL\nwarehouse. Increasing this will ensure that a larger number of clusters are\nalways running and therefore may reduce the cold start time for new\nqueries. This is similar to reserved vs. revocable cores in a resource\nmanager.\n\nSupported values:\n- Must be \u003e 0\n- Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", + "description": "Minimum number of available clusters that will be maintained for this SQL warehouse.\nIncreasing this will ensure that a larger number of clusters are always running and therefore may reduce\nthe cold start time for new queries. This is similar to reserved vs. 
revocable cores in a resource manager.\n\nSupported values:\n - Must be \u003e 0\n - Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", "$ref": "#/$defs/int" }, "name": { - "description": "Logical name for the cluster.\n\nSupported values:\n- Must be unique within an org.\n- Must be less than 100 characters.", + "description": "Logical name for the cluster.\n\nSupported values:\n - Must be unique within an org.\n - Must be less than 100 characters.", "$ref": "#/$defs/string" }, "permissions": { @@ -1813,7 +1814,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.SpotInstancePolicy" }, "tags": { - "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated\nwith this SQL warehouse.\n\nSupported values:\n- Number of tags \u003c 45.", + "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated\nwith this SQL warehouse.\n\nSupported values:\n - Number of tags \u003c 45.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.EndpointTags" }, "warehouse_type": { @@ -1878,7 +1879,7 @@ "oneOf": [ { "type": "object", - "description": "Next field marker: 18", + "description": "Next field marker: 14", "properties": { "database_instance_name": { "$ref": "#/$defs/string" @@ -2938,21 +2939,7 @@ ] }, "apps.AppResourceGenieSpaceGenieSpacePermission": { - "oneOf": [ - { - "type": "string", - "enum": [ - "CAN_MANAGE", - "CAN_EDIT", - "CAN_RUN", - "CAN_VIEW" - ] - }, - { - "type": "string", - "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" - } - ] + "type": "string" }, "apps.AppResourceJob": { "oneOf": [ @@ -3207,19 +3194,7 @@ ] }, "apps.ComputeSize": { - "oneOf": [ - { - "type": "string", - "enum": [ - "MEDIUM", - "LARGE" - ] - }, - { - "type": "string", - "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" - } - ] + "type": "string" }, "apps.ComputeState": { "oneOf": [ @@ -3528,25 +3503,22 @@ "oneOf": [ { "type": "object", + "description": "Registered model alias.", "properties": { "alias_name": { "description": "Name of the alias, e.g. 'champion' or 'latest_stable'", "$ref": "#/$defs/string" }, "catalog_name": { - "description": "The name of the catalog containing the model version", "$ref": "#/$defs/string" }, "id": { - "description": "The unique identifier of the alias", "$ref": "#/$defs/string" }, "model_name": { - "description": "The name of the parent registered model of the model version, relative to parent schema", "$ref": "#/$defs/string" }, "schema_name": { - "description": "The name of the schema containing the model version, relative to parent catalog", "$ref": "#/$defs/string" }, "version_num": { @@ -3566,9 +3538,10 @@ "oneOf": [ { "type": "string", + "description": "The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external)", "enum": [ - "MANAGED", - "EXTERNAL" + "EXTERNAL", + "MANAGED" ] }, { @@ -4051,10 +4024,7 @@ "$ref": "#/$defs/string" }, "java_dependencies": { - "description": "List of java dependencies. Each dependency is a string representing a java library path. 
For example: `/Volumes/path/to/test.jar`.", - "$ref": "#/$defs/slice/string", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "$ref": "#/$defs/slice/string" } }, "additionalProperties": false @@ -4865,7 +4835,6 @@ "oneOf": [ { "type": "object", - "description": "Clean Rooms notebook task for V1 Clean Room service (GA).\nReplaces the deprecated CleanRoomNotebookTask (defined above) which was for V0 service.", "properties": { "clean_room_name": { "description": "The clean room that the notebook belongs to.", @@ -5002,7 +4971,9 @@ }, "task_retry_mode": { "description": "Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskRetryMode" + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskRetryMode", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true } }, "additionalProperties": false @@ -6032,7 +6003,7 @@ "type": "object", "properties": { "dbt_commands": { - "description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", + "description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`", "$ref": "#/$defs/slice/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6040,7 +6011,7 @@ "deprecated": true }, "jar_params": { - "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", + "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "$ref": "#/$defs/slice/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6056,7 +6027,7 @@ "$ref": "#/$defs/map/string" }, "notebook_params": { - "description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the 
triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.", + "description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.", "$ref": "#/$defs/map/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6075,7 +6046,7 @@ "deprecated": true }, "python_params": { - "description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", "$ref": "#/$defs/slice/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6083,7 +6054,7 @@ "deprecated": true }, "spark_submit_params": { - "description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. 
The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", "$ref": "#/$defs/slice/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6091,7 +6062,7 @@ "deprecated": true }, "sql_params": { - "description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", + "description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.", "$ref": "#/$defs/map/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6132,7 +6103,7 @@ "type": "object", "properties": { "jar_uri": { - "description": "Deprecated since 04/2016. For classic compute, provide a `jar` through the `libraries` field instead. For serverless compute, provide a `jar` though the `java_dependencies` field inside the `environments` list.\n\nSee the examples of classic and serverless compute usage at the top of the page.", + "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.", "$ref": "#/$defs/string", "deprecationMessage": "This field is deprecated", "deprecated": true @@ -6453,7 +6424,7 @@ "$ref": "#/$defs/int" }, "table_names": { - "description": "A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "description": "A list of Delta tables to monitor for changes. 
The table name must be in the format `catalog_name.schema_name.table_name`.", "$ref": "#/$defs/slice/string" }, "wait_after_last_change_seconds": { @@ -6475,7 +6446,7 @@ "type": "object", "properties": { "clean_rooms_notebook_task": { - "description": "The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook\nwhen the `clean_rooms_notebook_task` field is present.", + "description": "The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook\nwhen the `clean_rooms_notebook_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask" }, "condition_task": { @@ -6606,10 +6577,8 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask" }, "spark_submit_task": { - "description": "(Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit).", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask", - "deprecationMessage": "This field is deprecated", - "deprecated": true + "description": "(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask" }, "sql_task": { "description": "The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.", @@ -6764,7 +6733,9 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PeriodicTriggerConfiguration" }, "table_update": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfiguration" + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfiguration", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true } }, "additionalProperties": false @@ -6899,7 +6870,7 @@ "oneOf": [ { "type": "string", - "description": "Days of week in which the window is allowed to happen.\nIf not specified all days of the week will be used.", + "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", "enum": [ "MONDAY", "TUESDAY", @@ -7079,10 +7050,7 @@ "$ref": "#/$defs/string" }, "netsuite_jar_path": { - "description": "Netsuite only configuration. 
When the field is set for a netsuite connector,\nthe jar stored in the field will be validated and added to the classpath of\npipeline's cluster.", - "$ref": "#/$defs/string", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "$ref": "#/$defs/string" }, "objects": { "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", @@ -7146,20 +7114,13 @@ "type": "object", "properties": { "incremental": { - "description": "(Optional) Marks the report as incremental.\nThis field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now\ncontrolled by the `parameters` field.", - "$ref": "#/$defs/bool", - "deprecationMessage": "This field is deprecated", - "deprecated": true + "$ref": "#/$defs/bool" }, "parameters": { - "description": "Parameters for the Workday report. Each key represents the parameter name (e.g., \"start_date\", \"end_date\"),\nand the corresponding value is a SQL-like expression used to compute the parameter value at runtime.\nExample:\n{\n\"start_date\": \"{ coalesce(current_offset(), date(\\\"2025-02-01\\\")) }\",\n\"end_date\": \"{ current_date() - INTERVAL 1 DAY }\"\n}", "$ref": "#/$defs/map/string" }, "report_parameters": { - "description": "(Optional) Additional custom parameters for Workday Report\nThis field is deprecated and should not be used. Use `parameters` instead.", - "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue", - "deprecationMessage": "This field is deprecated", - "deprecated": true + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue" } }, "additionalProperties": false @@ -7176,11 +7137,9 @@ "type": "object", "properties": { "key": { - "description": "Key for the report parameter, can be a column name or other metadata", "$ref": "#/$defs/string" }, "value": { - "description": "Value for the report parameter.\nPossible values it can take are these sql functions:\n1. coalesce(current_offset(), date(\"YYYY-MM-DD\")) -\u003e if current_offset() is null, then the passed date, else current_offset()\n2. current_date()\n3. 
date_sub(current_date(), x) -\u003e subtract x (some non-negative integer) days from current date", "$ref": "#/$defs/string" } }, @@ -7199,6 +7158,8 @@ "enum": [ "MYSQL", "POSTGRESQL", + "REDSHIFT", + "SQLDW", "SQLSERVER", "SALESFORCE", "BIGQUERY", @@ -7210,7 +7171,9 @@ "ORACLE", "TERADATA", "SHAREPOINT", - "DYNAMICS365" + "DYNAMICS365", + "CONFLUENCE", + "META_MARKETING" ] }, { @@ -7845,10 +7808,7 @@ "$ref": "#/$defs/slice/string" }, "workday_report_parameters": { - "description": "(Optional) Additional custom parameters for Workday Report", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionWorkdayReportParameters", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionWorkdayReportParameters" } }, "additionalProperties": false @@ -8973,10 +8933,43 @@ ] }, "sql.Aggregation": { - "type": "string" + "oneOf": [ + { + "type": "string", + "enum": [ + "SUM", + "COUNT", + "COUNT_DISTINCT", + "AVG", + "MEDIAN", + "MIN", + "MAX", + "STDDEV" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "sql.AlertEvaluationState": { - "type": "string" + "oneOf": [ + { + "type": "string", + "description": "UNSPECIFIED - default unspecify value for proto enum, do not use it in the code\nUNKNOWN - alert not yet evaluated\nTRIGGERED - alert is triggered\nOK - alert is not triggered\nERROR - alert evaluation failed", + "enum": [ + "UNKNOWN", + "TRIGGERED", + "OK", + "ERROR" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "sql.AlertLifecycleState": { "type": "string" @@ -8987,24 +8980,23 @@ "type": "object", "properties": { "comparison_operator": { + "description": "Operator used for comparison in alert evaluation.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.ComparisonOperator" }, "empty_result_state": { + "description": "Alert state if result is empty.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertEvaluationState" }, - "last_evaluated_at": { - "$ref": "#/$defs/string" - }, "notification": { + "description": "User or Notification Destination to notify when alert is triggered.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2Notification" }, "source": { + "description": "Source column from result to use to evaluate alert", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2OperandColumn" }, - "state": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertEvaluationState" - }, "threshold": { + "description": "Threshold to user for alert evaluation, can be a column or a value.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertV2Operand" } }, @@ -9026,9 +9018,11 @@ "type": "object", "properties": { "notify_on_ok": { + "description": "Whether to notify alert subscribers when alert returns back to normal.", "$ref": "#/$defs/bool" }, "retrigger_seconds": { + "description": "Number of seconds an alert must wait after being triggered to rearm itself. After rearming, it can be triggered again. If 0 or not specified, the alert will not be triggered again.", "$ref": "#/$defs/int" }, "subscriptions": { @@ -9118,9 +9112,11 @@ "type": "object", "properties": { "service_principal_name": { + "description": "Application ID of an active service principal. 
Setting this field requires the `servicePrincipal/user` role.", "$ref": "#/$defs/string" }, "user_name": { + "description": "The email of an active workspace user. Can only set this field to their own email.", "$ref": "#/$defs/string" } }, @@ -9191,12 +9187,31 @@ ] }, "sql.ComparisonOperator": { - "type": "string" + "oneOf": [ + { + "type": "string", + "enum": [ + "LESS_THAN", + "GREATER_THAN", + "EQUAL", + "NOT_EQUAL", + "GREATER_THAN_OR_EQUAL", + "LESS_THAN_OR_EQUAL", + "IS_NULL", + "IS_NOT_NULL" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "sql.CreateWarehouseRequestWarehouseType": { "oneOf": [ { "type": "string", + "description": "Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.", "enum": [ "TYPE_UNSPECIFIED", "CLASSIC", @@ -9215,12 +9230,15 @@ "type": "object", "properties": { "pause_status": { + "description": "Indicate whether this schedule is paused or not.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.SchedulePauseStatus" }, "quartz_cron_schedule": { + "description": "A cron expression using quartz syntax that specifies the schedule for this pipeline.\nShould use the quartz format described here: http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html", "$ref": "#/$defs/string" }, "timezone_id": { + "description": "A Java timezone id. The schedule will be resolved using this timezone.\nThis will be combined with the quartz_cron_schedule to determine the schedule.\nSee https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.", "$ref": "#/$defs/string" } }, @@ -9274,13 +9292,25 @@ ] }, "sql.SchedulePauseStatus": { - "type": "string" + "oneOf": [ + { + "type": "string", + "enum": [ + "UNPAUSED", + "PAUSED" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "sql.SpotInstancePolicy": { "oneOf": [ { "type": "string", - "description": "EndpointSpotInstancePolicy configures whether the endpoint should use spot\ninstances.\n\nThe breakdown of how the EndpointSpotInstancePolicy converts to per cloud\nconfigurations is:\n\n+-------+--------------------------------------+--------------------------------+\n| Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED |\n+-------+--------------------------------------+--------------------------------+\n| AWS | On Demand Driver with Spot Executors | On Demand Driver and\nExecutors | | AZURE | On Demand Driver and Executors | On Demand Driver\nand Executors |\n+-------+--------------------------------------+--------------------------------+\n\nWhile including \"spot\" in the enum name may limit the the future\nextensibility of this field because it limits this enum to denoting \"spot or\nnot\", this is the field that PM recommends after discussion with customers\nper SC-48783.", + "description": "Configurations whether the warehouse should use spot instances.", "enum": [ "POLICY_UNSPECIFIED", "COST_OPTIMIZED", diff --git a/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py b/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py index d0281f144c..7d30a1bad1 100644 --- a/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py +++ b/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py @@ -15,10 +15,7 @@ @dataclass(kw_only=True) class 
CleanRoomsNotebookTask: - """ - Clean Rooms notebook task for V1 Clean Room service (GA). - Replaces the deprecated CleanRoomNotebookTask (defined above) which was for V0 service. - """ + """""" clean_room_name: VariableOr[str] """ diff --git a/python/databricks/bundles/jobs/_models/condition.py b/python/databricks/bundles/jobs/_models/condition.py index d7c1b25bc7..d1b3566d5d 100644 --- a/python/databricks/bundles/jobs/_models/condition.py +++ b/python/databricks/bundles/jobs/_models/condition.py @@ -3,6 +3,10 @@ class Condition(Enum): + """ + :meta private: [EXPERIMENTAL] + """ + ANY_UPDATED = "ANY_UPDATED" ALL_UPDATED = "ALL_UPDATED" diff --git a/python/databricks/bundles/jobs/_models/continuous.py b/python/databricks/bundles/jobs/_models/continuous.py index 43268661c1..92b1aad237 100644 --- a/python/databricks/bundles/jobs/_models/continuous.py +++ b/python/databricks/bundles/jobs/_models/continuous.py @@ -25,6 +25,8 @@ class Continuous: task_retry_mode: VariableOrOptional[TaskRetryMode] = None """ + :meta private: [EXPERIMENTAL] + Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER. """ @@ -46,6 +48,8 @@ class ContinuousDict(TypedDict, total=False): task_retry_mode: VariableOrOptional[TaskRetryModeParam] """ + :meta private: [EXPERIMENTAL] + Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER. """ diff --git a/python/databricks/bundles/jobs/_models/environment.py b/python/databricks/bundles/jobs/_models/environment.py index 1521fffeda..b912693ef2 100644 --- a/python/databricks/bundles/jobs/_models/environment.py +++ b/python/databricks/bundles/jobs/_models/environment.py @@ -34,11 +34,6 @@ class Environment: """ java_dependencies: VariableOrList[str] = field(default_factory=list) - """ - :meta private: [EXPERIMENTAL] - - List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`. - """ @classmethod def from_dict(cls, value: "EnvironmentDict") -> "Self": @@ -69,11 +64,6 @@ class EnvironmentDict(TypedDict, total=False): """ java_dependencies: VariableOrList[str] - """ - :meta private: [EXPERIMENTAL] - - List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`. - """ EnvironmentParam = EnvironmentDict | Environment diff --git a/python/databricks/bundles/jobs/_models/spark_jar_task.py b/python/databricks/bundles/jobs/_models/spark_jar_task.py index 9b72738437..40bbe92ba0 100644 --- a/python/databricks/bundles/jobs/_models/spark_jar_task.py +++ b/python/databricks/bundles/jobs/_models/spark_jar_task.py @@ -26,9 +26,7 @@ class SparkJarTask: jar_uri: VariableOrOptional[str] = None """ - [DEPRECATED] Deprecated since 04/2016. For classic compute, provide a `jar` through the `libraries` field instead. For serverless compute, provide a `jar` though the `java_dependencies` field inside the `environments` list. - - See the examples of classic and serverless compute usage at the top of the page. + [DEPRECATED] Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create. """ parameters: VariableOrList[str] = field(default_factory=list) @@ -63,9 +61,7 @@ class SparkJarTaskDict(TypedDict, total=False): jar_uri: VariableOrOptional[str] """ - [DEPRECATED] Deprecated since 04/2016. For classic compute, provide a `jar` through the `libraries` field instead. 
For serverless compute, provide a `jar` though the `java_dependencies` field inside the `environments` list. - - See the examples of classic and serverless compute usage at the top of the page. + [DEPRECATED] Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create. """ parameters: VariableOrList[str] diff --git a/python/databricks/bundles/jobs/_models/spark_submit_task.py b/python/databricks/bundles/jobs/_models/spark_submit_task.py index c809dbe721..edc57577b5 100644 --- a/python/databricks/bundles/jobs/_models/spark_submit_task.py +++ b/python/databricks/bundles/jobs/_models/spark_submit_task.py @@ -11,9 +11,7 @@ @dataclass(kw_only=True) class SparkSubmitTask: - """ - [DEPRECATED] - """ + """""" parameters: VariableOrList[str] = field(default_factory=list) """ diff --git a/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py b/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py index c824a72499..c89b0c4011 100644 --- a/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py +++ b/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py @@ -12,7 +12,9 @@ @dataclass(kw_only=True) class TableUpdateTriggerConfiguration: - """""" + """ + :meta private: [EXPERIMENTAL] + """ condition: VariableOrOptional[Condition] = None """ @@ -27,7 +29,7 @@ class TableUpdateTriggerConfiguration: table_names: VariableOrList[str] = field(default_factory=list) """ - A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. """ wait_after_last_change_seconds: VariableOrOptional[int] = None @@ -61,7 +63,7 @@ class TableUpdateTriggerConfigurationDict(TypedDict, total=False): table_names: VariableOrList[str] """ - A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. """ wait_after_last_change_seconds: VariableOrOptional[int] diff --git a/python/databricks/bundles/jobs/_models/task.py b/python/databricks/bundles/jobs/_models/task.py index 0393213b65..be497ff0f2 100644 --- a/python/databricks/bundles/jobs/_models/task.py +++ b/python/databricks/bundles/jobs/_models/task.py @@ -104,7 +104,7 @@ class Task: clean_rooms_notebook_task: VariableOrOptional[CleanRoomsNotebookTask] = None """ - The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook + The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook when the `clean_rooms_notebook_task` field is present. """ @@ -268,7 +268,15 @@ class Task: spark_submit_task: VariableOrOptional[SparkSubmitTask] = None """ - [DEPRECATED] (Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit). + (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute. + + In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. 
Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. + + `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters. + + By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage. + + The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths. """ sql_task: VariableOrOptional[SqlTask] = None @@ -306,7 +314,7 @@ class TaskDict(TypedDict, total=False): clean_rooms_notebook_task: VariableOrOptional[CleanRoomsNotebookTaskParam] """ - The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook + The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook when the `clean_rooms_notebook_task` field is present. """ @@ -470,7 +478,15 @@ class TaskDict(TypedDict, total=False): spark_submit_task: VariableOrOptional[SparkSubmitTaskParam] """ - [DEPRECATED] (Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit). + (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute. + + In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. + + `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters. + + By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage. + + The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths. """ sql_task: VariableOrOptional[SqlTaskParam] diff --git a/python/databricks/bundles/jobs/_models/task_retry_mode.py b/python/databricks/bundles/jobs/_models/task_retry_mode.py index ce5ccaa687..36c29a7f9b 100644 --- a/python/databricks/bundles/jobs/_models/task_retry_mode.py +++ b/python/databricks/bundles/jobs/_models/task_retry_mode.py @@ -4,6 +4,8 @@ class TaskRetryMode(Enum): """ + :meta private: [EXPERIMENTAL] + task retry mode of the continuous job * NEVER: The failed task will not be retried. * ON_FAILURE: Retry a failed task if at least one other task in the job is still running its first attempt. 
diff --git a/python/databricks/bundles/jobs/_models/trigger_settings.py b/python/databricks/bundles/jobs/_models/trigger_settings.py index 18cccd14dd..4608c98c82 100644 --- a/python/databricks/bundles/jobs/_models/trigger_settings.py +++ b/python/databricks/bundles/jobs/_models/trigger_settings.py @@ -42,6 +42,9 @@ class TriggerSettings: """ table_update: VariableOrOptional[TableUpdateTriggerConfiguration] = None + """ + :meta private: [EXPERIMENTAL] + """ @classmethod def from_dict(cls, value: "TriggerSettingsDict") -> "Self": @@ -70,6 +73,9 @@ class TriggerSettingsDict(TypedDict, total=False): """ table_update: VariableOrOptional[TableUpdateTriggerConfigurationParam] + """ + :meta private: [EXPERIMENTAL] + """ TriggerSettingsParam = TriggerSettingsDict | TriggerSettings diff --git a/python/databricks/bundles/pipelines/_models/day_of_week.py b/python/databricks/bundles/pipelines/_models/day_of_week.py index a685c2b308..eaf5cbc9ff 100644 --- a/python/databricks/bundles/pipelines/_models/day_of_week.py +++ b/python/databricks/bundles/pipelines/_models/day_of_week.py @@ -6,7 +6,7 @@ class DayOfWeek(Enum): """ :meta private: [EXPERIMENTAL] - Days of week in which the window is allowed to happen. + Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). If not specified all days of the week will be used. """ diff --git a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py index 6a5e058890..d07a068624 100644 --- a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py +++ b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py @@ -36,13 +36,6 @@ class IngestionPipelineDefinition: """ netsuite_jar_path: VariableOrOptional[str] = None - """ - :meta private: [EXPERIMENTAL] - - Netsuite only configuration. When the field is set for a netsuite connector, - the jar stored in the field will be validated and added to the classpath of - pipeline's cluster. - """ objects: VariableOrList[IngestionConfig] = field(default_factory=list) """ @@ -83,13 +76,6 @@ class IngestionPipelineDefinitionDict(TypedDict, total=False): """ netsuite_jar_path: VariableOrOptional[str] - """ - :meta private: [EXPERIMENTAL] - - Netsuite only configuration. When the field is set for a netsuite connector, - the jar stored in the field will be validated and added to the classpath of - pipeline's cluster. - """ objects: VariableOrList[IngestionConfigParam] """ diff --git a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py index d48d68495d..2aa7163b8f 100644 --- a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py +++ b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py @@ -19,35 +19,15 @@ @dataclass(kw_only=True) class IngestionPipelineDefinitionWorkdayReportParameters: - """ - :meta private: [EXPERIMENTAL] - """ + """""" incremental: VariableOrOptional[bool] = None - """ - [DEPRECATED] (Optional) Marks the report as incremental. - This field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now - controlled by the `parameters` field. 
- """ parameters: VariableOrDict[str] = field(default_factory=dict) - """ - Parameters for the Workday report. Each key represents the parameter name (e.g., "start_date", "end_date"), - and the corresponding value is a SQL-like expression used to compute the parameter value at runtime. - Example: - { - "start_date": "{ coalesce(current_offset(), date(\"2025-02-01\")) }", - "end_date": "{ current_date() - INTERVAL 1 DAY }" - } - """ report_parameters: VariableOrList[ IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue ] = field(default_factory=list) - """ - [DEPRECATED] (Optional) Additional custom parameters for Workday Report - This field is deprecated and should not be used. Use `parameters` instead. - """ @classmethod def from_dict( @@ -63,30 +43,12 @@ class IngestionPipelineDefinitionWorkdayReportParametersDict(TypedDict, total=Fa """""" incremental: VariableOrOptional[bool] - """ - [DEPRECATED] (Optional) Marks the report as incremental. - This field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now - controlled by the `parameters` field. - """ parameters: VariableOrDict[str] - """ - Parameters for the Workday report. Each key represents the parameter name (e.g., "start_date", "end_date"), - and the corresponding value is a SQL-like expression used to compute the parameter value at runtime. - Example: - { - "start_date": "{ coalesce(current_offset(), date(\"2025-02-01\")) }", - "end_date": "{ current_date() - INTERVAL 1 DAY }" - } - """ report_parameters: VariableOrList[ IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueParam ] - """ - [DEPRECATED] (Optional) Additional custom parameters for Workday Report - This field is deprecated and should not be used. Use `parameters` instead. - """ IngestionPipelineDefinitionWorkdayReportParametersParam = ( diff --git a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py index 2a24858d66..e207b4494a 100644 --- a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py +++ b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py @@ -11,25 +11,11 @@ @dataclass(kw_only=True) class IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue: - """ - :meta private: [EXPERIMENTAL] - - [DEPRECATED] - """ + """""" key: VariableOrOptional[str] = None - """ - Key for the report parameter, can be a column name or other metadata - """ value: VariableOrOptional[str] = None - """ - Value for the report parameter. - Possible values it can take are these sql functions: - 1. coalesce(current_offset(), date("YYYY-MM-DD")) -> if current_offset() is null, then the passed date, else current_offset() - 2. current_date() - 3. date_sub(current_date(), x) -> subtract x (some non-negative integer) days from current date - """ @classmethod def from_dict( @@ -50,18 +36,8 @@ class IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueDict( """""" key: VariableOrOptional[str] - """ - Key for the report parameter, can be a column name or other metadata - """ value: VariableOrOptional[str] - """ - Value for the report parameter. - Possible values it can take are these sql functions: - 1. 
coalesce(current_offset(), date("YYYY-MM-DD")) -> if current_offset() is null, then the passed date, else current_offset() - 2. current_date() - 3. date_sub(current_date(), x) -> subtract x (some non-negative integer) days from current date - """ IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueParam = ( diff --git a/python/databricks/bundles/pipelines/_models/pipeline.py b/python/databricks/bundles/pipelines/_models/pipeline.py index 2b4bbed23b..86f3bbc266 100644 --- a/python/databricks/bundles/pipelines/_models/pipeline.py +++ b/python/databricks/bundles/pipelines/_models/pipeline.py @@ -70,6 +70,8 @@ class Pipeline(Resource): budget_policy_id: VariableOrOptional[str] = None """ + :meta private: [EXPERIMENTAL] + Budget policy of this pipeline. """ @@ -182,6 +184,9 @@ class Pipeline(Resource): """ run_as: VariableOrOptional[RunAs] = None + """ + :meta private: [EXPERIMENTAL] + """ schema: VariableOrOptional[str] = None """ @@ -235,6 +240,8 @@ class PipelineDict(TypedDict, total=False): budget_policy_id: VariableOrOptional[str] """ + :meta private: [EXPERIMENTAL] + Budget policy of this pipeline. """ @@ -347,6 +354,9 @@ class PipelineDict(TypedDict, total=False): """ run_as: VariableOrOptional[RunAsParam] + """ + :meta private: [EXPERIMENTAL] + """ schema: VariableOrOptional[str] """ diff --git a/python/databricks/bundles/pipelines/_models/run_as.py b/python/databricks/bundles/pipelines/_models/run_as.py index b4d52af00a..dadceecac7 100644 --- a/python/databricks/bundles/pipelines/_models/run_as.py +++ b/python/databricks/bundles/pipelines/_models/run_as.py @@ -12,6 +12,8 @@ @dataclass(kw_only=True) class RunAs: """ + :meta private: [EXPERIMENTAL] + Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline. Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown. diff --git a/python/databricks/bundles/pipelines/_models/table_specific_config.py b/python/databricks/bundles/pipelines/_models/table_specific_config.py index 7211a9a951..11b1abac96 100644 --- a/python/databricks/bundles/pipelines/_models/table_specific_config.py +++ b/python/databricks/bundles/pipelines/_models/table_specific_config.py @@ -78,11 +78,6 @@ class TableSpecificConfig: workday_report_parameters: VariableOrOptional[ IngestionPipelineDefinitionWorkdayReportParameters ] = None - """ - :meta private: [EXPERIMENTAL] - - (Optional) Additional custom parameters for Workday Report - """ @classmethod def from_dict(cls, value: "TableSpecificConfigDict") -> "Self": @@ -148,11 +143,6 @@ class TableSpecificConfigDict(TypedDict, total=False): workday_report_parameters: VariableOrOptional[ IngestionPipelineDefinitionWorkdayReportParametersParam ] - """ - :meta private: [EXPERIMENTAL] - - (Optional) Additional custom parameters for Workday Report - """ TableSpecificConfigParam = TableSpecificConfigDict | TableSpecificConfig diff --git a/python/databricks/bundles/volumes/_models/volume_type.py b/python/databricks/bundles/volumes/_models/volume_type.py index 5c96db8fde..1b9bcd1089 100644 --- a/python/databricks/bundles/volumes/_models/volume_type.py +++ b/python/databricks/bundles/volumes/_models/volume_type.py @@ -3,8 +3,12 @@ class VolumeType(Enum): - MANAGED = "MANAGED" + """ + The type of the volume. An external volume is located in the specified external location. 
A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) + """ + EXTERNAL = "EXTERNAL" + MANAGED = "MANAGED" -VolumeTypeParam = Literal["MANAGED", "EXTERNAL"] | VolumeType +VolumeTypeParam = Literal["EXTERNAL", "MANAGED"] | VolumeType From c8cdb2e29b3936b8d506a9c71ffae2435c854f5c Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Mon, 1 Dec 2025 12:26:00 +0100 Subject: [PATCH 09/18] update modified status --- .../summary/modified_status/databricks.yml | 16 +- .../bundle/summary/modified_status/output.txt | 170 ++++++++++++++---- 2 files changed, 151 insertions(+), 35 deletions(-) diff --git a/acceptance/bundle/summary/modified_status/databricks.yml b/acceptance/bundle/summary/modified_status/databricks.yml index 2bc709b73e..7e940c721a 100644 --- a/acceptance/bundle/summary/modified_status/databricks.yml +++ b/acceptance/bundle/summary/modified_status/databricks.yml @@ -21,7 +21,15 @@ resources: alerts: my_alert: display_name: test-alert - query_text: "SELECT 1" - warehouse_id: "test-sql-warehouse" - custom_summary: "test-alert-summary" - custom_description: "test-alert-description" + evaluation: + comparison_operator: EQUAL + source: + name: "1" + threshold: + value: + double_value: 2 + query_text: select 2 + schedule: + quartz_cron_schedule: "44 19 */1 * * ?" + timezone_id: Europe/Amsterdam + warehouse_id: aaaaaaaaaaaaaaaa diff --git a/acceptance/bundle/summary/modified_status/output.txt b/acceptance/bundle/summary/modified_status/output.txt index 1ac7dce61c..a3c123eb03 100644 --- a/acceptance/bundle/summary/modified_status/output.txt +++ b/acceptance/bundle/summary/modified_status/output.txt @@ -1,24 +1,29 @@ === Initial view of resources without id and modified_status=created >>> [CLI] bundle summary -o json -Warning: required field "evaluation" is not set - at resources.alerts.my_alert - in databricks.yml:6:7 - -Warning: required field "schedule" is not set - at resources.alerts.my_alert - in databricks.yml:6:7 - { "alerts": { "my_alert": { - "custom_description": "test-alert-description", - "custom_summary": "test-alert-summary", "display_name": "test-alert", + "evaluation": { + "comparison_operator": "EQUAL", + "source": { + "name": "1" + }, + "threshold": { + "value": { + "double_value": 2 + } + } + }, "modified_status": "created", "parent_path": "/Workspace/Users/[USERNAME]/.bundle/test-bundle/default/resources", - "query_text": "SELECT 1", - "warehouse_id": "test-sql-warehouse" + "query_text": "select 2", + "schedule": { + "quartz_cron_schedule": "44 19 */1 * * ?", + "timezone_id": "Europe/Amsterdam" + }, + "warehouse_id": "aaaaaaaaaaaaaaaa" } }, "pipelines": { @@ -63,31 +68,134 @@ Warning: required field "schedule" is not set } >>> [CLI] bundle deploy -Warning: required field "evaluation" is not set - at resources.alerts.my_alert - in databricks.yml:6:7 - -Warning: required field "schedule" is not set - at resources.alerts.my_alert - in databricks.yml:6:7 - Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/test-bundle/default/files... -Error: exit status 1 +Deploying resources... +Updating deployment state... +Deployment complete! 
-Error: Missing required argument - - on bundle.tf.json line 22, in resource.databricks_alert_v2.my_alert: - 22: } +=== Post-deployment view of resources with id and without modified_status +>>> [CLI] bundle summary -o json +{ + "alerts": { + "my_alert": { + "display_name": "test-alert", + "evaluation": { + "comparison_operator": "EQUAL", + "source": { + "name": "1" + }, + "threshold": { + "value": { + "double_value": 2 + } + } + }, + "id": "[UUID]", + "parent_path": "/Workspace/Users/[USERNAME]/.bundle/test-bundle/default/resources", + "query_text": "select 2", + "schedule": { + "quartz_cron_schedule": "44 19 */1 * * ?", + "timezone_id": "Europe/Amsterdam" + }, + "url": "[DATABRICKS_URL]/sql/alerts-v2/[UUID]?o=[NUMID]", + "warehouse_id": "aaaaaaaaaaaaaaaa" + } + }, + "pipelines": { + "my_pipeline": { + "channel": "CURRENT", + "deployment": { + "kind": "BUNDLE", + "metadata_file_path": "/Workspace/Users/[USERNAME]/.bundle/test-bundle/default/state/metadata.json" + }, + "edition": "ADVANCED", + "id": "[UUID]", + "libraries": [ + { + "file": { + "path": "/Workspace/Users/[USERNAME]/.bundle/test-bundle/default/files/foo.py" + } + } + ], + "name": "test-pipeline", + "url": "[DATABRICKS_URL]/pipelines/[UUID]?o=[NUMID]" + } + }, + "schemas": { + "my_schema": { + "catalog_name": "main", + "comment": "COMMENT1", + "id": "main.test-schema", + "name": "test-schema", + "url": "[DATABRICKS_URL]/explore/data/main/test-schema?o=[NUMID]" + } + }, + "sql_warehouses": { + "my_sql_warehouse": { + "auto_stop_mins": 120, + "cluster_size": "X-Large", + "enable_photon": true, + "enable_serverless_compute": true, + "id": "[UUID]", + "max_num_clusters": 1, + "name": "test-sql-warehouse", + "spot_instance_policy": "COST_OPTIMIZED", + "url": "[DATABRICKS_URL]/sql/warehouses/[UUID]?o=[NUMID]" + } + } +} -The argument "evaluation" is required, but no definition was found. +=== Expecting all resources to have modified_status=deleted +>>> [CLI] bundle summary -o json +{ + "alerts": { + "my_alert": { + "id": "[UUID]", + "modified_status": "deleted", + "url": "[DATABRICKS_URL]/sql/alerts-v2/[UUID]?o=[NUMID]" + } + }, + "pipelines": { + "my_pipeline": { + "id": "[UUID]", + "modified_status": "deleted", + "url": "[DATABRICKS_URL]/pipelines/[UUID]?o=[NUMID]" + } + }, + "schemas": { + "my_schema": { + "id": "main.test-schema", + "modified_status": "deleted", + "url": "[DATABRICKS_URL]/explore/data/main/test-schema?o=[NUMID]" + } + }, + "sql_warehouses": { + "my_sql_warehouse": { + "id": "[UUID]", + "modified_status": "deleted", + "url": "[DATABRICKS_URL]/sql/warehouses/[UUID]?o=[NUMID]" + } + } +} -Error: Missing required argument +>>> [CLI] bundle destroy --auto-approve +The following resources will be deleted: + delete resources.alerts.my_alert + delete resources.pipelines.my_pipeline + delete resources.schemas.my_schema + delete resources.sql_warehouses.my_sql_warehouse - on bundle.tf.json line 22, in resource.databricks_alert_v2.my_alert: - 22: } +This action will result in the deletion of the following UC schemas. Any underlying data may be lost: + delete resources.schemas.my_schema -The argument "schedule" is required, but no definition was found. +This action will result in the deletion of the following Lakeflow Declarative Pipelines along with the +Streaming Tables (STs) and Materialized Views (MVs) managed by them: + delete resources.pipelines.my_pipeline +All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/test-bundle/default +Deleting files... +Destroy complete! 
-Exit code: 1 +>>> [CLI] bundle summary -o json +{} From 6d62ba02efa8236afe3bb7198bbb84dce23b5520 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Mon, 1 Dec 2025 12:32:40 +0100 Subject: [PATCH 10/18] lint --- acceptance/bundle/deployment/bind/alert/test.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/acceptance/bundle/deployment/bind/alert/test.toml b/acceptance/bundle/deployment/bind/alert/test.toml index a69c533ecd..22b75b947f 100644 --- a/acceptance/bundle/deployment/bind/alert/test.toml +++ b/acceptance/bundle/deployment/bind/alert/test.toml @@ -5,5 +5,5 @@ BundleConfigTarget = "databricks.yml" Ignore = [ "databricks.yml", - "alert.json", + "alert.json", ] From 50fc3ace4a973d80e5c0e38a3767bcc8de74ff83 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Mon, 1 Dec 2025 14:57:26 +0100 Subject: [PATCH 11/18] update annotations --- bundle/internal/schema/annotations.yml | 20 +------------------- bundle/schema/jsonschema.json | 1 - 2 files changed, 1 insertion(+), 20 deletions(-) diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index 4a8cd4a713..68928e918e 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -461,15 +461,9 @@ github.com/databricks/cli/bundle/config/resources.Alert: "effective_run_as": "description": |- PLACEHOLDER - "evaluation": - "description": |- - PLACEHOLDER "id": "description": |- PLACEHOLDER - "lifecycle": - "description": |- - PLACEHOLDER "lifecycle_state": "description": |- PLACEHOLDER @@ -479,9 +473,6 @@ github.com/databricks/cli/bundle/config/resources.Alert: "parent_path": "description": |- PLACEHOLDER - "permissions": - "description": |- - PLACEHOLDER "query_text": "description": |- PLACEHOLDER @@ -491,9 +482,6 @@ github.com/databricks/cli/bundle/config/resources.Alert: "run_as_user_name": "description": |- PLACEHOLDER - "schedule": - "description": |- - PLACEHOLDER "update_time": "description": |- PLACEHOLDER @@ -552,10 +540,7 @@ github.com/databricks/cli/bundle/config/resources.DashboardPermission: "user_name": "description": |- PLACEHOLDER -github.com/databricks/cli/bundle/config/resources.DatabaseInstance: - "effective_capacity": - "description": |- - PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.DatabaseInstance: {} github.com/databricks/cli/bundle/config/resources.DatabaseInstancePermission: "group_name": "description": |- @@ -725,9 +710,6 @@ github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: "effective_logical_database_name": "description": |- PLACEHOLDER - "lifecycle": - "description": |- - Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "logical_database_name": "description": |- PLACEHOLDER diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 9f273c7882..1ca613aa3f 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -1885,7 +1885,6 @@ "$ref": "#/$defs/string" }, "lifecycle": { - "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. 
It controls the behavior of the resource when it is deployed or destroyed.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" }, "logical_database_name": { From 1cbbc54e2eef727aa10e6f07fce552aa92faecd7 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Tue, 2 Dec 2025 19:22:02 +0100 Subject: [PATCH 12/18] - --- bundle/internal/schema/annotations.yml | 8 +++++++- bundle/schema/jsonschema.json | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index 68928e918e..29b2fd03e6 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -540,7 +540,10 @@ github.com/databricks/cli/bundle/config/resources.DashboardPermission: "user_name": "description": |- PLACEHOLDER -github.com/databricks/cli/bundle/config/resources.DatabaseInstance: {} +github.com/databricks/cli/bundle/config/resources.DatabaseInstance: + "effective_capacity": + "description": |- + PLACEHOLDER github.com/databricks/cli/bundle/config/resources.DatabaseInstancePermission: "group_name": "description": |- @@ -710,6 +713,9 @@ github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: "effective_logical_database_name": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "logical_database_name": "description": |- PLACEHOLDER diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 1a207fb6f9..de17f44ed3 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -1885,6 +1885,7 @@ "$ref": "#/$defs/string" }, "lifecycle": { + "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" }, "logical_database_name": { From c0b126f4437b8cca7a704e038262a2667f5722f8 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Tue, 2 Dec 2025 19:23:29 +0100 Subject: [PATCH 13/18] - --- python/databricks/bundles/jobs/_models/environment.py | 6 ------ ...gestion_pipeline_definition_workday_report_parameters.py | 4 +++- ..._definition_workday_report_parameters_query_key_value.py | 4 +++- .../bundles/pipelines/_models/table_specific_config.py | 6 ++++++ 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/python/databricks/bundles/jobs/_models/environment.py b/python/databricks/bundles/jobs/_models/environment.py index 8beefedfb6..b912693ef2 100644 --- a/python/databricks/bundles/jobs/_models/environment.py +++ b/python/databricks/bundles/jobs/_models/environment.py @@ -34,9 +34,6 @@ class Environment: """ java_dependencies: VariableOrList[str] = field(default_factory=list) - """ - List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`. - """ @classmethod def from_dict(cls, value: "EnvironmentDict") -> "Self": @@ -67,9 +64,6 @@ class EnvironmentDict(TypedDict, total=False): """ java_dependencies: VariableOrList[str] - """ - List of java dependencies. Each dependency is a string representing a java library path. For example: `/Volumes/path/to/test.jar`. 
- """ EnvironmentParam = EnvironmentDict | Environment diff --git a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py index 2aa7163b8f..6a766e36ab 100644 --- a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py +++ b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py @@ -19,7 +19,9 @@ @dataclass(kw_only=True) class IngestionPipelineDefinitionWorkdayReportParameters: - """""" + """ + :meta private: [EXPERIMENTAL] + """ incremental: VariableOrOptional[bool] = None diff --git a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py index e207b4494a..2dff4275a2 100644 --- a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py +++ b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py @@ -11,7 +11,9 @@ @dataclass(kw_only=True) class IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue: - """""" + """ + :meta private: [EXPERIMENTAL] + """ key: VariableOrOptional[str] = None diff --git a/python/databricks/bundles/pipelines/_models/table_specific_config.py b/python/databricks/bundles/pipelines/_models/table_specific_config.py index 8fcfd7eee3..502d170878 100644 --- a/python/databricks/bundles/pipelines/_models/table_specific_config.py +++ b/python/databricks/bundles/pipelines/_models/table_specific_config.py @@ -78,6 +78,9 @@ class TableSpecificConfig: workday_report_parameters: VariableOrOptional[ IngestionPipelineDefinitionWorkdayReportParameters ] = None + """ + :meta private: [EXPERIMENTAL] + """ @classmethod def from_dict(cls, value: "TableSpecificConfigDict") -> "Self": @@ -143,6 +146,9 @@ class TableSpecificConfigDict(TypedDict, total=False): workday_report_parameters: VariableOrOptional[ IngestionPipelineDefinitionWorkdayReportParametersParam ] + """ + :meta private: [EXPERIMENTAL] + """ TableSpecificConfigParam = TableSpecificConfigDict | TableSpecificConfig From c8ce89548c9a7663b8e02f26d8e125007786bb4d Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Wed, 3 Dec 2025 01:57:16 +0100 Subject: [PATCH 14/18] regenerate schema --- .../internal/schema/annotations_openapi.yml | 338 ++++++++++++------ .../schema/annotations_openapi_overrides.yml | 22 +- bundle/schema/jsonschema.json | 161 ++++++--- 3 files changed, 348 insertions(+), 173 deletions(-) diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index fa2921ce76..14a8f3c124 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -77,6 +77,7 @@ github.com/databricks/cli/bundle/config/resources.App: "x-databricks-field-behaviors_output_only": |- true "budget_policy_id": {} + "compute_size": {} "compute_status": "x-databricks-field-behaviors_output_only": |- true @@ -389,50 +390,43 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "custom_tags": "description": |- Custom tags associated with the instance. This field is only included on create and update responses. 
- "x-databricks-preview": |- - PRIVATE + "effective_capacity": + "description": |- + Deprecated. The sku of the instance; this field will always match the value of capacity. + "deprecation_message": |- + This field is deprecated + "x-databricks-field-behaviors_output_only": |- + true "effective_custom_tags": "description": |- The recorded custom tags associated with the instance. "x-databricks-field-behaviors_output_only": |- true - "x-databricks-preview": |- - PRIVATE "effective_enable_pg_native_login": "description": |- - xref AIP-129. `enable_pg_native_login` is owned by the client, while `effective_enable_pg_native_login` is owned by the server. - `enable_pg_native_login` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_enable_pg_native_login` on the other hand will always bet set in all response messages (Create/Update/Get/List). + Whether the instance has PG native password login enabled. "x-databricks-field-behaviors_output_only": |- true - "x-databricks-preview": |- - PRIVATE "effective_enable_readable_secondaries": "description": |- - xref AIP-129. `enable_readable_secondaries` is owned by the client, while `effective_enable_readable_secondaries` is owned by the server. - `enable_readable_secondaries` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_enable_readable_secondaries` on the other hand will always bet set in all response messages (Create/Update/Get/List). + Whether secondaries serving read-only traffic are enabled. Defaults to false. "x-databricks-field-behaviors_output_only": |- true "effective_node_count": "description": |- - xref AIP-129. `node_count` is owned by the client, while `effective_node_count` is owned by the server. - `node_count` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_node_count` on the other hand will always bet set in all response messages (Create/Update/Get/List). + The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to + 1 primary and 0 secondaries. "x-databricks-field-behaviors_output_only": |- true "effective_retention_window_in_days": "description": |- - xref AIP-129. `retention_window_in_days` is owned by the client, while `effective_retention_window_in_days` is owned by the server. - `retention_window_in_days` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_retention_window_in_days` on the other hand will always bet set in all response messages (Create/Update/Get/List). + The retention window for the instance. This is the time window in days + for which the historical data is retained. "x-databricks-field-behaviors_output_only": |- true "effective_stopped": "description": |- - xref AIP-129. `stopped` is owned by the client, while `effective_stopped` is owned by the server. - `stopped` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_stopped` on the other hand will always bet set in all response messages (Create/Update/Get/List). + Whether the instance is stopped. "x-databricks-field-behaviors_output_only": |- true "effective_usage_policy_id": @@ -440,13 +434,9 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: The policy that is applied to the instance. 
"x-databricks-field-behaviors_output_only": |- true - "x-databricks-preview": |- - PRIVATE "enable_pg_native_login": "description": |- - Whether the instance has PG native password login enabled. Defaults to true. - "x-databricks-preview": |- - PRIVATE + Whether to enable PG native password login on the instance. Defaults to false. "enable_readable_secondaries": "description": |- Whether to enable secondaries to serve read-only traffic. Defaults to false. @@ -456,7 +446,7 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "node_count": "description": |- The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to - 1 primary and 0 secondaries. + 1 primary and 0 secondaries. This field is input only, see effective_node_count for the output. "parent_instance_ref": "description": |- The ref of the parent instance. This is only available if the instance is @@ -491,7 +481,7 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: true "stopped": "description": |- - Whether the instance is stopped. + Whether to stop the instance. An input only param, see effective_stopped for the output. "uid": "description": |- An immutable UUID identifier for the instance. @@ -500,8 +490,6 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "usage_policy_id": "description": |- The desired usage policy to associate with the instance. - "x-databricks-preview": |- - PRIVATE github.com/databricks/cli/bundle/config/resources.Job: "budget_policy_id": "description": |- @@ -667,8 +655,6 @@ github.com/databricks/cli/bundle/config/resources.Pipeline: "budget_policy_id": "description": |- Budget policy of this pipeline. - "x-databricks-preview": |- - PRIVATE "catalog": "description": |- A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog. @@ -741,8 +727,6 @@ github.com/databricks/cli/bundle/config/resources.Pipeline: Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline. Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown. - "x-databricks-preview": |- - PRIVATE "schema": "description": |- The default schema (database) where tables are read from or published to. @@ -825,21 +809,48 @@ github.com/databricks/cli/bundle/config/resources.QualityMonitor: Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. github.com/databricks/cli/bundle/config/resources.RegisteredModel: + "aliases": + "description": |- + List of aliases associated with the registered model + "browse_only": + "description": |- + Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request. 
"catalog_name": "description": |- The name of the catalog where the schema and the registered model reside "comment": "description": |- The comment attached to the registered model + "created_at": + "description": |- + Creation timestamp of the registered model in milliseconds since the Unix epoch + "created_by": + "description": |- + The identifier of the user who created the registered model + "full_name": + "description": |- + The three-level (fully qualified) name of the registered model + "metastore_id": + "description": |- + The unique identifier of the metastore "name": "description": |- The name of the registered model + "owner": + "description": |- + The identifier of the user who owns the registered model "schema_name": "description": |- The name of the schema where the registered model resides "storage_location": "description": |- The storage location on the cloud under which model version data files are stored + "updated_at": + "description": |- + Last-update timestamp of the registered model in milliseconds since the Unix epoch + "updated_by": + "description": |- + The identifier of the user who updated the registered model last time github.com/databricks/cli/bundle/config/resources.Schema: "catalog_name": "description": |- @@ -857,36 +868,39 @@ github.com/databricks/cli/bundle/config/resources.Schema: "description": |- Storage root URL for managed tables within schema. github.com/databricks/cli/bundle/config/resources.SqlWarehouse: + "_": + "description": |- + Creates a new SQL warehouse. "auto_stop_mins": "description": |- - The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before - it is automatically stopped. + The amount of time in minutes that a SQL warehouse must be idle (i.e., no + RUNNING queries) before it is automatically stopped. Supported values: - - Must be >= 0 mins for serverless warehouses - - Must be == 0 or >= 10 mins for non-serverless warehouses - - 0 indicates no autostop. + - Must be == 0 or >= 10 mins + - 0 indicates no autostop. Defaults to 120 mins "channel": "description": |- Channel Details "cluster_size": - "description": | + "description": |- Size of the clusters allocated for this warehouse. - Increasing the size of a spark cluster allows you to run larger queries on it. - If you want to increase the number of concurrent queries, please tune max_num_clusters. + Increasing the size of a spark cluster allows you to run larger queries on + it. If you want to increase the number of concurrent queries, please tune + max_num_clusters. Supported values: - - 2X-Small - - X-Small - - Small - - Medium - - Large - - X-Large - - 2X-Large - - 3X-Large - - 4X-Large + - 2X-Small + - X-Small + - Small + - Medium + - Large + - X-Large + - 2X-Large + - 3X-Large + - 4X-Large "creator_name": "description": |- warehouse creator name @@ -905,22 +919,25 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: This field is deprecated "max_num_clusters": "description": |- - Maximum number of clusters that the autoscaler will create to handle concurrent queries. + Maximum number of clusters that the autoscaler will create to handle + concurrent queries. Supported values: - - Must be >= min_num_clusters - - Must be <= 30. + - Must be >= min_num_clusters + - Must be <= 40. Defaults to min_clusters if unset. "min_num_clusters": "description": |- - Minimum number of available clusters that will be maintained for this SQL warehouse. 
- Increasing this will ensure that a larger number of clusters are always running and therefore may reduce - the cold start time for new queries. This is similar to reserved vs. revocable cores in a resource manager. + Minimum number of available clusters that will be maintained for this SQL + warehouse. Increasing this will ensure that a larger number of clusters are + always running and therefore may reduce the cold start time for new + queries. This is similar to reserved vs. revocable cores in a resource + manager. Supported values: - - Must be > 0 - - Must be <= min(max_num_clusters, 30) + - Must be > 0 + - Must be <= min(max_num_clusters, 30) Defaults to 1 "name": @@ -928,25 +945,26 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: Logical name for the cluster. Supported values: - - Must be unique within an org. - - Must be less than 100 characters. + - Must be unique within an org. + - Must be less than 100 characters. "spot_instance_policy": "description": |- - Configurations whether the warehouse should use spot instances. + Configurations whether the endpoint should use spot instances. "tags": "description": |- A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. Supported values: - - Number of tags < 45. + - Number of tags < 45. "warehouse_type": "description": |- - Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. + Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, + you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: "_": "description": |- - Next field marker: 14 + Next field marker: 18 "data_synchronization_status": "description": |- Synced Table data synchronization status @@ -1012,7 +1030,9 @@ github.com/databricks/cli/bundle/config/resources.Volume: The storage location on the cloud "volume_type": "description": |- - The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) + The type of the volume. An external volume is located in the specified external location. + A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. + [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) github.com/databricks/databricks-sdk-go/service/apps.AppDeployment: "create_time": "description": |- @@ -1089,6 +1109,7 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResource: "description": "description": |- Description of the App Resource. 
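
Note: the SqlWarehouse annotations above constrain auto_stop_mins (== 0 or >= 10), max_num_clusters (>= min_num_clusters, <= 40) and warehouse_type (PRO required for serverless). A sketch of a bundle warehouse consistent with those constraints and with the summary output earlier in this series; field names come from the schema, the concrete values are illustrative only.

resources:
  sql_warehouses:
    my_sql_warehouse:
      name: test-sql-warehouse
      cluster_size: X-Large
      auto_stop_mins: 120              # == 0 disables autostop; otherwise >= 10
      min_num_clusters: 1
      max_num_clusters: 1              # must be >= min_num_clusters and <= 40
      enable_serverless_compute: true
      warehouse_type: PRO              # required when enable_serverless_compute is true
      enable_photon: true
      spot_instance_policy: COST_OPTIMIZED
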
+ "genie_space": {} "job": {} "name": "description": |- @@ -1106,6 +1127,21 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResourceDatabaseDatabase "enum": - |- CAN_CONNECT_AND_CREATE +github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpace: + "name": {} + "permission": {} + "space_id": {} +github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpaceGenieSpacePermission: + "_": + "enum": + - |- + CAN_MANAGE + - |- + CAN_EDIT + - |- + CAN_RUN + - |- + CAN_VIEW github.com/databricks/databricks-sdk-go/service/apps.AppResourceJob: "id": "description": |- @@ -1215,6 +1251,15 @@ github.com/databricks/databricks-sdk-go/service/apps.ApplicationStatus: State of the application. "x-databricks-field-behaviors_output_only": |- true +github.com/databricks/databricks-sdk-go/service/apps.ComputeSize: + "_": + "enum": + - |- + MEDIUM + - |- + LARGE + - |- + LIQUID github.com/databricks/databricks-sdk-go/service/apps.ComputeState: "_": "enum": @@ -1374,24 +1419,31 @@ github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries: "description": |- Column for the timestamp. github.com/databricks/databricks-sdk-go/service/catalog.RegisteredModelAlias: - "_": - "description": |- - Registered model alias. "alias_name": "description": |- Name of the alias, e.g. 'champion' or 'latest_stable' + "catalog_name": + "description": |- + The name of the catalog containing the model version + "id": + "description": |- + The unique identifier of the alias + "model_name": + "description": |- + The name of the parent registered model of the model version, relative to parent schema + "schema_name": + "description": |- + The name of the schema containing the model version, relative to parent catalog "version_num": "description": |- Integer version number of the model version to which this alias points. github.com/databricks/databricks-sdk-go/service/catalog.VolumeType: "_": - "description": |- - The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) "enum": - - |- - EXTERNAL - |- MANAGED + - |- + EXTERNAL github.com/databricks/databricks-sdk-go/service/compute.Adlsgen2Info: "_": "description": |- @@ -2135,9 +2187,6 @@ github.com/databricks/databricks-sdk-go/service/database.DatabaseInstanceRef: Output: Only populated if provided as input to create a child instance. "effective_lsn": "description": |- - xref AIP-129. `lsn` is owned by the client, while `effective_lsn` is owned by the server. - `lsn` will only be set in Create/Update response messages if and only if the user provides the field via the request. - `effective_lsn` on the other hand will always bet set in all response messages (Create/Update/Get/List). For a parent ref instance, this is the LSN on the parent instance from which the instance was created. For a child ref instance, this is the LSN on the instance from which the child instance @@ -2483,6 +2532,10 @@ github.com/databricks/databricks-sdk-go/service/jobs.AuthenticationMethod: - |- PAT github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask: + "_": + "description": |- + Clean Rooms notebook task for V1 Clean Room service (GA). + Replaces the deprecated CleanRoomNotebookTask (defined above) which was for V0 service. 
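
Note: the new genie_space app resource above (name, permission, space_id; permission one of CAN_MANAGE, CAN_EDIT, CAN_RUN, CAN_VIEW) could surface in a bundle roughly as sketched below. The surrounding app fields (source_code_path, the resources list shape) are assumed from existing app resource types rather than defined by this patch, and the space_id is hypothetical.

resources:
  apps:
    my_app:
      name: my-app
      source_code_path: ./app
      resources:
        - name: analytics-space
          description: Genie space backing the app
          genie_space:
            space_id: "01ef00000000000000000000"   # hypothetical Genie space ID
            permission: CAN_RUN
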
"clean_room_name": "description": |- The clean room that the notebook belongs to. @@ -2553,8 +2606,6 @@ github.com/databricks/databricks-sdk-go/service/jobs.Continuous: "task_retry_mode": "description": |- Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER. - "x-databricks-preview": |- - PRIVATE github.com/databricks/databricks-sdk-go/service/jobs.CronSchedule: "pause_status": "description": |- @@ -3112,6 +3163,8 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: "dbt_commands": "description": |- An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]` + + ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. "deprecation_message": |- This field is deprecated "x-databricks-preview": |- @@ -3124,7 +3177,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: jar_params cannot be specified in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. "deprecation_message": |- This field is deprecated "x-databricks-preview": |- @@ -3144,7 +3197,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: notebook_params cannot be specified in conjunction with jar_params. - Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. "deprecation_message": |- @@ -3166,7 +3219,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. Important @@ -3183,7 +3236,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs + ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. 
Important @@ -3196,6 +3249,8 @@ github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask: "sql_params": "description": |- A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom parameters. + + ⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks. "deprecation_message": |- This field is deprecated "x-databricks-preview": |- @@ -3217,7 +3272,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.Source: github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask: "jar_uri": "description": |- - Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create. + Deprecated since 04/2016. For classic compute, provide a `jar` through the `libraries` field instead. For serverless compute, provide a `jar` though the `java_dependencies` field inside the `environments` list. + + See the examples of classic and serverless compute usage at the top of the page. "deprecation_message": |- This field is deprecated "main_class_name": @@ -3360,7 +3417,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfigura the last time the trigger fired. The minimum allowed value is 60 seconds. "table_names": "description": |- - A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. "wait_after_last_change_seconds": "description": |- If set, the trigger starts a run only after no table updates have occurred for the specified time @@ -3369,7 +3426,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfigura github.com/databricks/databricks-sdk-go/service/jobs.Task: "clean_rooms_notebook_task": "description": |- - The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook + The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook when the `clean_rooms_notebook_task` field is present. "condition_task": "description": |- @@ -3483,15 +3540,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.Task: The task runs a Python file when the `spark_python_task` field is present. "spark_submit_task": "description": |- - (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute. - - In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. - - `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters. - - By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage. - - The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths. + (Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit). 
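
Note: the deprecation notes above point at job parameters (job-parameter-pushdown) as the replacement for notebook_params, jar_params, python_params and sql_params on RunJobTask. A hedged sketch of that pattern, assuming the job_parameters field on run_job_task and a hypothetical child job ID:

resources:
  jobs:
    orchestrator:
      name: orchestrator
      tasks:
        - task_key: trigger_child
          run_job_task:
            job_id: 1234                 # hypothetical child job ID
            job_parameters:              # preferred over notebook_params / jar_params / python_params
              run_date: "2025-01-01"     # the child job resolves this via its own job-level parameters
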
+ "deprecation_message": |- + This field is deprecated "sql_task": "description": |- The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present. @@ -3572,9 +3623,7 @@ github.com/databricks/databricks-sdk-go/service/jobs.TriggerSettings: "periodic": "description": |- Periodic trigger settings. - "table_update": - "x-databricks-preview": |- - PRIVATE + "table_update": {} github.com/databricks/databricks-sdk-go/service/jobs.Webhook: "id": {} github.com/databricks/databricks-sdk-go/service/jobs.WebhookNotifications: @@ -3631,7 +3680,7 @@ github.com/databricks/databricks-sdk-go/service/pipelines.CronTrigger: github.com/databricks/databricks-sdk-go/service/pipelines.DayOfWeek: "_": "description": |- - Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). + Days of week in which the window is allowed to happen. If not specified all days of the week will be used. "enum": - |- @@ -3730,6 +3779,13 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefin "ingestion_gateway_id": "description": |- Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server. + "netsuite_jar_path": + "description": |- + Netsuite only configuration. When the field is set for a netsuite connector, + the jar stored in the field will be validated and added to the classpath of + pipeline's cluster. + "x-databricks-preview": |- + PRIVATE "objects": "description": |- Required. Settings specifying tables to replicate and the destination for the replicated tables. @@ -3783,6 +3839,40 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefin This field is mutable and can be updated without triggering a full snapshot. "x-databricks-preview": |- PRIVATE +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionWorkdayReportParameters: + "incremental": + "description": |- + (Optional) Marks the report as incremental. + This field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now + controlled by the `parameters` field. + "deprecation_message": |- + This field is deprecated + "parameters": + "description": |- + Parameters for the Workday report. Each key represents the parameter name (e.g., "start_date", "end_date"), + and the corresponding value is a SQL-like expression used to compute the parameter value at runtime. + Example: + { + "start_date": "{ coalesce(current_offset(), date(\"2025-02-01\")) }", + "end_date": "{ current_date() - INTERVAL 1 DAY }" + } + "report_parameters": + "description": |- + (Optional) Additional custom parameters for Workday Report + This field is deprecated and should not be used. Use `parameters` instead. + "deprecation_message": |- + This field is deprecated +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue: + "key": + "description": |- + Key for the report parameter, can be a column name or other metadata + "value": + "description": |- + Value for the report parameter. + Possible values it can take are these sql functions: + 1. coalesce(current_offset(), date("YYYY-MM-DD")) -> if current_offset() is null, then the passed date, else current_offset() + 2. current_date() + 3. 
date_sub(current_date(), x) -> subtract x (some non-negative integer) days from current date github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: "_": "enum": @@ -3818,6 +3908,24 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: SHAREPOINT - |- DYNAMICS365 + - |- + CONFLUENCE + - |- + META_MARKETING + - |- + GOOGLE_ADS + - |- + TIKTOK_ADS + - |- + SALESFORCE_MARKETING_CLOUD + - |- + HUBSPOT + - |- + WORKDAY_HCM + - |- + GUIDEWIRE + - |- + ZENDESK - |- FOREIGN_CATALOG github.com/databricks/databricks-sdk-go/service/pipelines.ManualTrigger: {} @@ -4838,13 +4946,20 @@ github.com/databricks/databricks-sdk-go/service/sql.AlertEvaluationState: OK - |- ERROR +github.com/databricks/databricks-sdk-go/service/sql.AlertLifecycleState: + "_": + "enum": + - |- + ACTIVE + - |- + DELETED github.com/databricks/databricks-sdk-go/service/sql.AlertV2Evaluation: "comparison_operator": "description": |- Operator used for comparison in alert evaluation. "empty_result_state": "description": |- - Alert state if result is empty. + Alert state if result is empty. Please avoid setting this field to be `UNKNOWN` because `UNKNOWN` state is planned to be deprecated. "last_evaluated_at": "description": |- Timestamp of the last evaluation. @@ -4876,7 +4991,9 @@ github.com/databricks/databricks-sdk-go/service/sql.AlertV2Operand: "column": {} "value": {} github.com/databricks/databricks-sdk-go/service/sql.AlertV2OperandColumn: - "aggregation": {} + "aggregation": + "description": |- + If not set, the behavior is equivalent to using `First row` in the UI. "display": {} "name": {} github.com/databricks/databricks-sdk-go/service/sql.AlertV2OperandValue: @@ -4931,8 +5048,6 @@ github.com/databricks/databricks-sdk-go/service/sql.ComparisonOperator: IS_NOT_NULL github.com/databricks/databricks-sdk-go/service/sql.CreateWarehouseRequestWarehouseType: "_": - "description": |- - Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. "enum": - |- TYPE_UNSPECIFIED @@ -4968,7 +5083,24 @@ github.com/databricks/databricks-sdk-go/service/sql.SchedulePauseStatus: github.com/databricks/databricks-sdk-go/service/sql.SpotInstancePolicy: "_": "description": |- - Configurations whether the warehouse should use spot instances. + EndpointSpotInstancePolicy configures whether the endpoint should use spot + instances. + + The breakdown of how the EndpointSpotInstancePolicy converts to per cloud + configurations is: + + +-------+--------------------------------------+--------------------------------+ + | Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED | + +-------+--------------------------------------+--------------------------------+ + | AWS | On Demand Driver with Spot Executors | On Demand Driver and + Executors | | AZURE | On Demand Driver and Executors | On Demand Driver + and Executors | + +-------+--------------------------------------+--------------------------------+ + + While including "spot" in the enum name may limit the the future + extensibility of this field because it limits this enum to denoting "spot or + not", this is the field that PM recommends after discussion with customers + per SC-48783. 
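
Note: the Workday report parameters documented above replace the deprecated incremental/report_parameters fields with a single parameters map of SQL-like expressions. A minimal sketch of how that map might sit in a table-level configuration; the expression strings are the ones given in the annotation, while the surrounding nesting is assumed from the TableSpecificConfig model earlier in this series.

table_configuration:
  workday_report_parameters:
    parameters:
      start_date: '{ coalesce(current_offset(), date("2025-02-01")) }'
      end_date: '{ current_date() - INTERVAL 1 DAY }'
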
"enum": - |- POLICY_UNSPECIFIED diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index dd744d9b1e..cb4f3d9fd2 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -1,14 +1,3 @@ -github.com/databricks/cli/bundle/config/resources.AlertPermissionLevel: - "_": - "enum": - - |- - CAN_EDIT - - |- - CAN_MANAGE - - |- - CAN_READ - - |- - CAN_RUN github.com/databricks/cli/bundle/config/resources.Alert: "evaluation": "description": |- @@ -22,6 +11,17 @@ github.com/databricks/cli/bundle/config/resources.Alert: "schedule": "description": |- PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.AlertPermissionLevel: + "_": + "enum": + - |- + CAN_EDIT + - |- + CAN_MANAGE + - |- + CAN_READ + - |- + CAN_RUN github.com/databricks/cli/bundle/config/resources.App: "app_status": "description": |- diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index de17f44ed3..023f60407a 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -612,18 +612,11 @@ }, "custom_tags": { "description": "Custom tags associated with the instance. This field is only included on create and update responses.", - "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/database.CustomTag", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true - }, - "effective_capacity": { - "$ref": "#/$defs/string" + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/database.CustomTag" }, "enable_pg_native_login": { - "description": "Whether the instance has PG native password login enabled. Defaults to true.", - "$ref": "#/$defs/bool", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "description": "Whether to enable PG native password login on the instance. Defaults to false.", + "$ref": "#/$defs/bool" }, "enable_readable_secondaries": { "description": "Whether to enable secondaries to serve read-only traffic. Defaults to false.", @@ -638,7 +631,7 @@ "$ref": "#/$defs/string" }, "node_count": { - "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries.", + "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries. This field is input only, see effective_node_count for the output.", "$ref": "#/$defs/int" }, "parent_instance_ref": { @@ -653,14 +646,12 @@ "$ref": "#/$defs/int" }, "stopped": { - "description": "Whether the instance is stopped.", + "description": "Whether to stop the instance. An input only param, see effective_stopped for the output.", "$ref": "#/$defs/bool" }, "usage_policy_id": { "description": "The desired usage policy to associate with the instance.", - "$ref": "#/$defs/string", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "$ref": "#/$defs/string" } }, "additionalProperties": false, @@ -1236,9 +1227,7 @@ }, "budget_policy_id": { "description": "Budget policy of this pipeline.", - "$ref": "#/$defs/string", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "$ref": "#/$defs/string" }, "catalog": { "description": "A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). 
If `target` is not specified, no data is published to Unity Catalog.", @@ -1328,9 +1317,7 @@ "$ref": "#/$defs/string" }, "run_as": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RunAs", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RunAs" }, "schema": { "description": "The default schema (database) where tables are read from or published to.", @@ -1760,9 +1747,10 @@ "oneOf": [ { "type": "object", + "description": "Creates a new SQL warehouse.", "properties": { "auto_stop_mins": { - "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before\nit is automatically stopped.\n\nSupported values:\n - Must be \u003e= 0 mins for serverless warehouses\n - Must be == 0 or \u003e= 10 mins for non-serverless warehouses\n - 0 indicates no autostop.\n\nDefaults to 120 mins", + "description": "The amount of time in minutes that a SQL warehouse must be idle (i.e., no\nRUNNING queries) before it is automatically stopped.\n\nSupported values:\n- Must be == 0 or \u003e= 10 mins\n- 0 indicates no autostop.\n\nDefaults to 120 mins", "$ref": "#/$defs/int" }, "channel": { @@ -1770,7 +1758,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.Channel" }, "cluster_size": { - "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on it.\nIf you want to increase the number of concurrent queries, please tune max_num_clusters.\n\nSupported values:\n - 2X-Small\n - X-Small\n - Small\n - Medium\n - Large\n - X-Large\n - 2X-Large\n - 3X-Large\n - 4X-Large\n", + "description": "Size of the clusters allocated for this warehouse.\nIncreasing the size of a spark cluster allows you to run larger queries on\nit. If you want to increase the number of concurrent queries, please tune\nmax_num_clusters.\n\nSupported values:\n- 2X-Small\n- X-Small\n- Small\n- Medium\n- Large\n- X-Large\n- 2X-Large\n- 3X-Large\n- 4X-Large", "$ref": "#/$defs/string" }, "creator_name": { @@ -1796,15 +1784,15 @@ "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" }, "max_num_clusters": { - "description": "Maximum number of clusters that the autoscaler will create to handle concurrent queries.\n\nSupported values:\n - Must be \u003e= min_num_clusters\n - Must be \u003c= 30.\n\nDefaults to min_clusters if unset.", + "description": "Maximum number of clusters that the autoscaler will create to handle\nconcurrent queries.\n\nSupported values:\n- Must be \u003e= min_num_clusters\n- Must be \u003c= 40.\n\nDefaults to min_clusters if unset.", "$ref": "#/$defs/int" }, "min_num_clusters": { - "description": "Minimum number of available clusters that will be maintained for this SQL warehouse.\nIncreasing this will ensure that a larger number of clusters are always running and therefore may reduce\nthe cold start time for new queries. This is similar to reserved vs. revocable cores in a resource manager.\n\nSupported values:\n - Must be \u003e 0\n - Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", + "description": "Minimum number of available clusters that will be maintained for this SQL\nwarehouse. Increasing this will ensure that a larger number of clusters are\nalways running and therefore may reduce the cold start time for new\nqueries. This is similar to reserved vs. 
revocable cores in a resource\nmanager.\n\nSupported values:\n- Must be \u003e 0\n- Must be \u003c= min(max_num_clusters, 30)\n\nDefaults to 1", "$ref": "#/$defs/int" }, "name": { - "description": "Logical name for the cluster.\n\nSupported values:\n - Must be unique within an org.\n - Must be less than 100 characters.", + "description": "Logical name for the cluster.\n\nSupported values:\n- Must be unique within an org.\n- Must be less than 100 characters.", "$ref": "#/$defs/string" }, "permissions": { @@ -1814,7 +1802,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.SpotInstancePolicy" }, "tags": { - "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated\nwith this SQL warehouse.\n\nSupported values:\n - Number of tags \u003c 45.", + "description": "A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated\nwith this SQL warehouse.\n\nSupported values:\n- Number of tags \u003c 45.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.EndpointTags" }, "warehouse_type": { @@ -1879,7 +1867,7 @@ "oneOf": [ { "type": "object", - "description": "Next field marker: 14", + "description": "Next field marker: 18", "properties": { "database_instance_name": { "$ref": "#/$defs/string" @@ -2939,7 +2927,21 @@ ] }, "apps.AppResourceGenieSpaceGenieSpacePermission": { - "type": "string" + "oneOf": [ + { + "type": "string", + "enum": [ + "CAN_MANAGE", + "CAN_EDIT", + "CAN_RUN", + "CAN_VIEW" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "apps.AppResourceJob": { "oneOf": [ @@ -3194,7 +3196,20 @@ ] }, "apps.ComputeSize": { - "type": "string" + "oneOf": [ + { + "type": "string", + "enum": [ + "MEDIUM", + "LARGE", + "LIQUID" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "apps.ComputeState": { "oneOf": [ @@ -3503,7 +3518,6 @@ "oneOf": [ { "type": "object", - "description": "Registered model alias.", "properties": { "alias_name": { "description": "Name of the alias, e.g. 'champion' or 'latest_stable'", @@ -3538,10 +3552,9 @@ "oneOf": [ { "type": "string", - "description": "The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external)", "enum": [ - "EXTERNAL", - "MANAGED" + "MANAGED", + "EXTERNAL" ] }, { @@ -4839,6 +4852,7 @@ "oneOf": [ { "type": "object", + "description": "Clean Rooms notebook task for V1 Clean Room service (GA).\nReplaces the deprecated CleanRoomNotebookTask (defined above) which was for V0 service.", "properties": { "clean_room_name": { "description": "The clean room that the notebook belongs to.", @@ -4975,9 +4989,7 @@ }, "task_retry_mode": { "description": "Indicate whether the continuous job is applying task level retries or not. 
Defaults to NEVER.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskRetryMode", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskRetryMode" } }, "additionalProperties": false @@ -6060,7 +6072,7 @@ "type": "object", "properties": { "dbt_commands": { - "description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`", + "description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", "$ref": "#/$defs/slice/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6068,7 +6080,7 @@ "deprecated": true }, "jar_params": { - "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", + "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", "$ref": "#/$defs/slice/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6084,7 +6096,7 @@ "$ref": "#/$defs/map/string" }, "notebook_params": { - "description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.", + "description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the 
[dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.", "$ref": "#/$defs/map/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6103,7 +6115,7 @@ "deprecated": true }, "python_params": { - "description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", "$ref": "#/$defs/slice/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6111,7 +6123,7 @@ "deprecated": true }, "spark_submit_params": { - "description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. 
If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", "$ref": "#/$defs/slice/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6119,7 +6131,7 @@ "deprecated": true }, "sql_params": { - "description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.", + "description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.\n\n⚠ **Deprecation note** Use [job parameters](https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown) to pass information down to tasks.", "$ref": "#/$defs/map/string", "x-databricks-preview": "PRIVATE", "deprecationMessage": "This field is deprecated", @@ -6160,7 +6172,7 @@ "type": "object", "properties": { "jar_uri": { - "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.", + "description": "Deprecated since 04/2016. For classic compute, provide a `jar` through the `libraries` field instead. For serverless compute, provide a `jar` though the `java_dependencies` field inside the `environments` list.\n\nSee the examples of classic and serverless compute usage at the top of the page.", "$ref": "#/$defs/string", "deprecationMessage": "This field is deprecated", "deprecated": true @@ -6481,7 +6493,7 @@ "$ref": "#/$defs/int" }, "table_names": { - "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "description": "A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", "$ref": "#/$defs/slice/string" }, "wait_after_last_change_seconds": { @@ -6506,7 +6518,7 @@ "type": "object", "properties": { "clean_rooms_notebook_task": { - "description": "The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook\nwhen the `clean_rooms_notebook_task` field is present.", + "description": "The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook\nwhen the `clean_rooms_notebook_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.CleanRoomsNotebookTask" }, "condition_task": { @@ -6637,8 +6649,10 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask" }, "spark_submit_task": { - "description": "(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. 
Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask" + "description": "(Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit).", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask", + "deprecationMessage": "This field is deprecated", + "deprecated": true }, "sql_task": { "description": "The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.", @@ -6798,9 +6812,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PeriodicTriggerConfiguration" }, "table_update": { - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfiguration", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TableUpdateTriggerConfiguration" } }, "additionalProperties": false @@ -6955,7 +6967,7 @@ "oneOf": [ { "type": "string", - "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", + "description": "Days of week in which the window is allowed to happen.\nIf not specified all days of the week will be used.", "enum": [ "MONDAY", "TUESDAY", @@ -7147,7 +7159,9 @@ "$ref": "#/$defs/string" }, "netsuite_jar_path": { - "$ref": "#/$defs/string" + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true }, "objects": { "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", @@ -7209,13 +7223,20 @@ "type": "object", "properties": { "incremental": { - "$ref": "#/$defs/bool" + "description": "(Optional) Marks the report as incremental.\nThis field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now\ncontrolled by the `parameters` field.", + "$ref": "#/$defs/bool", + "deprecationMessage": "This field is deprecated", + "deprecated": true }, "parameters": { + "description": "Parameters for the Workday report. Each key represents the parameter name (e.g., \"start_date\", \"end_date\"),\nand the corresponding value is a SQL-like expression used to compute the parameter value at runtime.\nExample:\n{\n\"start_date\": \"{ coalesce(current_offset(), date(\\\"2025-02-01\\\")) }\",\n\"end_date\": \"{ current_date() - INTERVAL 1 DAY }\"\n}", "$ref": "#/$defs/map/string" }, "report_parameters": { - "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue" + "description": "(Optional) Additional custom parameters for Workday Report\nThis field is deprecated and should not be used. 
Use `parameters` instead.", + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue", + "deprecationMessage": "This field is deprecated", + "deprecated": true } }, "additionalProperties": false @@ -7232,9 +7253,11 @@ "type": "object", "properties": { "key": { + "description": "Key for the report parameter, can be a column name or other metadata", "$ref": "#/$defs/string" }, "value": { + "description": "Value for the report parameter.\nPossible values it can take are these sql functions:\n1. coalesce(current_offset(), date(\"YYYY-MM-DD\")) -\u003e if current_offset() is null, then the passed date, else current_offset()\n2. current_date()\n3. date_sub(current_date(), x) -\u003e subtract x (some non-negative integer) days from current date", "$ref": "#/$defs/string" } }, @@ -7267,6 +7290,15 @@ "TERADATA", "SHAREPOINT", "DYNAMICS365", + "CONFLUENCE", + "META_MARKETING", + "GOOGLE_ADS", + "TIKTOK_ADS", + "SALESFORCE_MARKETING_CLOUD", + "HUBSPOT", + "WORKDAY_HCM", + "GUIDEWIRE", + "ZENDESK", "FOREIGN_CATALOG" ] }, @@ -9056,7 +9088,19 @@ ] }, "sql.AlertLifecycleState": { - "type": "string" + "oneOf": [ + { + "type": "string", + "enum": [ + "ACTIVE", + "DELETED" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "sql.AlertV2Evaluation": { "oneOf": [ @@ -9068,7 +9112,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.ComparisonOperator" }, "empty_result_state": { - "description": "Alert state if result is empty.", + "description": "Alert state if result is empty. Please avoid setting this field to be `UNKNOWN` because `UNKNOWN` state is planned to be deprecated.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/sql.AlertEvaluationState" }, "notification": { @@ -9295,7 +9339,6 @@ "oneOf": [ { "type": "string", - "description": "Warehouse type: `PRO` or `CLASSIC`. 
If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.", "enum": [ "TYPE_UNSPECIFIED", "CLASSIC", @@ -9394,7 +9437,7 @@ "oneOf": [ { "type": "string", - "description": "Configurations whether the warehouse should use spot instances.", + "description": "EndpointSpotInstancePolicy configures whether the endpoint should use spot\ninstances.\n\nThe breakdown of how the EndpointSpotInstancePolicy converts to per cloud\nconfigurations is:\n\n+-------+--------------------------------------+--------------------------------+\n| Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED |\n+-------+--------------------------------------+--------------------------------+\n| AWS | On Demand Driver with Spot Executors | On Demand Driver and\nExecutors | | AZURE | On Demand Driver and Executors | On Demand Driver\nand Executors |\n+-------+--------------------------------------+--------------------------------+\n\nWhile including \"spot\" in the enum name may limit the future\nextensibility of this field because it limits this enum to denoting \"spot or\nnot\", this is the field that PM recommends after discussion with customers\nper SC-48783.", "enum": [ "POLICY_UNSPECIFIED", "COST_OPTIMIZED", From 741e32a6d9166d1c4df0444309c467402d06c117 Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Wed, 3 Dec 2025 01:59:24 +0100 Subject: [PATCH 15/18] fix failing unit test --- bundle/internal/schema/annotations_openapi_overrides.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index cb4f3d9fd2..427c184570 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -229,9 +229,6 @@ github.com/databricks/cli/bundle/config/resources.DatabaseCatalog: "description": |- PLACEHOLDER github.com/databricks/cli/bundle/config/resources.DatabaseInstance: - "effective_capacity": - "description": |- - PLACEHOLDER "lifecycle": "description": |- Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.
@@ -648,10 +645,6 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehousePermissionLevel: CAN_MONITOR - |- CAN_VIEW -github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: - "lifecycle": - "description": |- - PLACEHOLDER github.com/databricks/cli/bundle/config/resources.Volume: "_": "markdown_description": |- From abe8f3c0094839f0fc96302f80e5469daff9b1fb Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Wed, 3 Dec 2025 02:05:38 +0100 Subject: [PATCH 16/18] fix other failing test --- libs/structs/structwalk/walktype_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/structs/structwalk/walktype_test.go b/libs/structs/structwalk/walktype_test.go index fded9f8055..0ad985cf71 100644 --- a/libs/structs/structwalk/walktype_test.go +++ b/libs/structs/structwalk/walktype_test.go @@ -136,7 +136,7 @@ func TestTypeJobSettings(t *testing.T) { func TestTypeRoot(t *testing.T) { testStruct(t, reflect.TypeOf(config.Root{}), - 4000, 4300, // 4003 at the time of the update + 4300, 4700, // 4322 at the time of the update map[string]any{ "bundle.target": "", `variables.*.lookup.dashboard`: "", From bda007344a9c1c7e9df746841174f15a9a62bd0d Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Wed, 3 Dec 2025 15:14:53 +0100 Subject: [PATCH 17/18] fix integration test --- acceptance/bundle/deployment/bind/alert/out.test.toml | 3 +++ acceptance/bundle/deployment/bind/alert/test.toml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/acceptance/bundle/deployment/bind/alert/out.test.toml b/acceptance/bundle/deployment/bind/alert/out.test.toml index 3cdb920b67..e1a07763f6 100644 --- a/acceptance/bundle/deployment/bind/alert/out.test.toml +++ b/acceptance/bundle/deployment/bind/alert/out.test.toml @@ -1,5 +1,8 @@ Local = false Cloud = true +[CloudEnvs] + aws = false + [EnvMatrix] DATABRICKS_BUNDLE_ENGINE = ["terraform"] diff --git a/acceptance/bundle/deployment/bind/alert/test.toml b/acceptance/bundle/deployment/bind/alert/test.toml index 22b75b947f..f1ac73233a 100644 --- a/acceptance/bundle/deployment/bind/alert/test.toml +++ b/acceptance/bundle/deployment/bind/alert/test.toml @@ -3,6 +3,9 @@ Local = false BundleConfigTarget = "databricks.yml" +# On AWS, the host URL also includes the workspace ID, so we skip that cloud to keep the test simple.
+CloudEnvs.aws = false + Ignore = [ "databricks.yml", "alert.json", From caa845e4060179f2ddba7e004e0e15fe4886bbcd Mon Sep 17 00:00:00 2001 From: Shreyas Goenka Date: Wed, 3 Dec 2025 15:15:17 +0100 Subject: [PATCH 18/18] fix python codegen --- .../jobs/_models/clean_rooms_notebook_task.py | 5 ++- .../bundles/jobs/_models/condition.py | 4 --- .../bundles/jobs/_models/continuous.py | 4 --- .../bundles/jobs/_models/spark_jar_task.py | 8 +++-- .../bundles/jobs/_models/spark_submit_task.py | 4 ++- .../table_update_trigger_configuration.py | 8 ++--- .../databricks/bundles/jobs/_models/task.py | 24 +++---------- .../bundles/jobs/_models/task_retry_mode.py | 2 -- .../bundles/jobs/_models/trigger_settings.py | 6 ---- .../bundles/pipelines/_models/day_of_week.py | 2 +- .../_models/ingestion_pipeline_definition.py | 6 ++++ ...ne_definition_workday_report_parameters.py | 36 +++++++++++++++++++ ...rkday_report_parameters_query_key_value.py | 22 ++++++++++++ .../bundles/pipelines/_models/pipeline.py | 10 ------ .../bundles/pipelines/_models/run_as.py | 2 -- .../bundles/volumes/_models/volume_type.py | 8 ++--- 16 files changed, 87 insertions(+), 64 deletions(-) diff --git a/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py b/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py index 7d30a1bad1..d0281f144c 100644 --- a/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py +++ b/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py @@ -15,7 +15,10 @@ @dataclass(kw_only=True) class CleanRoomsNotebookTask: - """""" + """ + Clean Rooms notebook task for V1 Clean Room service (GA). + Replaces the deprecated CleanRoomNotebookTask (defined above) which was for V0 service. + """ clean_room_name: VariableOr[str] """ diff --git a/python/databricks/bundles/jobs/_models/condition.py b/python/databricks/bundles/jobs/_models/condition.py index d1b3566d5d..d7c1b25bc7 100644 --- a/python/databricks/bundles/jobs/_models/condition.py +++ b/python/databricks/bundles/jobs/_models/condition.py @@ -3,10 +3,6 @@ class Condition(Enum): - """ - :meta private: [EXPERIMENTAL] - """ - ANY_UPDATED = "ANY_UPDATED" ALL_UPDATED = "ALL_UPDATED" diff --git a/python/databricks/bundles/jobs/_models/continuous.py b/python/databricks/bundles/jobs/_models/continuous.py index 92b1aad237..43268661c1 100644 --- a/python/databricks/bundles/jobs/_models/continuous.py +++ b/python/databricks/bundles/jobs/_models/continuous.py @@ -25,8 +25,6 @@ class Continuous: task_retry_mode: VariableOrOptional[TaskRetryMode] = None """ - :meta private: [EXPERIMENTAL] - Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER. """ @@ -48,8 +46,6 @@ class ContinuousDict(TypedDict, total=False): task_retry_mode: VariableOrOptional[TaskRetryModeParam] """ - :meta private: [EXPERIMENTAL] - Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER. """ diff --git a/python/databricks/bundles/jobs/_models/spark_jar_task.py b/python/databricks/bundles/jobs/_models/spark_jar_task.py index 40bbe92ba0..9b72738437 100644 --- a/python/databricks/bundles/jobs/_models/spark_jar_task.py +++ b/python/databricks/bundles/jobs/_models/spark_jar_task.py @@ -26,7 +26,9 @@ class SparkJarTask: jar_uri: VariableOrOptional[str] = None """ - [DEPRECATED] Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create. + [DEPRECATED] Deprecated since 04/2016. 
For classic compute, provide a `jar` through the `libraries` field instead. For serverless compute, provide a `jar` through the `java_dependencies` field inside the `environments` list. + + See the examples of classic and serverless compute usage at the top of the page. """ parameters: VariableOrList[str] = field(default_factory=list) """ @@ -61,7 +63,9 @@ class SparkJarTaskDict(TypedDict, total=False): jar_uri: VariableOrOptional[str] """ - [DEPRECATED] Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create. + [DEPRECATED] Deprecated since 04/2016. For classic compute, provide a `jar` through the `libraries` field instead. For serverless compute, provide a `jar` through the `java_dependencies` field inside the `environments` list. + + See the examples of classic and serverless compute usage at the top of the page. """ parameters: VariableOrList[str] diff --git a/python/databricks/bundles/jobs/_models/spark_submit_task.py b/python/databricks/bundles/jobs/_models/spark_submit_task.py index edc57577b5..c809dbe721 100644 --- a/python/databricks/bundles/jobs/_models/spark_submit_task.py +++ b/python/databricks/bundles/jobs/_models/spark_submit_task.py @@ -11,7 +11,9 @@ @dataclass(kw_only=True) class SparkSubmitTask: - """""" + """ + [DEPRECATED] + """ parameters: VariableOrList[str] = field(default_factory=list) """ diff --git a/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py b/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py index c89b0c4011..c824a72499 100644 --- a/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py +++ b/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py @@ -12,9 +12,7 @@ @dataclass(kw_only=True) class TableUpdateTriggerConfiguration: - """ - :meta private: [EXPERIMENTAL] - """ + """""" condition: VariableOrOptional[Condition] = None """ @@ -29,7 +27,7 @@ class TableUpdateTriggerConfiguration: table_names: VariableOrList[str] = field(default_factory=list) """ - A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. """ wait_after_last_change_seconds: VariableOrOptional[int] = None @@ -63,7 +61,7 @@ class TableUpdateTriggerConfigurationDict(TypedDict, total=False): table_names: VariableOrList[str] """ - A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. + A list of tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`. """ wait_after_last_change_seconds: VariableOrOptional[int] diff --git a/python/databricks/bundles/jobs/_models/task.py b/python/databricks/bundles/jobs/_models/task.py index be497ff0f2..0393213b65 100644 --- a/python/databricks/bundles/jobs/_models/task.py +++ b/python/databricks/bundles/jobs/_models/task.py @@ -104,7 +104,7 @@ class Task: clean_rooms_notebook_task: VariableOrOptional[CleanRoomsNotebookTask] = None """ - The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook + The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook when the `clean_rooms_notebook_task` field is present.
""" @@ -268,15 +268,7 @@ class Task: spark_submit_task: VariableOrOptional[SparkSubmitTask] = None """ - (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute. - - In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. - - `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters. - - By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage. - - The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths. + [DEPRECATED] (Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit). """ sql_task: VariableOrOptional[SqlTask] = None @@ -314,7 +306,7 @@ class TaskDict(TypedDict, total=False): clean_rooms_notebook_task: VariableOrOptional[CleanRoomsNotebookTaskParam] """ - The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook + The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook when the `clean_rooms_notebook_task` field is present. """ @@ -478,15 +470,7 @@ class TaskDict(TypedDict, total=False): spark_submit_task: VariableOrOptional[SparkSubmitTaskParam] """ - (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute. - - In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. - - `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters. - - By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage. - - The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths. + [DEPRECATED] (Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit). """ sql_task: VariableOrOptional[SqlTaskParam] diff --git a/python/databricks/bundles/jobs/_models/task_retry_mode.py b/python/databricks/bundles/jobs/_models/task_retry_mode.py index 36c29a7f9b..ce5ccaa687 100644 --- a/python/databricks/bundles/jobs/_models/task_retry_mode.py +++ b/python/databricks/bundles/jobs/_models/task_retry_mode.py @@ -4,8 +4,6 @@ class TaskRetryMode(Enum): """ - :meta private: [EXPERIMENTAL] - task retry mode of the continuous job * NEVER: The failed task will not be retried. * ON_FAILURE: Retry a failed task if at least one other task in the job is still running its first attempt. 
diff --git a/python/databricks/bundles/jobs/_models/trigger_settings.py b/python/databricks/bundles/jobs/_models/trigger_settings.py index afadca6d30..67b3a4def7 100644 --- a/python/databricks/bundles/jobs/_models/trigger_settings.py +++ b/python/databricks/bundles/jobs/_models/trigger_settings.py @@ -51,9 +51,6 @@ class TriggerSettings: """ table_update: VariableOrOptional[TableUpdateTriggerConfiguration] = None - """ - :meta private: [EXPERIMENTAL] - """ @classmethod def from_dict(cls, value: "TriggerSettingsDict") -> "Self": @@ -87,9 +84,6 @@ class TriggerSettingsDict(TypedDict, total=False): """ table_update: VariableOrOptional[TableUpdateTriggerConfigurationParam] - """ - :meta private: [EXPERIMENTAL] - """ TriggerSettingsParam = TriggerSettingsDict | TriggerSettings diff --git a/python/databricks/bundles/pipelines/_models/day_of_week.py b/python/databricks/bundles/pipelines/_models/day_of_week.py index eaf5cbc9ff..a685c2b308 100644 --- a/python/databricks/bundles/pipelines/_models/day_of_week.py +++ b/python/databricks/bundles/pipelines/_models/day_of_week.py @@ -6,7 +6,7 @@ class DayOfWeek(Enum): """ :meta private: [EXPERIMENTAL] - Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). + Days of week in which the window is allowed to happen. If not specified all days of the week will be used. """ diff --git a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py index a106d691a7..dc3b447396 100644 --- a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py +++ b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py @@ -46,6 +46,9 @@ class IngestionPipelineDefinition: """ netsuite_jar_path: VariableOrOptional[str] = None + """ + :meta private: [EXPERIMENTAL] + """ objects: VariableOrList[IngestionConfig] = field(default_factory=list) """ @@ -94,6 +97,9 @@ class IngestionPipelineDefinitionDict(TypedDict, total=False): """ netsuite_jar_path: VariableOrOptional[str] + """ + :meta private: [EXPERIMENTAL] + """ objects: VariableOrList[IngestionConfigParam] """ diff --git a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py index 6a766e36ab..d48d68495d 100644 --- a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py +++ b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters.py @@ -24,12 +24,30 @@ class IngestionPipelineDefinitionWorkdayReportParameters: """ incremental: VariableOrOptional[bool] = None + """ + [DEPRECATED] (Optional) Marks the report as incremental. + This field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now + controlled by the `parameters` field. + """ parameters: VariableOrDict[str] = field(default_factory=dict) + """ + Parameters for the Workday report. Each key represents the parameter name (e.g., "start_date", "end_date"), + and the corresponding value is a SQL-like expression used to compute the parameter value at runtime. 
+ Example: + { + "start_date": "{ coalesce(current_offset(), date(\"2025-02-01\")) }", + "end_date": "{ current_date() - INTERVAL 1 DAY }" + } + """ report_parameters: VariableOrList[ IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue ] = field(default_factory=list) + """ + [DEPRECATED] (Optional) Additional custom parameters for Workday Report + This field is deprecated and should not be used. Use `parameters` instead. + """ @classmethod def from_dict( @@ -45,12 +63,30 @@ class IngestionPipelineDefinitionWorkdayReportParametersDict(TypedDict, total=Fa """""" incremental: VariableOrOptional[bool] + """ + [DEPRECATED] (Optional) Marks the report as incremental. + This field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now + controlled by the `parameters` field. + """ parameters: VariableOrDict[str] + """ + Parameters for the Workday report. Each key represents the parameter name (e.g., "start_date", "end_date"), + and the corresponding value is a SQL-like expression used to compute the parameter value at runtime. + Example: + { + "start_date": "{ coalesce(current_offset(), date(\"2025-02-01\")) }", + "end_date": "{ current_date() - INTERVAL 1 DAY }" + } + """ report_parameters: VariableOrList[ IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueParam ] + """ + [DEPRECATED] (Optional) Additional custom parameters for Workday Report + This field is deprecated and should not be used. Use `parameters` instead. + """ IngestionPipelineDefinitionWorkdayReportParametersParam = ( diff --git a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py index 2dff4275a2..2a24858d66 100644 --- a/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py +++ b/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py @@ -13,11 +13,23 @@ class IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue: """ :meta private: [EXPERIMENTAL] + + [DEPRECATED] """ key: VariableOrOptional[str] = None + """ + Key for the report parameter, can be a column name or other metadata + """ value: VariableOrOptional[str] = None + """ + Value for the report parameter. + Possible values it can take are these sql functions: + 1. coalesce(current_offset(), date("YYYY-MM-DD")) -> if current_offset() is null, then the passed date, else current_offset() + 2. current_date() + 3. date_sub(current_date(), x) -> subtract x (some non-negative integer) days from current date + """ @classmethod def from_dict( @@ -38,8 +50,18 @@ class IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueDict( """""" key: VariableOrOptional[str] + """ + Key for the report parameter, can be a column name or other metadata + """ value: VariableOrOptional[str] + """ + Value for the report parameter. + Possible values it can take are these sql functions: + 1. coalesce(current_offset(), date("YYYY-MM-DD")) -> if current_offset() is null, then the passed date, else current_offset() + 2. current_date() + 3. 
date_sub(current_date(), x) -> subtract x (some non-negative integer) days from current date + """ IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueParam = ( diff --git a/python/databricks/bundles/pipelines/_models/pipeline.py b/python/databricks/bundles/pipelines/_models/pipeline.py index 86f3bbc266..2b4bbed23b 100644 --- a/python/databricks/bundles/pipelines/_models/pipeline.py +++ b/python/databricks/bundles/pipelines/_models/pipeline.py @@ -70,8 +70,6 @@ class Pipeline(Resource): budget_policy_id: VariableOrOptional[str] = None """ - :meta private: [EXPERIMENTAL] - Budget policy of this pipeline. """ @@ -184,9 +182,6 @@ class Pipeline(Resource): """ run_as: VariableOrOptional[RunAs] = None - """ - :meta private: [EXPERIMENTAL] - """ schema: VariableOrOptional[str] = None """ @@ -240,8 +235,6 @@ class PipelineDict(TypedDict, total=False): budget_policy_id: VariableOrOptional[str] """ - :meta private: [EXPERIMENTAL] - Budget policy of this pipeline. """ @@ -354,9 +347,6 @@ class PipelineDict(TypedDict, total=False): """ run_as: VariableOrOptional[RunAsParam] - """ - :meta private: [EXPERIMENTAL] - """ schema: VariableOrOptional[str] """ diff --git a/python/databricks/bundles/pipelines/_models/run_as.py b/python/databricks/bundles/pipelines/_models/run_as.py index dadceecac7..b4d52af00a 100644 --- a/python/databricks/bundles/pipelines/_models/run_as.py +++ b/python/databricks/bundles/pipelines/_models/run_as.py @@ -12,8 +12,6 @@ @dataclass(kw_only=True) class RunAs: """ - :meta private: [EXPERIMENTAL] - Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline. Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown. diff --git a/python/databricks/bundles/volumes/_models/volume_type.py b/python/databricks/bundles/volumes/_models/volume_type.py index 1b9bcd1089..5c96db8fde 100644 --- a/python/databricks/bundles/volumes/_models/volume_type.py +++ b/python/databricks/bundles/volumes/_models/volume_type.py @@ -3,12 +3,8 @@ class VolumeType(Enum): - """ - The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) - """ - - EXTERNAL = "EXTERNAL" MANAGED = "MANAGED" + EXTERNAL = "EXTERNAL" -VolumeTypeParam = Literal["EXTERNAL", "MANAGED"] | VolumeType +VolumeTypeParam = Literal["MANAGED", "EXTERNAL"] | VolumeType
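
Since this series also un-gates the `table_update` trigger in both the bundle JSON schema and the regenerated Python models, a minimal usage sketch follows. It is illustrative only and not part of the patch: the field names (`table_names`, `condition`, `wait_after_last_change_seconds`) and module paths are taken verbatim from the diffs above, and it assumes the package is importable as laid out under python/databricks/bundles/; the public re-export path may differ.

    # Hypothetical sketch; imports use the private module paths shown in the diff above.
    from databricks.bundles.jobs._models.condition import Condition
    from databricks.bundles.jobs._models.table_update_trigger_configuration import (
        TableUpdateTriggerConfiguration,
    )

    # Fire when any monitored table changes, once the table has been quiet for 120 seconds.
    trigger = TableUpdateTriggerConfiguration(
        table_names=["main.analytics.orders", "main.analytics.customers"],
        condition=Condition.ANY_UPDATED,
        wait_after_last_change_seconds=120,
    )
    print(trigger)

The regenerated files pair each dataclass with a TypedDict variant (for example `TableUpdateTriggerConfigurationDict`), so the same configuration can also be expressed as a plain dict in bundle code.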