diff --git a/acceptance/bundle/lifecycle/prevent-destroy/databricks.yml b/acceptance/bundle/lifecycle/prevent-destroy/databricks.yml new file mode 100644 index 0000000000..67f898bb53 --- /dev/null +++ b/acceptance/bundle/lifecycle/prevent-destroy/databricks.yml @@ -0,0 +1,5 @@ +bundle: + name: prevent-destroy + +include: + - resources/*.yml diff --git a/acceptance/bundle/lifecycle/prevent-destroy/out.direct-exp.txt b/acceptance/bundle/lifecycle/prevent-destroy/out.direct-exp.txt new file mode 100644 index 0000000000..0c27dcdea3 --- /dev/null +++ b/acceptance/bundle/lifecycle/prevent-destroy/out.direct-exp.txt @@ -0,0 +1,67 @@ + +>>> musterr [CLI] bundle destroy --auto-approve +Error: resource my_pipelines has lifecycle.prevent_destroy set, but the plan calls for this resource to be recreated or destroyed. To avoid this error, disable lifecycle.prevent_destroy for pipelines.my_pipelines +resource my_schema has lifecycle.prevent_destroy set, but the plan calls for this resource to be recreated or destroyed. To avoid this error, disable lifecycle.prevent_destroy for schemas.my_schema + + +Exit code (musterr): 1 + +>>> errcode [CLI] bundle plan +recreate pipelines.my_pipelines + +>>> musterr [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/prevent-destroy/default/files... +Error: resource my_pipelines has lifecycle.prevent_destroy set, but the plan calls for this resource to be recreated or destroyed. To avoid this error, disable lifecycle.prevent_destroy for pipelines.my_pipelines + + +Exit code (musterr): 1 + +>>> errcode [CLI] bundle plan +recreate pipelines.my_pipelines +recreate schemas.my_schema + +>>> musterr [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/prevent-destroy/default/files... +Error: resource my_pipelines has lifecycle.prevent_destroy set, but the plan calls for this resource to be recreated or destroyed. To avoid this error, disable lifecycle.prevent_destroy for pipelines.my_pipelines +resource my_schema has lifecycle.prevent_destroy set, but the plan calls for this resource to be recreated or destroyed. To avoid this error, disable lifecycle.prevent_destroy for schemas.my_schema + + +Exit code (musterr): 1 + +>>> errcode [CLI] bundle plan +recreate pipelines.my_pipelines +recreate schemas.my_schema + +>>> [CLI] bundle deploy --auto-approve +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/prevent-destroy/default/files... + +This action will result in the deletion or recreation of the following UC schemas. Any underlying data may be lost: + recreate schema my_schema + +This action will result in the deletion or recreation of the following Lakeflow Declarative Pipelines along with the +Streaming Tables (STs) and Materialized Views (MVs) managed by them. Recreating the pipelines will +restore the defined STs and MVs through full refresh. Note that recreation is necessary when pipeline +properties such as the 'catalog' or 'storage' are changed: + recreate pipeline my_pipelines +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> errcode [CLI] bundle plan +delete pipelines.my_pipelines +delete schemas.my_schema + +>>> [CLI] bundle deploy --auto-approve +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/prevent-destroy/default/files... + +This action will result in the deletion or recreation of the following UC schemas. 
Any underlying data may be lost: + delete schema my_schema + +This action will result in the deletion or recreation of the following Lakeflow Declarative Pipelines along with the +Streaming Tables (STs) and Materialized Views (MVs) managed by them. Recreating the pipelines will +restore the defined STs and MVs through full refresh. Note that recreation is necessary when pipeline +properties such as the 'catalog' or 'storage' are changed: + delete pipeline my_pipelines +Deploying resources... +Updating deployment state... +Deployment complete! diff --git a/acceptance/bundle/lifecycle/prevent-destroy/out.terraform.txt b/acceptance/bundle/lifecycle/prevent-destroy/out.terraform.txt new file mode 100644 index 0000000000..3c78d4985c --- /dev/null +++ b/acceptance/bundle/lifecycle/prevent-destroy/out.terraform.txt @@ -0,0 +1,155 @@ + +>>> musterr [CLI] bundle destroy --auto-approve +Error: exit status 1 + +Error: Instance cannot be destroyed + + on bundle.tf.json line 15, in resource.databricks_pipeline: + 15: "my_pipelines": { + +Resource databricks_pipeline.my_pipelines has lifecycle.prevent_destroy set, +but the plan calls for this resource to be destroyed. To avoid this error and +continue with the plan, either disable lifecycle.prevent_destroy or reduce +the scope of the plan using the -target flag. + +Error: Instance cannot be destroyed + + on bundle.tf.json line 38, in resource.databricks_schema: + 38: "my_schema": { + +Resource databricks_schema.my_schema has lifecycle.prevent_destroy set, but +the plan calls for this resource to be destroyed. To avoid this error and +continue with the plan, either disable lifecycle.prevent_destroy or reduce +the scope of the plan using the -target flag. + + + +Exit code (musterr): 1 + +>>> errcode [CLI] bundle plan +Error: exit status 1 + +Error: Instance cannot be destroyed + + on bundle.tf.json line 15, in resource.databricks_pipeline: + 15: "my_pipelines": { + +Resource databricks_pipeline.my_pipelines has lifecycle.prevent_destroy set, +but the plan calls for this resource to be destroyed. To avoid this error and +continue with the plan, either disable lifecycle.prevent_destroy or reduce +the scope of the plan using the -target flag. + + + +Exit code: 1 + +>>> musterr [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/prevent-destroy/default/files... +Error: exit status 1 + +Error: Instance cannot be destroyed + + on bundle.tf.json line 15, in resource.databricks_pipeline: + 15: "my_pipelines": { + +Resource databricks_pipeline.my_pipelines has lifecycle.prevent_destroy set, +but the plan calls for this resource to be destroyed. To avoid this error and +continue with the plan, either disable lifecycle.prevent_destroy or reduce +the scope of the plan using the -target flag. + + + +Exit code (musterr): 1 + +>>> errcode [CLI] bundle plan +Error: exit status 1 + +Error: Instance cannot be destroyed + + on bundle.tf.json line 15, in resource.databricks_pipeline: + 15: "my_pipelines": { + +Resource databricks_pipeline.my_pipelines has lifecycle.prevent_destroy set, +but the plan calls for this resource to be destroyed. To avoid this error and +continue with the plan, either disable lifecycle.prevent_destroy or reduce +the scope of the plan using the -target flag. + +Error: Instance cannot be destroyed + + on bundle.tf.json line 38, in resource.databricks_schema: + 38: "my_schema": { + +Resource databricks_schema.my_schema has lifecycle.prevent_destroy set, but +the plan calls for this resource to be destroyed. 
To avoid this error and +continue with the plan, either disable lifecycle.prevent_destroy or reduce +the scope of the plan using the -target flag. + + + +Exit code: 1 + +>>> musterr [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/prevent-destroy/default/files... +Error: exit status 1 + +Error: Instance cannot be destroyed + + on bundle.tf.json line 15, in resource.databricks_pipeline: + 15: "my_pipelines": { + +Resource databricks_pipeline.my_pipelines has lifecycle.prevent_destroy set, +but the plan calls for this resource to be destroyed. To avoid this error and +continue with the plan, either disable lifecycle.prevent_destroy or reduce +the scope of the plan using the -target flag. + +Error: Instance cannot be destroyed + + on bundle.tf.json line 38, in resource.databricks_schema: + 38: "my_schema": { + +Resource databricks_schema.my_schema has lifecycle.prevent_destroy set, but +the plan calls for this resource to be destroyed. To avoid this error and +continue with the plan, either disable lifecycle.prevent_destroy or reduce +the scope of the plan using the -target flag. + + + +Exit code (musterr): 1 + +>>> errcode [CLI] bundle plan +recreate pipelines.my_pipelines +recreate schemas.my_schema + +>>> [CLI] bundle deploy --auto-approve +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/prevent-destroy/default/files... + +This action will result in the deletion or recreation of the following UC schemas. Any underlying data may be lost: + recreate schema my_schema + +This action will result in the deletion or recreation of the following Lakeflow Declarative Pipelines along with the +Streaming Tables (STs) and Materialized Views (MVs) managed by them. Recreating the pipelines will +restore the defined STs and MVs through full refresh. Note that recreation is necessary when pipeline +properties such as the 'catalog' or 'storage' are changed: + recreate pipeline my_pipelines +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> errcode [CLI] bundle plan +delete pipelines.my_pipelines +delete schemas.my_schema + +>>> [CLI] bundle deploy --auto-approve +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/prevent-destroy/default/files... + +This action will result in the deletion or recreation of the following UC schemas. Any underlying data may be lost: + delete schema my_schema + +This action will result in the deletion or recreation of the following Lakeflow Declarative Pipelines along with the +Streaming Tables (STs) and Materialized Views (MVs) managed by them. Recreating the pipelines will +restore the defined STs and MVs through full refresh. Note that recreation is necessary when pipeline +properties such as the 'catalog' or 'storage' are changed: + delete pipeline my_pipelines +Deploying resources... +Updating deployment state... +Deployment complete! 
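The two expected-output files above differ in where the guard fires: with the terraform backend, the lifecycle block is serialized into bundle.tf.json and Terraform itself reports "Instance cannot be destroyed", while with direct-exp the error comes from the CLI-side checkForPreventDestroy check added in bundle/phases/plan.go further down in this diff. As a rough sketch of the JSON shape the converters emit for Terraform (hypothetical trimmed-down types, not the CLI's actual schema structs):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// tfLifecycle mirrors the `"lifecycle":{"prevent_destroy":true}` fragment
// that TestConvertLifecycleForAllResources asserts on below.
type tfLifecycle struct {
	PreventDestroy bool `json:"prevent_destroy"`
}

type tfPipeline struct {
	Name      string      `json:"name"`
	Lifecycle tfLifecycle `json:"lifecycle"`
}

func main() {
	// Resource section of bundle.tf.json as Terraform sees it before
	// refusing to destroy databricks_pipeline.my_pipelines.
	root := map[string]any{
		"resource": map[string]any{
			"databricks_pipeline": map[string]tfPipeline{
				"my_pipelines": {
					Name:      "test-pipeline",
					Lifecycle: tfLifecycle{PreventDestroy: true},
				},
			},
		},
	}
	out, err := json.MarshalIndent(root, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```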
diff --git a/acceptance/bundle/lifecycle/prevent-destroy/out.test.toml b/acceptance/bundle/lifecycle/prevent-destroy/out.test.toml
new file mode 100644
index 0000000000..8f3575be7b
--- /dev/null
+++ b/acceptance/bundle/lifecycle/prevent-destroy/out.test.toml
@@ -0,0 +1,5 @@
+Local = true
+Cloud = false
+
+[EnvMatrix]
+  DATABRICKS_CLI_DEPLOYMENT = ["terraform", "direct-exp"]
diff --git a/acceptance/bundle/lifecycle/prevent-destroy/output.txt b/acceptance/bundle/lifecycle/prevent-destroy/output.txt
new file mode 100644
index 0000000000..33bdbbc346
--- /dev/null
+++ b/acceptance/bundle/lifecycle/prevent-destroy/output.txt
@@ -0,0 +1,17 @@
+
+>>> [CLI] bundle validate
+Name: prevent-destroy
+Target: default
+Workspace:
+  User: [USERNAME]
+  Path: /Workspace/Users/[USERNAME]/.bundle/prevent-destroy/default
+
+Validation OK!
+
+>>> [CLI] bundle deploy
+Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/prevent-destroy/default/files...
+Deploying resources...
+Updating deployment state...
+Deployment complete!
+
+>>> errcode [CLI] bundle plan
diff --git a/acceptance/bundle/lifecycle/prevent-destroy/resources/pipeline.yml b/acceptance/bundle/lifecycle/prevent-destroy/resources/pipeline.yml
new file mode 100644
index 0000000000..075e31b8ad
--- /dev/null
+++ b/acceptance/bundle/lifecycle/prevent-destroy/resources/pipeline.yml
@@ -0,0 +1,11 @@
+resources:
+  pipelines:
+    my_pipelines:
+      name: "test-pipeline"
+      libraries:
+        - notebook:
+            path: "../test-notebook.py"
+      lifecycle:
+        prevent_destroy: true
+      schema: test-schema
+      catalog: main
diff --git a/acceptance/bundle/lifecycle/prevent-destroy/resources/schema.yml b/acceptance/bundle/lifecycle/prevent-destroy/resources/schema.yml
new file mode 100644
index 0000000000..840f0c1cde
--- /dev/null
+++ b/acceptance/bundle/lifecycle/prevent-destroy/resources/schema.yml
@@ -0,0 +1,7 @@
+resources:
+  schemas:
+    my_schema:
+      catalog_name: "test-catalog"
+      name: test-schema
+      lifecycle:
+        prevent_destroy: true
diff --git a/acceptance/bundle/lifecycle/prevent-destroy/script b/acceptance/bundle/lifecycle/prevent-destroy/script
new file mode 100644
index 0000000000..92ccac5a68
--- /dev/null
+++ b/acceptance/bundle/lifecycle/prevent-destroy/script
@@ -0,0 +1,30 @@
+trace $CLI bundle validate
+
+trace $CLI bundle deploy
+
+trace errcode $CLI bundle plan
+trace musterr $CLI bundle destroy --auto-approve >>out.$DATABRICKS_CLI_DEPLOYMENT.txt 2>&1
+
+# Changing the catalog name, deploy must fail because the pipeline will be recreated
+update_file.py resources/pipeline.yml 'catalog: main' 'catalog: mainnew'
+trace errcode $CLI bundle plan >>out.$DATABRICKS_CLI_DEPLOYMENT.txt 2>&1
+trace musterr $CLI bundle deploy >>out.$DATABRICKS_CLI_DEPLOYMENT.txt 2>&1
+
+# Changing the schema name, deploy must fail because the schema will be recreated
+update_file.py resources/schema.yml 'name: test-schema' 'name: test-schema-new'
+trace errcode $CLI bundle plan >>out.$DATABRICKS_CLI_DEPLOYMENT.txt 2>&1
+trace musterr $CLI bundle deploy >>out.$DATABRICKS_CLI_DEPLOYMENT.txt 2>&1
+
+# Disabling prevent_destroy, deploy must succeed
+update_file.py resources/pipeline.yml 'prevent_destroy: true' 'prevent_destroy: false'
+update_file.py resources/schema.yml 'prevent_destroy: true' 'prevent_destroy: false'
+trace errcode $CLI bundle plan >>out.$DATABRICKS_CLI_DEPLOYMENT.txt 2>&1
+trace $CLI bundle deploy --auto-approve >>out.$DATABRICKS_CLI_DEPLOYMENT.txt 2>&1
+update_file.py resources/pipeline.yml 'prevent_destroy: false' 'prevent_destroy: true'
+update_file.py resources/schema.yml 
'prevent_destroy: false' 'prevent_destroy: true' + + +# Removing the pipeline and schema, deploy must succeed +rm resources/pipeline.yml resources/schema.yml +trace errcode $CLI bundle plan >>out.$DATABRICKS_CLI_DEPLOYMENT.txt 2>&1 +trace $CLI bundle deploy --auto-approve >>out.$DATABRICKS_CLI_DEPLOYMENT.txt 2>&1 diff --git a/acceptance/bundle/lifecycle/prevent-destroy/test-notebook.py b/acceptance/bundle/lifecycle/prevent-destroy/test-notebook.py new file mode 100644 index 0000000000..24dc150ffb --- /dev/null +++ b/acceptance/bundle/lifecycle/prevent-destroy/test-notebook.py @@ -0,0 +1,3 @@ +# Databricks notebook source + +print("Hello, World!") diff --git a/acceptance/bundle/lifecycle/prevent-destroy/test.toml b/acceptance/bundle/lifecycle/prevent-destroy/test.toml new file mode 100644 index 0000000000..3b15472573 --- /dev/null +++ b/acceptance/bundle/lifecycle/prevent-destroy/test.toml @@ -0,0 +1,32 @@ +EnvVaryOutput = "DATABRICKS_CLI_DEPLOYMENT" + +Ignore = [ + ".databricks" +] + +[[Server]] +Pattern = "POST /api/2.0/serving-endpoints" +Response.Body = ''' +{ + "id": "test-endpoint-6260d50f-e8ff-4905-8f28-812345678903", + "name": "test-endpoint-6260d50f-e8ff-4905-8f28-812345678903" +} +''' + +[[Server]] +Pattern = "GET /api/2.0/serving-endpoints/" + + +[[Server]] +Pattern = "GET /api/2.0/serving-endpoints/test-endpoint-6260d50f-e8ff-4905-8f28-812345678903" +Response.Body = ''' +{ + "id": "test-endpoint-6260d50f-e8ff-4905-8f28-812345678903", + "permission_level": "CAN_MANAGE", + "route_optimized": false, + "state": { + "config_update": "NOT_UPDATING", + "ready": "NOT_READY" + } +} +''' diff --git a/acceptance/bundle/lifecycle/test.toml b/acceptance/bundle/lifecycle/test.toml new file mode 100644 index 0000000000..7d36fb9dc1 --- /dev/null +++ b/acceptance/bundle/lifecycle/test.toml @@ -0,0 +1,2 @@ +Local = true +Cloud = false diff --git a/acceptance/bundle/refschema/out.fields.txt b/acceptance/bundle/refschema/out.fields.txt index 77ef1eb6f9..940f91f073 100644 --- a/acceptance/bundle/refschema/out.fields.txt +++ b/acceptance/bundle/refschema/out.fields.txt @@ -27,6 +27,8 @@ resources.apps.*.effective_budget_policy_id string ALL resources.apps.*.effective_user_api_scopes []string ALL resources.apps.*.effective_user_api_scopes[*] string ALL resources.apps.*.id string ALL +resources.apps.*.lifecycle resources.Lifecycle INPUT +resources.apps.*.lifecycle.prevent_destroy bool INPUT resources.apps.*.modified_status string INPUT resources.apps.*.name string ALL resources.apps.*.oauth2_app_client_id string ALL @@ -87,6 +89,8 @@ resources.database_catalogs.*.create_database_if_not_exists bool ALL resources.database_catalogs.*.database_instance_name string ALL resources.database_catalogs.*.database_name string ALL resources.database_catalogs.*.id string INPUT +resources.database_catalogs.*.lifecycle resources.Lifecycle INPUT +resources.database_catalogs.*.lifecycle.prevent_destroy bool INPUT resources.database_catalogs.*.modified_status string INPUT resources.database_catalogs.*.name string ALL resources.database_catalogs.*.uid string ALL @@ -109,6 +113,8 @@ resources.database_instances.*.effective_stopped bool ALL resources.database_instances.*.enable_pg_native_login bool ALL resources.database_instances.*.enable_readable_secondaries bool ALL resources.database_instances.*.id string INPUT +resources.database_instances.*.lifecycle resources.Lifecycle INPUT +resources.database_instances.*.lifecycle.prevent_destroy bool INPUT resources.database_instances.*.modified_status string INPUT 
resources.database_instances.*.name string ALL resources.database_instances.*.node_count int ALL @@ -294,6 +300,8 @@ resources.jobs.*.job_clusters[*].new_cluster.workload_type.clients compute.Clien resources.jobs.*.job_clusters[*].new_cluster.workload_type.clients.jobs bool INPUT STATE resources.jobs.*.job_clusters[*].new_cluster.workload_type.clients.notebooks bool INPUT STATE resources.jobs.*.job_id int64 REMOTE +resources.jobs.*.lifecycle resources.Lifecycle INPUT +resources.jobs.*.lifecycle.prevent_destroy bool INPUT resources.jobs.*.max_concurrent_runs int INPUT STATE resources.jobs.*.modified_status string INPUT resources.jobs.*.name string INPUT STATE @@ -2082,6 +2090,8 @@ resources.pipelines.*.libraries[*].maven.repo string INPUT STATE resources.pipelines.*.libraries[*].notebook *pipelines.NotebookLibrary INPUT STATE resources.pipelines.*.libraries[*].notebook.path string INPUT STATE resources.pipelines.*.libraries[*].whl string INPUT STATE +resources.pipelines.*.lifecycle resources.Lifecycle INPUT +resources.pipelines.*.lifecycle.prevent_destroy bool INPUT resources.pipelines.*.modified_status string INPUT resources.pipelines.*.name string ALL resources.pipelines.*.notifications []pipelines.Notifications INPUT STATE @@ -2389,6 +2399,8 @@ resources.schemas.*.grants[*].principal string INPUT resources.schemas.*.grants[*].privileges []resources.SchemaGrantPrivilege INPUT resources.schemas.*.grants[*].privileges[*] resources.SchemaGrantPrivilege INPUT resources.schemas.*.id string INPUT +resources.schemas.*.lifecycle resources.Lifecycle INPUT +resources.schemas.*.lifecycle.prevent_destroy bool INPUT resources.schemas.*.metastore_id string REMOTE resources.schemas.*.modified_status string INPUT resources.schemas.*.name string ALL @@ -2422,6 +2434,8 @@ resources.sql_warehouses.*.health.summary string REMOTE resources.sql_warehouses.*.id string INPUT REMOTE resources.sql_warehouses.*.instance_profile_arn string ALL resources.sql_warehouses.*.jdbc_url string REMOTE +resources.sql_warehouses.*.lifecycle resources.Lifecycle INPUT +resources.sql_warehouses.*.lifecycle.prevent_destroy bool INPUT resources.sql_warehouses.*.max_num_clusters int ALL resources.sql_warehouses.*.min_num_clusters int ALL resources.sql_warehouses.*.modified_status string INPUT @@ -2494,6 +2508,8 @@ resources.synced_database_tables.*.database_instance_name string ALL resources.synced_database_tables.*.effective_database_instance_name string ALL resources.synced_database_tables.*.effective_logical_database_name string ALL resources.synced_database_tables.*.id string INPUT +resources.synced_database_tables.*.lifecycle resources.Lifecycle INPUT +resources.synced_database_tables.*.lifecycle.prevent_destroy bool INPUT resources.synced_database_tables.*.logical_database_name string ALL resources.synced_database_tables.*.modified_status string INPUT resources.synced_database_tables.*.name string ALL @@ -2527,6 +2543,8 @@ resources.volumes.*.grants[*].principal string INPUT resources.volumes.*.grants[*].privileges []resources.VolumeGrantPrivilege INPUT resources.volumes.*.grants[*].privileges[*] resources.VolumeGrantPrivilege INPUT resources.volumes.*.id string INPUT +resources.volumes.*.lifecycle resources.Lifecycle INPUT +resources.volumes.*.lifecycle.prevent_destroy bool INPUT resources.volumes.*.metastore_id string REMOTE resources.volumes.*.modified_status string INPUT resources.volumes.*.name string ALL diff --git a/bundle/config/resources/apps.go b/bundle/config/resources/apps.go index 77486125e3..177c242e5b 100644 --- 
a/bundle/config/resources/apps.go +++ b/bundle/config/resources/apps.go @@ -24,6 +24,7 @@ type AppPermission struct { type App struct { BaseResource + apps.App // nolint App struct also defines Id and URL field with the same json tag "id" and "url" // SourceCodePath is a required field used by DABs to point to Databricks app source code // on local disk and to the corresponding workspace path during app deployment. @@ -36,8 +37,6 @@ type App struct { Config map[string]any `json:"config,omitempty"` Permissions []AppPermission `json:"permissions,omitempty"` - - apps.App // nolint App struct also defines Id and URL field with the same json tag "id" and "url" } func (a *App) UnmarshalJSON(b []byte) error { diff --git a/bundle/config/resources/base.go b/bundle/config/resources/base.go index 31996d9562..792db28972 100644 --- a/bundle/config/resources/base.go +++ b/bundle/config/resources/base.go @@ -5,4 +5,5 @@ type BaseResource struct { ID string `json:"id,omitempty" bundle:"readonly"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` URL string `json:"url,omitempty" bundle:"internal"` + Lifecycle Lifecycle `json:"lifecycle,omitempty"` } diff --git a/bundle/config/resources/clusters.go b/bundle/config/resources/clusters.go index feb36c272f..a604dc3ee0 100644 --- a/bundle/config/resources/clusters.go +++ b/bundle/config/resources/clusters.go @@ -24,10 +24,9 @@ type ClusterPermission struct { type Cluster struct { BaseResource + compute.ClusterSpec Permissions []ClusterPermission `json:"permissions,omitempty"` - - compute.ClusterSpec } func (s *Cluster) UnmarshalJSON(b []byte) error { diff --git a/bundle/config/resources/dashboard.go b/bundle/config/resources/dashboard.go index 8f20715626..ac88d801c5 100644 --- a/bundle/config/resources/dashboard.go +++ b/bundle/config/resources/dashboard.go @@ -46,10 +46,10 @@ type DashboardConfig struct { type Dashboard struct { BaseResource - Permissions []DashboardPermission `json:"permissions,omitempty"` - DashboardConfig + Permissions []DashboardPermission `json:"permissions,omitempty"` + // FilePath points to the local `.lvdash.json` file containing the dashboard definition. // This is inlined into serialized_dashboard during deployment. The file_path is kept around // as metadata which is needed for `databricks bundle generate dashboard --resource ` to work. 
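The resource struct diffs here mostly move the embedded SDK struct up next to BaseResource, which is cosmetic; the substantive change is in base.go, where BaseResource gains the shared Lifecycle field so every resource type picks up `lifecycle.prevent_destroy` without per-resource plumbing. A self-contained sketch of the embedding pattern, using hypothetical stand-ins for the real SDK types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins for resources.Lifecycle and resources.BaseResource.
type Lifecycle struct {
	PreventDestroy bool `json:"prevent_destroy,omitempty"`
}

type BaseResource struct {
	ID        string    `json:"id,omitempty"`
	Lifecycle Lifecycle `json:"lifecycle,omitempty"`
}

// Stand-in for an embedded SDK payload such as pipelines.CreatePipeline.
type CreatePipeline struct {
	Name string `json:"name,omitempty"`
}

// Embedding promotes fields from both structs, so the YAML/JSON keys
// `id`, `lifecycle`, and `name` all resolve on Pipeline directly.
type Pipeline struct {
	BaseResource
	CreatePipeline
}

func main() {
	var p Pipeline
	p.Name = "test-pipeline"
	p.Lifecycle.PreventDestroy = true
	b, _ := json.Marshal(p)
	fmt.Println(string(b)) // {"lifecycle":{"prevent_destroy":true},"name":"test-pipeline"}
}
```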
diff --git a/bundle/config/resources/database_catalog.go b/bundle/config/resources/database_catalog.go index b248d6eb19..0de0fa4571 100644 --- a/bundle/config/resources/database_catalog.go +++ b/bundle/config/resources/database_catalog.go @@ -12,7 +12,6 @@ import ( type DatabaseCatalog struct { BaseResource - database.DatabaseCatalog } diff --git a/bundle/config/resources/database_instance.go b/bundle/config/resources/database_instance.go index c3f6c64101..75ebb17a12 100644 --- a/bundle/config/resources/database_instance.go +++ b/bundle/config/resources/database_instance.go @@ -24,10 +24,9 @@ type DatabaseInstancePermission struct { type DatabaseInstance struct { BaseResource + database.DatabaseInstance Permissions []DatabaseInstancePermission `json:"permissions,omitempty"` - - database.DatabaseInstance } func (d *DatabaseInstance) Exists(ctx context.Context, w *databricks.WorkspaceClient, name string) (bool, error) { diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index 29bb95cce4..f3b8e9405e 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -25,10 +25,9 @@ type JobPermission struct { type Job struct { BaseResource + jobs.JobSettings Permissions []JobPermission `json:"permissions,omitempty"` - - jobs.JobSettings } func (j *Job) UnmarshalJSON(b []byte) error { diff --git a/bundle/config/resources/lifecycle.go b/bundle/config/resources/lifecycle.go new file mode 100644 index 0000000000..c3de7ce8ea --- /dev/null +++ b/bundle/config/resources/lifecycle.go @@ -0,0 +1,8 @@ +package resources + +// Lifecycle is a struct that contains the lifecycle settings for a resource. +// It controls the behavior of the resource when it is deployed or destroyed. +type Lifecycle struct { + // Lifecycle setting to prevent the resource from being destroyed. + PreventDestroy bool `json:"prevent_destroy,omitempty"` +} diff --git a/bundle/config/resources/mlflow_experiment.go b/bundle/config/resources/mlflow_experiment.go index f6916edf90..8a9cc52d05 100644 --- a/bundle/config/resources/mlflow_experiment.go +++ b/bundle/config/resources/mlflow_experiment.go @@ -24,10 +24,9 @@ type MlflowExperimentPermission struct { type MlflowExperiment struct { BaseResource + ml.Experiment Permissions []MlflowExperimentPermission `json:"permissions,omitempty"` - - ml.Experiment } func (s *MlflowExperiment) UnmarshalJSON(b []byte) error { diff --git a/bundle/config/resources/mlflow_model.go b/bundle/config/resources/mlflow_model.go index 76bf00bffe..ffa88f4d21 100644 --- a/bundle/config/resources/mlflow_model.go +++ b/bundle/config/resources/mlflow_model.go @@ -24,10 +24,9 @@ type MlflowModelPermission struct { type MlflowModel struct { BaseResource + ml.CreateModelRequest Permissions []MlflowModelPermission `json:"permissions,omitempty"` - - ml.CreateModelRequest } func (s *MlflowModel) UnmarshalJSON(b []byte) error { diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go index 87a67787d3..fc597db51c 100644 --- a/bundle/config/resources/model_serving_endpoint.go +++ b/bundle/config/resources/model_serving_endpoint.go @@ -25,13 +25,13 @@ type ModelServingEndpointPermission struct { type ModelServingEndpoint struct { BaseResource - // This is a resource agnostic implementation of permissions for ACLs. - // Implementation could be different based on the resource type. 
- Permissions []ModelServingEndpointPermission `json:"permissions,omitempty"` - // This represents the input args for terraform, and will get converted // to a HCL representation for CRUD serving.CreateServingEndpoint + + // This is a resource agnostic implementation of permissions for ACLs. + // Implementation could be different based on the resource type. + Permissions []ModelServingEndpointPermission `json:"permissions,omitempty"` } func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error { diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index d2c60c79f2..b287dc0a12 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -24,9 +24,9 @@ type PipelinePermission struct { type Pipeline struct { BaseResource - Permissions []PipelinePermission `json:"permissions,omitempty"` - pipelines.CreatePipeline //nolint CreatePipeline also defines Id field with the same json tag "id" + + Permissions []PipelinePermission `json:"permissions,omitempty"` } func (p *Pipeline) UnmarshalJSON(b []byte) error { diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index 44e27dfebe..f373676d45 100644 --- a/bundle/config/resources/quality_monitor.go +++ b/bundle/config/resources/quality_monitor.go @@ -14,11 +14,11 @@ import ( type QualityMonitor struct { BaseResource - // The table name is a required field but not included as a JSON field in [catalog.CreateMonitor]. - TableName string `json:"table_name"` - // This struct defines the creation payload for a monitor. catalog.CreateMonitor + + // The table name is a required field but not included as a JSON field in [catalog.CreateMonitor]. + TableName string `json:"table_name"` } func (s *QualityMonitor) UnmarshalJSON(b []byte) error { diff --git a/bundle/config/resources/registered_model.go b/bundle/config/resources/registered_model.go index a5f568ece5..c8d82d08f1 100644 --- a/bundle/config/resources/registered_model.go +++ b/bundle/config/resources/registered_model.go @@ -14,13 +14,13 @@ import ( type RegisteredModel struct { BaseResource - // This is a resource agnostic implementation of grants. - // Implementation could be different based on the resource type. - Grants []Grant `json:"grants,omitempty"` - // This represents the input args for terraform, and will get converted // to a HCL representation for CRUD catalog.CreateRegisteredModelRequest + + // This is a resource agnostic implementation of grants. + // Implementation could be different based on the resource type. + Grants []Grant `json:"grants,omitempty"` } func (s *RegisteredModel) UnmarshalJSON(b []byte) error { diff --git a/bundle/config/resources/schema.go b/bundle/config/resources/schema.go index 79320baf85..af49b5dbe2 100644 --- a/bundle/config/resources/schema.go +++ b/bundle/config/resources/schema.go @@ -61,11 +61,9 @@ type SchemaGrant struct { type Schema struct { BaseResource - + catalog.CreateSchema // List of grants to apply on this schema. 
Grants []SchemaGrant `json:"grants,omitempty"` - - catalog.CreateSchema } func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, fullName string) (bool, error) { diff --git a/bundle/config/resources/sql_warehouses.go b/bundle/config/resources/sql_warehouses.go index 04b6fe027b..53302369b8 100644 --- a/bundle/config/resources/sql_warehouses.go +++ b/bundle/config/resources/sql_warehouses.go @@ -22,10 +22,9 @@ type SqlWarehousePermission struct { type SqlWarehouse struct { BaseResource + sql.CreateWarehouseRequest Permissions []SqlWarehousePermission `json:"permissions,omitempty"` - - sql.CreateWarehouseRequest } func (sw *SqlWarehouse) UnmarshalJSON(b []byte) error { diff --git a/bundle/config/resources/synced_database_table.go b/bundle/config/resources/synced_database_table.go index 922b2d4d47..5577fe47b0 100644 --- a/bundle/config/resources/synced_database_table.go +++ b/bundle/config/resources/synced_database_table.go @@ -12,7 +12,6 @@ import ( type SyncedDatabaseTable struct { BaseResource - database.SyncedDatabaseTable } diff --git a/bundle/config/resources/volume.go b/bundle/config/resources/volume.go index 7d171616b2..8c47a6afc4 100644 --- a/bundle/config/resources/volume.go +++ b/bundle/config/resources/volume.go @@ -42,11 +42,10 @@ type VolumeGrant struct { type Volume struct { BaseResource + catalog.CreateVolumeRequestContent // List of grants to apply on this volume. Grants []VolumeGrant `json:"grants,omitempty"` - - catalog.CreateVolumeRequestContent } func (v *Volume) UnmarshalJSON(b []byte) error { diff --git a/bundle/deploy/terraform/lifecycle_test.go b/bundle/deploy/terraform/lifecycle_test.go new file mode 100644 index 0000000000..697a2270cd --- /dev/null +++ b/bundle/deploy/terraform/lifecycle_test.go @@ -0,0 +1,39 @@ +package terraform + +import ( + "context" + "encoding/json" + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/require" +) + +func TestConvertLifecycleForAllResources(t *testing.T) { + supportedResources := config.SupportedResources() + ctx := context.Background() + + for resourceType := range supportedResources { + t.Run(resourceType, func(t *testing.T) { + vin := dyn.NewValue(map[string]dyn.Value{ + "resources": dyn.NewValue(map[string]dyn.Value{ + resourceType: dyn.NewValue(map[string]dyn.Value{ + "test_resource": dyn.NewValue(map[string]dyn.Value{ + "lifecycle": dyn.NewValue(map[string]dyn.Value{ + "prevent_destroy": dyn.NewValue(true, nil), + }, nil), + }, nil), + }, nil), + }, nil), + }, nil) + + tfroot, err := BundleToTerraformWithDynValue(ctx, vin) + require.NoError(t, err) + + bytes, err := json.Marshal(tfroot.Resource) + require.NoError(t, err) + require.Contains(t, string(bytes), `"lifecycle":{"prevent_destroy":true}`) + }) + } +} diff --git a/bundle/deploy/terraform/tfdyn/convert_app.go b/bundle/deploy/terraform/tfdyn/convert_app.go index b3d599f15d..b25d403766 100644 --- a/bundle/deploy/terraform/tfdyn/convert_app.go +++ b/bundle/deploy/terraform/tfdyn/convert_app.go @@ -44,6 +44,11 @@ func (appConverter) Convert(ctx context.Context, key string, vin dyn.Value, out return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. 
out.App[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_cluster.go b/bundle/deploy/terraform/tfdyn/convert_cluster.go index 18819c00fc..e53b22a38d 100644 --- a/bundle/deploy/terraform/tfdyn/convert_cluster.go +++ b/bundle/deploy/terraform/tfdyn/convert_cluster.go @@ -35,6 +35,11 @@ func (clusterConverter) Convert(ctx context.Context, key string, vin dyn.Value, return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. out.Cluster[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_dashboard.go b/bundle/deploy/terraform/tfdyn/convert_dashboard.go index b51f6ae445..eb9260e8ec 100644 --- a/bundle/deploy/terraform/tfdyn/convert_dashboard.go +++ b/bundle/deploy/terraform/tfdyn/convert_dashboard.go @@ -70,6 +70,11 @@ func (dashboardConverter) Convert(ctx context.Context, key string, vin dyn.Value return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. out.Dashboard[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_database_catalog.go b/bundle/deploy/terraform/tfdyn/convert_database_catalog.go index 3aceff5e88..716c3baf81 100644 --- a/bundle/deploy/terraform/tfdyn/convert_database_catalog.go +++ b/bundle/deploy/terraform/tfdyn/convert_database_catalog.go @@ -18,6 +18,13 @@ func (d databaseCatalogConverter) Convert(ctx context.Context, key string, vin d for _, diag := range diags { log.Debugf(ctx, "database Catalog normalization diagnostic: %s", diag.Summary) } + + var err error + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + out.DatabaseDatabaseCatalog[key] = vout.AsAny() return nil diff --git a/bundle/deploy/terraform/tfdyn/convert_database_instance.go b/bundle/deploy/terraform/tfdyn/convert_database_instance.go index a0b4dfc768..1dfb4dc4c9 100644 --- a/bundle/deploy/terraform/tfdyn/convert_database_instance.go +++ b/bundle/deploy/terraform/tfdyn/convert_database_instance.go @@ -27,6 +27,11 @@ func (d databaseInstanceConverter) Convert(ctx context.Context, key string, vin return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + out.DatabaseInstance[key] = vout.AsAny() // Configure permissions for this resource. diff --git a/bundle/deploy/terraform/tfdyn/convert_experiment.go b/bundle/deploy/terraform/tfdyn/convert_experiment.go index 0c129181f2..a741b4dec4 100644 --- a/bundle/deploy/terraform/tfdyn/convert_experiment.go +++ b/bundle/deploy/terraform/tfdyn/convert_experiment.go @@ -28,6 +28,11 @@ func (experimentConverter) Convert(ctx context.Context, key string, vin dyn.Valu return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. out.MlflowExperiment[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_job.go b/bundle/deploy/terraform/tfdyn/convert_job.go index 21e7b4aca3..c9bd6c427f 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job.go +++ b/bundle/deploy/terraform/tfdyn/convert_job.go @@ -234,6 +234,11 @@ func (jobConverter) Convert(ctx context.Context, key string, vin dyn.Value, out return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. 
out.Job[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_model.go b/bundle/deploy/terraform/tfdyn/convert_model.go index f5d7d489b6..722f3aa636 100644 --- a/bundle/deploy/terraform/tfdyn/convert_model.go +++ b/bundle/deploy/terraform/tfdyn/convert_model.go @@ -28,6 +28,11 @@ func (modelConverter) Convert(ctx context.Context, key string, vin dyn.Value, ou return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. out.MlflowModel[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go index b67e4dcc34..b9a5d95c14 100644 --- a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go +++ b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go @@ -28,6 +28,11 @@ func (modelServingEndpointConverter) Convert(ctx context.Context, key string, vi return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. out.ModelServing[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_pipeline.go b/bundle/deploy/terraform/tfdyn/convert_pipeline.go index d2df60fa28..944ce0858c 100644 --- a/bundle/deploy/terraform/tfdyn/convert_pipeline.go +++ b/bundle/deploy/terraform/tfdyn/convert_pipeline.go @@ -43,6 +43,11 @@ func (pipelineConverter) Convert(ctx context.Context, key string, vin dyn.Value, return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. out.Pipeline[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_quality_monitor.go b/bundle/deploy/terraform/tfdyn/convert_quality_monitor.go index 341df7c220..3e78bacc70 100644 --- a/bundle/deploy/terraform/tfdyn/convert_quality_monitor.go +++ b/bundle/deploy/terraform/tfdyn/convert_quality_monitor.go @@ -26,6 +26,11 @@ func (qualityMonitorConverter) Convert(ctx context.Context, key string, vin dyn. return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. out.QualityMonitor[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_registered_model.go b/bundle/deploy/terraform/tfdyn/convert_registered_model.go index 20aa596f2c..49e05b47e5 100644 --- a/bundle/deploy/terraform/tfdyn/convert_registered_model.go +++ b/bundle/deploy/terraform/tfdyn/convert_registered_model.go @@ -28,6 +28,11 @@ func (registeredModelConverter) Convert(ctx context.Context, key string, vin dyn return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. out.RegisteredModel[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_schema.go b/bundle/deploy/terraform/tfdyn/convert_schema.go index b5e6a88c0d..33fd8aab04 100644 --- a/bundle/deploy/terraform/tfdyn/convert_schema.go +++ b/bundle/deploy/terraform/tfdyn/convert_schema.go @@ -36,6 +36,11 @@ func (schemaConverter) Convert(ctx context.Context, key string, vin dyn.Value, o return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. 
out.Schema[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_secret_scope.go b/bundle/deploy/terraform/tfdyn/convert_secret_scope.go index 97ebce7bba..880ed27ce4 100644 --- a/bundle/deploy/terraform/tfdyn/convert_secret_scope.go +++ b/bundle/deploy/terraform/tfdyn/convert_secret_scope.go @@ -54,6 +54,12 @@ func (s secretScopeConverter) Convert(ctx context.Context, key string, vin dyn.V for _, diag := range diags { log.Debugf(ctx, "secret scope normalization diagnostic: %s", diag.Summary) } + + vout, err := convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + out.SecretScope[key] = vout.AsAny() // Configure permissions for this resource diff --git a/bundle/deploy/terraform/tfdyn/convert_sql_warehouse.go b/bundle/deploy/terraform/tfdyn/convert_sql_warehouse.go index a4f489d553..0107ba2460 100644 --- a/bundle/deploy/terraform/tfdyn/convert_sql_warehouse.go +++ b/bundle/deploy/terraform/tfdyn/convert_sql_warehouse.go @@ -28,6 +28,11 @@ func (sqlWarehouseConverter) Convert(ctx context.Context, key string, vin dyn.Va return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. out.SqlEndpoint[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/convert_synced_database_table.go b/bundle/deploy/terraform/tfdyn/convert_synced_database_table.go index 8d485e7fdf..5e64711629 100644 --- a/bundle/deploy/terraform/tfdyn/convert_synced_database_table.go +++ b/bundle/deploy/terraform/tfdyn/convert_synced_database_table.go @@ -18,6 +18,12 @@ func (s syncedDatabaseTableConverter) Convert(ctx context.Context, key string, v for _, diag := range diags { log.Debugf(ctx, "synced database table normalization diagnostic: %s", diag.Summary) } + + var err error + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } out.DatabaseSyncedDatabaseTable[key] = vout.AsAny() return nil diff --git a/bundle/deploy/terraform/tfdyn/convert_volume.go b/bundle/deploy/terraform/tfdyn/convert_volume.go index 4211e1f9e1..287ddee0c6 100644 --- a/bundle/deploy/terraform/tfdyn/convert_volume.go +++ b/bundle/deploy/terraform/tfdyn/convert_volume.go @@ -28,6 +28,11 @@ func (volumeConverter) Convert(ctx context.Context, key string, vin dyn.Value, o return err } + vout, err = convertLifecycle(ctx, vout, vin.Get("lifecycle")) + if err != nil { + return err + } + // Add the converted resource to the output. 
out.Volume[key] = vout.AsAny() diff --git a/bundle/deploy/terraform/tfdyn/lifecycle.go b/bundle/deploy/terraform/tfdyn/lifecycle.go new file mode 100644 index 0000000000..1600cdef2d --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/lifecycle.go @@ -0,0 +1,20 @@ +package tfdyn + +import ( + "context" + + "github.com/databricks/cli/libs/dyn" +) + +func convertLifecycle(ctx context.Context, vout, vLifecycle dyn.Value) (dyn.Value, error) { + if !vLifecycle.IsValid() { + return vout, nil + } + + vout, err := dyn.Set(vout, "lifecycle", vLifecycle) + if err != nil { + return dyn.InvalidValue, err + } + + return vout, nil +} diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index 1d9d4e48a9..08d072226f 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -511,6 +511,10 @@ github.com/databricks/cli/bundle/config/resources.JobPermission: "user_name": "description": |- PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.Lifecycle: + "prevent_destroy": + "description": |- + Lifecycle setting to prevent the resource from being destroyed. github.com/databricks/cli/bundle/config/resources.MlflowExperimentPermission: "group_name": "description": |- @@ -595,6 +599,9 @@ github.com/databricks/cli/bundle/config/resources.SecretScope: "keyvault_metadata": "description": |- The metadata for the secret scope if the `backend_type` is `AZURE_KEYVAULT` + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "name": "description": |- Scope name requested by the user. Scope names are unique. @@ -640,6 +647,9 @@ github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: "effective_logical_database_name": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "logical_database_name": "description": |- PLACEHOLDER diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index 2e49c2d516..35593f2e7f 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -14,6 +14,9 @@ github.com/databricks/cli/bundle/config/resources.App: "effective_budget_policy_id": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "oauth2_app_client_id": "description": |- PLACEHOLDER @@ -84,6 +87,9 @@ github.com/databricks/cli/bundle/config/resources.Cluster: "kind": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "permissions": "description": |- PLACEHOLDER @@ -140,6 +146,9 @@ github.com/databricks/cli/bundle/config/resources.Dashboard: "file_path": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "lifecycle_state": "description": |- The state of the dashboard resource. Used for tracking trashed status. 
@@ -186,10 +195,16 @@ github.com/databricks/cli/bundle/config/resources.DatabaseCatalog: "create_database_if_not_exists": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "uid": "description": |- PLACEHOLDER github.com/databricks/cli/bundle/config/resources.DatabaseInstance: + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "permissions": "description": |- PLACEHOLDER @@ -224,6 +239,9 @@ github.com/databricks/cli/bundle/config/resources.Job: "health": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "permissions": "description": |- PLACEHOLDER @@ -258,6 +276,9 @@ github.com/databricks/cli/bundle/config/resources.MlflowExperiment: group_name: users description: MLflow experiment used to track runs ``` + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "permissions": "description": |- PLACEHOLDER @@ -274,6 +295,9 @@ github.com/databricks/cli/bundle/config/resources.MlflowModel: "_": "markdown_description": |- The model resource allows you to define [legacy models](/api/workspace/modelregistry/createmodel) in bundles. Databricks recommends you use Unity Catalog [registered models](#registered-model) instead. + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "permissions": "description": |- PLACEHOLDER @@ -319,6 +343,9 @@ github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint: "description": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "permissions": "description": |- PLACEHOLDER @@ -358,6 +385,9 @@ github.com/databricks/cli/bundle/config/resources.Pipeline: "dry_run": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "permissions": "description": |- PLACEHOLDER @@ -406,6 +436,9 @@ github.com/databricks/cli/bundle/config/resources.QualityMonitor: "inference_log": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "table_name": "description": |- PLACEHOLDER @@ -432,6 +465,9 @@ github.com/databricks/cli/bundle/config/resources.RegisteredModel: "grants": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. 
github.com/databricks/cli/bundle/config/resources.Schema: "_": "markdown_description": |- @@ -482,6 +518,9 @@ github.com/databricks/cli/bundle/config/resources.Schema: "grants": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "properties": "description": |- PLACEHOLDER @@ -529,6 +568,9 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: Configures whether the warehouse should use Photon optimized clusters. Defaults to true. + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "permissions": "description": |- PLACEHOLDER @@ -573,6 +615,9 @@ github.com/databricks/cli/bundle/config/resources.Volume: "grants": "description": |- PLACEHOLDER + "lifecycle": + "description": |- + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "volume_type": "description": |- PLACEHOLDER diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 10a723891d..f2661c5a2d 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -52,6 +52,11 @@ func approvalForDeploy(ctx context.Context, b *bundle.Bundle) (bool, error) { return false, err } + err = checkForPreventDestroy(b, actions) + if err != nil { + return false, err + } + types := []deployplan.ActionType{deployplan.ActionTypeRecreate, deployplan.ActionTypeDelete} schemaActions := deployplan.FilterGroup(actions, "schemas", types...) dltActions := deployplan.FilterGroup(actions, "pipelines", types...) diff --git a/bundle/phases/destroy.go b/bundle/phases/destroy.go index 90c65520b6..619cfeb9b5 100644 --- a/bundle/phases/destroy.go +++ b/bundle/phases/destroy.go @@ -65,6 +65,11 @@ func approvalForDestroy(ctx context.Context, b *bundle.Bundle) (bool, error) { return false, err } + err = checkForPreventDestroy(b, deleteActions) + if err != nil { + return false, err + } + if len(deleteActions) > 0 { cmdio.LogString(ctx, "The following resources will be deleted:") for _, a := range deleteActions { diff --git a/bundle/phases/plan.go b/bundle/phases/plan.go index 2b0f09e2f1..65e826118a 100644 --- a/bundle/phases/plan.go +++ b/bundle/phases/plan.go @@ -2,14 +2,18 @@ package phases import ( "context" + "errors" + "fmt" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/bundle/deployplan" "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/bundle/statemgmt" "github.com/databricks/cli/bundle/trampoline" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/logdiag" ) @@ -46,3 +50,32 @@ func deployPrepare(ctx context.Context, b *bundle.Bundle) map[string][]libraries return libs } + +// checkForPreventDestroy checks if the resource has lifecycle.prevent_destroy set, but the plan calls for this resource to be recreated or destroyed. +// If it does, it returns an error. 
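+// Resources whose configuration no longer contains a lifecycle block (for
+// example, because their YAML file was removed from the bundle) are skipped,
+// so deleting a resource by removing it from the configuration still works.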
+func checkForPreventDestroy(b *bundle.Bundle, actions []deployplan.Action) error {
+	root := b.Config.Value()
+	var errs []error
+	for _, action := range actions {
+		if action.ActionType != deployplan.ActionTypeRecreate && action.ActionType != deployplan.ActionTypeDelete {
+			continue
+		}
+
+		path := dyn.NewPath(dyn.Key("resources"), dyn.Key(action.Group), dyn.Key(action.Key), dyn.Key("lifecycle"), dyn.Key("prevent_destroy"))
+		// If there is no prevent_destroy for this resource, skip it
+		preventDestroyV, err := dyn.GetByPath(root, path)
+		if err != nil {
+			continue
+		}
+
+		preventDestroy, ok := preventDestroyV.AsBool()
+		if !ok {
+			return fmt.Errorf("internal error: prevent_destroy is not a boolean for %s.%s", action.Group, action.Key)
+		}
+		if preventDestroy {
+			errs = append(errs, fmt.Errorf("resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be recreated or destroyed. To avoid this error, disable lifecycle.prevent_destroy for %s.%s", action.Key, action.Group, action.Key))
+		}
+	}
+
+	return errors.Join(errs...)
+}
diff --git a/bundle/phases/plan_test.go b/bundle/phases/plan_test.go
new file mode 100644
index 0000000000..c3ccc460ed
--- /dev/null
+++ b/bundle/phases/plan_test.go
@@ -0,0 +1,64 @@
+package phases
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/bundle/config"
+	"github.com/databricks/cli/bundle/deployplan"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/stretchr/testify/require"
+)
+
+func TestCheckPreventDestroyForAllResources(t *testing.T) {
+	supportedResources := config.SupportedResources()
+
+	for resourceType := range supportedResources {
+		t.Run(resourceType, func(t *testing.T) {
+			b := &bundle.Bundle{
+				Config: config.Root{
+					Bundle: config.Bundle{
+						Name: "test",
+					},
+					Resources: config.Resources{},
+				},
+			}
+
+			ctx := context.Background()
+			bundle.ApplyFuncContext(ctx, b, func(ctx context.Context, b *bundle.Bundle) {
+				// Use Mutate to set the configuration dynamically
+				err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) {
+					// Set the resource with lifecycle.prevent_destroy = true
+					return dyn.Set(v, "resources", dyn.NewValue(map[string]dyn.Value{
+						resourceType: dyn.NewValue(map[string]dyn.Value{
+							"test_resource": dyn.NewValue(map[string]dyn.Value{
+								"lifecycle": dyn.NewValue(map[string]dyn.Value{
+									"prevent_destroy": dyn.NewValue(true, nil),
+								}, nil),
+							}, nil),
+						}, nil),
+					}, nil))
+				})
+				require.NoError(t, err)
+			})
+
+			actions := []deployplan.Action{
+				{
+					ResourceNode: deployplan.ResourceNode{
+						Group: resourceType,
+						Key:   "test_resource",
+					},
+					ActionType: deployplan.ActionTypeRecreate,
+				},
+			}
+
+			err := checkForPreventDestroy(b, actions)
+			require.Error(t, err)
+			require.Contains(t, err.Error(), "resource test_resource has lifecycle.prevent_destroy set")
+			require.Contains(t, err.Error(), "but the plan calls for this resource to be recreated or destroyed")
+			require.Contains(t, err.Error(), fmt.Sprintf("disable lifecycle.prevent_destroy for %s.test_resource", resourceType))
+		})
+	}
+}
diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json
index 0bea336a7d..4a5200be0c 100644
--- a/bundle/schema/jsonschema.json
+++ b/bundle/schema/jsonschema.json
@@ -107,6 +107,10 @@
           "description": "The unique identifier of the app.",
           "$ref": "#/$defs/string"
         },
+        "lifecycle": {
+          "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. 
It controls the behavior of the resource when it is deployed or destroyed.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" + }, "name": { "description": "The name of the app. The name must contain only lowercase alphanumeric characters and hyphens.\nIt must be unique within the workspace.", "$ref": "#/$defs/string" @@ -291,6 +295,10 @@ "kind": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.Kind" }, + "lifecycle": { + "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" + }, "node_type_id": { "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.", "$ref": "#/$defs/string" @@ -426,6 +434,10 @@ "file_path": { "$ref": "#/$defs/string" }, + "lifecycle": { + "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" + }, "lifecycle_state": { "description": "The state of the dashboard resource. Used for tracking trashed status.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState" @@ -525,6 +537,10 @@ "description": "The name of the database (in a instance) associated with the catalog.", "$ref": "#/$defs/string" }, + "lifecycle": { + "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" + }, "name": { "description": "The name of the catalog in UC.", "$ref": "#/$defs/string" @@ -596,6 +612,10 @@ "description": "Whether to enable secondaries to serve read-only traffic. Defaults to false.", "$ref": "#/$defs/bool" }, + "lifecycle": { + "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.", + "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" + }, "name": { "description": "The name of the instance. This is the unique identifier for the instance.", "$ref": "#/$defs/string" @@ -758,6 +778,10 @@ "description": "A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.", "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/jobs.JobCluster" }, + "lifecycle": { + "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. 
@@ -896,6 +938,10 @@
         "description": "Last update time",
         "$ref": "#/$defs/int64"
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "lifecycle_stage": {
         "description": "Current life cycle stage of the experiment: \"active\" or \"deleted\".\nDeleted experiments are not returned by APIs.",
         "$ref": "#/$defs/string"
@@ -975,6 +1021,10 @@
         "description": "Optional description for registered model.",
         "$ref": "#/$defs/string"
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "name": {
         "description": "Register models under this name",
         "$ref": "#/$defs/string"
@@ -1070,6 +1120,10 @@
         "description": "Email notification settings.",
         "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.EmailNotifications"
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "name": {
         "description": "The name of the serving endpoint. This field is required and must be unique across a Databricks workspace.\nAn endpoint name can consist of alphanumeric characters, dashes, and underscores.",
         "$ref": "#/$defs/string"
@@ -1255,6 +1309,10 @@
         "description": "Libraries or code needed by this deployment.",
         "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineLibrary"
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "name": {
         "description": "Friendly identifier for this pipeline.",
         "$ref": "#/$defs/string"
@@ -1399,6 +1457,10 @@
         "description": "[Create:ERR Update:IGN] The latest error message for a monitor failure.",
         "$ref": "#/$defs/string"
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "notifications": {
         "description": "[Create:OPT Update:OPT] Field for specifying notification settings.",
         "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorNotifications"
@@ -1465,6 +1527,10 @@
       "grants": {
         "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Grant"
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "name": {
         "description": "The name of the registered model",
         "$ref": "#/$defs/string"
@@ -1508,6 +1574,10 @@
       "grants": {
         "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.SchemaGrant"
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "name": {
         "description": "Name of schema, relative to parent catalog.",
         "$ref": "#/$defs/string"
@@ -1596,6 +1666,10 @@
         "description": "The metadata for the secret scope if the `backend_type` is `AZURE_KEYVAULT`",
         "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/workspace.AzureKeyVaultSecretScopeMetadata"
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "name": {
         "description": "Scope name requested by the user. Scope names are unique.",
         "$ref": "#/$defs/string"
@@ -1700,6 +1774,10 @@
         "deprecationMessage": "This field is deprecated",
         "deprecated": true
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "max_num_clusters": {
         "description": "Maximum number of clusters that the autoscaler will create to handle concurrent queries.\n\nSupported values:\n - Must be \u003e= min_num_clusters\n - Must be \u003c= 30.\n\nDefaults to min_clusters if unset.",
         "$ref": "#/$defs/int"
@@ -1798,6 +1876,10 @@
       "effective_logical_database_name": {
         "$ref": "#/$defs/string"
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "logical_database_name": {
         "$ref": "#/$defs/string"
       },
@@ -1838,6 +1920,10 @@
       "grants": {
         "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.VolumeGrant"
       },
+      "lifecycle": {
+        "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.",
+        "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle"
+      },
       "name": {
         "description": "The name of the volume",
         "$ref": "#/$defs/string"
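The `resources.Lifecycle` definition added above is a oneOf union: either an inline object with a boolean `prevent_destroy`, or a `${var...}` string that defers the value to a bundle variable. A minimal sketch of what that union admits — the third-party `jsonschema` package, the inlined `{"type": "boolean"}` stand-in for the `#/$defs/bool` reference, and the sample values are illustrative assumptions, not part of this change:

```python
# Illustrative only: validates sample values against a copy of the
# resources.Lifecycle union from the schema diff above. The "jsonschema"
# dependency and the inlined boolean type (replacing the "#/$defs/bool"
# $ref) are assumptions made to keep the sketch self-contained.
import jsonschema

LIFECYCLE_SCHEMA = {
    "oneOf": [
        {
            "type": "object",
            "properties": {"prevent_destroy": {"type": "boolean"}},
            "additionalProperties": False,
        },
        {
            # Same pattern as in the schema: a ${var...} variable reference.
            "type": "string",
            "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}",
        },
    ]
}

# Both the inline object form and a variable reference validate:
for value in ({"prevent_destroy": True}, "${var.protect_prod}"):
    jsonschema.validate(value, LIFECYCLE_SCHEMA)

# A misspelled key is rejected because additionalProperties is false:
try:
    jsonschema.validate({"prevent_destroys": True}, LIFECYCLE_SCHEMA)
    raise AssertionError("expected a ValidationError")
except jsonschema.ValidationError:
    pass
```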
diff --git a/experimental/python/databricks/bundles/jobs/__init__.py b/experimental/python/databricks/bundles/jobs/__init__.py
index dec48ac51c..3c3df3a693 100644
--- a/experimental/python/databricks/bundles/jobs/__init__.py
+++ b/experimental/python/databricks/bundles/jobs/__init__.py
@@ -134,6 +134,9 @@
     "Library",
     "LibraryDict",
     "LibraryParam",
+    "Lifecycle",
+    "LifecycleDict",
+    "LifecycleParam",
     "LocalFileInfo",
     "LocalFileInfoDict",
     "LocalFileInfoParam",
@@ -482,6 +485,11 @@
     JobsHealthRulesParam,
 )
 from databricks.bundles.jobs._models.library import Library, LibraryDict, LibraryParam
+from databricks.bundles.jobs._models.lifecycle import (
+    Lifecycle,
+    LifecycleDict,
+    LifecycleParam,
+)
 from databricks.bundles.jobs._models.local_file_info import (
     LocalFileInfo,
     LocalFileInfoDict,
diff --git a/experimental/python/databricks/bundles/jobs/_models/job.py b/experimental/python/databricks/bundles/jobs/_models/job.py
index e66e18f40b..a751c6c43e 100644
--- a/experimental/python/databricks/bundles/jobs/_models/job.py
+++ b/experimental/python/databricks/bundles/jobs/_models/job.py
@@ -44,6 +44,7 @@
     JobsHealthRules,
     JobsHealthRulesParam,
 )
+from databricks.bundles.jobs._models.lifecycle import Lifecycle, LifecycleParam
 from databricks.bundles.jobs._models.performance_target import (
     PerformanceTarget,
     PerformanceTargetParam,
@@ -116,6 +117,11 @@ class Job(Resource):
     A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.
     """
 
+    lifecycle: VariableOrOptional[Lifecycle] = None
+    """
+    Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.
+    """
+
     max_concurrent_runs: VariableOrOptional[int] = None
     """
     An optional maximum allowed number of concurrent runs of the job.
@@ -256,6 +262,11 @@ class JobDict(TypedDict, total=False):
     A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.
     """
 
+    lifecycle: VariableOrOptional[LifecycleParam]
+    """
+    Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.
+    """
+
     max_concurrent_runs: VariableOrOptional[int]
     """
     An optional maximum allowed number of concurrent runs of the job.
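With the `lifecycle` field added to `Job` and `JobDict`, a resource can opt into the `checkForPreventDestroy` guard from the experimental Python API. A hedged sketch of the intended usage — the job name and the variable name are illustrative, not taken from this change:

```python
# Illustrative usage of the new field via the experimental Python API.
# "nightly-etl" is a hypothetical job name; Job and Lifecycle are the
# classes extended/added in this diff.
from databricks.bundles.jobs import Job, Lifecycle

nightly_etl = Job(
    name="nightly-etl",
    lifecycle=Lifecycle(prevent_destroy=True),
)

# With prevent_destroy set, a deploy or destroy whose plan would recreate
# or delete this job should fail with the error from checkForPreventDestroy
# instead of proceeding.
assert nightly_etl.lifecycle.prevent_destroy is True
```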
diff --git a/experimental/python/databricks/bundles/jobs/_models/lifecycle.py b/experimental/python/databricks/bundles/jobs/_models/lifecycle.py
new file mode 100644
index 0000000000..c934967f37
--- /dev/null
+++ b/experimental/python/databricks/bundles/jobs/_models/lifecycle.py
@@ -0,0 +1,38 @@
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, TypedDict
+
+from databricks.bundles.core._transform import _transform
+from databricks.bundles.core._transform_to_json import _transform_to_json_value
+from databricks.bundles.core._variable import VariableOrOptional
+
+if TYPE_CHECKING:
+    from typing_extensions import Self
+
+
+@dataclass(kw_only=True)
+class Lifecycle:
+    """"""
+
+    prevent_destroy: VariableOrOptional[bool] = None
+    """
+    Lifecycle setting to prevent the resource from being destroyed.
+    """
+
+    @classmethod
+    def from_dict(cls, value: "LifecycleDict") -> "Self":
+        return _transform(cls, value)
+
+    def as_dict(self) -> "LifecycleDict":
+        return _transform_to_json_value(self)  # type:ignore
+
+
+class LifecycleDict(TypedDict, total=False):
+    """"""
+
+    prevent_destroy: VariableOrOptional[bool]
+    """
+    Lifecycle setting to prevent the resource from being destroyed.
+    """
+
+
+LifecycleParam = LifecycleDict | Lifecycle
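Because `LifecycleParam = LifecycleDict | Lifecycle`, the dict form is accepted anywhere the dataclass is, and `from_dict`/`as_dict` convert between the two. A small round-trip sketch, assuming `_transform_to_json_value` serializes the single populated field as shown:

```python
# Round-trip between the TypedDict form and the dataclass form.
from databricks.bundles.jobs import Lifecycle

lc = Lifecycle.from_dict({"prevent_destroy": True})
assert lc.prevent_destroy is True

# Assumed output shape: the one populated field serialized back out.
assert lc.as_dict() == {"prevent_destroy": True}
```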
+ """ + + +LifecycleParam = LifecycleDict | Lifecycle diff --git a/experimental/python/databricks/bundles/pipelines/_models/pipeline.py b/experimental/python/databricks/bundles/pipelines/_models/pipeline.py index 817689f338..919238e743 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/pipeline.py +++ b/experimental/python/databricks/bundles/pipelines/_models/pipeline.py @@ -25,6 +25,10 @@ IngestionPipelineDefinition, IngestionPipelineDefinitionParam, ) +from databricks.bundles.pipelines._models.lifecycle import ( + Lifecycle, + LifecycleParam, +) from databricks.bundles.pipelines._models.notifications import ( Notifications, NotificationsParam, @@ -143,6 +147,11 @@ class Pipeline(Resource): Libraries or code needed by this deployment. """ + lifecycle: VariableOrOptional[Lifecycle] = None + """ + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. + """ + name: VariableOrOptional[str] = None """ Friendly identifier for this pipeline. @@ -301,6 +310,11 @@ class PipelineDict(TypedDict, total=False): Libraries or code needed by this deployment. """ + lifecycle: VariableOrOptional[LifecycleParam] + """ + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. + """ + name: VariableOrOptional[str] """ Friendly identifier for this pipeline. diff --git a/experimental/python/databricks/bundles/schemas/__init__.py b/experimental/python/databricks/bundles/schemas/__init__.py index 69e9d4a279..d4d0fa33a3 100644 --- a/experimental/python/databricks/bundles/schemas/__init__.py +++ b/experimental/python/databricks/bundles/schemas/__init__.py @@ -1,4 +1,7 @@ __all__ = [ + "Lifecycle", + "LifecycleDict", + "LifecycleParam", "Schema", "SchemaDict", "SchemaGrant", @@ -10,6 +13,11 @@ ] +from databricks.bundles.schemas._models.lifecycle import ( + Lifecycle, + LifecycleDict, + LifecycleParam, +) from databricks.bundles.schemas._models.schema import Schema, SchemaDict, SchemaParam from databricks.bundles.schemas._models.schema_grant import ( SchemaGrant, diff --git a/experimental/python/databricks/bundles/schemas/_models/lifecycle.py b/experimental/python/databricks/bundles/schemas/_models/lifecycle.py new file mode 100644 index 0000000000..c934967f37 --- /dev/null +++ b/experimental/python/databricks/bundles/schemas/_models/lifecycle.py @@ -0,0 +1,38 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrOptional + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class Lifecycle: + """""" + + prevent_destroy: VariableOrOptional[bool] = None + """ + Lifecycle setting to prevent the resource from being destroyed. + """ + + @classmethod + def from_dict(cls, value: "LifecycleDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "LifecycleDict": + return _transform_to_json_value(self) # type:ignore + + +class LifecycleDict(TypedDict, total=False): + """""" + + prevent_destroy: VariableOrOptional[bool] + """ + Lifecycle setting to prevent the resource from being destroyed. 
+ """ + + +LifecycleParam = LifecycleDict | Lifecycle diff --git a/experimental/python/databricks/bundles/schemas/_models/schema.py b/experimental/python/databricks/bundles/schemas/_models/schema.py index f3de56b1e8..58975f0474 100644 --- a/experimental/python/databricks/bundles/schemas/_models/schema.py +++ b/experimental/python/databricks/bundles/schemas/_models/schema.py @@ -10,6 +10,7 @@ VariableOrList, VariableOrOptional, ) +from databricks.bundles.schemas._models.lifecycle import Lifecycle, LifecycleParam from databricks.bundles.schemas._models.schema_grant import ( SchemaGrant, SchemaGrantParam, @@ -40,6 +41,11 @@ class Schema(Resource): grants: VariableOrList[SchemaGrant] = field(default_factory=list) + lifecycle: VariableOrOptional[Lifecycle] = None + """ + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. + """ + properties: VariableOrDict[str] = field(default_factory=dict) storage_root: VariableOrOptional[str] = None @@ -75,6 +81,11 @@ class SchemaDict(TypedDict, total=False): grants: VariableOrList[SchemaGrantParam] + lifecycle: VariableOrOptional[LifecycleParam] + """ + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. + """ + properties: VariableOrDict[str] storage_root: VariableOrOptional[str] diff --git a/experimental/python/databricks/bundles/volumes/__init__.py b/experimental/python/databricks/bundles/volumes/__init__.py index 177e6480b4..065713bf6c 100644 --- a/experimental/python/databricks/bundles/volumes/__init__.py +++ b/experimental/python/databricks/bundles/volumes/__init__.py @@ -1,4 +1,7 @@ __all__ = [ + "Lifecycle", + "LifecycleDict", + "LifecycleParam", "Volume", "VolumeDict", "VolumeGrant", @@ -12,6 +15,11 @@ ] +from databricks.bundles.volumes._models.lifecycle import ( + Lifecycle, + LifecycleDict, + LifecycleParam, +) from databricks.bundles.volumes._models.volume import Volume, VolumeDict, VolumeParam from databricks.bundles.volumes._models.volume_grant import ( VolumeGrant, diff --git a/experimental/python/databricks/bundles/volumes/_models/lifecycle.py b/experimental/python/databricks/bundles/volumes/_models/lifecycle.py new file mode 100644 index 0000000000..c934967f37 --- /dev/null +++ b/experimental/python/databricks/bundles/volumes/_models/lifecycle.py @@ -0,0 +1,38 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrOptional + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class Lifecycle: + """""" + + prevent_destroy: VariableOrOptional[bool] = None + """ + Lifecycle setting to prevent the resource from being destroyed. + """ + + @classmethod + def from_dict(cls, value: "LifecycleDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "LifecycleDict": + return _transform_to_json_value(self) # type:ignore + + +class LifecycleDict(TypedDict, total=False): + """""" + + prevent_destroy: VariableOrOptional[bool] + """ + Lifecycle setting to prevent the resource from being destroyed. 
+ """ + + +LifecycleParam = LifecycleDict | Lifecycle diff --git a/experimental/python/databricks/bundles/volumes/_models/volume.py b/experimental/python/databricks/bundles/volumes/_models/volume.py index 65c5c7ab2f..20132cca96 100644 --- a/experimental/python/databricks/bundles/volumes/_models/volume.py +++ b/experimental/python/databricks/bundles/volumes/_models/volume.py @@ -9,6 +9,7 @@ VariableOrList, VariableOrOptional, ) +from databricks.bundles.volumes._models.lifecycle import Lifecycle, LifecycleParam from databricks.bundles.volumes._models.volume_grant import ( VolumeGrant, VolumeGrantParam, @@ -45,6 +46,11 @@ class Volume(Resource): grants: VariableOrList[VolumeGrant] = field(default_factory=list) + lifecycle: VariableOrOptional[Lifecycle] = None + """ + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. + """ + storage_location: VariableOrOptional[str] = None """ The storage location on the cloud @@ -85,6 +91,11 @@ class VolumeDict(TypedDict, total=False): grants: VariableOrList[VolumeGrantParam] + lifecycle: VariableOrOptional[LifecycleParam] + """ + Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. + """ + storage_location: VariableOrOptional[str] """ The storage location on the cloud diff --git a/libs/structwalk/walktype_test.go b/libs/structwalk/walktype_test.go index 0ac241d3b8..e0fb2faebc 100644 --- a/libs/structwalk/walktype_test.go +++ b/libs/structwalk/walktype_test.go @@ -123,7 +123,7 @@ func TestTypeJobSettings(t *testing.T) { func TestTypeRoot(t *testing.T) { testStruct(t, reflect.TypeOf(config.Root{}), - 3800, 4200, // 4001 at the time of the update + 4000, 4300, // 4003 at the time of the update map[string]any{ ".bundle.target": "", `.variables[*].lookup.dashboard`: "",