diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 3461bf6840..8af35ea492 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -6701524136501ef070774942ef5d6e01cfaafb88 \ No newline at end of file +b95c2c6e21bec9551ec7d7d51ddf2dfe390b4522 \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index a23db094e9..b6714c9609 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -10,6 +10,7 @@ ### Dependency updates * Upgrade TF provider to 1.88.0 ([#3529](https://github.com/databricks/cli/pull/3529)) +* Upgrade Go SDK to 0.82.0 ### Bundles * Update default-python template to make DB Connect work out of the box for unit tests, using uv to install dependencies ([#3254](https://github.com/databricks/cli/pull/3254)) diff --git a/acceptance/cmd/workspace/database/update-database-instance/output.txt b/acceptance/cmd/workspace/database/update-database-instance/output.txt index 97eddcdbce..8772dca783 100644 --- a/acceptance/cmd/workspace/database/update-database-instance/output.txt +++ b/acceptance/cmd/workspace/database/update-database-instance/output.txt @@ -7,6 +7,7 @@ Usage: Flags: --capacity string The sku of the instance. + --enable-pg-native-login Whether the instance has PG native password login enabled. --enable-readable-secondaries Whether to enable secondaries to serve read-only traffic. -h, --help help for update-database-instance --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) diff --git a/acceptance/help/output.txt b/acceptance/help/output.txt index d6ed2b16d7..6c454de344 100644 --- a/acceptance/help/output.txt +++ b/acceptance/help/output.txt @@ -150,6 +150,7 @@ Additional Commands: configure Configure authentication help Help about any command labs Manage Databricks Labs installations + tag-policies The Tag Policy API allows you to manage tag policies in Databricks. 
version Retrieve information about the current version of this CLI Flags: diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index 924c3a898c..1d9d4e48a9 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -478,89 +478,6 @@ github.com/databricks/cli/bundle/config/resources.DashboardPermission: "user_name": "description": |- PLACEHOLDER -github.com/databricks/cli/bundle/config/resources.DatabaseCatalog: - "create_database_if_not_exists": - "description": |- - PLACEHOLDER - "database_instance_name": - "description": |- - PLACEHOLDER - "database_name": - "description": |- - PLACEHOLDER - "name": - "description": |- - PLACEHOLDER - "permissions": - "description": |- - PLACEHOLDER - "uid": - "description": |- - PLACEHOLDER -github.com/databricks/cli/bundle/config/resources.DatabaseInstance: - "capacity": - "description": |- - PLACEHOLDER - "child_instance_refs": - "description": |- - PLACEHOLDER - "creation_time": - "description": |- - PLACEHOLDER - "creator": - "description": |- - PLACEHOLDER - "effective_enable_readable_secondaries": - "description": |- - PLACEHOLDER - "effective_node_count": - "description": |- - PLACEHOLDER - "effective_retention_window_in_days": - "description": |- - PLACEHOLDER - "effective_stopped": - "description": |- - PLACEHOLDER - "enable_readable_secondaries": - "description": |- - PLACEHOLDER - "name": - "description": |- - PLACEHOLDER - "node_count": - "description": |- - PLACEHOLDER - "parent_instance_ref": - "description": |- - PLACEHOLDER - "permissions": - "description": |- - PLACEHOLDER - "pg_version": - "description": |- - PLACEHOLDER - "purge_on_delete": - "description": |- - PLACEHOLDER - "read_only_dns": - "description": |- - PLACEHOLDER - "read_write_dns": - "description": |- - PLACEHOLDER - "retention_window_in_days": - "description": |- - PLACEHOLDER - "state": - "description": |- - PLACEHOLDER - "stopped": - "description": |- - PLACEHOLDER - "uid": - "description": |- - PLACEHOLDER github.com/databricks/cli/bundle/config/resources.DatabaseInstancePermission: "group_name": "description": |- diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index 79cfb8fc06..f03f7cc1de 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -275,6 +275,11 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: "creator": "description": |- The email of the creator of the instance. + "effective_enable_pg_native_login": + "description": |- + xref AIP-129. `enable_pg_native_login` is owned by the client, while `effective_enable_pg_native_login` is owned by the server. + `enable_pg_native_login` will only be set in Create/Update response messages if and only if the user provides the field via the request. + `effective_enable_pg_native_login` on the other hand will always bet set in all response messages (Create/Update/Get/List). "effective_enable_readable_secondaries": "description": |- xref AIP-129. `enable_readable_secondaries` is owned by the client, while `effective_enable_readable_secondaries` is owned by the server. @@ -295,6 +300,9 @@ github.com/databricks/cli/bundle/config/resources.DatabaseInstance: xref AIP-129. `stopped` is owned by the client, while `effective_stopped` is owned by the server. `stopped` will only be set in Create/Update response messages if and only if the user provides the field via the request. 
`effective_stopped` on the other hand will always bet set in all response messages (Create/Update/Get/List). + "enable_pg_native_login": + "description": |- + Whether the instance has PG native password login enabled. Defaults to true. "enable_readable_secondaries": "description": |- Whether to enable secondaries to serve read-only traffic. Defaults to false. @@ -784,6 +792,49 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehouse: "warehouse_type": "description": |- Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. +github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: + "_": + "description": |- + Next field marker: 14 + "data_synchronization_status": + "description": |- + Synced Table data synchronization status + "database_instance_name": + "description": |- + Name of the target database instance. This is required when creating synced database tables in standard catalogs. + This is optional when creating synced database tables in registered catalogs. If this field is specified + when creating synced database tables in registered catalogs, the database instance name MUST + match that of the registered catalog (or the request will be rejected). + "effective_database_instance_name": + "description": |- + The name of the database instance that this table is registered to. This field is always returned, and for + tables inside database catalogs is inferred database instance associated with the catalog. + "effective_logical_database_name": + "description": |- + The name of the logical database that this table is registered to. + "logical_database_name": + "description": |- + Target Postgres database object (logical database) name for this table. + + When creating a synced table in a registered Postgres catalog, the + target Postgres database name is inferred to be that of the registered catalog. + If this field is specified in this scenario, the Postgres database name MUST + match that of the registered catalog (or the request will be rejected). + + When creating a synced table in a standard catalog, this field is required. + In this scenario, specifying this field will allow targeting an arbitrary postgres database. + Note that this has implications for the `create_database_objects_is_missing` field in `spec`. + "name": + "description": |- + Full three-part (catalog, schema, table) name of the table. + "spec": + "description": |- + Specification of a synced database table. + "unity_catalog_provisioning_state": + "description": |- + The provisioning state of the synced table entity in Unity Catalog. This is distinct from the + state of the data synchronization pipeline (i.e. the table may be in "ACTIVE" but the pipeline + may be in "PROVISIONING" as it runs asynchronously). github.com/databricks/cli/bundle/config/resources.Volume: "catalog_name": "description": |- @@ -1922,6 +1973,257 @@ github.com/databricks/databricks-sdk-go/service/database.DatabaseInstanceState: UPDATING - |- FAILING_OVER +github.com/databricks/databricks-sdk-go/service/database.DeltaTableSyncInfo: + "delta_commit_timestamp": + "description": |- + The timestamp when the above Delta version was committed in the source Delta table. + Note: This is the Delta commit time, not the time the data was written to the synced table. + "delta_commit_version": + "description": |- + The Delta Lake commit version that was last successfully synced. 
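The annotation entries above introduce the new synced_database_tables bundle resource and its spec fields. Purely as an illustrative sketch (not part of this changeset), a minimal databricks.yml resource could look like the following; the resource key, catalog/schema/table names, and instance name are hypothetical, while the field names follow the schema additions documented above.

# Hypothetical synced database table in a bundle (field names per the schema above).
resources:
  synced_database_tables:
    orders_synced:
      name: main.default.orders_synced              # full three-part UC name of the synced table
      database_instance_name: my-database-instance  # required when targeting a standard catalog
      logical_database_name: orders_db              # target Postgres (logical) database
      spec:
        source_table_full_name: main.default.orders
        primary_key_columns:
          - order_id
        timeseries_key: updated_at
        scheduling_policy: TRIGGERED                # CONTINUOUS | TRIGGERED | SNAPSHOT
        create_database_objects_if_missing: true
        new_pipeline_spec:
          storage_catalog: main                     # standard catalog for checkpoints/event logs
          storage_schema: synced_table_storage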
+github.com/databricks/databricks-sdk-go/service/database.NewPipelineSpec: + "_": + "description": |- + Custom fields that user can set for pipeline while creating SyncedDatabaseTable. + Note that other fields of pipeline are still inferred by table def internally + "storage_catalog": + "description": |- + This field needs to be specified if the destination catalog is a managed postgres catalog. + + UC catalog for the pipeline to store intermediate files (checkpoints, event logs etc). + This needs to be a standard catalog where the user has permissions to create Delta tables. + "storage_schema": + "description": |- + This field needs to be specified if the destination catalog is a managed postgres catalog. + + UC schema for the pipeline to store intermediate files (checkpoints, event logs etc). + This needs to be in the standard catalog where the user has permissions to create Delta tables. +github.com/databricks/databricks-sdk-go/service/database.ProvisioningInfoState: + "_": + "enum": + - |- + PROVISIONING + - |- + ACTIVE + - |- + FAILED + - |- + DELETING + - |- + UPDATING + - |- + DEGRADED +github.com/databricks/databricks-sdk-go/service/database.ProvisioningPhase: + "_": + "enum": + - |- + PROVISIONING_PHASE_MAIN + - |- + PROVISIONING_PHASE_INDEX_SCAN + - |- + PROVISIONING_PHASE_INDEX_SORT +github.com/databricks/databricks-sdk-go/service/database.SyncedTableContinuousUpdateStatus: + "_": + "description": |- + Detailed status of a synced table. Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE + or the SYNCED_UPDATING_PIPELINE_RESOURCES state. + "initial_pipeline_sync_progress": + "description": |- + Progress of the initial data synchronization. + "last_processed_commit_version": + "description": |- + The last source table Delta version that was successfully synced to the synced table. + "timestamp": + "description": |- + The end timestamp of the last time any data was synchronized from the source table to the synced + table. This is when the data is available in the synced table. +github.com/databricks/databricks-sdk-go/service/database.SyncedTableFailedStatus: + "_": + "description": |- + Detailed status of a synced table. Shown if the synced table is in the OFFLINE_FAILED or the + SYNCED_PIPELINE_FAILED state. + "last_processed_commit_version": + "description": |- + The last source table Delta version that was successfully synced to the synced table. + The last source table Delta version that was synced to the synced table. + Only populated if the table is still + synced and available for serving. + "timestamp": + "description": |- + The end timestamp of the last time any data was synchronized from the source table to the synced + table. Only populated if the table is still synced and available for serving. +github.com/databricks/databricks-sdk-go/service/database.SyncedTablePipelineProgress: + "_": + "description": |- + Progress information of the Synced Table data synchronization pipeline. + "estimated_completion_time_seconds": + "description": |- + The estimated time remaining to complete this update in seconds. + "latest_version_currently_processing": + "description": |- + The source table Delta version that was last processed by the pipeline. The pipeline may not + have completely processed this version yet. + "provisioning_phase": + "description": |- + The current phase of the data synchronization pipeline. + "sync_progress_completion": + "description": |- + The completion ratio of this update. This is a number between 0 and 1. 
+ "synced_row_count": + "description": |- + The number of rows that have been synced in this update. + "total_row_count": + "description": |- + The total number of rows that need to be synced in this update. This number may be an estimate. +github.com/databricks/databricks-sdk-go/service/database.SyncedTablePosition: + "delta_table_sync_info": {} + "sync_end_timestamp": + "description": |- + The end timestamp of the most recent successful synchronization. + This is the time when the data is available in the synced table. + "sync_start_timestamp": + "description": |- + The starting timestamp of the most recent successful synchronization from the source table + to the destination (synced) table. + Note this is the starting timestamp of the sync operation, not the end time. + E.g., for a batch, this is the time when the sync operation started. +github.com/databricks/databricks-sdk-go/service/database.SyncedTableProvisioningStatus: + "_": + "description": |- + Detailed status of a synced table. Shown if the synced table is in the + PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state. + "initial_pipeline_sync_progress": + "description": |- + Details about initial data synchronization. Only populated when in the + PROVISIONING_INITIAL_SNAPSHOT state. +github.com/databricks/databricks-sdk-go/service/database.SyncedTableSchedulingPolicy: + "_": + "enum": + - |- + CONTINUOUS + - |- + TRIGGERED + - |- + SNAPSHOT +github.com/databricks/databricks-sdk-go/service/database.SyncedTableSpec: + "_": + "description": |- + Specification of a synced database table. + "create_database_objects_if_missing": + "description": |- + If true, the synced table's logical database and schema resources in PG + will be created if they do not already exist. + "existing_pipeline_id": + "description": |- + At most one of existing_pipeline_id and new_pipeline_spec should be defined. + + If existing_pipeline_id is defined, the synced table will be bin packed into the existing pipeline + referenced. This avoids creating a new pipeline and allows sharing existing compute. + In this case, the scheduling_policy of this synced table must match the scheduling policy of the existing pipeline. + "new_pipeline_spec": + "description": |- + At most one of existing_pipeline_id and new_pipeline_spec should be defined. + + If new_pipeline_spec is defined, a new pipeline is created for this synced table. The location pointed to is used + to store intermediate files (checkpoints, event logs etc). The caller must have write permissions to create Delta + tables in the specified catalog and schema. Again, note this requires write permissions, whereas the source table + only requires read permissions. + "primary_key_columns": + "description": |- + Primary Key columns to be used for data insert/update in the destination. + "scheduling_policy": + "description": |- + Scheduling policy of the underlying pipeline. + "source_table_full_name": + "description": |- + Three-part (catalog, schema, table) name of the source Delta table. + "timeseries_key": + "description": |- + Time series key to deduplicate (tie-break) rows with the same primary key. +github.com/databricks/databricks-sdk-go/service/database.SyncedTableState: + "_": + "description": |- + The state of a synced table. 
+ "enum": + - |- + SYNCED_TABLE_PROVISIONING + - |- + SYNCED_TABLE_PROVISIONING_PIPELINE_RESOURCES + - |- + SYNCED_TABLE_PROVISIONING_INITIAL_SNAPSHOT + - |- + SYNCED_TABLE_ONLINE + - |- + SYNCED_TABLE_ONLINE_CONTINUOUS_UPDATE + - |- + SYNCED_TABLE_ONLINE_TRIGGERED_UPDATE + - |- + SYNCED_TABLE_ONLINE_NO_PENDING_UPDATE + - |- + SYNCED_TABLED_OFFLINE + - |- + SYNCED_TABLE_OFFLINE_FAILED + - |- + SYNCED_TABLE_ONLINE_PIPELINE_FAILED + - |- + SYNCED_TABLE_ONLINE_UPDATING_PIPELINE_RESOURCES +github.com/databricks/databricks-sdk-go/service/database.SyncedTableStatus: + "_": + "description": |- + Status of a synced table. + "continuous_update_status": + "description": |- + Detailed status of a synced table. Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE + or the SYNCED_UPDATING_PIPELINE_RESOURCES state. + "detailed_state": + "description": |- + The state of the synced table. + "failed_status": + "description": |- + Detailed status of a synced table. Shown if the synced table is in the OFFLINE_FAILED or the + SYNCED_PIPELINE_FAILED state. + "last_sync": + "description": |- + Summary of the last successful synchronization from source to destination. + + Will always be present if there has been a successful sync. Even if the most recent syncs have failed. + + Limitation: + The only exception is if the synced table is doing a FULL REFRESH, then the last sync information + will not be available until the full refresh is complete. This limitation will be addressed in a future version. + + This top-level field is a convenience for consumers who want easy access to last sync information + without having to traverse detailed_status. + "message": + "description": |- + A text description of the current state of the synced table. + "pipeline_id": + "description": |- + ID of the associated pipeline. The pipeline ID may have been provided by the client + (in the case of bin packing), or generated by the server (when creating a new pipeline). + "provisioning_status": + "description": |- + Detailed status of a synced table. Shown if the synced table is in the + PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state. + "triggered_update_status": + "description": |- + Detailed status of a synced table. Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE + or the SYNCED_NO_PENDING_UPDATE state. +github.com/databricks/databricks-sdk-go/service/database.SyncedTableTriggeredUpdateStatus: + "_": + "description": |- + Detailed status of a synced table. Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE + or the SYNCED_NO_PENDING_UPDATE state. + "last_processed_commit_version": + "description": |- + The last source table Delta version that was successfully synced to the synced table. + "timestamp": + "description": |- + The end timestamp of the last time any data was synchronized from the source table to the synced + table. This is when the data is available in the synced table. + "triggered_update_progress": + "description": |- + Progress of the active data synchronization pipeline. github.com/databricks/databricks-sdk-go/service/jobs.AuthenticationMethod: "_": "enum": @@ -1997,6 +2299,9 @@ github.com/databricks/databricks-sdk-go/service/jobs.Continuous: "pause_status": "description": |- Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED. + "task_retry_mode": + "description": |- + Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER. 
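The new task_retry_mode field on jobs.Continuous, documented just above, can be set in a bundle job definition. A minimal, hypothetical sketch follows; the job name and task are invented, and only the continuous block reflects the new field.

# Hypothetical continuous job using the new task_retry_mode field (NEVER | ON_FAILURE).
resources:
  jobs:
    orders_stream_job:
      name: orders-stream-job
      continuous:
        pause_status: UNPAUSED
        task_retry_mode: ON_FAILURE   # retry a failed task while another task is still on its first attempt
      tasks:
        - task_key: ingest
          notebook_task:
            notebook_path: ./src/ingest.ipynb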
github.com/databricks/databricks-sdk-go/service/jobs.CronSchedule: "pause_status": "description": |- @@ -2955,6 +3260,18 @@ github.com/databricks/databricks-sdk-go/service/jobs.TaskNotificationSettings: "no_alert_for_skipped_runs": "description": |- If true, do not send notifications to recipients specified in `on_failure` if the run is skipped. +github.com/databricks/databricks-sdk-go/service/jobs.TaskRetryMode: + "_": + "description": |- + task retry mode of the continuous job + * NEVER: The failed task will not be retried. + * ON_FAILURE: Retry a failed task if at least one other task in the job is still running its first attempt. + When this condition is no longer met or the retry limit is reached, the job run is cancelled and a new run is started. + "enum": + - |- + NEVER + - |- + ON_FAILURE github.com/databricks/databricks-sdk-go/service/jobs.TriggerSettings: "file_arrival": "description": |- @@ -3111,6 +3428,11 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefin "objects": "description": |- Required. Settings specifying tables to replicate and the destination for the replicated tables. + "source_configurations": + "description": |- + Top-level source configurations + "x-databricks-preview": |- + PRIVATE "source_type": "description": |- The type of the foreign source. @@ -3390,6 +3712,29 @@ github.com/databricks/databricks-sdk-go/service/pipelines.PipelinesEnvironment: List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/ Allowed dependency could be , , (WSFS or Volumes in Databricks), +github.com/databricks/databricks-sdk-go/service/pipelines.PostgresCatalogConfig: + "_": + "description": |- + PG-specific catalog-level configuration parameters + "slot_config": + "description": |- + Optional. The Postgres slot configuration to use for logical replication + "x-databricks-preview": |- + PRIVATE +github.com/databricks/databricks-sdk-go/service/pipelines.PostgresSlotConfig: + "_": + "description": |- + PostgresSlotConfig contains the configuration for a Postgres logical replication slot + "publication_name": + "description": |- + The name of the publication to use for the Postgres source + "x-databricks-preview": |- + PRIVATE + "slot_name": + "description": |- + The name of the logical replication slot to use for the Postgres source + "x-databricks-preview": |- + PRIVATE github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec: "destination_catalog": "description": |- @@ -3447,6 +3792,26 @@ github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec: "table_configuration": "description": |- Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the IngestionPipelineDefinition object. 
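The source_configurations field on IngestionPipelineDefinition and the Postgres catalog/slot configuration types added in this area (SourceConfig, SourceCatalogConfig, PostgresCatalogConfig, PostgresSlotConfig) are marked as PRIVATE preview. Purely as an illustrative sketch, assuming they surface under a bundle pipeline's ingestion_definition in the same way as other ingestion settings, a configuration might look like this; the connection, catalog, and slot names are hypothetical.

# Hypothetical ingestion pipeline using the private-preview source_configurations field.
resources:
  pipelines:
    pg_ingest:
      name: pg-ingest-pipeline
      catalog: main
      ingestion_definition:
        connection_name: my_postgres_connection
        source_configurations:
          - catalog:
              source_catalog: pg_source_catalog
              postgres:
                slot_config:
                  slot_name: dbx_ingest_slot        # logical replication slot for the Postgres source
                  publication_name: dbx_ingest_pub  # publication used for the Postgres source
        objects:
          - schema:
              source_catalog: pg_source_catalog
              source_schema: public
              destination_catalog: main
              destination_schema: raw_pg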
+github.com/databricks/databricks-sdk-go/service/pipelines.SourceCatalogConfig: + "_": + "description": |- + SourceCatalogConfig contains catalog-level custom configuration parameters for each source + "postgres": + "description": |- + Postgres-specific catalog-level configuration parameters + "x-databricks-preview": |- + PRIVATE + "source_catalog": + "description": |- + Source catalog name + "x-databricks-preview": |- + PRIVATE +github.com/databricks/databricks-sdk-go/service/pipelines.SourceConfig: + "catalog": + "description": |- + Catalog-level source configuration parameters + "x-databricks-preview": |- + PRIVATE github.com/databricks/databricks-sdk-go/service/pipelines.TableSpec: "destination_catalog": "description": |- diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index 01deda3623..2e49c2d516 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -182,6 +182,17 @@ github.com/databricks/cli/bundle/config/resources.DashboardPermissionLevel: CAN_EDIT - |- CAN_MANAGE +github.com/databricks/cli/bundle/config/resources.DatabaseCatalog: + "create_database_if_not_exists": + "description": |- + PLACEHOLDER + "uid": + "description": |- + PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.DatabaseInstance: + "permissions": + "description": |- + PLACEHOLDER github.com/databricks/cli/bundle/config/resources.DatabaseInstancePermissionLevel: "_": "enum": @@ -421,35 +432,6 @@ github.com/databricks/cli/bundle/config/resources.RegisteredModel: "grants": "description": |- PLACEHOLDER -github.com/databricks/cli/bundle/config/resources.SchemaGrantPrivilege: - "_": - "enum": - - |- - ALL_PRIVILEGES - - |- - APPLY_TAG - - |- - CREATE_FUNCTION - - |- - CREATE_TABLE - - |- - CREATE_VOLUME - - |- - MANAGE - - |- - USE_SCHEMA - - |- - EXECUTE - - |- - MODIFY - - |- - REFRESH - - |- - SELECT - - |- - READ_VOLUME - - |- - WRITE_VOLUME github.com/databricks/cli/bundle/config/resources.Schema: "_": "markdown_description": |- @@ -503,6 +485,35 @@ github.com/databricks/cli/bundle/config/resources.Schema: "properties": "description": |- PLACEHOLDER +github.com/databricks/cli/bundle/config/resources.SchemaGrantPrivilege: + "_": + "enum": + - |- + ALL_PRIVILEGES + - |- + APPLY_TAG + - |- + CREATE_FUNCTION + - |- + CREATE_TABLE + - |- + CREATE_VOLUME + - |- + MANAGE + - |- + USE_SCHEMA + - |- + EXECUTE + - |- + MODIFY + - |- + REFRESH + - |- + SELECT + - |- + READ_VOLUME + - |- + WRITE_VOLUME github.com/databricks/cli/bundle/config/resources.SecretScopePermissionLevel: "_": "enum": @@ -754,6 +765,10 @@ github.com/databricks/databricks-sdk-go/service/compute.LogAnalyticsInfo: "log_analytics_workspace_id": "description": |- The workspace ID for the Azure Log Analytics agent configuration +github.com/databricks/databricks-sdk-go/service/database.SyncedTablePosition: + "delta_table_sync_info": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/jobs.DashboardTask: "dashboard_id": "description": |- diff --git a/bundle/internal/validation/generated/enum_fields.go b/bundle/internal/validation/generated/enum_fields.go index e148b2dfa2..11b93c00ca 100644 --- a/bundle/internal/validation/generated/enum_fields.go +++ b/bundle/internal/validation/generated/enum_fields.go @@ -39,6 +39,7 @@ var EnumFields = map[string][]string{ "resources.database_instances.*.state": {"AVAILABLE", "DELETING", "FAILING_OVER", "STARTING", "STOPPED", 
"UPDATING"}, "resources.jobs.*.continuous.pause_status": {"PAUSED", "UNPAUSED"}, + "resources.jobs.*.continuous.task_retry_mode": {"NEVER", "ON_FAILURE"}, "resources.jobs.*.deployment.kind": {"BUNDLE"}, "resources.jobs.*.edit_mode": {"EDITABLE", "UI_LOCKED"}, "resources.jobs.*.format": {"MULTI_TASK", "SINGLE_TASK"}, @@ -126,12 +127,21 @@ var EnumFields = map[string][]string{ "resources.quality_monitors.*.inference_log.problem_type": {"PROBLEM_TYPE_CLASSIFICATION", "PROBLEM_TYPE_REGRESSION"}, "resources.quality_monitors.*.schedule.pause_status": {"PAUSED", "UNPAUSED", "UNSPECIFIED"}, + "resources.schemas.*.grants[*].privileges[*]": {"ALL_PRIVILEGES", "APPLY_TAG", "CREATE_FUNCTION", "CREATE_TABLE", "CREATE_VOLUME", "MANAGE", "USE_SCHEMA", "EXECUTE", "MODIFY", "REFRESH", "SELECT", "READ_VOLUME", "WRITE_VOLUME"}, + "resources.secret_scopes.*.backend_type": {"AZURE_KEYVAULT", "DATABRICKS"}, "resources.sql_warehouses.*.channel.name": {"CHANNEL_NAME_CURRENT", "CHANNEL_NAME_CUSTOM", "CHANNEL_NAME_PREVIEW", "CHANNEL_NAME_PREVIOUS"}, "resources.sql_warehouses.*.spot_instance_policy": {"COST_OPTIMIZED", "POLICY_UNSPECIFIED", "RELIABILITY_OPTIMIZED"}, "resources.sql_warehouses.*.warehouse_type": {"CLASSIC", "PRO", "TYPE_UNSPECIFIED"}, + "resources.synced_database_tables.*.data_synchronization_status.continuous_update_status.initial_pipeline_sync_progress.provisioning_phase": {"PROVISIONING_PHASE_INDEX_SCAN", "PROVISIONING_PHASE_INDEX_SORT", "PROVISIONING_PHASE_MAIN"}, + "resources.synced_database_tables.*.data_synchronization_status.detailed_state": {"SYNCED_TABLED_OFFLINE", "SYNCED_TABLE_OFFLINE_FAILED", "SYNCED_TABLE_ONLINE", "SYNCED_TABLE_ONLINE_CONTINUOUS_UPDATE", "SYNCED_TABLE_ONLINE_NO_PENDING_UPDATE", "SYNCED_TABLE_ONLINE_PIPELINE_FAILED", "SYNCED_TABLE_ONLINE_TRIGGERED_UPDATE", "SYNCED_TABLE_ONLINE_UPDATING_PIPELINE_RESOURCES", "SYNCED_TABLE_PROVISIONING", "SYNCED_TABLE_PROVISIONING_INITIAL_SNAPSHOT", "SYNCED_TABLE_PROVISIONING_PIPELINE_RESOURCES"}, + "resources.synced_database_tables.*.data_synchronization_status.provisioning_status.initial_pipeline_sync_progress.provisioning_phase": {"PROVISIONING_PHASE_INDEX_SCAN", "PROVISIONING_PHASE_INDEX_SORT", "PROVISIONING_PHASE_MAIN"}, + "resources.synced_database_tables.*.data_synchronization_status.triggered_update_status.triggered_update_progress.provisioning_phase": {"PROVISIONING_PHASE_INDEX_SCAN", "PROVISIONING_PHASE_INDEX_SORT", "PROVISIONING_PHASE_MAIN"}, + "resources.synced_database_tables.*.spec.scheduling_policy": {"CONTINUOUS", "SNAPSHOT", "TRIGGERED"}, + "resources.synced_database_tables.*.unity_catalog_provisioning_state": {"ACTIVE", "DEGRADED", "DELETING", "FAILED", "PROVISIONING", "UPDATING"}, + "resources.volumes.*.grants[*].privileges[*]": {"ALL_PRIVILEGES", "APPLY_TAG", "MANAGE", "READ_VOLUME", "WRITE_VOLUME"}, "resources.volumes.*.volume_type": {"EXTERNAL", "MANAGED"}, diff --git a/bundle/internal/validation/generated/required_fields.go b/bundle/internal/validation/generated/required_fields.go index 542e64ae44..621ed53aeb 100644 --- a/bundle/internal/validation/generated/required_fields.go +++ b/bundle/internal/validation/generated/required_fields.go @@ -208,6 +208,8 @@ var RequiredFields = map[string][]string{ "resources.sql_warehouses.*.permissions[*]": {"level"}, + "resources.synced_database_tables.*": {"name"}, + "resources.volumes.*": {"catalog_name", "name", "schema_name", "volume_type"}, "resources.volumes.*.grants[*]": {"privileges", "principal"}, diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json 
index 9c32f2dba7..0bea336a7d 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -518,12 +518,15 @@ "$ref": "#/$defs/bool" }, "database_instance_name": { + "description": "The name of the DatabaseInstance housing the database.", "$ref": "#/$defs/string" }, "database_name": { + "description": "The name of the database (in a instance) associated with the catalog.", "$ref": "#/$defs/string" }, "name": { + "description": "The name of the catalog in UC.", "$ref": "#/$defs/string" }, "uid": { @@ -550,63 +553,90 @@ "description": "A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage.", "properties": { "capacity": { + "description": "The sku of the instance. Valid values are \"CU_1\", \"CU_2\", \"CU_4\", \"CU_8\".", "$ref": "#/$defs/string" }, "child_instance_refs": { + "description": "The refs of the child instances. This is only available if the instance is\nparent instance.", "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/database.DatabaseInstanceRef" }, "creation_time": { + "description": "The timestamp when the instance was created.", "$ref": "#/$defs/string" }, "creator": { + "description": "The email of the creator of the instance.", "$ref": "#/$defs/string" }, + "effective_enable_pg_native_login": { + "description": "xref AIP-129. `enable_pg_native_login` is owned by the client, while `effective_enable_pg_native_login` is owned by the server.\n`enable_pg_native_login` will only be set in Create/Update response messages if and only if the user provides the field via the request.\n`effective_enable_pg_native_login` on the other hand will always bet set in all response messages (Create/Update/Get/List).", + "$ref": "#/$defs/bool" + }, "effective_enable_readable_secondaries": { + "description": "xref AIP-129. `enable_readable_secondaries` is owned by the client, while `effective_enable_readable_secondaries` is owned by the server.\n`enable_readable_secondaries` will only be set in Create/Update response messages if and only if the user provides the field via the request.\n`effective_enable_readable_secondaries` on the other hand will always bet set in all response messages (Create/Update/Get/List).", "$ref": "#/$defs/bool" }, "effective_node_count": { + "description": "xref AIP-129. `node_count` is owned by the client, while `effective_node_count` is owned by the server.\n`node_count` will only be set in Create/Update response messages if and only if the user provides the field via the request.\n`effective_node_count` on the other hand will always bet set in all response messages (Create/Update/Get/List).", "$ref": "#/$defs/int" }, "effective_retention_window_in_days": { + "description": "xref AIP-129. `retention_window_in_days` is owned by the client, while `effective_retention_window_in_days` is owned by the server.\n`retention_window_in_days` will only be set in Create/Update response messages if and only if the user provides the field via the request.\n`effective_retention_window_in_days` on the other hand will always bet set in all response messages (Create/Update/Get/List).", "$ref": "#/$defs/int" }, "effective_stopped": { + "description": "xref AIP-129. 
`stopped` is owned by the client, while `effective_stopped` is owned by the server.\n`stopped` will only be set in Create/Update response messages if and only if the user provides the field via the request.\n`effective_stopped` on the other hand will always bet set in all response messages (Create/Update/Get/List).", + "$ref": "#/$defs/bool" + }, + "enable_pg_native_login": { + "description": "Whether the instance has PG native password login enabled. Defaults to true.", "$ref": "#/$defs/bool" }, "enable_readable_secondaries": { + "description": "Whether to enable secondaries to serve read-only traffic. Defaults to false.", "$ref": "#/$defs/bool" }, "name": { + "description": "The name of the instance. This is the unique identifier for the instance.", "$ref": "#/$defs/string" }, "node_count": { + "description": "The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults to\n1 primary and 0 secondaries.", "$ref": "#/$defs/int" }, "parent_instance_ref": { + "description": "The ref of the parent instance. This is only available if the instance is\nchild instance.\nInput: For specifying the parent instance to create a child instance. Optional.\nOutput: Only populated if provided as input to create a child instance.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.DatabaseInstanceRef" }, "permissions": { "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.DatabaseInstancePermission" }, "pg_version": { + "description": "The version of Postgres running on the instance.", "$ref": "#/$defs/string" }, "read_only_dns": { + "description": "The DNS endpoint to connect to the instance for read only access. This is only available if\nenable_readable_secondaries is true.", "$ref": "#/$defs/string" }, "read_write_dns": { + "description": "The DNS endpoint to connect to the instance for read+write access.", "$ref": "#/$defs/string" }, "retention_window_in_days": { + "description": "The retention window for the instance. This is the time window in days\nfor which the historical data is retained. 
The default value is 7 days.\nValid values are 2 to 35 days.", "$ref": "#/$defs/int" }, "state": { + "description": "The current state of the instance.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.DatabaseInstanceState" }, "stopped": { + "description": "Whether the instance is stopped.", "$ref": "#/$defs/bool" }, "uid": { + "description": "An immutable UUID identifier for the instance.", "$ref": "#/$defs/string" } }, @@ -1754,6 +1784,7 @@ "oneOf": [ { "type": "object", + "description": "Next field marker: 14", "properties": { "data_synchronization_status": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTableStatus" @@ -4381,9 +4412,11 @@ "type": "object", "properties": { "delta_commit_timestamp": { + "description": "The timestamp when the above Delta version was committed in the source Delta table.\nNote: This is the Delta commit time, not the time the data was written to the synced table.", "$ref": "#/$defs/string" }, "delta_commit_version": { + "description": "The Delta Lake commit version that was last successfully synced.", "$ref": "#/$defs/int64" } }, @@ -4399,11 +4432,14 @@ "oneOf": [ { "type": "object", + "description": "Custom fields that user can set for pipeline while creating SyncedDatabaseTable.\nNote that other fields of pipeline are still inferred by table def internally", "properties": { "storage_catalog": { + "description": "This field needs to be specified if the destination catalog is a managed postgres catalog.\n\nUC catalog for the pipeline to store intermediate files (checkpoints, event logs etc).\nThis needs to be a standard catalog where the user has permissions to create Delta tables.", "$ref": "#/$defs/string" }, "storage_schema": { + "description": "This field needs to be specified if the destination catalog is a managed postgres catalog.\n\nUC schema for the pipeline to store intermediate files (checkpoints, event logs etc).\nThis needs to be in the standard catalog where the user has permissions to create Delta tables.", "$ref": "#/$defs/string" } }, @@ -4416,23 +4452,56 @@ ] }, "database.ProvisioningInfoState": { - "type": "string" + "oneOf": [ + { + "type": "string", + "enum": [ + "PROVISIONING", + "ACTIVE", + "FAILED", + "DELETING", + "UPDATING", + "DEGRADED" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "database.ProvisioningPhase": { - "type": "string" + "oneOf": [ + { + "type": "string", + "enum": [ + "PROVISIONING_PHASE_MAIN", + "PROVISIONING_PHASE_INDEX_SCAN", + "PROVISIONING_PHASE_INDEX_SORT" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "database.SyncedTableContinuousUpdateStatus": { "oneOf": [ { "type": "object", + "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE\nor the SYNCED_UPDATING_PIPELINE_RESOURCES state.", "properties": { "initial_pipeline_sync_progress": { + "description": "Progress of the initial data synchronization.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTablePipelineProgress" }, "last_processed_commit_version": { + "description": "The last source table Delta version that was successfully synced to the synced table.", "$ref": "#/$defs/int64" }, "timestamp": { + "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. 
This is when the data is available in the synced table.", "$ref": "#/$defs/string" } }, @@ -4448,11 +4517,14 @@ "oneOf": [ { "type": "object", + "description": "Detailed status of a synced table. Shown if the synced table is in the OFFLINE_FAILED or the\nSYNCED_PIPELINE_FAILED state.", "properties": { "last_processed_commit_version": { + "description": "The last source table Delta version that was successfully synced to the synced table.\nThe last source table Delta version that was synced to the synced table.\nOnly populated if the table is still\nsynced and available for serving.", "$ref": "#/$defs/int64" }, "timestamp": { + "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. Only populated if the table is still synced and available for serving.", "$ref": "#/$defs/string" } }, @@ -4468,23 +4540,30 @@ "oneOf": [ { "type": "object", + "description": "Progress information of the Synced Table data synchronization pipeline.", "properties": { "estimated_completion_time_seconds": { + "description": "The estimated time remaining to complete this update in seconds.", "$ref": "#/$defs/float64" }, "latest_version_currently_processing": { + "description": "The source table Delta version that was last processed by the pipeline. The pipeline may not\nhave completely processed this version yet.", "$ref": "#/$defs/int64" }, "provisioning_phase": { + "description": "The current phase of the data synchronization pipeline.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.ProvisioningPhase" }, "sync_progress_completion": { + "description": "The completion ratio of this update. This is a number between 0 and 1.", "$ref": "#/$defs/float64" }, "synced_row_count": { + "description": "The number of rows that have been synced in this update.", "$ref": "#/$defs/int64" }, "total_row_count": { + "description": "The total number of rows that need to be synced in this update. This number may be an estimate.", "$ref": "#/$defs/int64" } }, @@ -4505,9 +4584,11 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.DeltaTableSyncInfo" }, "sync_end_timestamp": { + "description": "The end timestamp of the most recent successful synchronization.\nThis is the time when the data is available in the synced table.", "$ref": "#/$defs/string" }, "sync_start_timestamp": { + "description": "The starting timestamp of the most recent successful synchronization from the source table\nto the destination (synced) table.\nNote this is the starting timestamp of the sync operation, not the end time.\nE.g., for a batch, this is the time when the sync operation started.", "$ref": "#/$defs/string" } }, @@ -4523,8 +4604,10 @@ "oneOf": [ { "type": "object", + "description": "Detailed status of a synced table. Shown if the synced table is in the\nPROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.", "properties": { "initial_pipeline_sync_progress": { + "description": "Details about initial data synchronization. 
Only populated when in the\nPROVISIONING_INITIAL_SNAPSHOT state.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTablePipelineProgress" } }, @@ -4537,32 +4620,53 @@ ] }, "database.SyncedTableSchedulingPolicy": { - "type": "string" + "oneOf": [ + { + "type": "string", + "enum": [ + "CONTINUOUS", + "TRIGGERED", + "SNAPSHOT" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "database.SyncedTableSpec": { "oneOf": [ { "type": "object", + "description": "Specification of a synced database table.", "properties": { "create_database_objects_if_missing": { + "description": "If true, the synced table's logical database and schema resources in PG\nwill be created if they do not already exist.", "$ref": "#/$defs/bool" }, "existing_pipeline_id": { + "description": "At most one of existing_pipeline_id and new_pipeline_spec should be defined.\n\nIf existing_pipeline_id is defined, the synced table will be bin packed into the existing pipeline\nreferenced. This avoids creating a new pipeline and allows sharing existing compute.\nIn this case, the scheduling_policy of this synced table must match the scheduling policy of the existing pipeline.", "$ref": "#/$defs/string" }, "new_pipeline_spec": { + "description": "At most one of existing_pipeline_id and new_pipeline_spec should be defined.\n\nIf new_pipeline_spec is defined, a new pipeline is created for this synced table. The location pointed to is used\nto store intermediate files (checkpoints, event logs etc). The caller must have write permissions to create Delta\ntables in the specified catalog and schema. Again, note this requires write permissions, whereas the source table\nonly requires read permissions.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.NewPipelineSpec" }, "primary_key_columns": { + "description": "Primary Key columns to be used for data insert/update in the destination.", "$ref": "#/$defs/slice/string" }, "scheduling_policy": { + "description": "Scheduling policy of the underlying pipeline.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTableSchedulingPolicy" }, "source_table_full_name": { + "description": "Three-part (catalog, schema, table) name of the source Delta table.", "$ref": "#/$defs/string" }, "timeseries_key": { + "description": "Time series key to deduplicate (tie-break) rows with the same primary key.", "$ref": "#/$defs/string" } }, @@ -4575,35 +4679,66 @@ ] }, "database.SyncedTableState": { - "type": "string" + "oneOf": [ + { + "type": "string", + "description": "The state of a synced table.", + "enum": [ + "SYNCED_TABLE_PROVISIONING", + "SYNCED_TABLE_PROVISIONING_PIPELINE_RESOURCES", + "SYNCED_TABLE_PROVISIONING_INITIAL_SNAPSHOT", + "SYNCED_TABLE_ONLINE", + "SYNCED_TABLE_ONLINE_CONTINUOUS_UPDATE", + "SYNCED_TABLE_ONLINE_TRIGGERED_UPDATE", + "SYNCED_TABLE_ONLINE_NO_PENDING_UPDATE", + "SYNCED_TABLED_OFFLINE", + "SYNCED_TABLE_OFFLINE_FAILED", + "SYNCED_TABLE_ONLINE_PIPELINE_FAILED", + "SYNCED_TABLE_ONLINE_UPDATING_PIPELINE_RESOURCES" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] }, "database.SyncedTableStatus": { "oneOf": [ { "type": "object", + "description": "Status of a synced table.", "properties": { "continuous_update_status": { + "description": "Detailed status of a synced table. 
Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE\nor the SYNCED_UPDATING_PIPELINE_RESOURCES state.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTableContinuousUpdateStatus" }, "detailed_state": { + "description": "The state of the synced table.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTableState" }, "failed_status": { + "description": "Detailed status of a synced table. Shown if the synced table is in the OFFLINE_FAILED or the\nSYNCED_PIPELINE_FAILED state.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTableFailedStatus" }, "last_sync": { + "description": "Summary of the last successful synchronization from source to destination.\n\nWill always be present if there has been a successful sync. Even if the most recent syncs have failed.\n\nLimitation:\nThe only exception is if the synced table is doing a FULL REFRESH, then the last sync information\nwill not be available until the full refresh is complete. This limitation will be addressed in a future version.\n\nThis top-level field is a convenience for consumers who want easy access to last sync information\nwithout having to traverse detailed_status.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTablePosition" }, "message": { + "description": "A text description of the current state of the synced table.", "$ref": "#/$defs/string" }, "pipeline_id": { + "description": "ID of the associated pipeline. The pipeline ID may have been provided by the client\n(in the case of bin packing), or generated by the server (when creating a new pipeline).", "$ref": "#/$defs/string" }, "provisioning_status": { + "description": "Detailed status of a synced table. Shown if the synced table is in the\nPROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTableProvisioningStatus" }, "triggered_update_status": { + "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE\nor the SYNCED_NO_PENDING_UPDATE state.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTableTriggeredUpdateStatus" } }, @@ -4619,14 +4754,18 @@ "oneOf": [ { "type": "object", + "description": "Detailed status of a synced table. Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE\nor the SYNCED_NO_PENDING_UPDATE state.", "properties": { "last_processed_commit_version": { + "description": "The last source table Delta version that was successfully synced to the synced table.", "$ref": "#/$defs/int64" }, "timestamp": { + "description": "The end timestamp of the last time any data was synchronized from the source table to the synced\ntable. This is when the data is available in the synced table.", "$ref": "#/$defs/string" }, "triggered_update_progress": { + "description": "Progress of the active data synchronization pipeline.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/database.SyncedTablePipelineProgress" } }, @@ -4790,6 +4929,10 @@ "pause_status": { "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PauseStatus" + }, + "task_retry_mode": { + "description": "Indicate whether the continuous job is applying task level retries or not. 
Defaults to NEVER.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskRetryMode" } }, "additionalProperties": false @@ -6509,6 +6652,22 @@ } ] }, + "jobs.TaskRetryMode": { + "oneOf": [ + { + "type": "string", + "description": "task retry mode of the continuous job\n* NEVER: The failed task will not be retried.\n* ON_FAILURE: Retry a failed task if at least one other task in the job is still running its first attempt.\nWhen this condition is no longer met or the retry limit is reached, the job run is cancelled and a new run is started.", + "enum": [ + "NEVER", + "ON_FAILURE" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "jobs.TriggerSettings": { "oneOf": [ { @@ -6855,6 +7014,12 @@ "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionConfig" }, + "source_configurations": { + "description": "Top-level source configurations", + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.SourceConfig", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "source_type": { "description": "The type of the foreign source.\nThe source type will be inferred from the source connection or ingestion gateway.\nThis field is output only and will be ignored if provided.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType" @@ -7250,6 +7415,54 @@ } ] }, + "pipelines.PostgresCatalogConfig": { + "oneOf": [ + { + "type": "object", + "description": "PG-specific catalog-level configuration parameters", + "properties": { + "slot_config": { + "description": "Optional. 
The Postgres slot configuration to use for logical replication", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PostgresSlotConfig", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "pipelines.PostgresSlotConfig": { + "oneOf": [ + { + "type": "object", + "description": "PostgresSlotConfig contains the configuration for a Postgres logical replication slot", + "properties": { + "publication_name": { + "description": "The name of the publication to use for the Postgres source", + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, + "slot_name": { + "description": "The name of the logical replication slot to use for the Postgres source", + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "pipelines.ReportSpec": { "oneOf": [ { @@ -7380,6 +7593,53 @@ } ] }, + "pipelines.SourceCatalogConfig": { + "oneOf": [ + { + "type": "object", + "description": "SourceCatalogConfig contains catalog-level custom configuration parameters for each source", + "properties": { + "postgres": { + "description": "Postgres-specific catalog-level configuration parameters", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PostgresCatalogConfig", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, + "source_catalog": { + "description": "Source catalog name", + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "pipelines.SourceConfig": { + "oneOf": [ + { + "type": "object", + "properties": { + "catalog": { + "description": "Catalog-level source configuration parameters", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.SourceCatalogConfig", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "pipelines.TableSpec": { "oneOf": [ { @@ -9732,6 +9992,20 @@ } ] }, + "pipelines.SourceConfig": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.SourceConfig" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "serving.AiGatewayRateLimit": { "oneOf": [ { diff --git a/cmd/account/federation-policy/federation-policy.go b/cmd/account/federation-policy/federation-policy.go index 0304f73f06..d7b2e6c13c 100755 --- a/cmd/account/federation-policy/federation-policy.go +++ b/cmd/account/federation-policy/federation-policy.go @@ -110,7 +110,6 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.PolicyId, "policy-id", createReq.PolicyId, `The identifier for the federation policy.`) cmd.Flags().StringVar(&createReq.Policy.Description, "description", createReq.Policy.Description, `Description of the federation policy.`) - cmd.Flags().StringVar(&createReq.Policy.Name, "name", createReq.Policy.Name, `Resource name 
for the federation policy.`) // TODO: complex arg: oidc_policy cmd.Use = "create" @@ -338,7 +337,6 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.UpdateMask, "update-mask", updateReq.UpdateMask, `The field mask specifies which fields of the policy to update.`) cmd.Flags().StringVar(&updateReq.Policy.Description, "description", updateReq.Policy.Description, `Description of the federation policy.`) - cmd.Flags().StringVar(&updateReq.Policy.Name, "name", updateReq.Policy.Name, `Resource name for the federation policy.`) // TODO: complex arg: oidc_policy cmd.Use = "update POLICY_ID" diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index 3318a34b42..e446bb04cb 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -291,7 +291,9 @@ func newList() *cobra.Command { cmd.Short = `List group details.` cmd.Long = `List group details. - Gets all details of the groups associated with the Databricks account.` + Gets all details of the groups associated with the Databricks account. As of + 08/22/2025, this endpoint will not return members. Instead, members should be + retrieved by iterating through Get group details.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go index faeff49071..132be7e958 100755 --- a/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go +++ b/cmd/account/service-principal-federation-policy/service-principal-federation-policy.go @@ -117,7 +117,6 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.PolicyId, "policy-id", createReq.PolicyId, `The identifier for the federation policy.`) cmd.Flags().StringVar(&createReq.Policy.Description, "description", createReq.Policy.Description, `Description of the federation policy.`) - cmd.Flags().StringVar(&createReq.Policy.Name, "name", createReq.Policy.Name, `Resource name for the federation policy.`) // TODO: complex arg: oidc_policy cmd.Use = "create SERVICE_PRINCIPAL_ID" @@ -378,7 +377,6 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.UpdateMask, "update-mask", updateReq.UpdateMask, `The field mask specifies which fields of the policy to update.`) cmd.Flags().StringVar(&updateReq.Policy.Description, "description", updateReq.Policy.Description, `Description of the federation policy.`) - cmd.Flags().StringVar(&updateReq.Policy.Name, "name", updateReq.Policy.Name, `Resource name for the federation policy.`) // TODO: complex arg: oidc_policy cmd.Use = "update SERVICE_PRINCIPAL_ID POLICY_ID" diff --git a/cmd/workspace/clean-room-assets/clean-room-assets.go b/cmd/workspace/clean-room-assets/clean-room-assets.go index be89689c32..5a1904550c 100755 --- a/cmd/workspace/clean-room-assets/clean-room-assets.go +++ b/cmd/workspace/clean-room-assets/clean-room-assets.go @@ -176,6 +176,8 @@ func newCreateCleanRoomAssetReview() *cobra.Command { cmd.Flags().Var(&createCleanRoomAssetReviewJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: complex arg: notebook_review + cmd.Use = "create-clean-room-asset-review CLEAN_ROOM_NAME ASSET_TYPE NAME" cmd.Short = `Create a review (e.g. approval) for an asset.` cmd.Long = `Create a review (e.g. approval) for an asset. 
@@ -184,7 +186,7 @@ func newCreateCleanRoomAssetReview() *cobra.Command { Arguments: CLEAN_ROOM_NAME: Name of the clean room - ASSET_TYPE: Asset type. Can only be NOTEBOOK_FILE. + ASSET_TYPE: Asset type. Can either be NOTEBOOK_FILE or JAR_ANALYSIS. Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME] NAME: Name of the asset` @@ -211,8 +213,6 @@ func newCreateCleanRoomAssetReview() *cobra.Command { return err } } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } createCleanRoomAssetReviewReq.CleanRoomName = args[0] _, err = fmt.Sscan(args[1], &createCleanRoomAssetReviewReq.AssetType) diff --git a/cmd/workspace/database/database.go b/cmd/workspace/database/database.go index 9cc5724e6d..efca299716 100755 --- a/cmd/workspace/database/database.go +++ b/cmd/workspace/database/database.go @@ -179,6 +179,7 @@ func newCreateDatabaseInstance() *cobra.Command { cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", createDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) // TODO: array: child_instance_refs + cmd.Flags().BoolVar(&createDatabaseInstanceReq.DatabaseInstance.EnablePgNativeLogin, "enable-pg-native-login", createDatabaseInstanceReq.DatabaseInstance.EnablePgNativeLogin, `Whether the instance has PG native password login enabled.`) cmd.Flags().BoolVar(&createDatabaseInstanceReq.DatabaseInstance.EnableReadableSecondaries, "enable-readable-secondaries", createDatabaseInstanceReq.DatabaseInstance.EnableReadableSecondaries, `Whether to enable secondaries to serve read-only traffic.`) cmd.Flags().IntVar(&createDatabaseInstanceReq.DatabaseInstance.NodeCount, "node-count", createDatabaseInstanceReq.DatabaseInstance.NodeCount, `The number of nodes in the instance, composed of 1 primary and 0 or more secondaries.`) // TODO: complex arg: parent_instance_ref @@ -1499,6 +1500,7 @@ func newUpdateDatabaseInstance() *cobra.Command { cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", updateDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) // TODO: array: child_instance_refs + cmd.Flags().BoolVar(&updateDatabaseInstanceReq.DatabaseInstance.EnablePgNativeLogin, "enable-pg-native-login", updateDatabaseInstanceReq.DatabaseInstance.EnablePgNativeLogin, `Whether the instance has PG native password login enabled.`) cmd.Flags().BoolVar(&updateDatabaseInstanceReq.DatabaseInstance.EnableReadableSecondaries, "enable-readable-secondaries", updateDatabaseInstanceReq.DatabaseInstance.EnableReadableSecondaries, `Whether to enable secondaries to serve read-only traffic.`) cmd.Flags().IntVar(&updateDatabaseInstanceReq.DatabaseInstance.NodeCount, "node-count", updateDatabaseInstanceReq.DatabaseInstance.NodeCount, `The number of nodes in the instance, composed of 1 primary and 0 or more secondaries.`) // TODO: complex arg: parent_instance_ref diff --git a/cmd/workspace/genie/genie.go b/cmd/workspace/genie/genie.go index 3238f68b99..5a0a5f046c 100755 --- a/cmd/workspace/genie/genie.go +++ b/cmd/workspace/genie/genie.go @@ -920,9 +920,9 @@ func newSendMessageFeedback() *cobra.Command { cmd.Flags().Var(&sendMessageFeedbackJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&sendMessageFeedbackReq.FeedbackText, "feedback-text", sendMessageFeedbackReq.FeedbackText, `Optional text feedback that will be stored as a comment.`) + cmd.Flags().StringVar(&sendMessageFeedbackReq.Comment, 
"comment", sendMessageFeedbackReq.Comment, `Optional text feedback that will be stored as a comment.`) - cmd.Use = "send-message-feedback SPACE_ID CONVERSATION_ID MESSAGE_ID FEEDBACK_RATING" + cmd.Use = "send-message-feedback SPACE_ID CONVERSATION_ID MESSAGE_ID RATING" cmd.Short = `Send message feedback.` cmd.Long = `Send message feedback. @@ -932,7 +932,7 @@ func newSendMessageFeedback() *cobra.Command { SPACE_ID: The ID associated with the Genie space where the message is located. CONVERSATION_ID: The ID associated with the conversation. MESSAGE_ID: The ID associated with the message to provide feedback for. - FEEDBACK_RATING: The rating (POSITIVE, NEGATIVE, or NONE). + RATING: The rating (POSITIVE, NEGATIVE, or NONE). Supported values: [NEGATIVE, NONE, POSITIVE]` // This command is being previewed; hide from help output. @@ -944,7 +944,7 @@ func newSendMessageFeedback() *cobra.Command { if cmd.Flags().Changed("json") { err := root.ExactArgs(3)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, provide only SPACE_ID, CONVERSATION_ID, MESSAGE_ID as positional arguments. Provide 'feedback_rating' in your JSON input") + return fmt.Errorf("when --json flag is specified, provide only SPACE_ID, CONVERSATION_ID, MESSAGE_ID as positional arguments. Provide 'rating' in your JSON input") } return nil } @@ -973,9 +973,9 @@ func newSendMessageFeedback() *cobra.Command { sendMessageFeedbackReq.ConversationId = args[1] sendMessageFeedbackReq.MessageId = args[2] if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[3], &sendMessageFeedbackReq.FeedbackRating) + _, err = fmt.Sscan(args[3], &sendMessageFeedbackReq.Rating) if err != nil { - return fmt.Errorf("invalid FEEDBACK_RATING: %s", args[3]) + return fmt.Errorf("invalid RATING: %s", args[3]) } } diff --git a/cmd/workspace/lakeview-embedded/lakeview-embedded.go b/cmd/workspace/lakeview-embedded/lakeview-embedded.go index ef585adf9e..5700d23645 100755 --- a/cmd/workspace/lakeview-embedded/lakeview-embedded.go +++ b/cmd/workspace/lakeview-embedded/lakeview-embedded.go @@ -55,8 +55,8 @@ func newGetPublishedDashboardTokenInfo() *cobra.Command { cmd.Flags().StringVar(&getPublishedDashboardTokenInfoReq.ExternalViewerId, "external-viewer-id", getPublishedDashboardTokenInfoReq.ExternalViewerId, `Provided external viewer id to be included in the custom claim.`) cmd.Use = "get-published-dashboard-token-info DASHBOARD_ID" - cmd.Short = `Read an information of a published dashboard to mint an OAuth token.` - cmd.Long = `Read an information of a published dashboard to mint an OAuth token. + cmd.Short = `Read information of a published dashboard to mint an OAuth token.` + cmd.Long = `Read information of a published dashboard to mint an OAuth token. Get a required authorization details and scopes of a published dashboard to mint an OAuth token. @@ -64,9 +64,6 @@ func newGetPublishedDashboardTokenInfo() *cobra.Command { Arguments: DASHBOARD_ID: UUID identifying the published dashboard.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/tag-policies/tag-policies.go b/cmd/workspace/tag-policies/tag-policies.go index 6bfe59d8ab..a54125fa92 100755 --- a/cmd/workspace/tag-policies/tag-policies.go +++ b/cmd/workspace/tag-policies/tag-policies.go @@ -26,10 +26,7 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "tags", }, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, + RunE: root.ReportUnknownSubcommand, } // Add methods diff --git a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go index f74775a03f..17da8b0a74 100755 --- a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go +++ b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go @@ -348,7 +348,8 @@ func newUpdateEndpointBudgetPolicy() *cobra.Command { Arguments: ENDPOINT_NAME: Name of the vector search endpoint - BUDGET_POLICY_ID: The budget policy id to be applied` + BUDGET_POLICY_ID: The budget policy id to be applied (hima-sheth) TODO: remove this once + we've migrated to usage policies` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index 8a97b75227..b39fa35b40 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -298,6 +298,8 @@ func newGetIndex() *cobra.Command { var getIndexReq vectorsearch.GetIndexRequest + cmd.Flags().BoolVar(&getIndexReq.EnsureRerankerCompatible, "ensure-reranker-compatible", getIndexReq.EnsureRerankerCompatible, `If true, the URL returned for the index is guaranteed to be compatible with the reranker.`) + cmd.Use = "get-index INDEX_NAME" cmd.Short = `Get an index.` cmd.Long = `Get an index. 
@@ -416,6 +418,7 @@ func newQueryIndex() *cobra.Command { cmd.Flags().StringVar(&queryIndexReq.QueryText, "query-text", queryIndexReq.QueryText, `Query text.`) cmd.Flags().StringVar(&queryIndexReq.QueryType, "query-type", queryIndexReq.QueryType, `The query type to use.`) // TODO: array: query_vector + // TODO: complex arg: reranker cmd.Flags().Float64Var(&queryIndexReq.ScoreThreshold, "score-threshold", queryIndexReq.ScoreThreshold, `Threshold for the approximate nearest neighbor search.`) cmd.Use = "query-index INDEX_NAME" diff --git a/experimental/python/databricks/bundles/jobs/__init__.py b/experimental/python/databricks/bundles/jobs/__init__.py index 02835f6a6a..dec48ac51c 100644 --- a/experimental/python/databricks/bundles/jobs/__init__.py +++ b/experimental/python/databricks/bundles/jobs/__init__.py @@ -244,6 +244,8 @@ "TaskNotificationSettingsDict", "TaskNotificationSettingsParam", "TaskParam", + "TaskRetryMode", + "TaskRetryModeParam", "TriggerSettings", "TriggerSettingsDict", "TriggerSettingsParam", @@ -648,6 +650,10 @@ TaskNotificationSettingsDict, TaskNotificationSettingsParam, ) +from databricks.bundles.jobs._models.task_retry_mode import ( + TaskRetryMode, + TaskRetryModeParam, +) from databricks.bundles.jobs._models.trigger_settings import ( TriggerSettings, TriggerSettingsDict, diff --git a/experimental/python/databricks/bundles/jobs/_models/continuous.py b/experimental/python/databricks/bundles/jobs/_models/continuous.py index f98f9c890b..43268661c1 100644 --- a/experimental/python/databricks/bundles/jobs/_models/continuous.py +++ b/experimental/python/databricks/bundles/jobs/_models/continuous.py @@ -5,6 +5,10 @@ from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrOptional from databricks.bundles.jobs._models.pause_status import PauseStatus, PauseStatusParam +from databricks.bundles.jobs._models.task_retry_mode import ( + TaskRetryMode, + TaskRetryModeParam, +) if TYPE_CHECKING: from typing_extensions import Self @@ -19,6 +23,11 @@ class Continuous: Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED. """ + task_retry_mode: VariableOrOptional[TaskRetryMode] = None + """ + Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER. + """ + @classmethod def from_dict(cls, value: "ContinuousDict") -> "Self": return _transform(cls, value) @@ -35,5 +44,10 @@ class ContinuousDict(TypedDict, total=False): Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED. """ + task_retry_mode: VariableOrOptional[TaskRetryModeParam] + """ + Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER. + """ + ContinuousParam = ContinuousDict | Continuous diff --git a/experimental/python/databricks/bundles/jobs/_models/task_retry_mode.py b/experimental/python/databricks/bundles/jobs/_models/task_retry_mode.py new file mode 100644 index 0000000000..ce5ccaa687 --- /dev/null +++ b/experimental/python/databricks/bundles/jobs/_models/task_retry_mode.py @@ -0,0 +1,17 @@ +from enum import Enum +from typing import Literal + + +class TaskRetryMode(Enum): + """ + task retry mode of the continuous job + * NEVER: The failed task will not be retried. + * ON_FAILURE: Retry a failed task if at least one other task in the job is still running its first attempt. + When this condition is no longer met or the retry limit is reached, the job run is cancelled and a new run is started. 
+ """ + + NEVER = "NEVER" + ON_FAILURE = "ON_FAILURE" + + +TaskRetryModeParam = Literal["NEVER", "ON_FAILURE"] | TaskRetryMode diff --git a/experimental/python/databricks/bundles/pipelines/__init__.py b/experimental/python/databricks/bundles/pipelines/__init__.py index ab7ee1a061..06dfc4390e 100644 --- a/experimental/python/databricks/bundles/pipelines/__init__.py +++ b/experimental/python/databricks/bundles/pipelines/__init__.py @@ -96,6 +96,12 @@ "PipelinesEnvironment", "PipelinesEnvironmentDict", "PipelinesEnvironmentParam", + "PostgresCatalogConfig", + "PostgresCatalogConfigDict", + "PostgresCatalogConfigParam", + "PostgresSlotConfig", + "PostgresSlotConfigDict", + "PostgresSlotConfigParam", "ReportSpec", "ReportSpecDict", "ReportSpecParam", @@ -111,6 +117,12 @@ "SchemaSpec", "SchemaSpecDict", "SchemaSpecParam", + "SourceCatalogConfig", + "SourceCatalogConfigDict", + "SourceCatalogConfigParam", + "SourceConfig", + "SourceConfigDict", + "SourceConfigParam", "TableSpec", "TableSpecDict", "TableSpecParam", @@ -292,6 +304,16 @@ PipelinesEnvironmentDict, PipelinesEnvironmentParam, ) +from databricks.bundles.pipelines._models.postgres_catalog_config import ( + PostgresCatalogConfig, + PostgresCatalogConfigDict, + PostgresCatalogConfigParam, +) +from databricks.bundles.pipelines._models.postgres_slot_config import ( + PostgresSlotConfig, + PostgresSlotConfigDict, + PostgresSlotConfigParam, +) from databricks.bundles.pipelines._models.report_spec import ( ReportSpec, ReportSpecDict, @@ -313,6 +335,16 @@ SchemaSpecDict, SchemaSpecParam, ) +from databricks.bundles.pipelines._models.source_catalog_config import ( + SourceCatalogConfig, + SourceCatalogConfigDict, + SourceCatalogConfigParam, +) +from databricks.bundles.pipelines._models.source_config import ( + SourceConfig, + SourceConfigDict, + SourceConfigParam, +) from databricks.bundles.pipelines._models.table_spec import ( TableSpec, TableSpecDict, diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py index 9316069266..cad856056d 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py @@ -12,6 +12,10 @@ IngestionSourceType, IngestionSourceTypeParam, ) +from databricks.bundles.pipelines._models.source_config import ( + SourceConfig, + SourceConfigParam, +) from databricks.bundles.pipelines._models.table_specific_config import ( TableSpecificConfig, TableSpecificConfigParam, @@ -40,6 +44,13 @@ class IngestionPipelineDefinition: Required. Settings specifying tables to replicate and the destination for the replicated tables. """ + source_configurations: VariableOrList[SourceConfig] = field(default_factory=list) + """ + :meta private: [EXPERIMENTAL] + + Top-level source configurations + """ + source_type: VariableOrOptional[IngestionSourceType] = None """ The type of the foreign source. @@ -78,6 +89,13 @@ class IngestionPipelineDefinitionDict(TypedDict, total=False): Required. Settings specifying tables to replicate and the destination for the replicated tables. """ + source_configurations: VariableOrList[SourceConfigParam] + """ + :meta private: [EXPERIMENTAL] + + Top-level source configurations + """ + source_type: VariableOrOptional[IngestionSourceTypeParam] """ The type of the foreign source. 
diff --git a/experimental/python/databricks/bundles/pipelines/_models/postgres_catalog_config.py b/experimental/python/databricks/bundles/pipelines/_models/postgres_catalog_config.py new file mode 100644 index 0000000000..e7216f14a8 --- /dev/null +++ b/experimental/python/databricks/bundles/pipelines/_models/postgres_catalog_config.py @@ -0,0 +1,50 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrOptional +from databricks.bundles.pipelines._models.postgres_slot_config import ( + PostgresSlotConfig, + PostgresSlotConfigParam, +) + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class PostgresCatalogConfig: + """ + :meta private: [EXPERIMENTAL] + + PG-specific catalog-level configuration parameters + """ + + slot_config: VariableOrOptional[PostgresSlotConfig] = None + """ + :meta private: [EXPERIMENTAL] + + Optional. The Postgres slot configuration to use for logical replication + """ + + @classmethod + def from_dict(cls, value: "PostgresCatalogConfigDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "PostgresCatalogConfigDict": + return _transform_to_json_value(self) # type:ignore + + +class PostgresCatalogConfigDict(TypedDict, total=False): + """""" + + slot_config: VariableOrOptional[PostgresSlotConfigParam] + """ + :meta private: [EXPERIMENTAL] + + Optional. The Postgres slot configuration to use for logical replication + """ + + +PostgresCatalogConfigParam = PostgresCatalogConfigDict | PostgresCatalogConfig diff --git a/experimental/python/databricks/bundles/pipelines/_models/postgres_slot_config.py b/experimental/python/databricks/bundles/pipelines/_models/postgres_slot_config.py new file mode 100644 index 0000000000..329e3a5dbc --- /dev/null +++ b/experimental/python/databricks/bundles/pipelines/_models/postgres_slot_config.py @@ -0,0 +1,60 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrOptional + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class PostgresSlotConfig: + """ + :meta private: [EXPERIMENTAL] + + PostgresSlotConfig contains the configuration for a Postgres logical replication slot + """ + + publication_name: VariableOrOptional[str] = None + """ + :meta private: [EXPERIMENTAL] + + The name of the publication to use for the Postgres source + """ + + slot_name: VariableOrOptional[str] = None + """ + :meta private: [EXPERIMENTAL] + + The name of the logical replication slot to use for the Postgres source + """ + + @classmethod + def from_dict(cls, value: "PostgresSlotConfigDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "PostgresSlotConfigDict": + return _transform_to_json_value(self) # type:ignore + + +class PostgresSlotConfigDict(TypedDict, total=False): + """""" + + publication_name: VariableOrOptional[str] + """ + :meta private: [EXPERIMENTAL] + + The name of the publication to use for the Postgres source + """ + + slot_name: VariableOrOptional[str] + """ + :meta private: [EXPERIMENTAL] + + The name of the logical replication slot to use for the Postgres source + """ + + 
+PostgresSlotConfigParam = PostgresSlotConfigDict | PostgresSlotConfig diff --git a/experimental/python/databricks/bundles/pipelines/_models/source_catalog_config.py b/experimental/python/databricks/bundles/pipelines/_models/source_catalog_config.py new file mode 100644 index 0000000000..008893d461 --- /dev/null +++ b/experimental/python/databricks/bundles/pipelines/_models/source_catalog_config.py @@ -0,0 +1,64 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrOptional +from databricks.bundles.pipelines._models.postgres_catalog_config import ( + PostgresCatalogConfig, + PostgresCatalogConfigParam, +) + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class SourceCatalogConfig: + """ + :meta private: [EXPERIMENTAL] + + SourceCatalogConfig contains catalog-level custom configuration parameters for each source + """ + + postgres: VariableOrOptional[PostgresCatalogConfig] = None + """ + :meta private: [EXPERIMENTAL] + + Postgres-specific catalog-level configuration parameters + """ + + source_catalog: VariableOrOptional[str] = None + """ + :meta private: [EXPERIMENTAL] + + Source catalog name + """ + + @classmethod + def from_dict(cls, value: "SourceCatalogConfigDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "SourceCatalogConfigDict": + return _transform_to_json_value(self) # type:ignore + + +class SourceCatalogConfigDict(TypedDict, total=False): + """""" + + postgres: VariableOrOptional[PostgresCatalogConfigParam] + """ + :meta private: [EXPERIMENTAL] + + Postgres-specific catalog-level configuration parameters + """ + + source_catalog: VariableOrOptional[str] + """ + :meta private: [EXPERIMENTAL] + + Source catalog name + """ + + +SourceCatalogConfigParam = SourceCatalogConfigDict | SourceCatalogConfig diff --git a/experimental/python/databricks/bundles/pipelines/_models/source_config.py b/experimental/python/databricks/bundles/pipelines/_models/source_config.py new file mode 100644 index 0000000000..b404c90f0b --- /dev/null +++ b/experimental/python/databricks/bundles/pipelines/_models/source_config.py @@ -0,0 +1,48 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrOptional +from databricks.bundles.pipelines._models.source_catalog_config import ( + SourceCatalogConfig, + SourceCatalogConfigParam, +) + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class SourceConfig: + """ + :meta private: [EXPERIMENTAL] + """ + + catalog: VariableOrOptional[SourceCatalogConfig] = None + """ + :meta private: [EXPERIMENTAL] + + Catalog-level source configuration parameters + """ + + @classmethod + def from_dict(cls, value: "SourceConfigDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "SourceConfigDict": + return _transform_to_json_value(self) # type:ignore + + +class SourceConfigDict(TypedDict, total=False): + """""" + + catalog: VariableOrOptional[SourceCatalogConfigParam] + """ + :meta private: [EXPERIMENTAL] + + Catalog-level source configuration parameters + """ + + +SourceConfigParam = SourceConfigDict | SourceConfig 
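For reference, a minimal sketch of how the new experimental pipeline source-configuration models added above (SourceConfig, SourceCatalogConfig, PostgresCatalogConfig, PostgresSlotConfig) could be composed. The slot, publication, and catalog names are illustrative placeholders, and the resulting list is what would be supplied as the new `source_configurations` field of an ingestion pipeline definition (private preview), not a definitive usage pattern.

    from databricks.bundles.pipelines import (
        PostgresCatalogConfig,
        PostgresSlotConfig,
        SourceCatalogConfig,
        SourceConfig,
    )

    # Catalog-level Postgres settings: point the pipeline at an existing
    # logical replication slot and publication (placeholder names).
    postgres_config = PostgresCatalogConfig(
        slot_config=PostgresSlotConfig(
            slot_name="cdc_slot",
            publication_name="cdc_pub",
        ),
    )

    # One SourceConfig entry per source catalog; the list maps to the new
    # `source_configurations` field on the ingestion pipeline definition.
    source_configurations = [
        SourceConfig(
            catalog=SourceCatalogConfig(
                source_catalog="pg_source_catalog",
                postgres=postgres_config,
            ),
        ),
    ]

    # Each model exposes as_dict()/from_dict(), so the config round-trips
    # to the JSON form used by the bundle schema.
    print(source_configurations[0].as_dict())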
diff --git a/go.mod b/go.mod index ae37d311e1..62606ac753 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/BurntSushi/toml v1.5.0 // MIT github.com/Masterminds/semver/v3 v3.4.0 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.81.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.82.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/gorilla/mux v1.8.1 // BSD 3-Clause diff --git a/go.sum b/go.sum index d9bec44251..faa59dc4b6 100644 --- a/go.sum +++ b/go.sum @@ -33,8 +33,8 @@ github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/databricks/databricks-sdk-go v0.81.0 h1:ka+1w6TG2ne+1IfFgJ6g1Sm4JEAsSClNU6z1mZQSqak= -github.com/databricks/databricks-sdk-go v0.81.0/go.mod h1:xBtjeP9nq+6MgTewZW1EcbRkD7aDY9gZvcRPcwPhZjw= +github.com/databricks/databricks-sdk-go v0.82.0 h1:Amosg1Jp6M3w04jrvL+sIdMyPx7M+D1W/JtYJFwsGGA= +github.com/databricks/databricks-sdk-go v0.82.0/go.mod h1:xBtjeP9nq+6MgTewZW1EcbRkD7aDY9gZvcRPcwPhZjw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
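Similarly, a minimal sketch for the new task-level retry setting on continuous jobs introduced earlier in this diff (TaskRetryMode and Continuous.task_retry_mode), assuming Continuous is re-exported from databricks.bundles.jobs alongside the newly exported TaskRetryMode; this only illustrates the two construction styles shown in the generated models.

    from databricks.bundles.jobs import Continuous, TaskRetryMode

    # Direct construction with the new enum: ON_FAILURE retries a failed task
    # while at least one other task in the job is still on its first attempt;
    # NEVER is the documented default.
    continuous = Continuous(task_retry_mode=TaskRetryMode.ON_FAILURE)

    # Dict form: the string literal is also accepted, since
    # TaskRetryModeParam = Literal["NEVER", "ON_FAILURE"] | TaskRetryMode.
    continuous_from_dict = Continuous.from_dict({"task_retry_mode": "ON_FAILURE"})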