From 7ec1237c983558ff9f53fa888bb5dd1063b54ad8 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 6 Jan 2026 19:15:31 +0100 Subject: [PATCH 1/4] Upgrade Go SDK to 0.96.0 --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + .../internal/schema/annotations_openapi.yml | 111 ++ .../schema/annotations_openapi_overrides.yml | 23 + .../validation/generated/enum_fields.go | 5 +- .../validation/generated/required_fields.go | 24 +- bundle/schema/jsonschema.json | 186 +- cmd/workspace/apps/apps.go | 7 + cmd/workspace/cmd.go | 2 + .../feature-engineering.go | 22 +- cmd/workspace/feature-store/feature-store.go | 2 + cmd/workspace/groups.go | 4 + cmd/workspace/metastores/metastores.go | 2 + cmd/workspace/postgres/postgres.go | 1665 +++++++++++++++++ cmd/workspace/rfa/rfa.go | 5 +- cmd/workspace/tables/tables.go | 3 + .../vector-search-endpoints.go | 3 +- go.mod | 2 +- go.sum | 4 +- .../bundles/jobs/_models/job_run_as.py | 14 + .../_models/table_specific_config.py | 18 + 21 files changed, 2082 insertions(+), 23 deletions(-) create mode 100755 cmd/workspace/postgres/postgres.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 6854c7f284..a9ea4ce63e 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -e1ea3f5ba0bc5b53be94f56535a67ba701a52a52 \ No newline at end of file +dbf9b0a4e0432e846520442b14c34fc7f0ca0d8c \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 3cf3669eda..f7b511de6c 100755 --- a/.gitattributes +++ b/.gitattributes @@ -120,6 +120,7 @@ cmd/workspace/policies/policies.go linguist-generated=true cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go linguist-generated=true cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go linguist-generated=true cmd/workspace/policy-families/policy-families.go linguist-generated=true +cmd/workspace/postgres/postgres.go linguist-generated=true cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true cmd/workspace/provider-files/provider-files.go linguist-generated=true diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index c75e8007c7..d9d506a7b6 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -103,11 +103,20 @@ github.com/databricks/cli/bundle/config/resources.App: "effective_budget_policy_id": "x-databricks-field-behaviors_output_only": |- true + "effective_usage_policy_id": + "x-databricks-field-behaviors_output_only": |- + true "effective_user_api_scopes": "description": |- The effective api scopes granted to the user access token. "x-databricks-field-behaviors_output_only": |- true + "git_repository": + "description": |- + Git repository configuration for app deployments. When specified, deployments can + reference code from this repository by providing only the git reference (branch, tag, or commit). + "x-databricks-preview": |- + PRIVATE "id": "description": |- The unique identifier of the app. @@ -156,6 +165,7 @@ github.com/databricks/cli/bundle/config/resources.App: The URL of the app once it is deployed. 
"x-databricks-field-behaviors_output_only": |- true + "usage_policy_id": {} "user_api_scopes": {} github.com/databricks/cli/bundle/config/resources.Cluster: "_": @@ -1034,6 +1044,9 @@ github.com/databricks/cli/bundle/config/resources.Volume: A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external) github.com/databricks/databricks-sdk-go/service/apps.AppDeployment: + "command": + "description": |- + The command with which to run the app. This will override the command specified in the app.yaml file. "create_time": "description": |- The creation time of the deployment. Formatted timestamp in ISO 6801. @@ -1052,6 +1065,14 @@ github.com/databricks/databricks-sdk-go/service/apps.AppDeployment: "deployment_id": "description": |- The unique id of the deployment. + "env_vars": + "description": |- + The environment variables to set in the app runtime environment. This will override the environment variables specified in the app.yaml file. + "git_source": + "description": |- + Git repository to use as the source for the app deployment. + "x-databricks-preview": |- + PRIVATE "mode": "description": |- The mode of which the deployment will manage the source code. @@ -1109,6 +1130,7 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResource: "description": "description": |- Description of the App Resource. + "experiment": {} "genie_space": {} "job": {} "name": @@ -1127,6 +1149,18 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResourceDatabaseDatabase "enum": - |- CAN_CONNECT_AND_CREATE +github.com/databricks/databricks-sdk-go/service/apps.AppResourceExperiment: + "experiment_id": {} + "permission": {} +github.com/databricks/databricks-sdk-go/service/apps.AppResourceExperimentExperimentPermission: + "_": + "enum": + - |- + CAN_MANAGE + - |- + CAN_EDIT + - |- + CAN_READ github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpace: "name": {} "permission": {} @@ -1224,11 +1258,23 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResourceUcSecurableUcSec READ_VOLUME - |- WRITE_VOLUME + - |- + SELECT + - |- + EXECUTE + - |- + USE_CONNECTION github.com/databricks/databricks-sdk-go/service/apps.AppResourceUcSecurableUcSecurableType: "_": "enum": - |- VOLUME + - |- + TABLE + - |- + FUNCTION + - |- + CONNECTION github.com/databricks/databricks-sdk-go/service/apps.ApplicationState: "_": "enum": @@ -1286,6 +1332,59 @@ github.com/databricks/databricks-sdk-go/service/apps.ComputeStatus: State of the app compute. "x-databricks-field-behaviors_output_only": |- true +github.com/databricks/databricks-sdk-go/service/apps.EnvVar: + "name": + "description": |- + The name of the environment variable. + "value": + "description": |- + The value for the environment variable. + "value_from": + "description": |- + The name of an external Databricks resource that contains the value, such as a secret or a database table. +github.com/databricks/databricks-sdk-go/service/apps.GitRepository: + "_": + "description": |- + Git repository configuration specifying the location of the repository. + "provider": + "description": |- + Git provider. Case insensitive. Supported values: gitHub, gitHubEnterprise, bitbucketCloud, + bitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition, awsCodeCommit. + "url": + "description": |- + URL of the Git repository. 
+github.com/databricks/databricks-sdk-go/service/apps.GitSource: + "_": + "description": |- + Complete git source specification including repository location and reference. + "branch": + "description": |- + Git branch to checkout. + "commit": + "description": |- + Git commit SHA to checkout. + "git_repository": + "description": |- + Git repository configuration. Populated from the app's git_repository configuration. + "x-databricks-field-behaviors_output_only": |- + true + "resolved_commit": + "description": |- + The resolved commit SHA that was actually used for the deployment. This is populated by the + system after resolving the reference (branch, tag, or commit). If commit is specified + directly, this will match commit. If a branch or tag is specified, this contains the + commit SHA that the branch or tag pointed to at deployment time. + "x-databricks-field-behaviors_output_only": |- + true + "x-databricks-preview": |- + PRIVATE + "source_code_path": + "description": |- + Relative path to the app source code within the Git repository. If not specified, the root + of the repository is used. + "tag": + "description": |- + Git tag to checkout. github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedule: "pause_status": "description": |- @@ -2879,6 +2978,11 @@ github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs: Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job. Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown. + "group_name": + "description": |- + Group name of an account group assigned to the workspace. Setting this field requires being a member of the group. + "x-databricks-preview": |- + PRIVATE "service_principal_name": "description": |- Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role. @@ -4230,6 +4334,13 @@ github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig: Configurations that are only applicable for query-based ingestion connectors. "x-databricks-preview": |- PRIVATE + "row_filter": + "description": |- + (Optional, Immutable) The row filter condition to be applied to the table. + It must not contain the WHERE keyword, only the actual filter condition. + It must be in DBSQL format. + "x-databricks-preview": |- + PRIVATE "salesforce_include_formula_fields": "description": |- If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index 427c184570..1421a1bc23 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -2,6 +2,9 @@ github.com/databricks/cli/bundle/config/resources.Alert: "evaluation": "description": |- PLACEHOLDER + "file_path": + "description": |- + PLACEHOLDER "lifecycle": "description": |- PLACEHOLDER @@ -41,6 +44,9 @@ github.com/databricks/cli/bundle/config/resources.App: "effective_budget_policy_id": "description": |- PLACEHOLDER + "effective_usage_policy_id": + "description": |- + PLACEHOLDER "lifecycle": "description": |- Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. 
@@ -65,6 +71,9 @@ github.com/databricks/cli/bundle/config/resources.App: "source_code_path": "description": |- PLACEHOLDER + "usage_policy_id": + "description": |- + PLACEHOLDER "user_api_scopes": "description": |- PLACEHOLDER @@ -645,6 +654,10 @@ github.com/databricks/cli/bundle/config/resources.SqlWarehousePermissionLevel: CAN_MONITOR - |- CAN_VIEW +github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: + "lifecycle": + "description": |- + PLACEHOLDER github.com/databricks/cli/bundle/config/resources.Volume: "_": "markdown_description": |- @@ -728,6 +741,9 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResource: "database": "description": |- PLACEHOLDER + "experiment": + "description": |- + PLACEHOLDER "genie_space": "description": |- PLACEHOLDER @@ -756,6 +772,13 @@ github.com/databricks/databricks-sdk-go/service/apps.AppResourceDatabase: "permission": "description": |- PLACEHOLDER +github.com/databricks/databricks-sdk-go/service/apps.AppResourceExperiment: + "experiment_id": + "description": |- + PLACEHOLDER + "permission": + "description": |- + PLACEHOLDER github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpace: "name": "description": |- diff --git a/bundle/internal/validation/generated/enum_fields.go b/bundle/internal/validation/generated/enum_fields.go index 536c867668..ecb042e270 100644 --- a/bundle/internal/validation/generated/enum_fields.go +++ b/bundle/internal/validation/generated/enum_fields.go @@ -24,13 +24,14 @@ var EnumFields = map[string][]string{ "resources.apps.*.pending_deployment.mode": {"AUTO_SYNC", "SNAPSHOT"}, "resources.apps.*.pending_deployment.status.state": {"CANCELLED", "FAILED", "IN_PROGRESS", "SUCCEEDED"}, "resources.apps.*.resources[*].database.permission": {"CAN_CONNECT_AND_CREATE"}, + "resources.apps.*.resources[*].experiment.permission": {"CAN_EDIT", "CAN_MANAGE", "CAN_READ"}, "resources.apps.*.resources[*].genie_space.permission": {"CAN_EDIT", "CAN_MANAGE", "CAN_RUN", "CAN_VIEW"}, "resources.apps.*.resources[*].job.permission": {"CAN_MANAGE", "CAN_MANAGE_RUN", "CAN_VIEW", "IS_OWNER"}, "resources.apps.*.resources[*].secret.permission": {"MANAGE", "READ", "WRITE"}, "resources.apps.*.resources[*].serving_endpoint.permission": {"CAN_MANAGE", "CAN_QUERY", "CAN_VIEW"}, "resources.apps.*.resources[*].sql_warehouse.permission": {"CAN_MANAGE", "CAN_USE", "IS_OWNER"}, - "resources.apps.*.resources[*].uc_securable.permission": {"READ_VOLUME", "WRITE_VOLUME"}, - "resources.apps.*.resources[*].uc_securable.securable_type": {"VOLUME"}, + "resources.apps.*.resources[*].uc_securable.permission": {"EXECUTE", "READ_VOLUME", "SELECT", "USE_CONNECTION", "WRITE_VOLUME"}, + "resources.apps.*.resources[*].uc_securable.securable_type": {"CONNECTION", "FUNCTION", "TABLE", "VOLUME"}, "resources.clusters.*.aws_attributes.availability": {"ON_DEMAND", "SPOT", "SPOT_WITH_FALLBACK"}, "resources.clusters.*.aws_attributes.ebs_volume_type": {"GENERAL_PURPOSE_SSD", "THROUGHPUT_OPTIMIZED_HDD"}, diff --git a/bundle/internal/validation/generated/required_fields.go b/bundle/internal/validation/generated/required_fields.go index 2cc2045b9a..8cc0d075de 100644 --- a/bundle/internal/validation/generated/required_fields.go +++ b/bundle/internal/validation/generated/required_fields.go @@ -18,16 +18,20 @@ var RequiredFields = map[string][]string{ "resources.alerts.*.permissions[*]": {"level"}, "resources.alerts.*.schedule": {"quartz_cron_schedule", "timezone_id"}, - "resources.apps.*": {"name", "source_code_path"}, - "resources.apps.*.permissions[*]": 
{"level"}, - "resources.apps.*.resources[*]": {"name"}, - "resources.apps.*.resources[*].database": {"database_name", "instance_name", "permission"}, - "resources.apps.*.resources[*].genie_space": {"name", "permission", "space_id"}, - "resources.apps.*.resources[*].job": {"id", "permission"}, - "resources.apps.*.resources[*].secret": {"key", "permission", "scope"}, - "resources.apps.*.resources[*].serving_endpoint": {"name", "permission"}, - "resources.apps.*.resources[*].sql_warehouse": {"id", "permission"}, - "resources.apps.*.resources[*].uc_securable": {"permission", "securable_full_name", "securable_type"}, + "resources.apps.*": {"name", "source_code_path"}, + "resources.apps.*.active_deployment.git_source.git_repository": {"provider", "url"}, + "resources.apps.*.git_repository": {"provider", "url"}, + "resources.apps.*.pending_deployment.git_source.git_repository": {"provider", "url"}, + "resources.apps.*.permissions[*]": {"level"}, + "resources.apps.*.resources[*]": {"name"}, + "resources.apps.*.resources[*].database": {"database_name", "instance_name", "permission"}, + "resources.apps.*.resources[*].experiment": {"experiment_id", "permission"}, + "resources.apps.*.resources[*].genie_space": {"name", "permission", "space_id"}, + "resources.apps.*.resources[*].job": {"id", "permission"}, + "resources.apps.*.resources[*].secret": {"key", "permission", "scope"}, + "resources.apps.*.resources[*].serving_endpoint": {"name", "permission"}, + "resources.apps.*.resources[*].sql_warehouse": {"id", "permission"}, + "resources.apps.*.resources[*].uc_securable": {"permission", "securable_full_name", "securable_type"}, "resources.clusters.*.cluster_log_conf.dbfs": {"destination"}, "resources.clusters.*.cluster_log_conf.s3": {"destination"}, diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 6dd4558254..e9a83d5ceb 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -182,6 +182,12 @@ "description": "The description of the app.", "$ref": "#/$defs/string" }, + "git_repository": { + "description": "Git repository configuration for app deployments. When specified, deployments can\nreference code from this repository by providing only the git reference (branch, tag, or commit).", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.GitRepository", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "lifecycle": { "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" @@ -200,6 +206,9 @@ "source_code_path": { "$ref": "#/$defs/string" }, + "usage_policy_id": { + "$ref": "#/$defs/string" + }, "user_api_scopes": { "$ref": "#/$defs/slice/string" } @@ -2742,9 +2751,23 @@ { "type": "object", "properties": { + "command": { + "description": "The command with which to run the app. This will override the command specified in the app.yaml file.", + "$ref": "#/$defs/slice/string" + }, "deployment_id": { "$ref": "#/$defs/string" }, + "env_vars": { + "description": "The environment variables to set in the app runtime environment. 
This will override the environment variables specified in the app.yaml file.", + "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/apps.EnvVar" + }, + "git_source": { + "description": "Git repository to use as the source for the app deployment.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.GitSource", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "mode": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.AppDeploymentMode" }, @@ -2833,6 +2856,9 @@ "description": "Description of the App Resource.", "$ref": "#/$defs/string" }, + "experiment": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.AppResourceExperiment" + }, "genie_space": { "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.AppResourceGenieSpace" }, @@ -2909,6 +2935,46 @@ } ] }, + "apps.AppResourceExperiment": { + "oneOf": [ + { + "type": "object", + "properties": { + "experiment_id": { + "$ref": "#/$defs/string" + }, + "permission": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.AppResourceExperimentExperimentPermission" + } + }, + "additionalProperties": false, + "required": [ + "experiment_id", + "permission" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "apps.AppResourceExperimentExperimentPermission": { + "oneOf": [ + { + "type": "string", + "enum": [ + "CAN_MANAGE", + "CAN_EDIT", + "CAN_READ" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "apps.AppResourceGenieSpace": { "oneOf": [ { @@ -3154,7 +3220,10 @@ "type": "string", "enum": [ "READ_VOLUME", - "WRITE_VOLUME" + "WRITE_VOLUME", + "SELECT", + "EXECUTE", + "USE_CONNECTION" ] }, { @@ -3168,7 +3237,10 @@ { "type": "string", "enum": [ - "VOLUME" + "VOLUME", + "TABLE", + "FUNCTION", + "CONNECTION" ] }, { @@ -3253,6 +3325,90 @@ } ] }, + "apps.EnvVar": { + "oneOf": [ + { + "type": "object", + "properties": { + "name": { + "description": "The name of the environment variable.", + "$ref": "#/$defs/string" + }, + "value": { + "description": "The value for the environment variable.", + "$ref": "#/$defs/string" + }, + "value_from": { + "description": "The name of an external Databricks resource that contains the value, such as a secret or a database table.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "apps.GitRepository": { + "oneOf": [ + { + "type": "object", + "description": "Git repository configuration specifying the location of the repository.", + "properties": { + "provider": { + "description": "Git provider. Case insensitive. 
Supported values: gitHub, gitHubEnterprise, bitbucketCloud,\nbitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition, awsCodeCommit.", + "$ref": "#/$defs/string" + }, + "url": { + "description": "URL of the Git repository.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "provider", + "url" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "apps.GitSource": { + "oneOf": [ + { + "type": "object", + "description": "Complete git source specification including repository location and reference.", + "properties": { + "branch": { + "description": "Git branch to checkout.", + "$ref": "#/$defs/string" + }, + "commit": { + "description": "Git commit SHA to checkout.", + "$ref": "#/$defs/string" + }, + "source_code_path": { + "description": "Relative path to the app source code within the Git repository. If not specified, the root\nof the repository is used.", + "$ref": "#/$defs/string" + }, + "tag": { + "description": "Git tag to checkout.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "catalog.MonitorCronSchedule": { "oneOf": [ { @@ -5556,6 +5712,12 @@ "type": "object", "description": "Write-only setting. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", "properties": { + "group_name": { + "description": "Group name of an account group assigned to the workspace. Setting this field requires being a member of the group.", + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "service_principal_name": { "description": "The application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.", "$ref": "#/$defs/string" @@ -7904,6 +8066,12 @@ "x-databricks-preview": "PRIVATE", "doNotSuggest": true }, + "row_filter": { + "description": "(Optional, Immutable) The row filter condition to be applied to the table.\nIt must not contain the WHERE keyword, only the actual filter condition.\nIt must be in DBSQL format.", + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "salesforce_include_formula_fields": { "description": "If true, formula fields defined in the table are included in the ingestion. 
This setting is only valid for the Salesforce connector", "$ref": "#/$defs/bool", @@ -10204,6 +10372,20 @@ } ] }, + "apps.EnvVar": { + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/apps.EnvVar" + } + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "catalog.MonitorMetric": { "oneOf": [ { diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 626ef7cd78..a321b5ff6d 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -87,8 +87,10 @@ func newCreate() *cobra.Command { // TODO: complex arg: compute_status cmd.Flags().StringVar(&createReq.App.Description, "description", createReq.App.Description, `The description of the app.`) // TODO: array: effective_user_api_scopes + // TODO: complex arg: git_repository // TODO: complex arg: pending_deployment // TODO: array: resources + cmd.Flags().StringVar(&createReq.App.UsagePolicyId, "usage-policy-id", createReq.App.UsagePolicyId, ``) // TODO: array: user_api_scopes cmd.Use = "create NAME" @@ -375,8 +377,11 @@ func newDeploy() *cobra.Command { cmd.Flags().Var(&deployJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: array: command // TODO: complex arg: deployment_artifacts cmd.Flags().StringVar(&deployReq.AppDeployment.DeploymentId, "deployment-id", deployReq.AppDeployment.DeploymentId, `The unique id of the deployment.`) + // TODO: array: env_vars + // TODO: complex arg: git_source cmd.Flags().Var(&deployReq.AppDeployment.Mode, "mode", `The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT]`) cmd.Flags().StringVar(&deployReq.AppDeployment.SourceCodePath, "source-code-path", deployReq.AppDeployment.SourceCodePath, `The workspace file system path of the source code used to create the app deployment.`) // TODO: complex arg: status @@ -1107,8 +1112,10 @@ func newUpdate() *cobra.Command { // TODO: complex arg: compute_status cmd.Flags().StringVar(&updateReq.App.Description, "description", updateReq.App.Description, `The description of the app.`) // TODO: array: effective_user_api_scopes + // TODO: complex arg: git_repository // TODO: complex arg: pending_deployment // TODO: array: resources + cmd.Flags().StringVar(&updateReq.App.UsagePolicyId, "usage-policy-id", updateReq.App.UsagePolicyId, ``) // TODO: array: user_api_scopes cmd.Use = "update NAME" diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 4e6daa1dc4..902884bdfe 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -67,6 +67,7 @@ import ( policy_compliance_for_clusters "github.com/databricks/cli/cmd/workspace/policy-compliance-for-clusters" policy_compliance_for_jobs "github.com/databricks/cli/cmd/workspace/policy-compliance-for-jobs" policy_families "github.com/databricks/cli/cmd/workspace/policy-families" + postgres "github.com/databricks/cli/cmd/workspace/postgres" provider_exchange_filters "github.com/databricks/cli/cmd/workspace/provider-exchange-filters" provider_exchanges "github.com/databricks/cli/cmd/workspace/provider-exchanges" provider_files "github.com/databricks/cli/cmd/workspace/provider-files" @@ -191,6 +192,7 @@ func All() []*cobra.Command { out = append(out, policy_compliance_for_clusters.New()) out = append(out, policy_compliance_for_jobs.New()) out = append(out, policy_families.New()) + out = append(out, postgres.New()) out = append(out, provider_exchange_filters.New()) out = append(out, 
provider_exchanges.New()) out = append(out, provider_files.New()) diff --git a/cmd/workspace/feature-engineering/feature-engineering.go b/cmd/workspace/feature-engineering/feature-engineering.go index 621fd66bb1..b6e6e91556 100755 --- a/cmd/workspace/feature-engineering/feature-engineering.go +++ b/cmd/workspace/feature-engineering/feature-engineering.go @@ -192,6 +192,10 @@ func newCreateKafkaConfig() *cobra.Command { cmd.Short = `Create a Kafka config.` cmd.Long = `Create a Kafka config. + Create a Kafka config. During PrPr, Kafka configs can be read and used when + creating features under the entire metastore. Only the creator of the Kafka + config can delete it. + Arguments: NAME: Name that uniquely identifies this Kafka config within the metastore. This will be the identifier used from the Feature object to reference these @@ -429,6 +433,10 @@ func newDeleteKafkaConfig() *cobra.Command { cmd.Short = `Delete a Kafka config.` cmd.Long = `Delete a Kafka config. + Delete a Kafka config. During PrPr, Kafka configs can be read and used when + creating features under the entire metastore. Only the creator of the Kafka + config can delete it. + Arguments: NAME: Name of the Kafka config to delete.` @@ -593,6 +601,10 @@ func newGetKafkaConfig() *cobra.Command { cmd.Short = `Get a Kafka config.` cmd.Long = `Get a Kafka config. + Get a Kafka config. During PrPr, Kafka configs can be read and used when + creating features under the entire metastore. Only the creator of the Kafka + config can delete it. + Arguments: NAME: Name of the Kafka config to get.` @@ -753,7 +765,11 @@ func newListKafkaConfigs() *cobra.Command { cmd.Use = "list-kafka-configs" cmd.Short = `List Kafka configs.` - cmd.Long = `List Kafka configs.` + cmd.Long = `List Kafka configs. + + List Kafka configs. During PrPr, Kafka configs can be read and used when + creating features under the entire metastore. Only the creator of the Kafka + config can delete it.` cmd.Annotations = make(map[string]string) @@ -969,6 +985,10 @@ func newUpdateKafkaConfig() *cobra.Command { cmd.Short = `Update a Kafka config.` cmd.Long = `Update a Kafka config. + Update a Kafka config. During PrPr, Kafka configs can be read and used when + creating features under the entire metastore. Only the creator of the Kafka + config can delete it. + Arguments: NAME: Name that uniquely identifies this Kafka config within the metastore. 
This will be the identifier used from the Feature object to reference these diff --git a/cmd/workspace/feature-store/feature-store.go b/cmd/workspace/feature-store/feature-store.go index b0033dc491..e0baf4664d 100755 --- a/cmd/workspace/feature-store/feature-store.go +++ b/cmd/workspace/feature-store/feature-store.go @@ -71,6 +71,7 @@ func newCreateOnlineStore() *cobra.Command { cmd.Flags().Var(&createOnlineStoreJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().IntVar(&createOnlineStoreReq.OnlineStore.ReadReplicaCount, "read-replica-count", createOnlineStoreReq.OnlineStore.ReadReplicaCount, `The number of read replicas for the online store.`) + cmd.Flags().StringVar(&createOnlineStoreReq.OnlineStore.UsagePolicyId, "usage-policy-id", createOnlineStoreReq.OnlineStore.UsagePolicyId, `The usage policy applied to the online store to track billing.`) cmd.Use = "create-online-store NAME CAPACITY" cmd.Short = `Create an Online Feature Store.` @@ -442,6 +443,7 @@ func newUpdateOnlineStore() *cobra.Command { cmd.Flags().Var(&updateOnlineStoreJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().IntVar(&updateOnlineStoreReq.OnlineStore.ReadReplicaCount, "read-replica-count", updateOnlineStoreReq.OnlineStore.ReadReplicaCount, `The number of read replicas for the online store.`) + cmd.Flags().StringVar(&updateOnlineStoreReq.OnlineStore.UsagePolicyId, "usage-policy-id", updateOnlineStoreReq.OnlineStore.UsagePolicyId, `The usage policy applied to the online store to track billing.`) cmd.Use = "update-online-store NAME UPDATE_MASK CAPACITY" cmd.Short = `Update an Online Feature Store.` diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 4b99544cd6..b4bdcc25ae 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -96,5 +96,9 @@ func Groups() []cobra.Group { ID: "tags", Title: "Tags", }, + { + ID: "postgres", + Title: "Postgres", + }, } } diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index 326687d33b..3ddaad15b0 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -169,6 +169,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + cmd.Flags().BoolVar(&createReq.ExternalAccessEnabled, "external-access-enabled", createReq.ExternalAccessEnabled, `Whether to allow non-DBR clients to directly access entities under the metastore.`) cmd.Flags().StringVar(&createReq.Region, "region", createReq.Region, `Cloud region which the metastore serves (e.g., us-west-2, westus).`) cmd.Flags().StringVar(&createReq.StorageRoot, "storage-root", createReq.StorageRoot, `The storage root URL for metastore.`) @@ -582,6 +583,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.DeltaSharingOrganizationName, "delta-sharing-organization-name", updateReq.DeltaSharingOrganizationName, `The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name.`) cmd.Flags().Int64Var(&updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, "delta-sharing-recipient-token-lifetime-in-seconds", updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, `The lifetime of delta sharing recipient token in seconds.`) cmd.Flags().Var(&updateReq.DeltaSharingScope, "delta-sharing-scope", `The scope of Delta Sharing enabled for the metastore. 
Supported values: [INTERNAL, INTERNAL_AND_EXTERNAL]`) + cmd.Flags().BoolVar(&updateReq.ExternalAccessEnabled, "external-access-enabled", updateReq.ExternalAccessEnabled, `Whether to allow non-DBR clients to directly access entities under the metastore.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the metastore.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the metastore.`) cmd.Flags().StringVar(&updateReq.PrivilegeModelVersion, "privilege-model-version", updateReq.PrivilegeModelVersion, `Privilege model version of the metastore, of the form major.minor (e.g., 1.0).`) diff --git a/cmd/workspace/postgres/postgres.go b/cmd/workspace/postgres/postgres.go new file mode 100755 index 0000000000..b4de46a354 --- /dev/null +++ b/cmd/workspace/postgres/postgres.go @@ -0,0 +1,1665 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package postgres + +import ( + "strings" + "time" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/common/types/fieldmask" + "github.com/databricks/databricks-sdk-go/experimental/api" + "github.com/databricks/databricks-sdk-go/service/postgres" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "postgres", + Short: `The Postgres API provides access to a Postgres database via REST API or direct SQL.`, + Long: `The Postgres API provides access to a Postgres database via REST API or direct + SQL.`, + GroupID: "postgres", + + // This service is being previewed; hide from help output. + Hidden: true, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newCreateBranch()) + cmd.AddCommand(newCreateEndpoint()) + cmd.AddCommand(newCreateProject()) + cmd.AddCommand(newCreateRole()) + cmd.AddCommand(newDeleteBranch()) + cmd.AddCommand(newDeleteEndpoint()) + cmd.AddCommand(newDeleteProject()) + cmd.AddCommand(newDeleteRole()) + cmd.AddCommand(newGetBranch()) + cmd.AddCommand(newGetEndpoint()) + cmd.AddCommand(newGetOperation()) + cmd.AddCommand(newGetProject()) + cmd.AddCommand(newGetRole()) + cmd.AddCommand(newListBranches()) + cmd.AddCommand(newListEndpoints()) + cmd.AddCommand(newListProjects()) + cmd.AddCommand(newListRoles()) + cmd.AddCommand(newUpdateBranch()) + cmd.AddCommand(newUpdateEndpoint()) + cmd.AddCommand(newUpdateProject()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create-branch command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
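+//
+// Illustrative usage of the long-running-operation pattern shared by the
+// create-* commands (hypothetical project and branch IDs; a sketch, not
+// generated output):
+//
+//	databricks postgres create-branch projects/my-project --branch-id dev
+//	databricks postgres create-branch projects/my-project --branch-id dev --no-wait
+//	databricks postgres get-operation <operation-name>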
+var createBranchOverrides []func( + *cobra.Command, + *postgres.CreateBranchRequest, +) + +func newCreateBranch() *cobra.Command { + cmd := &cobra.Command{} + + var createBranchReq postgres.CreateBranchRequest + createBranchReq.Branch = postgres.Branch{} + var createBranchJson flags.JsonFlag + + var createBranchSkipWait bool + var createBranchTimeout time.Duration + + cmd.Flags().BoolVar(&createBranchSkipWait, "no-wait", createBranchSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&createBranchTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Flags().Var(&createBranchJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createBranchReq.BranchId, "branch-id", createBranchReq.BranchId, `The ID to use for the Branch, which will become the final component of the branch's resource name.`) + cmd.Flags().StringVar(&createBranchReq.Branch.Name, "name", createBranchReq.Branch.Name, `The resource name of the branch.`) + // TODO: complex arg: spec + // TODO: complex arg: status + + cmd.Use = "create-branch PARENT" + cmd.Short = `Create a Branch.` + cmd.Long = `Create a Branch. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-operation command. + + Arguments: + PARENT: The Project where this Branch will be created. Format: + projects/{project_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createBranchJson.Unmarshal(&createBranchReq.Branch) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createBranchReq.Parent = args[0] + + // Determine which mode to execute based on flags. + switch { + case createBranchSkipWait: + wait, err := w.Postgres.CreateBranch(ctx, createBranchReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.CreateBranch(ctx, createBranchReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for create-branch to complete..." + + // Wait for completion. + opts := api.WithTimeout(createBranchTimeout) + response, err := wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return cmdio.Render(ctx, response) + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createBranchOverrides { + fn(cmd, &createBranchReq) + } + + return cmd +} + +// start create-endpoint command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var createEndpointOverrides []func( + *cobra.Command, + *postgres.CreateEndpointRequest, +) + +func newCreateEndpoint() *cobra.Command { + cmd := &cobra.Command{} + + var createEndpointReq postgres.CreateEndpointRequest + createEndpointReq.Endpoint = postgres.Endpoint{} + var createEndpointJson flags.JsonFlag + + var createEndpointSkipWait bool + var createEndpointTimeout time.Duration + + cmd.Flags().BoolVar(&createEndpointSkipWait, "no-wait", createEndpointSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&createEndpointTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Flags().Var(&createEndpointJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createEndpointReq.EndpointId, "endpoint-id", createEndpointReq.EndpointId, `The ID to use for the Endpoint, which will become the final component of the endpoint's resource name.`) + cmd.Flags().StringVar(&createEndpointReq.Endpoint.Name, "name", createEndpointReq.Endpoint.Name, `The resource name of the endpoint.`) + // TODO: complex arg: spec + // TODO: complex arg: status + + cmd.Use = "create-endpoint PARENT" + cmd.Short = `Create an Endpoint.` + cmd.Long = `Create an Endpoint. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-operation command. + + Arguments: + PARENT: The Branch where this Endpoint will be created. Format: + projects/{project_id}/branches/{branch_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createEndpointJson.Unmarshal(&createEndpointReq.Endpoint) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createEndpointReq.Parent = args[0] + + // Determine which mode to execute based on flags. + switch { + case createEndpointSkipWait: + wait, err := w.Postgres.CreateEndpoint(ctx, createEndpointReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.CreateEndpoint(ctx, createEndpointReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for create-endpoint to complete..." + + // Wait for completion. + opts := api.WithTimeout(createEndpointTimeout) + response, err := wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return cmdio.Render(ctx, response) + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range createEndpointOverrides { + fn(cmd, &createEndpointReq) + } + + return cmd +} + +// start create-project command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createProjectOverrides []func( + *cobra.Command, + *postgres.CreateProjectRequest, +) + +func newCreateProject() *cobra.Command { + cmd := &cobra.Command{} + + var createProjectReq postgres.CreateProjectRequest + createProjectReq.Project = postgres.Project{} + var createProjectJson flags.JsonFlag + + var createProjectSkipWait bool + var createProjectTimeout time.Duration + + cmd.Flags().BoolVar(&createProjectSkipWait, "no-wait", createProjectSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&createProjectTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Flags().Var(&createProjectJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createProjectReq.ProjectId, "project-id", createProjectReq.ProjectId, `The ID to use for the Project, which will become the final component of the project's resource name.`) + cmd.Flags().StringVar(&createProjectReq.Project.Name, "name", createProjectReq.Project.Name, `The resource name of the project.`) + // TODO: complex arg: spec + // TODO: complex arg: status + + cmd.Use = "create-project" + cmd.Short = `Create a Project.` + cmd.Long = `Create a Project. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-operation command.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createProjectJson.Unmarshal(&createProjectReq.Project) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + // Determine which mode to execute based on flags. + switch { + case createProjectSkipWait: + wait, err := w.Postgres.CreateProject(ctx, createProjectReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.CreateProject(ctx, createProjectReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for create-project to complete..." + + // Wait for completion. + opts := api.WithTimeout(createProjectTimeout) + response, err := wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return cmdio.Render(ctx, response) + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range createProjectOverrides { + fn(cmd, &createProjectReq) + } + + return cmd +} + +// start create-role command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createRoleOverrides []func( + *cobra.Command, + *postgres.CreateRoleRequest, +) + +func newCreateRole() *cobra.Command { + cmd := &cobra.Command{} + + var createRoleReq postgres.CreateRoleRequest + createRoleReq.Role = postgres.Role{} + var createRoleJson flags.JsonFlag + + var createRoleSkipWait bool + var createRoleTimeout time.Duration + + cmd.Flags().BoolVar(&createRoleSkipWait, "no-wait", createRoleSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&createRoleTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Flags().Var(&createRoleJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createRoleReq.Role.Name, "name", createRoleReq.Role.Name, `The resource name of the role.`) + // TODO: complex arg: spec + // TODO: complex arg: status + + cmd.Use = "create-role PARENT ROLE_ID" + cmd.Short = `Create a postgres role for a branch.` + cmd.Long = `Create a postgres role for a branch. + + Create a role for a branch. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-operation command. + + Arguments: + PARENT: The Branch where this Role is created. Format: + projects/{project_id}/branches/{branch_id} + ROLE_ID: The ID to use for the Role, which will become the final component of the + branch's resource name. This ID becomes the role in postgres. + + This value should be 4-63 characters, and only use characters available in + DNS names, as defined by RFC-1123` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createRoleJson.Unmarshal(&createRoleReq.Role) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + createRoleReq.Parent = args[0] + createRoleReq.RoleId = args[1] + + // Determine which mode to execute based on flags. + switch { + case createRoleSkipWait: + wait, err := w.Postgres.CreateRole(ctx, createRoleReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.CreateRole(ctx, createRoleReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for create-role to complete..." + + // Wait for completion. 
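+ // createRoleTimeout is zero unless --timeout was set; the value is
+ // handed to the SDK wait options as-is.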
+ opts := api.WithTimeout(createRoleTimeout) + response, err := wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return cmdio.Render(ctx, response) + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createRoleOverrides { + fn(cmd, &createRoleReq) + } + + return cmd +} + +// start delete-branch command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteBranchOverrides []func( + *cobra.Command, + *postgres.DeleteBranchRequest, +) + +func newDeleteBranch() *cobra.Command { + cmd := &cobra.Command{} + + var deleteBranchReq postgres.DeleteBranchRequest + + cmd.Use = "delete-branch NAME" + cmd.Short = `Delete a Branch.` + cmd.Long = `Delete a Branch. + + Arguments: + NAME: The name of the Branch to delete. Format: + projects/{project_id}/branches/{branch_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteBranchReq.Name = args[0] + + err = w.Postgres.DeleteBranch(ctx, deleteBranchReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteBranchOverrides { + fn(cmd, &deleteBranchReq) + } + + return cmd +} + +// start delete-endpoint command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteEndpointOverrides []func( + *cobra.Command, + *postgres.DeleteEndpointRequest, +) + +func newDeleteEndpoint() *cobra.Command { + cmd := &cobra.Command{} + + var deleteEndpointReq postgres.DeleteEndpointRequest + + cmd.Use = "delete-endpoint NAME" + cmd.Short = `Delete an Endpoint.` + cmd.Long = `Delete an Endpoint. + + Arguments: + NAME: The name of the Endpoint to delete. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteEndpointReq.Name = args[0] + + err = w.Postgres.DeleteEndpoint(ctx, deleteEndpointReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteEndpointOverrides { + fn(cmd, &deleteEndpointReq) + } + + return cmd +} + +// start delete-project command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteProjectOverrides []func( + *cobra.Command, + *postgres.DeleteProjectRequest, +) + +func newDeleteProject() *cobra.Command { + cmd := &cobra.Command{} + + var deleteProjectReq postgres.DeleteProjectRequest + + cmd.Use = "delete-project NAME" + cmd.Short = `Delete a Project.` + cmd.Long = `Delete a Project. + + Arguments: + NAME: The name of the Project to delete. Format: projects/{project_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteProjectReq.Name = args[0] + + err = w.Postgres.DeleteProject(ctx, deleteProjectReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteProjectOverrides { + fn(cmd, &deleteProjectReq) + } + + return cmd +} + +// start delete-role command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteRoleOverrides []func( + *cobra.Command, + *postgres.DeleteRoleRequest, +) + +func newDeleteRole() *cobra.Command { + cmd := &cobra.Command{} + + var deleteRoleReq postgres.DeleteRoleRequest + + var deleteRoleSkipWait bool + var deleteRoleTimeout time.Duration + + cmd.Flags().BoolVar(&deleteRoleSkipWait, "no-wait", deleteRoleSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&deleteRoleTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Flags().StringVar(&deleteRoleReq.ReassignOwnedTo, "reassign-owned-to", deleteRoleReq.ReassignOwnedTo, `Reassign objects.`) + + cmd.Use = "delete-role NAME" + cmd.Short = `Delete a postgres role in a branch.` + cmd.Long = `Delete a postgres role in a branch. + + Delete a role in a branch. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-operation command. + + Arguments: + NAME: The resource name of the postgres role. Format: + projects/{project_id}/branch/{branch_id}/roles/{role_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteRoleReq.Name = args[0] + + // Determine which mode to execute based on flags. + switch { + case deleteRoleSkipWait: + wait, err := w.Postgres.DeleteRole(ctx, deleteRoleReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. 
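+ // The rendered operation includes its 'name' field, which can be
+ // passed to the get-operation command to poll for completion.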
+ operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.DeleteRole(ctx, deleteRoleReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for delete-role to complete..." + + // Wait for completion. + opts := api.WithTimeout(deleteRoleTimeout) + + err = wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return nil + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteRoleOverrides { + fn(cmd, &deleteRoleReq) + } + + return cmd +} + +// start get-branch command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getBranchOverrides []func( + *cobra.Command, + *postgres.GetBranchRequest, +) + +func newGetBranch() *cobra.Command { + cmd := &cobra.Command{} + + var getBranchReq postgres.GetBranchRequest + + cmd.Use = "get-branch NAME" + cmd.Short = `Get a Branch.` + cmd.Long = `Get a Branch. + + Arguments: + NAME: The name of the Branch to retrieve. Format: + projects/{project_id}/branches/{branch_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getBranchReq.Name = args[0] + + response, err := w.Postgres.GetBranch(ctx, getBranchReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getBranchOverrides { + fn(cmd, &getBranchReq) + } + + return cmd +} + +// start get-endpoint command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getEndpointOverrides []func( + *cobra.Command, + *postgres.GetEndpointRequest, +) + +func newGetEndpoint() *cobra.Command { + cmd := &cobra.Command{} + + var getEndpointReq postgres.GetEndpointRequest + + cmd.Use = "get-endpoint NAME" + cmd.Short = `Get an Endpoint.` + cmd.Long = `Get an Endpoint. + + Arguments: + NAME: The name of the Endpoint to retrieve. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getEndpointReq.Name = args[0] + + response, err := w.Postgres.GetEndpoint(ctx, getEndpointReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getEndpointOverrides { + fn(cmd, &getEndpointReq) + } + + return cmd +} + +// start get-operation command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOperationOverrides []func( + *cobra.Command, + *postgres.GetOperationRequest, +) + +func newGetOperation() *cobra.Command { + cmd := &cobra.Command{} + + var getOperationReq postgres.GetOperationRequest + + cmd.Use = "get-operation NAME" + cmd.Short = `Get an Operation.` + cmd.Long = `Get an Operation. + + Arguments: + NAME: The name of the operation resource.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getOperationReq.Name = args[0] + + response, err := w.Postgres.GetOperation(ctx, getOperationReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOperationOverrides { + fn(cmd, &getOperationReq) + } + + return cmd +} + +// start get-project command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getProjectOverrides []func( + *cobra.Command, + *postgres.GetProjectRequest, +) + +func newGetProject() *cobra.Command { + cmd := &cobra.Command{} + + var getProjectReq postgres.GetProjectRequest + + cmd.Use = "get-project NAME" + cmd.Short = `Get a Project.` + cmd.Long = `Get a Project. + + Arguments: + NAME: The name of the Project to retrieve. Format: projects/{project_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getProjectReq.Name = args[0] + + response, err := w.Postgres.GetProject(ctx, getProjectReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getProjectOverrides { + fn(cmd, &getProjectReq) + } + + return cmd +} + +// start get-role command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
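+// For example, a manually curated file could register an override like the
+// following (an illustrative sketch; the hidden-flag tweak is hypothetical):
+//
+//	func init() {
+//		getRoleOverrides = append(getRoleOverrides, func(cmd *cobra.Command, req *postgres.GetRoleRequest) {
+//			cmd.Hidden = true // applied just before newGetRole returns
+//		})
+//	}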
+var getRoleOverrides []func( + *cobra.Command, + *postgres.GetRoleRequest, +) + +func newGetRole() *cobra.Command { + cmd := &cobra.Command{} + + var getRoleReq postgres.GetRoleRequest + + cmd.Use = "get-role NAME" + cmd.Short = `Get a postgres role in a branch.` + cmd.Long = `Get a postgres role in a branch. + + Get a Role. + + Arguments: + NAME: The name of the Role to retrieve. Format: + projects/{project_id}/branches/{branch_id}/roles/{role_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getRoleReq.Name = args[0] + + response, err := w.Postgres.GetRole(ctx, getRoleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getRoleOverrides { + fn(cmd, &getRoleReq) + } + + return cmd +} + +// start list-branches command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listBranchesOverrides []func( + *cobra.Command, + *postgres.ListBranchesRequest, +) + +func newListBranches() *cobra.Command { + cmd := &cobra.Command{} + + var listBranchesReq postgres.ListBranchesRequest + + cmd.Flags().IntVar(&listBranchesReq.PageSize, "page-size", listBranchesReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listBranchesReq.PageToken, "page-token", listBranchesReq.PageToken, `Pagination token to go to the next page of Branches.`) + + cmd.Use = "list-branches PARENT" + cmd.Short = `List Branches.` + cmd.Long = `List Branches. + + Arguments: + PARENT: The Project that owns this collection of branches. Format: + projects/{project_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + listBranchesReq.Parent = args[0] + + response := w.Postgres.ListBranches(ctx, listBranchesReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listBranchesOverrides { + fn(cmd, &listBranchesReq) + } + + return cmd +} + +// start list-endpoints command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listEndpointsOverrides []func( + *cobra.Command, + *postgres.ListEndpointsRequest, +) + +func newListEndpoints() *cobra.Command { + cmd := &cobra.Command{} + + var listEndpointsReq postgres.ListEndpointsRequest + + cmd.Flags().IntVar(&listEndpointsReq.PageSize, "page-size", listEndpointsReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listEndpointsReq.PageToken, "page-token", listEndpointsReq.PageToken, `Pagination token to go to the next page of Endpoints.`) + + cmd.Use = "list-endpoints PARENT" + cmd.Short = `List Endpoints.` + cmd.Long = `List Endpoints. + + Arguments: + PARENT: The Branch that owns this collection of endpoints. Format: + projects/{project_id}/branches/{branch_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + listEndpointsReq.Parent = args[0] + + response := w.Postgres.ListEndpoints(ctx, listEndpointsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listEndpointsOverrides { + fn(cmd, &listEndpointsReq) + } + + return cmd +} + +// start list-projects command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listProjectsOverrides []func( + *cobra.Command, + *postgres.ListProjectsRequest, +) + +func newListProjects() *cobra.Command { + cmd := &cobra.Command{} + + var listProjectsReq postgres.ListProjectsRequest + + cmd.Flags().IntVar(&listProjectsReq.PageSize, "page-size", listProjectsReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listProjectsReq.PageToken, "page-token", listProjectsReq.PageToken, `Pagination token to go to the next page of Projects.`) + + cmd.Use = "list-projects" + cmd.Short = `List Projects.` + cmd.Long = `List Projects.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response := w.Postgres.ListProjects(ctx, listProjectsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listProjectsOverrides { + fn(cmd, &listProjectsReq) + } + + return cmd +} + +// start list-roles command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listRolesOverrides []func( + *cobra.Command, + *postgres.ListRolesRequest, +) + +func newListRoles() *cobra.Command { + cmd := &cobra.Command{} + + var listRolesReq postgres.ListRolesRequest + + cmd.Flags().IntVar(&listRolesReq.PageSize, "page-size", listRolesReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listRolesReq.PageToken, "page-token", listRolesReq.PageToken, `Pagination token to go to the next page of Roles.`) + + cmd.Use = "list-roles PARENT" + cmd.Short = `List postgres roles in a branch.` + cmd.Long = `List postgres roles in a branch. + + List Roles. + + Arguments: + PARENT: The Branch that owns this collection of roles. Format: + projects/{project_id}/branches/{branch_id}` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + listRolesReq.Parent = args[0] + + response := w.Postgres.ListRoles(ctx, listRolesReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listRolesOverrides { + fn(cmd, &listRolesReq) + } + + return cmd +} + +// start update-branch command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateBranchOverrides []func( + *cobra.Command, + *postgres.UpdateBranchRequest, +) + +func newUpdateBranch() *cobra.Command { + cmd := &cobra.Command{} + + var updateBranchReq postgres.UpdateBranchRequest + updateBranchReq.Branch = postgres.Branch{} + var updateBranchJson flags.JsonFlag + + var updateBranchSkipWait bool + var updateBranchTimeout time.Duration + + cmd.Flags().BoolVar(&updateBranchSkipWait, "no-wait", updateBranchSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&updateBranchTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Flags().Var(&updateBranchJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateBranchReq.Branch.Name, "name", updateBranchReq.Branch.Name, `The resource name of the branch.`) + // TODO: complex arg: spec + // TODO: complex arg: status + + cmd.Use = "update-branch NAME UPDATE_MASK" + cmd.Short = `Update a Branch.` + cmd.Long = `Update a Branch. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-operation command. + + Arguments: + NAME: The resource name of the branch. Format: + projects/{project_id}/branches/{branch_id} + UPDATE_MASK: The list of fields to update. 
If unspecified, all fields will be updated + when possible.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateBranchJson.Unmarshal(&updateBranchReq.Branch) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateBranchReq.Name = args[0] + if args[1] != "" { + updateMaskArray := strings.Split(args[1], ",") + updateBranchReq.UpdateMask = *fieldmask.New(updateMaskArray) + } + + // Determine which mode to execute based on flags. + switch { + case updateBranchSkipWait: + wait, err := w.Postgres.UpdateBranch(ctx, updateBranchReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.UpdateBranch(ctx, updateBranchReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for update-branch to complete..." + + // Wait for completion. + opts := api.WithTimeout(updateBranchTimeout) + response, err := wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return cmdio.Render(ctx, response) + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateBranchOverrides { + fn(cmd, &updateBranchReq) + } + + return cmd +} + +// start update-endpoint command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateEndpointOverrides []func( + *cobra.Command, + *postgres.UpdateEndpointRequest, +) + +func newUpdateEndpoint() *cobra.Command { + cmd := &cobra.Command{} + + var updateEndpointReq postgres.UpdateEndpointRequest + updateEndpointReq.Endpoint = postgres.Endpoint{} + var updateEndpointJson flags.JsonFlag + + var updateEndpointSkipWait bool + var updateEndpointTimeout time.Duration + + cmd.Flags().BoolVar(&updateEndpointSkipWait, "no-wait", updateEndpointSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&updateEndpointTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Flags().Var(&updateEndpointJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateEndpointReq.Endpoint.Name, "name", updateEndpointReq.Endpoint.Name, `The resource name of the endpoint.`) + // TODO: complex arg: spec + // TODO: complex arg: status + + cmd.Use = "update-endpoint NAME UPDATE_MASK" + cmd.Short = `Update an Endpoint.` + cmd.Long = `Update an Endpoint. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. 
The operation's 'name' field can then be used to poll for + completion using the get-operation command. + + Arguments: + NAME: The resource name of the endpoint. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} + UPDATE_MASK: The list of fields to update. If unspecified, all fields will be updated + when possible.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateEndpointJson.Unmarshal(&updateEndpointReq.Endpoint) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateEndpointReq.Name = args[0] + if args[1] != "" { + updateMaskArray := strings.Split(args[1], ",") + updateEndpointReq.UpdateMask = *fieldmask.New(updateMaskArray) + } + + // Determine which mode to execute based on flags. + switch { + case updateEndpointSkipWait: + wait, err := w.Postgres.UpdateEndpoint(ctx, updateEndpointReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.UpdateEndpoint(ctx, updateEndpointReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for update-endpoint to complete..." + + // Wait for completion. + opts := api.WithTimeout(updateEndpointTimeout) + response, err := wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return cmdio.Render(ctx, response) + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateEndpointOverrides { + fn(cmd, &updateEndpointReq) + } + + return cmd +} + +// start update-project command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateProjectOverrides []func( + *cobra.Command, + *postgres.UpdateProjectRequest, +) + +func newUpdateProject() *cobra.Command { + cmd := &cobra.Command{} + + var updateProjectReq postgres.UpdateProjectRequest + updateProjectReq.Project = postgres.Project{} + var updateProjectJson flags.JsonFlag + + var updateProjectSkipWait bool + var updateProjectTimeout time.Duration + + cmd.Flags().BoolVar(&updateProjectSkipWait, "no-wait", updateProjectSkipWait, `do not wait to reach DONE state`) + cmd.Flags().DurationVar(&updateProjectTimeout, "timeout", 0, `maximum amount of time to reach DONE state`) + + cmd.Flags().Var(&updateProjectJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateProjectReq.Project.Name, "name", updateProjectReq.Project.Name, `The resource name of the project.`) + // TODO: complex arg: spec + // TODO: complex arg: status + + cmd.Use = "update-project NAME UPDATE_MASK" + cmd.Short = `Update a Project.` + cmd.Long = `Update a Project. + + This is a long-running operation. By default, the command waits for the + operation to complete. Use --no-wait to return immediately with the raw + operation details. The operation's 'name' field can then be used to poll for + completion using the get-operation command. + + Arguments: + NAME: The resource name of the project. Format: projects/{project_id} + UPDATE_MASK: The list of fields to update. If unspecified, all fields will be updated + when possible.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateProjectJson.Unmarshal(&updateProjectReq.Project) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateProjectReq.Name = args[0] + if args[1] != "" { + updateMaskArray := strings.Split(args[1], ",") + updateProjectReq.UpdateMask = *fieldmask.New(updateMaskArray) + } + + // Determine which mode to execute based on flags. + switch { + case updateProjectSkipWait: + wait, err := w.Postgres.UpdateProject(ctx, updateProjectReq) + if err != nil { + return err + } + + // Return operation immediately without waiting. + operation, err := w.Postgres.GetOperation(ctx, postgres.GetOperationRequest{ + Name: wait.Name(), + }) + if err != nil { + return err + } + return cmdio.Render(ctx, operation) + + default: + wait, err := w.Postgres.UpdateProject(ctx, updateProjectReq) + if err != nil { + return err + } + + // Show spinner while waiting for completion. + spinner := cmdio.Spinner(ctx) + spinner <- "Waiting for update-project to complete..." + + // Wait for completion. + opts := api.WithTimeout(updateProjectTimeout) + response, err := wait.Wait(ctx, opts) + if err != nil { + return err + } + close(spinner) + return cmdio.Render(ctx, response) + } + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateProjectOverrides { + fn(cmd, &updateProjectReq) + } + + return cmd +} + +// end service Postgres diff --git a/cmd/workspace/rfa/rfa.go b/cmd/workspace/rfa/rfa.go index d16e3c4604..281c130f44 100755 --- a/cmd/workspace/rfa/rfa.go +++ b/cmd/workspace/rfa/rfa.go @@ -203,6 +203,7 @@ func newUpdateAccessRequestDestinations() *cobra.Command { cmd.Flags().Var(&updateAccessRequestDestinationsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: complex arg: destination_source_securable // TODO: array: destinations cmd.Use = "update-access-request-destinations UPDATE_MASK SECURABLE" @@ -235,8 +236,8 @@ func newUpdateAccessRequestDestinations() *cobra.Command { always explicitly list the fields being updated and avoid using * wildcards, as it can lead to unintended results if the API changes in the future. - SECURABLE: The securable for which the access request destinations are being - retrieved.` + SECURABLE: The securable for which the access request destinations are being modified + or read.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index 49ebd82f5c..8eab5adfcb 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -450,6 +450,9 @@ func newList() *cobra.Command { the **USE_SCHEMA** privilege on the parent schema. There is no guarantee of a specific ordering of the elements in the array. + NOTE: **view_dependencies** and **table_constraints** are not returned by + ListTables queries. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls will be deprecated soon. diff --git a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go index dcdcf47498..43813e8d72 100755 --- a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go +++ b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go @@ -424,8 +424,7 @@ func newUpdateEndpointBudgetPolicy() *cobra.Command { Arguments: ENDPOINT_NAME: Name of the vector search endpoint - BUDGET_POLICY_ID: The budget policy id to be applied (hima-sheth) TODO: remove this once - we've migrated to usage policies` + BUDGET_POLICY_ID: The budget policy id to be applied` cmd.Annotations = make(map[string]string) diff --git a/go.mod b/go.mod index 86870ba346..645f9372c8 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/BurntSushi/toml v1.6.0 // MIT github.com/Masterminds/semver/v3 v3.4.0 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.94.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.96.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/gorilla/mux v1.8.1 // BSD 3-Clause diff --git a/go.sum b/go.sum index 0d0546b127..8be5ce6c28 100644 --- a/go.sum +++ b/go.sum @@ -29,8 +29,8 @@ github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/databricks/databricks-sdk-go v0.94.0 h1:+ITzL/O6/8FkHBJ4oWj1o8PkZ9GPozqydpXJRjsdxzw= -github.com/databricks/databricks-sdk-go v0.94.0/go.mod 
h1:hWoHnHbNLjPKiTm5K/7bcIv3J3Pkgo5x9pPzh8K3RVE= +github.com/databricks/databricks-sdk-go v0.96.0 h1:tpR3GSwkM3Vd6P9KfYEXAJiKZ1KLJ2T2+J3tF8jxlEk= +github.com/databricks/databricks-sdk-go v0.96.0/go.mod h1:hWoHnHbNLjPKiTm5K/7bcIv3J3Pkgo5x9pPzh8K3RVE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/python/databricks/bundles/jobs/_models/job_run_as.py b/python/databricks/bundles/jobs/_models/job_run_as.py index 59b1af3161..ac980cfc75 100644 --- a/python/databricks/bundles/jobs/_models/job_run_as.py +++ b/python/databricks/bundles/jobs/_models/job_run_as.py @@ -17,6 +17,13 @@ class JobRunAs: Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown. """ + group_name: VariableOrOptional[str] = None + """ + :meta private: [EXPERIMENTAL] + + Group name of an account group assigned to the workspace. Setting this field requires being a member of the group. + """ + service_principal_name: VariableOrOptional[str] = None """ The application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role. @@ -38,6 +45,13 @@ def as_dict(self) -> "JobRunAsDict": class JobRunAsDict(TypedDict, total=False): """""" + group_name: VariableOrOptional[str] + """ + :meta private: [EXPERIMENTAL] + + Group name of an account group assigned to the workspace. Setting this field requires being a member of the group. + """ + service_principal_name: VariableOrOptional[str] """ The application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role. diff --git a/python/databricks/bundles/pipelines/_models/table_specific_config.py b/python/databricks/bundles/pipelines/_models/table_specific_config.py index 502d170878..218a8581e5 100644 --- a/python/databricks/bundles/pipelines/_models/table_specific_config.py +++ b/python/databricks/bundles/pipelines/_models/table_specific_config.py @@ -56,6 +56,15 @@ class TableSpecificConfig: Configurations that are only applicable for query-based ingestion connectors. """ + row_filter: VariableOrOptional[str] = None + """ + :meta private: [EXPERIMENTAL] + + (Optional, Immutable) The row filter condition to be applied to the table. + It must not contain the WHERE keyword, only the actual filter condition. + It must be in DBSQL format. + """ + salesforce_include_formula_fields: VariableOrOptional[bool] = None """ :meta private: [EXPERIMENTAL] @@ -124,6 +133,15 @@ class TableSpecificConfigDict(TypedDict, total=False): Configurations that are only applicable for query-based ingestion connectors. """ + row_filter: VariableOrOptional[str] + """ + :meta private: [EXPERIMENTAL] + + (Optional, Immutable) The row filter condition to be applied to the table. + It must not contain the WHERE keyword, only the actual filter condition. + It must be in DBSQL format. 
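+
+    For example (hypothetical columns), a valid value is
+    `region = 'US' AND deleted_at IS NULL` rather than
+    `WHERE region = 'US' AND deleted_at IS NULL`.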
+ """ + salesforce_include_formula_fields: VariableOrOptional[bool] """ :meta private: [EXPERIMENTAL] From bc788dbc8c767e2e874a948fca8023916b0702d4 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 7 Jan 2026 16:17:11 +0100 Subject: [PATCH 2/4] fixed annotations --- bundle/internal/schema/annotations.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index 296a4f3606..329b1a8925 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -461,9 +461,6 @@ github.com/databricks/cli/bundle/config/resources.Alert: "effective_run_as": "description": |- PLACEHOLDER - "file_path": - "description": |- - PLACEHOLDER "id": "description": |- PLACEHOLDER @@ -723,9 +720,6 @@ github.com/databricks/cli/bundle/config/resources.SyncedDatabaseTable: "effective_logical_database_name": "description": |- PLACEHOLDER - "lifecycle": - "description": |- - Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed. "logical_database_name": "description": |- PLACEHOLDER From e431582a0451eff7eb5e0c0c7c42414fab62a8ad Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 7 Jan 2026 16:37:41 +0100 Subject: [PATCH 3/4] fixed acc output --- acceptance/bundle/refschema/out.fields.txt | 50 ++++++++++++++++++++++ acceptance/cmd/workspace/apps/output.txt | 1 + 2 files changed, 51 insertions(+) diff --git a/acceptance/bundle/refschema/out.fields.txt b/acceptance/bundle/refschema/out.fields.txt index cad36c9523..6b0facaea0 100644 --- a/acceptance/bundle/refschema/out.fields.txt +++ b/acceptance/bundle/refschema/out.fields.txt @@ -64,11 +64,27 @@ resources.alerts.*.permissions.permissions[*].permission_level iam.PermissionLev resources.alerts.*.permissions.permissions[*].service_principal_name string ALL resources.alerts.*.permissions.permissions[*].user_name string ALL resources.apps.*.active_deployment *apps.AppDeployment ALL +resources.apps.*.active_deployment.command []string ALL +resources.apps.*.active_deployment.command[*] string ALL resources.apps.*.active_deployment.create_time string ALL resources.apps.*.active_deployment.creator string ALL resources.apps.*.active_deployment.deployment_artifacts *apps.AppDeploymentArtifacts ALL resources.apps.*.active_deployment.deployment_artifacts.source_code_path string ALL resources.apps.*.active_deployment.deployment_id string ALL +resources.apps.*.active_deployment.env_vars []apps.EnvVar ALL +resources.apps.*.active_deployment.env_vars[*] apps.EnvVar ALL +resources.apps.*.active_deployment.env_vars[*].name string ALL +resources.apps.*.active_deployment.env_vars[*].value string ALL +resources.apps.*.active_deployment.env_vars[*].value_from string ALL +resources.apps.*.active_deployment.git_source *apps.GitSource ALL +resources.apps.*.active_deployment.git_source.branch string ALL +resources.apps.*.active_deployment.git_source.commit string ALL +resources.apps.*.active_deployment.git_source.git_repository *apps.GitRepository ALL +resources.apps.*.active_deployment.git_source.git_repository.provider string ALL +resources.apps.*.active_deployment.git_source.git_repository.url string ALL +resources.apps.*.active_deployment.git_source.resolved_commit string ALL +resources.apps.*.active_deployment.git_source.source_code_path string ALL +resources.apps.*.active_deployment.git_source.tag string ALL resources.apps.*.active_deployment.mode apps.AppDeploymentMode ALL 
resources.apps.*.active_deployment.source_code_path string ALL resources.apps.*.active_deployment.status *apps.AppDeploymentStatus ALL @@ -88,8 +104,12 @@ resources.apps.*.creator string ALL resources.apps.*.default_source_code_path string ALL resources.apps.*.description string ALL resources.apps.*.effective_budget_policy_id string ALL +resources.apps.*.effective_usage_policy_id string ALL resources.apps.*.effective_user_api_scopes []string ALL resources.apps.*.effective_user_api_scopes[*] string ALL +resources.apps.*.git_repository *apps.GitRepository ALL +resources.apps.*.git_repository.provider string ALL +resources.apps.*.git_repository.url string ALL resources.apps.*.id string ALL resources.apps.*.lifecycle resources.Lifecycle INPUT resources.apps.*.lifecycle.prevent_destroy bool INPUT @@ -98,11 +118,27 @@ resources.apps.*.name string ALL resources.apps.*.oauth2_app_client_id string ALL resources.apps.*.oauth2_app_integration_id string ALL resources.apps.*.pending_deployment *apps.AppDeployment ALL +resources.apps.*.pending_deployment.command []string ALL +resources.apps.*.pending_deployment.command[*] string ALL resources.apps.*.pending_deployment.create_time string ALL resources.apps.*.pending_deployment.creator string ALL resources.apps.*.pending_deployment.deployment_artifacts *apps.AppDeploymentArtifacts ALL resources.apps.*.pending_deployment.deployment_artifacts.source_code_path string ALL resources.apps.*.pending_deployment.deployment_id string ALL +resources.apps.*.pending_deployment.env_vars []apps.EnvVar ALL +resources.apps.*.pending_deployment.env_vars[*] apps.EnvVar ALL +resources.apps.*.pending_deployment.env_vars[*].name string ALL +resources.apps.*.pending_deployment.env_vars[*].value string ALL +resources.apps.*.pending_deployment.env_vars[*].value_from string ALL +resources.apps.*.pending_deployment.git_source *apps.GitSource ALL +resources.apps.*.pending_deployment.git_source.branch string ALL +resources.apps.*.pending_deployment.git_source.commit string ALL +resources.apps.*.pending_deployment.git_source.git_repository *apps.GitRepository ALL +resources.apps.*.pending_deployment.git_source.git_repository.provider string ALL +resources.apps.*.pending_deployment.git_source.git_repository.url string ALL +resources.apps.*.pending_deployment.git_source.resolved_commit string ALL +resources.apps.*.pending_deployment.git_source.source_code_path string ALL +resources.apps.*.pending_deployment.git_source.tag string ALL resources.apps.*.pending_deployment.mode apps.AppDeploymentMode ALL resources.apps.*.pending_deployment.source_code_path string ALL resources.apps.*.pending_deployment.status *apps.AppDeploymentStatus ALL @@ -122,6 +158,9 @@ resources.apps.*.resources[*].database.database_name string ALL resources.apps.*.resources[*].database.instance_name string ALL resources.apps.*.resources[*].database.permission apps.AppResourceDatabaseDatabasePermission ALL resources.apps.*.resources[*].description string ALL +resources.apps.*.resources[*].experiment *apps.AppResourceExperiment ALL +resources.apps.*.resources[*].experiment.experiment_id string ALL +resources.apps.*.resources[*].experiment.permission apps.AppResourceExperimentExperimentPermission ALL resources.apps.*.resources[*].genie_space *apps.AppResourceGenieSpace ALL resources.apps.*.resources[*].genie_space.name string ALL resources.apps.*.resources[*].genie_space.permission apps.AppResourceGenieSpaceGenieSpacePermission ALL @@ -151,6 +190,7 @@ resources.apps.*.source_code_path string INPUT 
resources.apps.*.update_time string ALL resources.apps.*.updater string ALL resources.apps.*.url string ALL +resources.apps.*.usage_policy_id string ALL resources.apps.*.user_api_scopes []string ALL resources.apps.*.user_api_scopes[*] string ALL resources.apps.*.permissions.object_id string ALL @@ -739,6 +779,7 @@ resources.jobs.*.permissions[*].user_name string INPUT resources.jobs.*.queue *jobs.QueueSettings INPUT STATE resources.jobs.*.queue.enabled bool INPUT STATE resources.jobs.*.run_as *jobs.JobRunAs INPUT STATE +resources.jobs.*.run_as.group_name string INPUT STATE resources.jobs.*.run_as.service_principal_name string INPUT STATE resources.jobs.*.run_as.user_name string INPUT STATE resources.jobs.*.run_as_user_name string REMOTE @@ -915,6 +956,7 @@ resources.jobs.*.settings.performance_target jobs.PerformanceTarget REMOTE resources.jobs.*.settings.queue *jobs.QueueSettings REMOTE resources.jobs.*.settings.queue.enabled bool REMOTE resources.jobs.*.settings.run_as *jobs.JobRunAs REMOTE +resources.jobs.*.settings.run_as.group_name string REMOTE resources.jobs.*.settings.run_as.service_principal_name string REMOTE resources.jobs.*.settings.run_as.user_name string REMOTE resources.jobs.*.settings.schedule *jobs.CronSchedule REMOTE @@ -2944,6 +2986,7 @@ resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.query_based_connector_config.cursor_columns[*] string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.query_based_connector_config.deletion_condition string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.query_based_connector_config.hard_deletion_sync_min_interval_in_seconds int64 INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.row_filter string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.salesforce_include_formula_fields bool INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.scd_type pipelines.TableSpecificConfigScdType INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].report.table_configuration.sequence_by []string INPUT STATE @@ -2973,6 +3016,7 @@ resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.query_based_connector_config.cursor_columns[*] string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.query_based_connector_config.deletion_condition string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.query_based_connector_config.hard_deletion_sync_min_interval_in_seconds int64 INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.row_filter string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.salesforce_include_formula_fields bool INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.scd_type pipelines.TableSpecificConfigScdType INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].schema.table_configuration.sequence_by []string INPUT STATE @@ -3004,6 +3048,7 @@ resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration. 
resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.query_based_connector_config.cursor_columns[*] string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.query_based_connector_config.deletion_condition string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.query_based_connector_config.hard_deletion_sync_min_interval_in_seconds int64 INPUT STATE +resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.row_filter string INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.salesforce_include_formula_fields bool INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.scd_type pipelines.TableSpecificConfigScdType INPUT STATE resources.pipelines.*.ingestion_definition.objects[*].table.table_configuration.sequence_by []string INPUT STATE @@ -3037,6 +3082,7 @@ resources.pipelines.*.ingestion_definition.table_configuration.query_based_conne resources.pipelines.*.ingestion_definition.table_configuration.query_based_connector_config.cursor_columns[*] string INPUT STATE resources.pipelines.*.ingestion_definition.table_configuration.query_based_connector_config.deletion_condition string INPUT STATE resources.pipelines.*.ingestion_definition.table_configuration.query_based_connector_config.hard_deletion_sync_min_interval_in_seconds int64 INPUT STATE +resources.pipelines.*.ingestion_definition.table_configuration.row_filter string INPUT STATE resources.pipelines.*.ingestion_definition.table_configuration.salesforce_include_formula_fields bool INPUT STATE resources.pipelines.*.ingestion_definition.table_configuration.scd_type pipelines.TableSpecificConfigScdType INPUT STATE resources.pipelines.*.ingestion_definition.table_configuration.sequence_by []string INPUT STATE @@ -3241,6 +3287,7 @@ resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configur resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.query_based_connector_config.cursor_columns[*] string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.query_based_connector_config.deletion_condition string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.query_based_connector_config.hard_deletion_sync_min_interval_in_seconds int64 REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.row_filter string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.salesforce_include_formula_fields bool REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.scd_type pipelines.TableSpecificConfigScdType REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].report.table_configuration.sequence_by []string REMOTE @@ -3270,6 +3317,7 @@ resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configur resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.query_based_connector_config.cursor_columns[*] string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.query_based_connector_config.deletion_condition string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.query_based_connector_config.hard_deletion_sync_min_interval_in_seconds int64 REMOTE 
+resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.row_filter string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.salesforce_include_formula_fields bool REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.scd_type pipelines.TableSpecificConfigScdType REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].schema.table_configuration.sequence_by []string REMOTE @@ -3301,6 +3349,7 @@ resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configura resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.query_based_connector_config.cursor_columns[*] string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.query_based_connector_config.deletion_condition string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.query_based_connector_config.hard_deletion_sync_min_interval_in_seconds int64 REMOTE +resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.row_filter string REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.salesforce_include_formula_fields bool REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.scd_type pipelines.TableSpecificConfigScdType REMOTE resources.pipelines.*.spec.ingestion_definition.objects[*].table.table_configuration.sequence_by []string REMOTE @@ -3334,6 +3383,7 @@ resources.pipelines.*.spec.ingestion_definition.table_configuration.query_based_ resources.pipelines.*.spec.ingestion_definition.table_configuration.query_based_connector_config.cursor_columns[*] string REMOTE resources.pipelines.*.spec.ingestion_definition.table_configuration.query_based_connector_config.deletion_condition string REMOTE resources.pipelines.*.spec.ingestion_definition.table_configuration.query_based_connector_config.hard_deletion_sync_min_interval_in_seconds int64 REMOTE +resources.pipelines.*.spec.ingestion_definition.table_configuration.row_filter string REMOTE resources.pipelines.*.spec.ingestion_definition.table_configuration.salesforce_include_formula_fields bool REMOTE resources.pipelines.*.spec.ingestion_definition.table_configuration.scd_type pipelines.TableSpecificConfigScdType REMOTE resources.pipelines.*.spec.ingestion_definition.table_configuration.sequence_by []string REMOTE diff --git a/acceptance/cmd/workspace/apps/output.txt b/acceptance/cmd/workspace/apps/output.txt index f7d7a7b07b..e1524affa4 100644 --- a/acceptance/cmd/workspace/apps/output.txt +++ b/acceptance/cmd/workspace/apps/output.txt @@ -68,6 +68,7 @@ Flags: --description string The description of the app. 
-h, --help help for update --json JSON either inline JSON string or @path/to/file.json with request body (default JSON (0 bytes)) + --usage-policy-id string Global Flags: --debug enable debug logging From de5c22ae6c8b5e20ec7bff82fde223fce2df142b Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 7 Jan 2026 17:25:42 +0100 Subject: [PATCH 4/4] fix spaces --- bundle/schema/jsonschema.json | 1 - cmd/workspace/postgres/postgres.go | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index e9a83d5ceb..b746626664 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -1893,7 +1893,6 @@ "$ref": "#/$defs/string" }, "lifecycle": { - "description": "Lifecycle is a struct that contains the lifecycle settings for a resource. It controls the behavior of the resource when it is deployed or destroyed.", "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.Lifecycle" }, "logical_database_name": { diff --git a/cmd/workspace/postgres/postgres.go b/cmd/workspace/postgres/postgres.go index b4de46a354..f8707f2f25 100755 --- a/cmd/workspace/postgres/postgres.go +++ b/cmd/workspace/postgres/postgres.go @@ -443,7 +443,7 @@ func newCreateRole() *cobra.Command { cmd.Use = "create-role PARENT ROLE_ID" cmd.Short = `Create a postgres role for a branch.` cmd.Long = `Create a postgres role for a branch. - + Create a role for a branch. This is a long-running operation. By default, the command waits for the @@ -456,7 +456,7 @@ func newCreateRole() *cobra.Command { projects/{project_id}/branches/{branch_id} ROLE_ID: The ID to use for the Role, which will become the final component of the branch's resource name. This ID becomes the role in postgres. - + This value should be 4-63 characters, and only use characters available in DNS names, as defined by RFC-1123` @@ -726,7 +726,7 @@ func newDeleteRole() *cobra.Command { cmd.Use = "delete-role NAME" cmd.Short = `Delete a postgres role in a branch.` cmd.Long = `Delete a postgres role in a branch. - + Delete a role in a branch. This is a long-running operation. By default, the command waits for the @@ -1038,7 +1038,7 @@ func newGetRole() *cobra.Command { cmd.Use = "get-role NAME" cmd.Short = `Get a postgres role in a branch.` cmd.Long = `Get a postgres role in a branch. - + Get a Role. Arguments: @@ -1257,7 +1257,7 @@ func newListRoles() *cobra.Command { cmd.Use = "list-roles PARENT" cmd.Short = `List postgres roles in a branch.` cmd.Long = `List postgres roles in a branch. - + List Roles. Arguments: