From d540050ba9e9886171efc968acea5ba80abe6fd1 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 13 May 2025 17:15:28 +0200 Subject: [PATCH 1/3] Upgraded Go SDK to 0.69.0 --- .codegen/_openapi_sha | 2 +- .gitattributes | 4 + Makefile | 1 + .../internal/schema/annotations_openapi.yml | 76 +- bundle/schema/jsonschema.json | 74 +- .../llm-proxy-partner-powered-account.go | 163 ++++ .../llm-proxy-partner-powered-enforce.go | 165 ++++ cmd/account/settings/settings.go | 4 + cmd/workspace/alerts-v2/alerts-v2.go | 51 +- cmd/workspace/cmd.go | 2 + .../database-instances/database-instances.go | 824 ++++++++++++++++++ .../instance-pools/instance-pools.go | 1 - .../llm-proxy-partner-powered-workspace.go | 221 +++++ cmd/workspace/permissions/permissions.go | 93 +- cmd/workspace/pipelines/pipelines.go | 1 + .../serving-endpoints/serving-endpoints.go | 181 ++++ cmd/workspace/settings/settings.go | 2 + .../system-schemas/system-schemas.go | 17 + .../workspace-bindings/workspace-bindings.go | 22 +- .../bundles/compute/_models/environment.py | 26 +- .../bundles/jobs/_models/compute_config.py | 16 +- .../databricks/bundles/pipelines/__init__.py | 14 + .../pipelines/_models/ingestion_config.py | 5 +- .../_models/ingestion_pipeline_definition.py | 18 + .../_models/ingestion_source_type.py | 36 + .../bundles/pipelines/_models/path_pattern.py | 40 + .../pipelines/_models/pipeline_library.py | 22 + go.mod | 2 +- go.sum | 4 +- 29 files changed, 1953 insertions(+), 134 deletions(-) create mode 100755 cmd/account/llm-proxy-partner-powered-account/llm-proxy-partner-powered-account.go create mode 100755 cmd/account/llm-proxy-partner-powered-enforce/llm-proxy-partner-powered-enforce.go create mode 100755 cmd/workspace/database-instances/database-instances.go create mode 100755 cmd/workspace/llm-proxy-partner-powered-workspace/llm-proxy-partner-powered-workspace.go create mode 100644 experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py create mode 100644 experimental/python/databricks/bundles/pipelines/_models/path_pattern.py diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 3b0b1fdac0..864d90a5f1 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -d4c86c045ee9d0410a41ef07e8ae708673b95fa1 \ No newline at end of file +6b2dbf5489ec706709fed80ee65caed7d10a2f38 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 8c01d8d74a..e4b7a4140e 100755 --- a/.gitattributes +++ b/.gitattributes @@ -13,6 +13,8 @@ cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated= cmd/account/federation-policy/federation-policy.go linguist-generated=true cmd/account/groups/groups.go linguist-generated=true cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true +cmd/account/llm-proxy-partner-powered-account/llm-proxy-partner-powered-account.go linguist-generated=true +cmd/account/llm-proxy-partner-powered-enforce/llm-proxy-partner-powered-enforce.go linguist-generated=true cmd/account/log-delivery/log-delivery.go linguist-generated=true cmd/account/metastore-assignments/metastore-assignments.go linguist-generated=true cmd/account/metastores/metastores.go linguist-generated=true @@ -62,6 +64,7 @@ cmd/workspace/current-user/current-user.go linguist-generated=true cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true cmd/workspace/data-sources/data-sources.go linguist-generated=true 
+cmd/workspace/database-instances/database-instances.go linguist-generated=true cmd/workspace/default-namespace/default-namespace.go linguist-generated=true cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go linguist-generated=true @@ -85,6 +88,7 @@ cmd/workspace/jobs/jobs.go linguist-generated=true cmd/workspace/lakeview-embedded/lakeview-embedded.go linguist-generated=true cmd/workspace/lakeview/lakeview.go linguist-generated=true cmd/workspace/libraries/libraries.go linguist-generated=true +cmd/workspace/llm-proxy-partner-powered-workspace/llm-proxy-partner-powered-workspace.go linguist-generated=true cmd/workspace/metastores/metastores.go linguist-generated=true cmd/workspace/model-registry/model-registry.go linguist-generated=true cmd/workspace/model-versions/model-versions.go linguist-generated=true diff --git a/Makefile b/Makefile index ce58562927..4192be3f5a 100644 --- a/Makefile +++ b/Makefile @@ -69,6 +69,7 @@ integration-short: vendor generate: genkit update-sdk [ ! -f tagging.py ] || mv tagging.py internal/genkit/tagging.py + [ ! -f .github/workflows/tagging.yml ] || sed -i '' 's/python tagging.py/python internal\/genkit\/tagging.py/g' .github/workflows/tagging.yml [ ! -f .github/workflows/next-changelog.yml ] || rm .github/workflows/next-changelog.yml pushd experimental/python && make codegen diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index 7819672dc8..e633a6cdeb 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -408,28 +408,15 @@ github.com/databricks/cli/bundle/config/resources.MlflowExperiment: "description": |- Tags: Additional metadata key-value pairs. github.com/databricks/cli/bundle/config/resources.MlflowModel: - "creation_timestamp": - "description": |- - Timestamp recorded when this `registered_model` was created. "description": "description": |- - Description of this `registered_model`. - "last_updated_timestamp": - "description": |- - Timestamp recorded when metadata for this `registered_model` was last updated. - "latest_versions": - "description": |- - Collection of latest model versions for each stage. - Only contains models with current `READY` status. + Optional description for registered model. "name": "description": |- - Unique name for the model. + Register models under this name "tags": "description": |- - Tags: Additional metadata key-value pairs for this `registered_model`. - "user_id": - "description": |- - User that created this `registered_model` + Additional metadata for registered model. github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint: "ai_gateway": "description": |- @@ -1423,6 +1410,14 @@ github.com/databricks/databricks-sdk-go/service/compute.Environment: Each dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/ Allowed dependency could be , , (WSFS or Volumes in Databricks), E.g. dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"] + "environment_version": + "description": |- + We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized + correctly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field + for any other purpose, e.g. 
notebook storage. + This field is not yet exposed to customers (e.g. in the jobs API). + "x-databricks-preview": |- + PRIVATE "jar_dependencies": "description": |- List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`. @@ -2828,9 +2823,41 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionPipelineDefin "objects": "description": |- Required. Settings specifying tables to replicate and the destination for the replicated tables. + "source_type": + "description": |- + The type of the foreign source. + The source type will be inferred from the source connection or ingestion gateway. + This field is output only and will be ignored if provided. "table_configuration": "description": |- Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline. +github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: + "_": + "enum": + - |- + MYSQL + - |- + POSTGRESQL + - |- + SQLSERVER + - |- + SALESFORCE + - |- + NETSUITE + - |- + WORKDAY_RAAS + - |- + GA4_RAW_DATA + - |- + SERVICENOW + - |- + MANAGED_POSTGRESQL + - |- + ORACLE + - |- + SHAREPOINT + - |- + DYNAMICS365 github.com/databricks/databricks-sdk-go/service/pipelines.ManualTrigger: {} github.com/databricks/databricks-sdk-go/service/pipelines.NotebookLibrary: "path": @@ -2849,6 +2876,10 @@ github.com/databricks/databricks-sdk-go/service/pipelines.Notifications: "email_recipients": "description": |- A list of email addresses notified when a configured alert is triggered. +github.com/databricks/databricks-sdk-go/service/pipelines.PathPattern: + "include": + "description": |- + The source code to include for pipelines github.com/databricks/databricks-sdk-go/service/pipelines.PipelineCluster: "apply_policy_default_values": "description": |- @@ -2986,6 +3017,13 @@ github.com/databricks/databricks-sdk-go/service/pipelines.PipelineLibrary: "file": "description": |- The path to a file that defines a pipeline and is stored in the Databricks Repos. + "glob": + "description": |- + The unified field to include source codes. + Each entry can be a notebook path, a file path, or a folder path that ends `/**`. + This field cannot be used together with `notebook` or `file`. + "x-databricks-preview": |- + PRIVATE "jar": "description": |- URI of the jar to be installed. Currently only DBFS is supported. @@ -3639,6 +3677,9 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput: "name": "description": |- The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version. + "provisioned_model_units": + "description": |- + The number of model units provisioned. "scale_to_zero_enabled": "description": |- Whether the compute resources for the served entity should scale down to zero. @@ -3666,6 +3707,9 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput: "name": "description": |- The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' 
and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version. + "provisioned_model_units": + "description": |- + The number of model units provisioned. "scale_to_zero_enabled": "description": |- Whether the compute resources for the served entity should scale down to zero. diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index ed7ab6bab3..25a82698f2 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -771,18 +771,18 @@ "type": "object", "properties": { "description": { - "description": "Description of this `registered_model`.", + "description": "Optional description for registered model.", "$ref": "#/$defs/string" }, "name": { - "description": "Unique name for the model.", + "description": "Register models under this name", "$ref": "#/$defs/string" }, "permissions": { "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.MlflowModelPermission" }, "tags": { - "description": "Tags: Additional metadata key-value pairs for this `registered_model`.", + "description": "Additional metadata for registered model.", "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/ml.ModelTag" } }, @@ -3176,6 +3176,12 @@ "description": "List of pip dependencies, as supported by the version of pip in this environment.", "$ref": "#/$defs/slice/string" }, + "environment_version": { + "description": "We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized\ncorrectly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field\nfor any other purpose, e.g. notebook storage.\nThis field is not yet exposed to customers (e.g. in the jobs API).", + "$ref": "#/$defs/string", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "jar_dependencies": { "description": "List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`.", "$ref": "#/$defs/slice/string", @@ -3699,7 +3705,6 @@ }, "additionalProperties": false, "required": [ - "gpu_node_pool_id", "num_gpus" ] }, @@ -5789,6 +5794,10 @@ "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", "$ref": "#/$defs/slice/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionConfig" }, + "source_type": { + "description": "The type of the foreign source.\nThe source type will be inferred from the source connection or ingestion gateway.\nThis field is output only and will be ignored if provided.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType" + }, "table_configuration": { "description": "Configuration settings to control the ingestion of tables. 
These settings are applied to all tables in the pipeline.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfig" @@ -5802,6 +5811,31 @@ } ] }, + "pipelines.IngestionSourceType": { + "oneOf": [ + { + "type": "string", + "enum": [ + "MYSQL", + "POSTGRESQL", + "SQLSERVER", + "SALESFORCE", + "NETSUITE", + "WORKDAY_RAAS", + "GA4_RAW_DATA", + "SERVICENOW", + "MANAGED_POSTGRESQL", + "ORACLE", + "SHAREPOINT", + "DYNAMICS365" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "pipelines.ManualTrigger": { "oneOf": [ { @@ -5854,6 +5888,24 @@ } ] }, + "pipelines.PathPattern": { + "oneOf": [ + { + "type": "object", + "properties": { + "include": { + "description": "The source code to include for pipelines", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "pipelines.PipelineCluster": { "oneOf": [ { @@ -6024,6 +6076,12 @@ "description": "The path to a file that defines a pipeline and is stored in the Databricks Repos.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.FileLibrary" }, + "glob": { + "description": "The unified field to include source codes.\nEach entry can be a notebook path, a file path, or a folder path that ends `/**`.\nThis field cannot be used together with `notebook` or `file`.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PathPattern", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "jar": { "description": "URI of the jar to be installed. Currently only DBFS is supported.", "$ref": "#/$defs/string", @@ -7210,6 +7268,10 @@ "description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.", "$ref": "#/$defs/string" }, + "provisioned_model_units": { + "description": "The number of model units provisioned.", + "$ref": "#/$defs/int64" + }, "scale_to_zero_enabled": { "description": "Whether the compute resources for the served entity should scale down to zero.", "$ref": "#/$defs/bool" @@ -7262,6 +7324,10 @@ "description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to external_model.name, with '.' 
and ':' replaced with '-', and if not specified for other entities, it defaults to entity_name-entity_version.", "$ref": "#/$defs/string" }, + "provisioned_model_units": { + "description": "The number of model units provisioned.", + "$ref": "#/$defs/int64" + }, "scale_to_zero_enabled": { "description": "Whether the compute resources for the served entity should scale down to zero.", "$ref": "#/$defs/bool" diff --git a/cmd/account/llm-proxy-partner-powered-account/llm-proxy-partner-powered-account.go b/cmd/account/llm-proxy-partner-powered-account/llm-proxy-partner-powered-account.go new file mode 100755 index 0000000000..fc983c684f --- /dev/null +++ b/cmd/account/llm-proxy-partner-powered-account/llm-proxy-partner-powered-account.go @@ -0,0 +1,163 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package llm_proxy_partner_powered_account + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "llm-proxy-partner-powered-account", + Short: `Determines if partner powered models are enabled or not for a specific account.`, + Long: `Determines if partner powered models are enabled or not for a specific account`, + + // This service is being previewed; hide from help output. + Hidden: true, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetLlmProxyPartnerPoweredAccountRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetLlmProxyPartnerPoweredAccountRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the enable partner powered AI features account setting.` + cmd.Long = `Get the enable partner powered AI features account setting. + + Gets the enable partner powered AI features account setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + response, err := a.Settings.LlmProxyPartnerPoweredAccount().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateLlmProxyPartnerPoweredAccountRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateLlmProxyPartnerPoweredAccountRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the enable partner powered AI features account setting.` + cmd.Long = `Update the enable partner powered AI features account setting. + + Updates the enable partner powered AI features account setting.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := a.Settings.LlmProxyPartnerPoweredAccount().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service LlmProxyPartnerPoweredAccount diff --git a/cmd/account/llm-proxy-partner-powered-enforce/llm-proxy-partner-powered-enforce.go b/cmd/account/llm-proxy-partner-powered-enforce/llm-proxy-partner-powered-enforce.go new file mode 100755 index 0000000000..ce0f9d8b98 --- /dev/null +++ b/cmd/account/llm-proxy-partner-powered-enforce/llm-proxy-partner-powered-enforce.go @@ -0,0 +1,165 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package llm_proxy_partner_powered_enforce + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "llm-proxy-partner-powered-enforce", + Short: `Determines if the account-level partner-powered setting value is enforced upon the workspace-level partner-powered setting.`, + Long: `Determines if the account-level partner-powered setting value is enforced upon + the workspace-level partner-powered setting`, + + // This service is being previewed; hide from help output. 
+ Hidden: true, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetLlmProxyPartnerPoweredEnforceRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetLlmProxyPartnerPoweredEnforceRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the enforcement status of partner powered AI features account setting.` + cmd.Long = `Get the enforcement status of partner powered AI features account setting. + + Gets the enforcement status of partner powered AI features account setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + response, err := a.Settings.LlmProxyPartnerPoweredEnforce().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateLlmProxyPartnerPoweredEnforceRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateLlmProxyPartnerPoweredEnforceRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the enforcement status of partner powered AI features account setting.` + cmd.Long = `Update the enforcement status of partner powered AI features account setting. + + Updates the enable enforcement status of partner powered AI features account + setting.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := cmdctx.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := a.Settings.LlmProxyPartnerPoweredEnforce().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service LlmProxyPartnerPoweredEnforce diff --git a/cmd/account/settings/settings.go b/cmd/account/settings/settings.go index 2d5a9b80c4..28cb70d98f 100755 --- a/cmd/account/settings/settings.go +++ b/cmd/account/settings/settings.go @@ -10,6 +10,8 @@ import ( disable_legacy_features "github.com/databricks/cli/cmd/account/disable-legacy-features" enable_ip_access_lists "github.com/databricks/cli/cmd/account/enable-ip-access-lists" esm_enablement_account "github.com/databricks/cli/cmd/account/esm-enablement-account" + llm_proxy_partner_powered_account "github.com/databricks/cli/cmd/account/llm-proxy-partner-powered-account" + llm_proxy_partner_powered_enforce "github.com/databricks/cli/cmd/account/llm-proxy-partner-powered-enforce" personal_compute "github.com/databricks/cli/cmd/account/personal-compute" ) @@ -34,6 +36,8 @@ func New() *cobra.Command { cmd.AddCommand(disable_legacy_features.New()) cmd.AddCommand(enable_ip_access_lists.New()) cmd.AddCommand(esm_enablement_account.New()) + cmd.AddCommand(llm_proxy_partner_powered_account.New()) + cmd.AddCommand(llm_proxy_partner_powered_enforce.New()) cmd.AddCommand(personal_compute.New()) // Apply optional overrides to this command. diff --git a/cmd/workspace/alerts-v2/alerts-v2.go b/cmd/workspace/alerts-v2/alerts-v2.go index 45413c2130..37db596a0e 100755 --- a/cmd/workspace/alerts-v2/alerts-v2.go +++ b/cmd/workspace/alerts-v2/alerts-v2.go @@ -60,12 +60,20 @@ func newCreateAlert() *cobra.Command { cmd := &cobra.Command{} var createAlertReq sql.CreateAlertV2Request + createAlertReq.Alert = sql.AlertV2{} var createAlertJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createAlertJson, "json", `either inline JSON string or @path/to/file.json with request body`) - // TODO: complex arg: alert + cmd.Flags().StringVar(&createAlertReq.Alert.CustomDescription, "custom-description", createAlertReq.Alert.CustomDescription, `Custom description for the alert.`) + cmd.Flags().StringVar(&createAlertReq.Alert.CustomSummary, "custom-summary", createAlertReq.Alert.CustomSummary, `Custom summary for the alert.`) + cmd.Flags().StringVar(&createAlertReq.Alert.DisplayName, "display-name", createAlertReq.Alert.DisplayName, `The display name of the alert.`) + // TODO: complex arg: evaluation + cmd.Flags().StringVar(&createAlertReq.Alert.ParentPath, "parent-path", createAlertReq.Alert.ParentPath, `The workspace path of the folder containing the alert.`) + cmd.Flags().StringVar(&createAlertReq.Alert.QueryText, "query-text", createAlertReq.Alert.QueryText, `Text of the query to be run.`) + // TODO: complex arg: schedule + cmd.Flags().StringVar(&createAlertReq.Alert.WarehouseId, "warehouse-id", createAlertReq.Alert.WarehouseId, `ID of the SQL warehouse attached to the alert.`) cmd.Use = "create-alert" cmd.Short = `Create an alert.` @@ -86,7 +94,7 @@ func newCreateAlert() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createAlertJson.Unmarshal(&createAlertReq) + diags := createAlertJson.Unmarshal(&createAlertReq.Alert) if diags.HasError() { return diags.Error() } @@ -319,44 +327,34 @@ func newUpdateAlert() *cobra.Command { cmd := &cobra.Command{} var updateAlertReq sql.UpdateAlertV2Request + updateAlertReq.Alert = sql.AlertV2{} var updateAlertJson flags.JsonFlag // 
TODO: short flags cmd.Flags().Var(&updateAlertJson, "json", `either inline JSON string or @path/to/file.json with request body`) - // TODO: complex arg: alert + cmd.Flags().StringVar(&updateAlertReq.Alert.CustomDescription, "custom-description", updateAlertReq.Alert.CustomDescription, `Custom description for the alert.`) + cmd.Flags().StringVar(&updateAlertReq.Alert.CustomSummary, "custom-summary", updateAlertReq.Alert.CustomSummary, `Custom summary for the alert.`) + cmd.Flags().StringVar(&updateAlertReq.Alert.DisplayName, "display-name", updateAlertReq.Alert.DisplayName, `The display name of the alert.`) + // TODO: complex arg: evaluation + cmd.Flags().StringVar(&updateAlertReq.Alert.ParentPath, "parent-path", updateAlertReq.Alert.ParentPath, `The workspace path of the folder containing the alert.`) + cmd.Flags().StringVar(&updateAlertReq.Alert.QueryText, "query-text", updateAlertReq.Alert.QueryText, `Text of the query to be run.`) + // TODO: complex arg: schedule + cmd.Flags().StringVar(&updateAlertReq.Alert.WarehouseId, "warehouse-id", updateAlertReq.Alert.WarehouseId, `ID of the SQL warehouse attached to the alert.`) - cmd.Use = "update-alert ID UPDATE_MASK" + cmd.Use = "update-alert ID" cmd.Short = `Update an alert.` cmd.Long = `Update an alert. Update alert Arguments: - ID: UUID identifying the alert. - UPDATE_MASK: The field mask must be a single string, with multiple fields separated by - commas (no spaces). The field path is relative to the resource object, - using a dot (.) to navigate sub-fields (e.g., author.given_name). - Specification of elements in sequence or map fields is not allowed, as - only the entire collection field can be specified. Field names must - exactly match the resource field names. - - A field mask of * indicates full replacement. It’s recommended to - always explicitly list the fields being updated and avoid using * - wildcards, as it can lead to unintended results if the API changes in the - future.` + ID: UUID identifying the alert.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(1)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, provide only ID as positional arguments. 
Provide 'update_mask' in your JSON input") - } - return nil - } - check := root.ExactArgs(2) + check := root.ExactArgs(1) return check(cmd, args) } @@ -366,7 +364,7 @@ func newUpdateAlert() *cobra.Command { w := cmdctx.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := updateAlertJson.Unmarshal(&updateAlertReq) + diags := updateAlertJson.Unmarshal(&updateAlertReq.Alert) if diags.HasError() { return diags.Error() } @@ -378,9 +376,6 @@ func newUpdateAlert() *cobra.Command { } } updateAlertReq.Id = args[0] - if !cmd.Flags().Changed("json") { - updateAlertReq.UpdateMask = args[1] - } response, err := w.AlertsV2.UpdateAlert(ctx, updateAlertReq) if err != nil { diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index c5911b70d6..53e8cc604d 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -27,6 +27,7 @@ import ( dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets" dashboards "github.com/databricks/cli/cmd/workspace/dashboards" data_sources "github.com/databricks/cli/cmd/workspace/data-sources" + database_instances "github.com/databricks/cli/cmd/workspace/database-instances" experiments "github.com/databricks/cli/cmd/workspace/experiments" external_locations "github.com/databricks/cli/cmd/workspace/external-locations" forecasting "github.com/databricks/cli/cmd/workspace/forecasting" @@ -126,6 +127,7 @@ func All() []*cobra.Command { out = append(out, dashboard_widgets.New()) out = append(out, dashboards.New()) out = append(out, data_sources.New()) + out = append(out, database_instances.New()) out = append(out, experiments.New()) out = append(out, external_locations.New()) out = append(out, functions.New()) diff --git a/cmd/workspace/database-instances/database-instances.go b/cmd/workspace/database-instances/database-instances.go new file mode 100755 index 0000000000..64fe1d7f82 --- /dev/null +++ b/cmd/workspace/database-instances/database-instances.go @@ -0,0 +1,824 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package database_instances + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "database-instances", + Short: `Database Instances provide access to a database via REST API or direct SQL.`, + Long: `Database Instances provide access to a database via REST API or direct SQL.`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + + // This service is being previewed; hide from help output. 
+ Hidden: true, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newCreateDatabaseCatalog()) + cmd.AddCommand(newCreateDatabaseInstance()) + cmd.AddCommand(newCreateSyncedDatabaseTable()) + cmd.AddCommand(newDeleteDatabaseCatalog()) + cmd.AddCommand(newDeleteDatabaseInstance()) + cmd.AddCommand(newDeleteSyncedDatabaseTable()) + cmd.AddCommand(newFindDatabaseInstanceByUid()) + cmd.AddCommand(newGetDatabaseCatalog()) + cmd.AddCommand(newGetDatabaseInstance()) + cmd.AddCommand(newGetSyncedDatabaseTable()) + cmd.AddCommand(newListDatabaseInstances()) + cmd.AddCommand(newUpdateDatabaseInstance()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create-database-catalog command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createDatabaseCatalogOverrides []func( + *cobra.Command, + *catalog.CreateDatabaseCatalogRequest, +) + +func newCreateDatabaseCatalog() *cobra.Command { + cmd := &cobra.Command{} + + var createDatabaseCatalogReq catalog.CreateDatabaseCatalogRequest + createDatabaseCatalogReq.Catalog = catalog.DatabaseCatalog{} + var createDatabaseCatalogJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createDatabaseCatalogJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().BoolVar(&createDatabaseCatalogReq.Catalog.CreateDatabaseIfNotExists, "create-database-if-not-exists", createDatabaseCatalogReq.Catalog.CreateDatabaseIfNotExists, ``) + + cmd.Use = "create-database-catalog NAME DATABASE_INSTANCE_NAME DATABASE_NAME" + cmd.Short = `Create a Database Catalog.` + cmd.Long = `Create a Database Catalog. + + Arguments: + NAME: The name of the catalog in UC. + DATABASE_INSTANCE_NAME: The name of the DatabaseInstance housing the database. + DATABASE_NAME: The name of the database (in a instance) associated with the catalog.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'database_instance_name', 'database_name' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createDatabaseCatalogJson.Unmarshal(&createDatabaseCatalogReq.Catalog) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createDatabaseCatalogReq.Catalog.Name = args[0] + } + if !cmd.Flags().Changed("json") { + createDatabaseCatalogReq.Catalog.DatabaseInstanceName = args[1] + } + if !cmd.Flags().Changed("json") { + createDatabaseCatalogReq.Catalog.DatabaseName = args[2] + } + + response, err := w.DatabaseInstances.CreateDatabaseCatalog(ctx, createDatabaseCatalogReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createDatabaseCatalogOverrides { + fn(cmd, &createDatabaseCatalogReq) + } + + return cmd +} + +// start create-database-instance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createDatabaseInstanceOverrides []func( + *cobra.Command, + *catalog.CreateDatabaseInstanceRequest, +) + +func newCreateDatabaseInstance() *cobra.Command { + cmd := &cobra.Command{} + + var createDatabaseInstanceReq catalog.CreateDatabaseInstanceRequest + createDatabaseInstanceReq.DatabaseInstance = catalog.DatabaseInstance{} + var createDatabaseInstanceJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createDatabaseInstanceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.AdminPassword, "admin-password", createDatabaseInstanceReq.DatabaseInstance.AdminPassword, `Password for admin user to create.`) + cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.AdminRolename, "admin-rolename", createDatabaseInstanceReq.DatabaseInstance.AdminRolename, `Name of the admin role for the instance.`) + cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", createDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) + cmd.Flags().BoolVar(&createDatabaseInstanceReq.DatabaseInstance.Stopped, "stopped", createDatabaseInstanceReq.DatabaseInstance.Stopped, `Whether the instance is stopped.`) + + cmd.Use = "create-database-instance NAME" + cmd.Short = `Create a Database Instance.` + cmd.Long = `Create a Database Instance. + + Arguments: + NAME: The name of the instance. This is the unique identifier for the instance.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createDatabaseInstanceJson.Unmarshal(&createDatabaseInstanceReq.DatabaseInstance) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createDatabaseInstanceReq.DatabaseInstance.Name = args[0] + } + + response, err := w.DatabaseInstances.CreateDatabaseInstance(ctx, createDatabaseInstanceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createDatabaseInstanceOverrides { + fn(cmd, &createDatabaseInstanceReq) + } + + return cmd +} + +// start create-synced-database-table command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var createSyncedDatabaseTableOverrides []func( + *cobra.Command, + *catalog.CreateSyncedDatabaseTableRequest, +) + +func newCreateSyncedDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var createSyncedDatabaseTableReq catalog.CreateSyncedDatabaseTableRequest + createSyncedDatabaseTableReq.SyncedTable = catalog.SyncedDatabaseTable{} + var createSyncedDatabaseTableJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createSyncedDatabaseTableJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: data_synchronization_status + cmd.Flags().StringVar(&createSyncedDatabaseTableReq.SyncedTable.DatabaseInstanceName, "database-instance-name", createSyncedDatabaseTableReq.SyncedTable.DatabaseInstanceName, `Name of the target database instance.`) + cmd.Flags().StringVar(&createSyncedDatabaseTableReq.SyncedTable.LogicalDatabaseName, "logical-database-name", createSyncedDatabaseTableReq.SyncedTable.LogicalDatabaseName, `Target Postgres database object (logical database) name for this table.`) + // TODO: complex arg: spec + + cmd.Use = "create-synced-database-table NAME" + cmd.Short = `Create a Synced Database Table.` + cmd.Long = `Create a Synced Database Table. + + Arguments: + NAME: Full three-part (catalog, schema, table) name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createSyncedDatabaseTableJson.Unmarshal(&createSyncedDatabaseTableReq.SyncedTable) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createSyncedDatabaseTableReq.SyncedTable.Name = args[0] + } + + response, err := w.DatabaseInstances.CreateSyncedDatabaseTable(ctx, createSyncedDatabaseTableReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createSyncedDatabaseTableOverrides { + fn(cmd, &createSyncedDatabaseTableReq) + } + + return cmd +} + +// start delete-database-catalog command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteDatabaseCatalogOverrides []func( + *cobra.Command, + *catalog.DeleteDatabaseCatalogRequest, +) + +func newDeleteDatabaseCatalog() *cobra.Command { + cmd := &cobra.Command{} + + var deleteDatabaseCatalogReq catalog.DeleteDatabaseCatalogRequest + + // TODO: short flags + + cmd.Use = "delete-database-catalog NAME" + cmd.Short = `Delete a Database Catalog.` + cmd.Long = `Delete a Database Catalog.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteDatabaseCatalogReq.Name = args[0] + + err = w.DatabaseInstances.DeleteDatabaseCatalog(ctx, deleteDatabaseCatalogReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteDatabaseCatalogOverrides { + fn(cmd, &deleteDatabaseCatalogReq) + } + + return cmd +} + +// start delete-database-instance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteDatabaseInstanceOverrides []func( + *cobra.Command, + *catalog.DeleteDatabaseInstanceRequest, +) + +func newDeleteDatabaseInstance() *cobra.Command { + cmd := &cobra.Command{} + + var deleteDatabaseInstanceReq catalog.DeleteDatabaseInstanceRequest + + // TODO: short flags + + cmd.Flags().BoolVar(&deleteDatabaseInstanceReq.Force, "force", deleteDatabaseInstanceReq.Force, `By default, a instance cannot be deleted if it has descendant instances created via PITR.`) + cmd.Flags().BoolVar(&deleteDatabaseInstanceReq.Purge, "purge", deleteDatabaseInstanceReq.Purge, `If false, the database instance is soft deleted.`) + + cmd.Use = "delete-database-instance NAME" + cmd.Short = `Delete a Database Instance.` + cmd.Long = `Delete a Database Instance. + + Arguments: + NAME: Name of the instance to delete.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteDatabaseInstanceReq.Name = args[0] + + err = w.DatabaseInstances.DeleteDatabaseInstance(ctx, deleteDatabaseInstanceReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteDatabaseInstanceOverrides { + fn(cmd, &deleteDatabaseInstanceReq) + } + + return cmd +} + +// start delete-synced-database-table command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteSyncedDatabaseTableOverrides []func( + *cobra.Command, + *catalog.DeleteSyncedDatabaseTableRequest, +) + +func newDeleteSyncedDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var deleteSyncedDatabaseTableReq catalog.DeleteSyncedDatabaseTableRequest + + // TODO: short flags + + cmd.Use = "delete-synced-database-table NAME" + cmd.Short = `Delete a Synced Database Table.` + cmd.Long = `Delete a Synced Database Table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteSyncedDatabaseTableReq.Name = args[0] + + err = w.DatabaseInstances.DeleteSyncedDatabaseTable(ctx, deleteSyncedDatabaseTableReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteSyncedDatabaseTableOverrides { + fn(cmd, &deleteSyncedDatabaseTableReq) + } + + return cmd +} + +// start find-database-instance-by-uid command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var findDatabaseInstanceByUidOverrides []func( + *cobra.Command, + *catalog.FindDatabaseInstanceByUidRequest, +) + +func newFindDatabaseInstanceByUid() *cobra.Command { + cmd := &cobra.Command{} + + var findDatabaseInstanceByUidReq catalog.FindDatabaseInstanceByUidRequest + + // TODO: short flags + + cmd.Flags().StringVar(&findDatabaseInstanceByUidReq.Uid, "uid", findDatabaseInstanceByUidReq.Uid, `UID of the cluster to get.`) + + cmd.Use = "find-database-instance-by-uid" + cmd.Short = `Find a Database Instance by uid.` + cmd.Long = `Find a Database Instance by uid.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.DatabaseInstances.FindDatabaseInstanceByUid(ctx, findDatabaseInstanceByUidReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range findDatabaseInstanceByUidOverrides { + fn(cmd, &findDatabaseInstanceByUidReq) + } + + return cmd +} + +// start get-database-catalog command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getDatabaseCatalogOverrides []func( + *cobra.Command, + *catalog.GetDatabaseCatalogRequest, +) + +func newGetDatabaseCatalog() *cobra.Command { + cmd := &cobra.Command{} + + var getDatabaseCatalogReq catalog.GetDatabaseCatalogRequest + + // TODO: short flags + + cmd.Use = "get-database-catalog NAME" + cmd.Short = `Get a Database Catalog.` + cmd.Long = `Get a Database Catalog.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getDatabaseCatalogReq.Name = args[0] + + response, err := w.DatabaseInstances.GetDatabaseCatalog(ctx, getDatabaseCatalogReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getDatabaseCatalogOverrides { + fn(cmd, &getDatabaseCatalogReq) + } + + return cmd +} + +// start get-database-instance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getDatabaseInstanceOverrides []func( + *cobra.Command, + *catalog.GetDatabaseInstanceRequest, +) + +func newGetDatabaseInstance() *cobra.Command { + cmd := &cobra.Command{} + + var getDatabaseInstanceReq catalog.GetDatabaseInstanceRequest + + // TODO: short flags + + cmd.Use = "get-database-instance NAME" + cmd.Short = `Get a Database Instance.` + cmd.Long = `Get a Database Instance. + + Arguments: + NAME: Name of the cluster to get.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getDatabaseInstanceReq.Name = args[0] + + response, err := w.DatabaseInstances.GetDatabaseInstance(ctx, getDatabaseInstanceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getDatabaseInstanceOverrides { + fn(cmd, &getDatabaseInstanceReq) + } + + return cmd +} + +// start get-synced-database-table command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getSyncedDatabaseTableOverrides []func( + *cobra.Command, + *catalog.GetSyncedDatabaseTableRequest, +) + +func newGetSyncedDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var getSyncedDatabaseTableReq catalog.GetSyncedDatabaseTableRequest + + // TODO: short flags + + cmd.Use = "get-synced-database-table NAME" + cmd.Short = `Get a Synced Database Table.` + cmd.Long = `Get a Synced Database Table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getSyncedDatabaseTableReq.Name = args[0] + + response, err := w.DatabaseInstances.GetSyncedDatabaseTable(ctx, getSyncedDatabaseTableReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getSyncedDatabaseTableOverrides { + fn(cmd, &getSyncedDatabaseTableReq) + } + + return cmd +} + +// start list-database-instances command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listDatabaseInstancesOverrides []func( + *cobra.Command, + *catalog.ListDatabaseInstancesRequest, +) + +func newListDatabaseInstances() *cobra.Command { + cmd := &cobra.Command{} + + var listDatabaseInstancesReq catalog.ListDatabaseInstancesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listDatabaseInstancesReq.PageSize, "page-size", listDatabaseInstancesReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listDatabaseInstancesReq.PageToken, "page-token", listDatabaseInstancesReq.PageToken, `Pagination token to go to the next page of Database Instances.`) + + cmd.Use = "list-database-instances" + cmd.Short = `List Database Instances.` + cmd.Long = `List Database Instances.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response := w.DatabaseInstances.ListDatabaseInstances(ctx, listDatabaseInstancesReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listDatabaseInstancesOverrides { + fn(cmd, &listDatabaseInstancesReq) + } + + return cmd +} + +// start update-database-instance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateDatabaseInstanceOverrides []func( + *cobra.Command, + *catalog.UpdateDatabaseInstanceRequest, +) + +func newUpdateDatabaseInstance() *cobra.Command { + cmd := &cobra.Command{} + + var updateDatabaseInstanceReq catalog.UpdateDatabaseInstanceRequest + updateDatabaseInstanceReq.DatabaseInstance = catalog.DatabaseInstance{} + var updateDatabaseInstanceJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateDatabaseInstanceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.AdminPassword, "admin-password", updateDatabaseInstanceReq.DatabaseInstance.AdminPassword, `Password for admin user to create.`) + cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.AdminRolename, "admin-rolename", updateDatabaseInstanceReq.DatabaseInstance.AdminRolename, `Name of the admin role for the instance.`) + cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", updateDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) + cmd.Flags().BoolVar(&updateDatabaseInstanceReq.DatabaseInstance.Stopped, "stopped", updateDatabaseInstanceReq.DatabaseInstance.Stopped, `Whether the instance is stopped.`) + + cmd.Use = "update-database-instance NAME" + cmd.Short = `Update a Database Instance.` + cmd.Long = `Update a Database Instance. + + Arguments: + NAME: The name of the instance. This is the unique identifier for the instance.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateDatabaseInstanceJson.Unmarshal(&updateDatabaseInstanceReq.DatabaseInstance) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateDatabaseInstanceReq.Name = args[0] + + response, err := w.DatabaseInstances.UpdateDatabaseInstance(ctx, updateDatabaseInstanceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateDatabaseInstanceOverrides { + fn(cmd, &updateDatabaseInstanceReq) + } + + return cmd +} + +// end service DatabaseInstances diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index 99a656ac52..4bf23399f4 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -286,7 +286,6 @@ func newEdit() *cobra.Command { cmd.Flags().IntVar(&editReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", editReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`) cmd.Flags().IntVar(&editReq.MaxCapacity, "max-capacity", editReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`) cmd.Flags().IntVar(&editReq.MinIdleInstances, "min-idle-instances", editReq.MinIdleInstances, `Minimum number of idle instances to keep in the instance pool.`) - // TODO: complex arg: node_type_flexibility cmd.Use = "edit INSTANCE_POOL_ID INSTANCE_POOL_NAME NODE_TYPE_ID" cmd.Short = `Edit an existing instance pool.` diff --git a/cmd/workspace/llm-proxy-partner-powered-workspace/llm-proxy-partner-powered-workspace.go b/cmd/workspace/llm-proxy-partner-powered-workspace/llm-proxy-partner-powered-workspace.go new file mode 100755 index 0000000000..79b26167b0 --- /dev/null +++ b/cmd/workspace/llm-proxy-partner-powered-workspace/llm-proxy-partner-powered-workspace.go @@ -0,0 +1,221 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package llm_proxy_partner_powered_workspace + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "llm-proxy-partner-powered-workspace", + Short: `Determines if partner powered models are enabled or not for a specific workspace.`, + Long: `Determines if partner powered models are enabled or not for a specific + workspace`, + + // This service is being previewed; hide from help output. + Hidden: true, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteOverrides []func( + *cobra.Command, + *settings.DeleteLlmProxyPartnerPoweredWorkspaceRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteLlmProxyPartnerPoweredWorkspaceRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the enable partner powered AI features workspace setting.` + cmd.Long = `Delete the enable partner powered AI features workspace setting. + + Reverts the enable partner powered AI features workspace setting to its + default value.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Settings.LlmProxyPartnerPoweredWorkspace().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetLlmProxyPartnerPoweredWorkspaceRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetLlmProxyPartnerPoweredWorkspaceRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the enable partner powered AI features workspace setting.` + cmd.Long = `Get the enable partner powered AI features workspace setting. + + Gets the enable partner powered AI features workspace setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Settings.LlmProxyPartnerPoweredWorkspace().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *settings.UpdateLlmProxyPartnerPoweredWorkspaceRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateLlmProxyPartnerPoweredWorkspaceRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the enable partner powered AI features workspace setting.` + cmd.Long = `Update the enable partner powered AI features workspace setting. + + Updates the enable partner powered AI features workspace setting.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.LlmProxyPartnerPoweredWorkspace().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service LlmProxyPartnerPoweredWorkspace diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index 998777af5d..5c2292dcb9 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -20,57 +20,34 @@ func New() *cobra.Command { Use: "permissions", Short: `Permissions API are used to create read, write, edit, update and manage access for various users on different objects and endpoints.`, Long: `Permissions API are used to create read, write, edit, update and manage access - for various users on different objects and endpoints. - - * **[Apps permissions](:service:apps)** — Manage which users can manage or - use apps. - - * **[Cluster permissions](:service:clusters)** — Manage which users can - manage, restart, or attach to clusters. - - * **[Cluster policy permissions](:service:clusterpolicies)** — Manage which - users can use cluster policies. - - * **[Delta Live Tables pipeline permissions](:service:pipelines)** — Manage - which users can view, manage, run, cancel, or own a Delta Live Tables - pipeline. - - * **[Job permissions](:service:jobs)** — Manage which users can view, - manage, trigger, cancel, or own a job. - - * **[MLflow experiment permissions](:service:experiments)** — Manage which - users can read, edit, or manage MLflow experiments. - - * **[MLflow registered model permissions](:service:modelregistry)** — Manage - which users can read, edit, or manage MLflow registered models. - - * **[Password permissions](:service:users)** — Manage which users can use - password login when SSO is enabled. - - * **[Instance Pool permissions](:service:instancepools)** — Manage which - users can manage or attach to pools. - - * **[Repo permissions](repos)** — Manage which users can read, run, edit, or - manage a repo. 
- - * **[Serving endpoint permissions](:service:servingendpoints)** — Manage - which users can view, query, or manage a serving endpoint. - - * **[SQL warehouse permissions](:service:warehouses)** — Manage which users - can use or manage SQL warehouses. - - * **[Token permissions](:service:tokenmanagement)** — Manage which users can - create or use tokens. - - * **[Workspace object permissions](:service:workspace)** — Manage which - users can read, run, edit, or manage alerts, dbsql-dashboards, directories, - files, notebooks and queries. - + for various users on different objects and endpoints. * **[Apps + permissions](:service:apps)** — Manage which users can manage or use apps. * + **[Cluster permissions](:service:clusters)** — Manage which users can + manage, restart, or attach to clusters. * **[Cluster policy + permissions](:service:clusterpolicies)** — Manage which users can use + cluster policies. * **[Delta Live Tables pipeline + permissions](:service:pipelines)** — Manage which users can view, manage, + run, cancel, or own a Delta Live Tables pipeline. * **[Job + permissions](:service:jobs)** — Manage which users can view, manage, + trigger, cancel, or own a job. * **[MLflow experiment + permissions](:service:experiments)** — Manage which users can read, edit, or + manage MLflow experiments. * **[MLflow registered model + permissions](:service:modelregistry)** — Manage which users can read, edit, + or manage MLflow registered models. * **[Instance Pool + permissions](:service:instancepools)** — Manage which users can manage or + attach to pools. * **[Repo permissions](repos)** — Manage which users can + read, run, edit, or manage a repo. * **[Serving endpoint + permissions](:service:servingendpoints)** — Manage which users can view, + query, or manage a serving endpoint. * **[SQL warehouse + permissions](:service:warehouses)** — Manage which users can use or manage + SQL warehouses. * **[Token permissions](:service:tokenmanagement)** — Manage + which users can create or use tokens. * **[Workspace object + permissions](:service:workspace)** — Manage which users can read, run, edit, + or manage alerts, dbsql-dashboards, directories, files, notebooks and queries. For the mapping of the required permissions for specific actions or abilities - and other important information, see [Access Control]. - - Note that to manage access control on service principals, use **[Account - Access Control Proxy](:service:accountaccesscontrolproxy)**. + and other important information, see [Access Control]. Note that to manage + access control on service principals, use **[Account Access Control + Proxy](:service:accountaccesscontrolproxy)**. [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html`, GroupID: "iam", @@ -182,8 +159,12 @@ func newGetPermissionLevels() *cobra.Command { Gets the permission levels that a user can have on an object. Arguments: - REQUEST_OBJECT_TYPE: - REQUEST_OBJECT_ID: ` + REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, + authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, + directories, experiments, files, instance-pools, jobs, notebooks, + pipelines, queries, registered-models, repos, serving-endpoints, or + warehouses. + REQUEST_OBJECT_ID: ` cmd.Annotations = make(map[string]string) @@ -225,13 +206,13 @@ func newGetPermissionLevels() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var setOverrides []func( *cobra.Command, - *iam.PermissionsRequest, + *iam.SetObjectPermissions, ) func newSet() *cobra.Command { cmd := &cobra.Command{} - var setReq iam.PermissionsRequest + var setReq iam.SetObjectPermissions var setJson flags.JsonFlag // TODO: short flags @@ -307,13 +288,13 @@ func newSet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var updateOverrides []func( *cobra.Command, - *iam.PermissionsRequest, + *iam.UpdateObjectPermissions, ) func newUpdate() *cobra.Command { cmd := &cobra.Command{} - var updateReq iam.PermissionsRequest + var updateReq iam.UpdateObjectPermissions var updateJson flags.JsonFlag // TODO: short flags diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 250464959c..7ea89b10de 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -775,6 +775,7 @@ func newStartUpdate() *cobra.Command { cmd.Flags().Var(&startUpdateReq.Cause, "cause", `What triggered this update. Supported values: [ API_CALL, + INFRASTRUCTURE_MAINTENANCE, JOB_TASK, RETRY_ON_FAILURE, SCHEMA_CHANGE, diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index c13a87e17a..29f94f6036 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -45,6 +45,7 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newBuildLogs()) cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateProvisionedThroughputEndpoint()) cmd.AddCommand(newDelete()) cmd.AddCommand(newExportMetrics()) cmd.AddCommand(newGet()) @@ -61,6 +62,7 @@ func New() *cobra.Command { cmd.AddCommand(newSetPermissions()) cmd.AddCommand(newUpdateConfig()) cmd.AddCommand(newUpdatePermissions()) + cmd.AddCommand(newUpdateProvisionedThroughputEndpointConfig()) // Apply optional overrides to this command. for _, fn := range cmdOverrides { @@ -238,6 +240,91 @@ func newCreate() *cobra.Command { return cmd } +// start create-provisioned-throughput-endpoint command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var createProvisionedThroughputEndpointOverrides []func( + *cobra.Command, + *serving.CreatePtEndpointRequest, +) + +func newCreateProvisionedThroughputEndpoint() *cobra.Command { + cmd := &cobra.Command{} + + var createProvisionedThroughputEndpointReq serving.CreatePtEndpointRequest + var createProvisionedThroughputEndpointJson flags.JsonFlag + + var createProvisionedThroughputEndpointSkipWait bool + var createProvisionedThroughputEndpointTimeout time.Duration + + cmd.Flags().BoolVar(&createProvisionedThroughputEndpointSkipWait, "no-wait", createProvisionedThroughputEndpointSkipWait, `do not wait to reach NOT_UPDATING state`) + cmd.Flags().DurationVar(&createProvisionedThroughputEndpointTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach NOT_UPDATING state`) + // TODO: short flags + cmd.Flags().Var(&createProvisionedThroughputEndpointJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: ai_gateway + cmd.Flags().StringVar(&createProvisionedThroughputEndpointReq.BudgetPolicyId, "budget-policy-id", createProvisionedThroughputEndpointReq.BudgetPolicyId, `The budget policy associated with the endpoint.`) + // TODO: array: tags + + cmd.Use = "create-provisioned-throughput-endpoint" + cmd.Short = `Create a new PT serving endpoint.` + cmd.Long = `Create a new PT serving endpoint.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createProvisionedThroughputEndpointJson.Unmarshal(&createProvisionedThroughputEndpointReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + wait, err := w.ServingEndpoints.CreateProvisionedThroughputEndpoint(ctx, createProvisionedThroughputEndpointReq) + if err != nil { + return err + } + if createProvisionedThroughputEndpointSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *serving.ServingEndpointDetailed) { + status := i.State.ConfigUpdate + statusMessage := fmt.Sprintf("current status: %s", status) + spinner <- statusMessage + }).GetWithTimeout(createProvisionedThroughputEndpointTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createProvisionedThroughputEndpointOverrides { + fn(cmd, &createProvisionedThroughputEndpointReq) + } + + return cmd +} + // start delete command // Slice with functions to override default command behavior. @@ -1327,4 +1414,98 @@ func newUpdatePermissions() *cobra.Command { return cmd } +// start update-provisioned-throughput-endpoint-config command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateProvisionedThroughputEndpointConfigOverrides []func(
+	*cobra.Command,
+	*serving.UpdateProvisionedThroughputEndpointConfigRequest,
+)
+
+func newUpdateProvisionedThroughputEndpointConfig() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var updateProvisionedThroughputEndpointConfigReq serving.UpdateProvisionedThroughputEndpointConfigRequest
+	var updateProvisionedThroughputEndpointConfigJson flags.JsonFlag
+
+	var updateProvisionedThroughputEndpointConfigSkipWait bool
+	var updateProvisionedThroughputEndpointConfigTimeout time.Duration
+
+	cmd.Flags().BoolVar(&updateProvisionedThroughputEndpointConfigSkipWait, "no-wait", updateProvisionedThroughputEndpointConfigSkipWait, `do not wait to reach NOT_UPDATING state`)
+	cmd.Flags().DurationVar(&updateProvisionedThroughputEndpointConfigTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach NOT_UPDATING state`)
+	// TODO: short flags
+	cmd.Flags().Var(&updateProvisionedThroughputEndpointConfigJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Use = "update-provisioned-throughput-endpoint-config NAME"
+	cmd.Short = `Update config of a PT serving endpoint.`
+	cmd.Long = `Update config of a PT serving endpoint.
+
+  Updates any combination of the PT endpoint's served entities, the compute
+  configuration of those served entities, and the endpoint's traffic config.
+  Updates are instantaneous and the endpoint should be updated instantly.
+
+  Arguments:
+    NAME: The name of the PT endpoint to update. This field is required.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(1)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := cmdctx.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			diags := updateProvisionedThroughputEndpointConfigJson.Unmarshal(&updateProvisionedThroughputEndpointConfigReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+		}
+		updateProvisionedThroughputEndpointConfigReq.Name = args[0]
+
+		wait, err := w.ServingEndpoints.UpdateProvisionedThroughputEndpointConfig(ctx, updateProvisionedThroughputEndpointConfigReq)
+		if err != nil {
+			return err
+		}
+		if updateProvisionedThroughputEndpointConfigSkipWait {
+			return cmdio.Render(ctx, wait.Response)
+		}
+		spinner := cmdio.Spinner(ctx)
+		info, err := wait.OnProgress(func(i *serving.ServingEndpointDetailed) {
+			status := i.State.ConfigUpdate
+			statusMessage := fmt.Sprintf("current status: %s", status)
+			spinner <- statusMessage
+		}).GetWithTimeout(updateProvisionedThroughputEndpointConfigTimeout)
+		close(spinner)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, info)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range updateProvisionedThroughputEndpointConfigOverrides {
+		fn(cmd, &updateProvisionedThroughputEndpointConfigReq)
+	}
+
+	return cmd
+}
+
 // end service ServingEndpoints
diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go
index 677b5d687a..50519f2adf 100755
--- a/cmd/workspace/settings/settings.go
+++ b/cmd/workspace/settings/settings.go
@@ -17,6 +17,7 @@ import (
 	enable_notebook_table_clipboard "github.com/databricks/cli/cmd/workspace/enable-notebook-table-clipboard"
 	enable_results_downloading "github.com/databricks/cli/cmd/workspace/enable-results-downloading"
 	enhanced_security_monitoring "github.com/databricks/cli/cmd/workspace/enhanced-security-monitoring"
+	llm_proxy_partner_powered_workspace "github.com/databricks/cli/cmd/workspace/llm-proxy-partner-powered-workspace"
 	restrict_workspace_admins "github.com/databricks/cli/cmd/workspace/restrict-workspace-admins"
 )
 
@@ -48,6 +49,7 @@ func New() *cobra.Command {
 	cmd.AddCommand(enable_notebook_table_clipboard.New())
 	cmd.AddCommand(enable_results_downloading.New())
 	cmd.AddCommand(enhanced_security_monitoring.New())
+	cmd.AddCommand(llm_proxy_partner_powered_workspace.New())
 	cmd.AddCommand(restrict_workspace_admins.New())
 
 	// Apply optional overrides to this command.
diff --git a/cmd/workspace/system-schemas/system-schemas.go b/cmd/workspace/system-schemas/system-schemas.go
index f14505398a..f9d9197b1d 100755
--- a/cmd/workspace/system-schemas/system-schemas.go
+++ b/cmd/workspace/system-schemas/system-schemas.go
@@ -6,6 +6,7 @@ import (
 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/cli/libs/cmdctx"
 	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/flags"
 	"github.com/databricks/databricks-sdk-go/service/catalog"
 	"github.com/spf13/cobra"
 )
@@ -115,8 +116,12 @@ func newEnable() *cobra.Command {
 	cmd := &cobra.Command{}
 
 	var enableReq catalog.EnableRequest
+	var enableJson flags.JsonFlag
 
 	// TODO: short flags
+	cmd.Flags().Var(&enableJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Flags().StringVar(&enableReq.CatalogName, "catalog-name", enableReq.CatalogName, `the catalog in which the system schema is to be enabled.`)
 
 	cmd.Use = "enable METASTORE_ID SCHEMA_NAME"
 	cmd.Short = `Enable a system schema.`
@@ -141,6 +146,18 @@ func newEnable() *cobra.Command {
 		ctx := cmd.Context()
 		w := cmdctx.WorkspaceClient(ctx)
 
+		if cmd.Flags().Changed("json") {
+			diags := enableJson.Unmarshal(&enableReq)
+			if diags.HasError() {
+				return diags.Error()
+			}
+			if len(diags) > 0 {
+				err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags)
+				if err != nil {
+					return err
+				}
+			}
+		}
 		enableReq.MetastoreId = args[0]
 		enableReq.SchemaName = args[1]
 
diff --git a/cmd/workspace/workspace-bindings/workspace-bindings.go b/cmd/workspace/workspace-bindings/workspace-bindings.go
index 51dcbf2e04..82ad7712d8 100755
--- a/cmd/workspace/workspace-bindings/workspace-bindings.go
+++ b/cmd/workspace/workspace-bindings/workspace-bindings.go
@@ -3,8 +3,6 @@
 package workspace_bindings
 
 import (
-	"fmt"
-
 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/cli/libs/cmdctx"
 	"github.com/databricks/cli/libs/cmdio"
@@ -39,7 +37,7 @@ func New() *cobra.Command {
 	introduces the ability to bind a securable in READ_ONLY mode (catalogs only).
Securable types that support binding: - catalog - storage_credential - - external_location`, + credential - external_location`, GroupID: "catalog", Annotations: map[string]string{ "package": "catalog", @@ -147,8 +145,8 @@ func newGetBindings() *cobra.Command { or an owner of the securable. Arguments: - SECURABLE_TYPE: The type of the securable to bind to a workspace. - Supported values: [catalog, credential, external_location, storage_credential] + SECURABLE_TYPE: The type of the securable to bind to a workspace (catalog, + storage_credential, credential, or external_location). SECURABLE_NAME: The name of the securable.` cmd.Annotations = make(map[string]string) @@ -163,10 +161,7 @@ func newGetBindings() *cobra.Command { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - _, err = fmt.Sscan(args[0], &getBindingsReq.SecurableType) - if err != nil { - return fmt.Errorf("invalid SECURABLE_TYPE: %s", args[0]) - } + getBindingsReq.SecurableType = args[0] getBindingsReq.SecurableName = args[1] response := w.WorkspaceBindings.GetBindings(ctx, getBindingsReq) @@ -290,8 +285,8 @@ func newUpdateBindings() *cobra.Command { admin or an owner of the securable. Arguments: - SECURABLE_TYPE: The type of the securable to bind to a workspace. - Supported values: [catalog, credential, external_location, storage_credential] + SECURABLE_TYPE: The type of the securable to bind to a workspace (catalog, + storage_credential, credential, or external_location). SECURABLE_NAME: The name of the securable.` cmd.Annotations = make(map[string]string) @@ -318,10 +313,7 @@ func newUpdateBindings() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &updateBindingsReq.SecurableType) - if err != nil { - return fmt.Errorf("invalid SECURABLE_TYPE: %s", args[0]) - } + updateBindingsReq.SecurableType = args[0] updateBindingsReq.SecurableName = args[1] response, err := w.WorkspaceBindings.UpdateBindings(ctx, updateBindingsReq) diff --git a/experimental/python/databricks/bundles/compute/_models/environment.py b/experimental/python/databricks/bundles/compute/_models/environment.py index 3e0d1cc916..07d8d9bf07 100644 --- a/experimental/python/databricks/bundles/compute/_models/environment.py +++ b/experimental/python/databricks/bundles/compute/_models/environment.py @@ -3,7 +3,11 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value -from databricks.bundles.core._variable import VariableOr, VariableOrList +from databricks.bundles.core._variable import ( + VariableOr, + VariableOrList, + VariableOrOptional, +) if TYPE_CHECKING: from typing_extensions import Self @@ -30,6 +34,16 @@ class Environment: List of pip dependencies, as supported by the version of pip in this environment. """ + environment_version: VariableOrOptional[str] = None + """ + :meta private: [EXPERIMENTAL] + + We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized + correctly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field + for any other purpose, e.g. notebook storage. + This field is not yet exposed to customers (e.g. in the jobs API). 
+    """
+
     jar_dependencies: VariableOrList[str] = field(default_factory=list)
     """
     :meta private: [EXPERIMENTAL]
@@ -61,6 +75,16 @@ class EnvironmentDict(TypedDict, total=False):
     List of pip dependencies, as supported by the version of pip in this environment.
     """
 
+    environment_version: VariableOrOptional[str]
+    """
+    :meta private: [EXPERIMENTAL]
+
+    We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized
+    correctly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field
+    for any other purpose, e.g. notebook storage.
+    This field is not yet exposed to customers (e.g. in the jobs API).
+    """
+
     jar_dependencies: VariableOrList[str]
     """
     :meta private: [EXPERIMENTAL]
diff --git a/experimental/python/databricks/bundles/jobs/_models/compute_config.py b/experimental/python/databricks/bundles/jobs/_models/compute_config.py
index a3d47305dd..b1194a80cd 100644
--- a/experimental/python/databricks/bundles/jobs/_models/compute_config.py
+++ b/experimental/python/databricks/bundles/jobs/_models/compute_config.py
@@ -15,14 +15,14 @@ class ComputeConfig:
     :meta private: [EXPERIMENTAL]
     """
 
-    gpu_node_pool_id: VariableOr[str]
+    num_gpus: VariableOr[int]
     """
-    IDof the GPU pool to use.
+    Number of GPUs.
     """
 
-    num_gpus: VariableOr[int]
+    gpu_node_pool_id: VariableOrOptional[str] = None
     """
-    Number of GPUs.
+    ID of the GPU pool to use.
     """
 
     gpu_type: VariableOrOptional[str] = None
     """
@@ -41,14 +41,14 @@ def as_dict(self) -> "ComputeConfigDict":
 class ComputeConfigDict(TypedDict, total=False):
     """"""
 
-    gpu_node_pool_id: VariableOr[str]
+    num_gpus: VariableOr[int]
     """
-    IDof the GPU pool to use.
+    Number of GPUs.
     """
 
-    num_gpus: VariableOr[int]
+    gpu_node_pool_id: VariableOrOptional[str]
     """
-    Number of GPUs.
+    ID of the GPU pool to use.
""" gpu_type: VariableOrOptional[str] diff --git a/experimental/python/databricks/bundles/pipelines/__init__.py b/experimental/python/databricks/bundles/pipelines/__init__.py index 73c884493f..01e7892ac4 100644 --- a/experimental/python/databricks/bundles/pipelines/__init__.py +++ b/experimental/python/databricks/bundles/pipelines/__init__.py @@ -48,6 +48,8 @@ "IngestionPipelineDefinition", "IngestionPipelineDefinitionDict", "IngestionPipelineDefinitionParam", + "IngestionSourceType", + "IngestionSourceTypeParam", "InitScriptInfo", "InitScriptInfoDict", "InitScriptInfoParam", @@ -66,6 +68,9 @@ "Notifications", "NotificationsDict", "NotificationsParam", + "PathPattern", + "PathPatternDict", + "PathPatternParam", "Pipeline", "PipelineCluster", "PipelineClusterAutoscale", @@ -234,6 +239,10 @@ IngestionPipelineDefinitionDict, IngestionPipelineDefinitionParam, ) +from databricks.bundles.pipelines._models.ingestion_source_type import ( + IngestionSourceType, + IngestionSourceTypeParam, +) from databricks.bundles.pipelines._models.notebook_library import ( NotebookLibrary, NotebookLibraryDict, @@ -244,6 +253,11 @@ NotificationsDict, NotificationsParam, ) +from databricks.bundles.pipelines._models.path_pattern import ( + PathPattern, + PathPatternDict, + PathPatternParam, +) from databricks.bundles.pipelines._models.pipeline import ( Pipeline, PipelineDict, diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py index c452222df9..988227c43e 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py @@ -4,7 +4,10 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrOptional -from databricks.bundles.pipelines._models.report_spec import ReportSpec, ReportSpecParam +from databricks.bundles.pipelines._models.report_spec import ( + ReportSpec, + ReportSpecParam, +) from databricks.bundles.pipelines._models.schema_spec import SchemaSpec, SchemaSpecParam from databricks.bundles.pipelines._models.table_spec import TableSpec, TableSpecParam diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py index cf1f451a3a..9316069266 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py @@ -8,6 +8,10 @@ IngestionConfig, IngestionConfigParam, ) +from databricks.bundles.pipelines._models.ingestion_source_type import ( + IngestionSourceType, + IngestionSourceTypeParam, +) from databricks.bundles.pipelines._models.table_specific_config import ( TableSpecificConfig, TableSpecificConfigParam, @@ -36,6 +40,13 @@ class IngestionPipelineDefinition: Required. Settings specifying tables to replicate and the destination for the replicated tables. """ + source_type: VariableOrOptional[IngestionSourceType] = None + """ + The type of the foreign source. + The source type will be inferred from the source connection or ingestion gateway. + This field is output only and will be ignored if provided. 
+ """ + table_configuration: VariableOrOptional[TableSpecificConfig] = None """ Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline. @@ -67,6 +78,13 @@ class IngestionPipelineDefinitionDict(TypedDict, total=False): Required. Settings specifying tables to replicate and the destination for the replicated tables. """ + source_type: VariableOrOptional[IngestionSourceTypeParam] + """ + The type of the foreign source. + The source type will be inferred from the source connection or ingestion gateway. + This field is output only and will be ignored if provided. + """ + table_configuration: VariableOrOptional[TableSpecificConfigParam] """ Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline. diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py new file mode 100644 index 0000000000..50754bee6a --- /dev/null +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py @@ -0,0 +1,36 @@ +from enum import Enum +from typing import Literal + + +class IngestionSourceType(Enum): + MYSQL = "MYSQL" + POSTGRESQL = "POSTGRESQL" + SQLSERVER = "SQLSERVER" + SALESFORCE = "SALESFORCE" + NETSUITE = "NETSUITE" + WORKDAY_RAAS = "WORKDAY_RAAS" + GA4_RAW_DATA = "GA4_RAW_DATA" + SERVICENOW = "SERVICENOW" + MANAGED_POSTGRESQL = "MANAGED_POSTGRESQL" + ORACLE = "ORACLE" + SHAREPOINT = "SHAREPOINT" + DYNAMICS365 = "DYNAMICS365" + + +IngestionSourceTypeParam = ( + Literal[ + "MYSQL", + "POSTGRESQL", + "SQLSERVER", + "SALESFORCE", + "NETSUITE", + "WORKDAY_RAAS", + "GA4_RAW_DATA", + "SERVICENOW", + "MANAGED_POSTGRESQL", + "ORACLE", + "SHAREPOINT", + "DYNAMICS365", + ] + | IngestionSourceType +) diff --git a/experimental/python/databricks/bundles/pipelines/_models/path_pattern.py b/experimental/python/databricks/bundles/pipelines/_models/path_pattern.py new file mode 100644 index 0000000000..557767c213 --- /dev/null +++ b/experimental/python/databricks/bundles/pipelines/_models/path_pattern.py @@ -0,0 +1,40 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrOptional + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class PathPattern: + """ + :meta private: [EXPERIMENTAL] + """ + + include: VariableOrOptional[str] = None + """ + The source code to include for pipelines + """ + + @classmethod + def from_dict(cls, value: "PathPatternDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "PathPatternDict": + return _transform_to_json_value(self) # type:ignore + + +class PathPatternDict(TypedDict, total=False): + """""" + + include: VariableOrOptional[str] + """ + The source code to include for pipelines + """ + + +PathPatternParam = PathPatternDict | PathPattern diff --git a/experimental/python/databricks/bundles/pipelines/_models/pipeline_library.py b/experimental/python/databricks/bundles/pipelines/_models/pipeline_library.py index 6a15df15f1..68bf2e5c47 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/pipeline_library.py +++ b/experimental/python/databricks/bundles/pipelines/_models/pipeline_library.py @@ -16,6 +16,10 @@ NotebookLibrary, 
     NotebookLibraryParam,
 )
+from databricks.bundles.pipelines._models.path_pattern import (
+    PathPattern,
+    PathPatternParam,
+)
 
 if TYPE_CHECKING:
     from typing_extensions import Self
@@ -30,6 +34,15 @@ class PipelineLibrary:
     The path to a file that defines a pipeline and is stored in the Databricks Repos.
     """
 
+    glob: VariableOrOptional[PathPattern] = None
+    """
+    :meta private: [EXPERIMENTAL]
+
+    The unified field to include source code.
+    Each entry can be a notebook path, a file path, or a folder path that ends with `/**`.
+    This field cannot be used together with `notebook` or `file`.
+    """
+
     jar: VariableOrOptional[str] = None
     """
     :meta private: [EXPERIMENTAL]
@@ -65,6 +78,15 @@ class PipelineLibraryDict(TypedDict, total=False):
     The path to a file that defines a pipeline and is stored in the Databricks Repos.
     """
 
+    glob: VariableOrOptional[PathPatternParam]
+    """
+    :meta private: [EXPERIMENTAL]
+
+    The unified field to include source code.
+    Each entry can be a notebook path, a file path, or a folder path that ends with `/**`.
+    This field cannot be used together with `notebook` or `file`.
+    """
+
     jar: VariableOrOptional[str]
     """
     :meta private: [EXPERIMENTAL]
diff --git a/go.mod b/go.mod
index dfa8d1ad0c..5223c744d9 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
 	github.com/BurntSushi/toml v1.5.0 // MIT
 	github.com/Masterminds/semver/v3 v3.3.1 // MIT
 	github.com/briandowns/spinner v1.23.1 // Apache 2.0
-	github.com/databricks/databricks-sdk-go v0.68.0 // Apache 2.0
+	github.com/databricks/databricks-sdk-go v0.69.0 // Apache 2.0
 	github.com/fatih/color v1.18.0 // MIT
 	github.com/google/uuid v1.6.0 // BSD-3-Clause
 	github.com/gorilla/mux v1.8.1 // BSD 3-Clause
diff --git a/go.sum b/go.sum
index c8a7ca9e12..a7301b42d7 100644
--- a/go.sum
+++ b/go.sum
@@ -36,8 +36,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
 github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
-github.com/databricks/databricks-sdk-go v0.68.0 h1:sBsdiF2B0hCmT8NEEkc45tQ73oQSA9nNP7Fhgk/mSR8=
-github.com/databricks/databricks-sdk-go v0.68.0/go.mod h1:xBtjeP9nq+6MgTewZW1EcbRkD7aDY9gZvcRPcwPhZjw=
+github.com/databricks/databricks-sdk-go v0.69.0 h1:W3R+sP9O/dIys3Q4ZnY02DO93h5Urujv811PJYA1CEQ=
+github.com/databricks/databricks-sdk-go v0.69.0/go.mod h1:xBtjeP9nq+6MgTewZW1EcbRkD7aDY9gZvcRPcwPhZjw=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

From d2580a0be973ff30e0c6e2fd64b9eeccf5841073 Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Tue, 13 May 2025 17:16:28 +0200
Subject: [PATCH 2/3] add changelog

---
 NEXT_CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md
index e0f22d558b..ac08d86362 100644
--- a/NEXT_CHANGELOG.md
+++ b/NEXT_CHANGELOG.md
@@ -5,6 +5,7 @@
 ### Notable Changes
 
 ### Dependency updates
+* Upgraded Go SDK to 0.69.0 ([#2867](https://github.com/databricks/cli/pull/2867))
 
 ### CLI

From 470212599c4cc117ae98600ed704689e84eebc1d Mon Sep 17 00:00:00 2001
From: Andrew Nester
Date: Wed, 14 May 2025 13:00:48 +0200
Subject: [PATCH 3/3] fix permissions help

---
 Makefile                               |  2 +
 cmd/workspace/permissions/overrides.go | 62 ++++++++++++++++++++++++++
 2 files changed, 64 insertions(+)
 create mode 100644 cmd/workspace/permissions/overrides.go

diff --git a/Makefile b/Makefile
index 07fff3b8fb..a34d04c23e 100644
--- a/Makefile
+++ b/Makefile
@@ -66,6 +66,8 @@ integration-short:
 
 generate: genkit update-sdk
 	[ ! -f tagging.py ] || mv tagging.py internal/genkit/tagging.py
+# tagging.yml is automatically synced by the update-sdk command and contains a reference to tagging.py in the root
+# since we move tagging.py to a different folder, we need to update this reference here as well
 	[ ! -f .github/workflows/tagging.yml ] || sed -i '' 's/python tagging.py/python internal\/genkit\/tagging.py/g' .github/workflows/tagging.yml
 	[ ! -f .github/workflows/next-changelog.yml ] || rm .github/workflows/next-changelog.yml
 	pushd experimental/python && make codegen
diff --git a/cmd/workspace/permissions/overrides.go b/cmd/workspace/permissions/overrides.go
new file mode 100644
index 0000000000..f5efce48ee
--- /dev/null
+++ b/cmd/workspace/permissions/overrides.go
@@ -0,0 +1,62 @@
+package permissions
+
+import "github.com/spf13/cobra"
+
+func cmdOverride(cmd *cobra.Command) {
+	cmd.Long = `Permissions API are used to create read, write, edit, update and manage access
+  for various users on different objects and endpoints.
+
+  * **[Apps permissions](:service:apps)** — Manage which users can manage or
+  use apps.
+
+  * **[Cluster permissions](:service:clusters)** — Manage which users can
+  manage, restart, or attach to clusters.
+
+  * **[Cluster policy permissions](:service:clusterpolicies)** — Manage which
+  users can use cluster policies.
+
+  * **[Delta Live Tables pipeline permissions](:service:pipelines)** — Manage
+  which users can view, manage, run, cancel, or own a Delta Live Tables
+  pipeline.
+
+  * **[Job permissions](:service:jobs)** — Manage which users can view,
+  manage, trigger, cancel, or own a job.
+
+  * **[MLflow experiment permissions](:service:experiments)** — Manage which
+  users can read, edit, or manage MLflow experiments.
+
+  * **[MLflow registered model permissions](:service:modelregistry)** — Manage
+  which users can read, edit, or manage MLflow registered models.
+
+  * **[Password permissions](:service:users)** — Manage which users can use
+  password login when SSO is enabled.
+
+  * **[Instance Pool permissions](:service:instancepools)** — Manage which
+  users can manage or attach to pools.
+
+  * **[Repo permissions](repos)** — Manage which users can read, run, edit, or
+  manage a repo.
+
+  * **[Serving endpoint permissions](:service:servingendpoints)** — Manage
+  which users can view, query, or manage a serving endpoint.
+
+  * **[SQL warehouse permissions](:service:warehouses)** — Manage which users
+  can use or manage SQL warehouses.
+
+  * **[Token permissions](:service:tokenmanagement)** — Manage which users can
+  create or use tokens.
+
+  * **[Workspace object permissions](:service:workspace)** — Manage which
+  users can read, run, edit, or manage alerts, dbsql-dashboards, directories,
+  files, notebooks and queries.
+
+  For the mapping of the required permissions for specific actions or abilities
+  and other important information, see [Access Control].
+
+  Note that to manage access control on service principals, use **[Account
+  Access Control Proxy](:service:accountaccesscontrolproxy)**.`
+}
+
+func init() {
+	cmdOverrides = append(cmdOverrides, cmdOverride)
+}
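
A note on the pattern the last commit relies on: every generated command file in
this series declares a package-level slice of override functions (for example
`cmdOverrides` or `listDatabaseInstancesOverrides`) that manually curated files
in the same directory can append to from `init()`, and the generated code itself
points at `override.go` for this purpose. As a minimal sketch only — this file
is hypothetical and not part of the patch, and the `ls` alias is an invented
example — the same mechanism could customize the new database-instances service:

// cmd/workspace/database-instances/override.go (hypothetical, not in this patch)
package database_instances

import (
	"github.com/databricks/databricks-sdk-go/service/catalog"
	"github.com/spf13/cobra"
)

// listOverride customizes the generated list-database-instances command;
// here it only adds a shorthand alias, but the request struct is also
// available for adjusting defaults before the command runs.
func listOverride(cmd *cobra.Command, req *catalog.ListDatabaseInstancesRequest) {
	cmd.Aliases = append(cmd.Aliases, "ls") // "ls" is a hypothetical alias
}

func init() {
	// Same registration pattern as cmd/workspace/permissions/overrides.go above.
	listDatabaseInstancesOverrides = append(listDatabaseInstancesOverrides, listOverride)
}

Because overrides live outside the generated files (which are marked "DO NOT
EDIT"), regenerating the CLI against a new SDK version does not discard them —
which is why PATCH 3/3 restores the permissions help text via overrides.go
rather than by editing the generated permissions.go in place.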