diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 3e67081803..ac1c24d104 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -2cee201b2e8d656f7306b2f9ec98edfa721e9829 \ No newline at end of file +a8f547d3728fba835fbdda301e846829c5cbbef5 \ No newline at end of file diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index b5df1e2a91..c8d50adfaf 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -412,7 +412,7 @@ func new{{.PascalName}}() *cobra.Command { {{- if $optionalIfJsonIsUsed }} if !cmd.Flags().Changed("json") { {{- end }} - {{if and (not $field.Entity.IsString) (not $field.Entity.IsFieldMask) (not $field.Entity.IsTimestamp) (not $field.Entity.IsDuration) -}} {{/* TODO: add support for well known types */}} + {{if and (not $field.Entity.IsString) (not $field.Entity.IsFieldMask) (not $field.Entity.IsTimestamp) (not $field.Entity.IsDuration) -}} {{/* TODO: add support for well known types */ -}} _, err = fmt.Sscan(args[{{$arg}}], &{{- template "request-body-obj" (dict "Method" $method "Field" $field)}}) if err != nil { return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}]) diff --git a/.gitattributes b/.gitattributes index 629be14230..66a1ee60c5 100755 --- a/.gitattributes +++ b/.gitattributes @@ -63,10 +63,12 @@ cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true cmd/workspace/credentials/credentials.go linguist-generated=true cmd/workspace/current-user/current-user.go linguist-generated=true +cmd/workspace/custom-llms/custom-llms.go linguist-generated=true +cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go linguist-generated=true cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true cmd/workspace/data-sources/data-sources.go linguist-generated=true 
-cmd/workspace/database-instances/database-instances.go linguist-generated=true +cmd/workspace/database/database.go linguist-generated=true cmd/workspace/default-namespace/default-namespace.go linguist-generated=true cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go linguist-generated=true @@ -110,10 +112,10 @@ cmd/workspace/provider-personalization-requests/provider-personalization-request cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go linguist-generated=true cmd/workspace/provider-providers/provider-providers.go linguist-generated=true cmd/workspace/providers/providers.go linguist-generated=true +cmd/workspace/quality-monitor-v2/quality-monitor-v2.go linguist-generated=true cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true cmd/workspace/queries-legacy/queries-legacy.go linguist-generated=true cmd/workspace/queries/queries.go linguist-generated=true -cmd/workspace/query-execution/query-execution.go linguist-generated=true cmd/workspace/query-history/query-history.go linguist-generated=true cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go linguist-generated=true cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true @@ -131,6 +133,7 @@ cmd/workspace/service-principals/service-principals.go linguist-generated=true cmd/workspace/serving-endpoints/serving-endpoints.go linguist-generated=true cmd/workspace/settings/settings.go linguist-generated=true cmd/workspace/shares/shares.go linguist-generated=true +cmd/workspace/sql-results-download/sql-results-download.go linguist-generated=true cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true cmd/workspace/system-schemas/system-schemas.go linguist-generated=true cmd/workspace/table-constraints/table-constraints.go linguist-generated=true diff --git a/acceptance/help/output.txt 
b/acceptance/help/output.txt index 4f2f1163aa..cdfa4d5a82 100644 --- a/acceptance/help/output.txt +++ b/acceptance/help/output.txt @@ -44,6 +44,7 @@ Identity and Access Management Databricks SQL alerts The alerts API can be used to perform CRUD operations on alerts. alerts-legacy The alerts API can be used to perform CRUD operations on alerts. + alerts-v2 New version of SQL Alerts. dashboards In general, there is little need to modify dashboards using the API. data-sources This API is provided to assist you in making new query objects. queries The queries API can be used to perform CRUD operations on queries. @@ -123,6 +124,9 @@ Clean Rooms clean-room-task-runs Clean room task runs are the executions of notebooks in a clean room. clean-rooms A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other’s data. +Quality Monitor v2 + quality-monitor-v2 Manage data quality of UC objects (currently support schema). 
+ Additional Commands: account Databricks Account Commands api Perform Databricks API call diff --git a/bundle/config/variable/resolve_metastore.go b/bundle/config/variable/resolve_metastore.go index 8a0a8c7edb..5460ccb3d3 100644 --- a/bundle/config/variable/resolve_metastore.go +++ b/bundle/config/variable/resolve_metastore.go @@ -2,8 +2,10 @@ package variable import ( "context" + "fmt" "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/catalog" ) type resolveMetastore struct { @@ -11,11 +13,28 @@ type resolveMetastore struct { } func (l resolveMetastore) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { - entity, err := w.Metastores.GetByName(ctx, l.name) + result, err := w.Metastores.ListAll(ctx, catalog.ListMetastoresRequest{}) if err != nil { return "", err } - return entity.MetastoreId, nil + + // Collect all metastores with the given name. + var entities []catalog.MetastoreInfo + for _, entity := range result { + if entity.Name == l.name { + entities = append(entities, entity) + } + } + + // Return the ID of the first matching metastore. 
+ switch len(entities) { + case 0: + return "", fmt.Errorf("metastore named %q does not exist", l.name) + case 1: + return entities[0].MetastoreId, nil + default: + return "", fmt.Errorf("there are %d instances of metastores named %q", len(entities), l.name) + } } func (l resolveMetastore) String() string { diff --git a/bundle/config/variable/resolve_metastore_test.go b/bundle/config/variable/resolve_metastore_test.go index 55c4d92d09..5d772e65bf 100644 --- a/bundle/config/variable/resolve_metastore_test.go +++ b/bundle/config/variable/resolve_metastore_test.go @@ -4,7 +4,6 @@ import ( "context" "testing" - "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/stretchr/testify/assert" @@ -17,9 +16,9 @@ func TestResolveMetastore_ResolveSuccess(t *testing.T) { api := m.GetMockMetastoresAPI() api.EXPECT(). - GetByName(mock.Anything, "metastore"). - Return(&catalog.MetastoreInfo{ - MetastoreId: "abcd", + ListAll(mock.Anything, mock.Anything). + Return([]catalog.MetastoreInfo{ + {MetastoreId: "abcd", Name: "metastore"}, }, nil) ctx := context.Background() @@ -34,13 +33,15 @@ func TestResolveMetastore_ResolveNotFound(t *testing.T) { api := m.GetMockMetastoresAPI() api.EXPECT(). - GetByName(mock.Anything, "metastore"). - Return(nil, &apierr.APIError{StatusCode: 404}) + ListAll(mock.Anything, mock.Anything). 
+ Return([]catalog.MetastoreInfo{ + {MetastoreId: "abcd", Name: "different"}, + }, nil) ctx := context.Background() l := resolveMetastore{name: "metastore"} _, err := l.Resolve(ctx, m.WorkspaceClient) - require.ErrorIs(t, err, apierr.ErrNotFound) + require.ErrorContains(t, err, "metastore named \"metastore\" does not exist") } func TestResolveMetastore_String(t *testing.T) { diff --git a/bundle/internal/schema/annotations_openapi.yml b/bundle/internal/schema/annotations_openapi.yml index 395284c861..3f84637d46 100644 --- a/bundle/internal/schema/annotations_openapi.yml +++ b/bundle/internal/schema/annotations_openapi.yml @@ -534,6 +534,11 @@ github.com/databricks/cli/bundle/config/resources.Pipeline: "storage": "description": |- DBFS root directory for storing checkpoints and tables. + "tags": + "description": |- + A map of tags associated with the pipeline. + These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. + A maximum of 25 tags can be added to the pipeline. "target": "description": |- Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field. @@ -1425,24 +1430,19 @@ github.com/databricks/databricks-sdk-go/service/compute.Environment: "description": |- In this minimal environment spec, only pip dependencies are supported. "client": "description": |- - Client version used by the environment - The client is the user-facing environment of the runtime. - Each client comes with a specific set of pre-installed libraries. - The version is a string, consisting of the major client version. + Use `environment_version` instead. + "deprecation_message": |- + This field is deprecated "dependencies": "description": |- List of pip dependencies, as supported by the version of pip in this environment. 
- Each dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/ - Allowed dependency could be , , (WSFS or Volumes in Databricks), - E.g. dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"] + Each dependency is a valid pip requirements file line per https://pip.pypa.io/en/stable/reference/requirements-file-format/. + Allowed dependencies include a requirement specifier, an archive URL, a local project path (such as WSFS or UC Volumes in Databricks), or a VCS project URL. "environment_version": "description": |- - We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized - correctly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field - for any other purpose, e.g. notebook storage. - This field is not yet exposed to customers (e.g. in the jobs API). - "x-databricks-preview": |- - PRIVATE + Required. Environment version used by the environment. + Each version comes with a specific Python version and a set of Python packages. + The version is a string, consisting of an integer. "jar_dependencies": "description": |- List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`. @@ -1787,6 +1787,13 @@ github.com/databricks/databricks-sdk-go/service/jobs.DashboardTask: "description": |- Optional: The warehouse id to execute the dashboard with for the schedule. If not specified, the default warehouse of the dashboard will be used. 
+github.com/databricks/databricks-sdk-go/service/jobs.DbtCloudTask: + "connection_resource_name": + "description": |- + The resource name of the UC connection that authenticates the dbt Cloud for this task + "dbt_cloud_job_id": + "description": |- + Id of the dbt Cloud job to be triggered github.com/databricks/databricks-sdk-go/service/jobs.DbtTask: "catalog": "description": |- @@ -2540,6 +2547,11 @@ github.com/databricks/databricks-sdk-go/service/jobs.Task: "dashboard_task": "description": |- The task refreshes a dashboard and sends a snapshot to subscribers. + "dbt_cloud_task": + "description": |- + Task type for dbt cloud + "x-databricks-preview": |- + PRIVATE "dbt_task": "description": |- The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse. @@ -2878,6 +2890,8 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType: MANAGED_POSTGRESQL - |- ORACLE + - |- + TERADATA - |- SHAREPOINT - |- @@ -3692,9 +3706,15 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput: "instance_profile_arn": "description": |- ARN of the instance profile that the served entity uses to access AWS resources. + "max_provisioned_concurrency": + "description": |- + The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified. "max_provisioned_throughput": "description": |- The maximum tokens per second that the endpoint can scale up to. + "min_provisioned_concurrency": + "description": |- + The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified. "min_provisioned_throughput": "description": |- The minimum tokens per second that the endpoint can scale down to. 
@@ -3709,7 +3729,7 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput: Whether the compute resources for the served entity should scale down to zero. "workload_size": "description": |- - The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. + The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified. "workload_type": "description": |- The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). 
@@ -3720,9 +3740,15 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput: "instance_profile_arn": "description": |- ARN of the instance profile that the served entity uses to access AWS resources. + "max_provisioned_concurrency": + "description": |- + The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified. "max_provisioned_throughput": "description": |- The maximum tokens per second that the endpoint can scale up to. + "min_provisioned_concurrency": + "description": |- + The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified. "min_provisioned_throughput": "description": |- The minimum tokens per second that the endpoint can scale down to. @@ -3739,7 +3765,7 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput: Whether the compute resources for the served entity should scale down to zero. "workload_size": "description": |- - The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. + The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). 
Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified. "workload_type": "description": |- The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index cf79d61226..5b220c5c0b 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -1083,6 +1083,10 @@ "description": "DBFS root directory for storing checkpoints and tables.", "$ref": "#/$defs/string" }, + "tags": { + "description": "A map of tags associated with the pipeline.\nThese are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations.\nA maximum of 25 tags can be added to the pipeline.", + "$ref": "#/$defs/map/string" + }, "target": { "description": "Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. 
This legacy field is deprecated for pipeline creation in favor of the `schema` field.", "$ref": "#/$defs/string" @@ -3325,18 +3329,18 @@ "description": "The environment entity used to preserve serverless environment side panel, jobs' environment for non-notebook task, and DLT's environment for classic and serverless pipelines.\nIn this minimal environment spec, only pip dependencies are supported.", "properties": { "client": { - "description": "Client version used by the environment\nThe client is the user-facing environment of the runtime.\nEach client comes with a specific set of pre-installed libraries.\nThe version is a string, consisting of the major client version.", - "$ref": "#/$defs/string" + "description": "Use `environment_version` instead.", + "$ref": "#/$defs/string", + "deprecationMessage": "This field is deprecated", + "deprecated": true }, "dependencies": { "description": "List of pip dependencies, as supported by the version of pip in this environment.", "$ref": "#/$defs/slice/string" }, "environment_version": { - "description": "We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized\ncorrectly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field\nfor any other purpose, e.g. notebook storage.\nThis field is not yet exposed to customers (e.g. in the jobs API).", - "$ref": "#/$defs/string", - "x-databricks-preview": "PRIVATE", - "doNotSuggest": true + "description": "Required. Environment version used by the environment.\nEach version comes with a specific Python version and a set of Python packages.\nThe version is a string, consisting of an integer.", + "$ref": "#/$defs/string" }, "jar_dependencies": { "description": "List of jar dependencies, should be string representing volume paths. 
For example: `/Volumes/path/to/test.jar`.", @@ -3345,10 +3349,7 @@ "doNotSuggest": true } }, - "additionalProperties": false, - "required": [ - "client" - ] + "additionalProperties": false }, { "type": "string", @@ -4009,6 +4010,28 @@ } ] }, + "jobs.DbtCloudTask": { + "oneOf": [ + { + "type": "object", + "properties": { + "connection_resource_name": { + "description": "The resource name of the UC connection that authenticates the dbt Cloud for this task", + "$ref": "#/$defs/string" + }, + "dbt_cloud_job_id": { + "description": "Id of the dbt Cloud job to be triggered", + "$ref": "#/$defs/int64" + } + }, + "additionalProperties": false + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, "jobs.DbtTask": { "oneOf": [ { @@ -5382,6 +5405,12 @@ "description": "The task refreshes a dashboard and sends a snapshot to subscribers.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DashboardTask" }, + "dbt_cloud_task": { + "description": "Task type for dbt cloud", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DbtCloudTask", + "x-databricks-preview": "PRIVATE", + "doNotSuggest": true + }, "dbt_task": { "description": "The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DbtTask" @@ -5982,6 +6011,7 @@ "SERVICENOW", "MANAGED_POSTGRESQL", "ORACLE", + "TERADATA", "SHAREPOINT", "DYNAMICS365" ] @@ -7412,10 +7442,18 @@ "description": "ARN of the instance profile that the served entity uses to access AWS resources.", "$ref": "#/$defs/string" }, + "max_provisioned_concurrency": { + "description": "The maximum provisioned concurrency that the endpoint can scale up to. 
Do not use if workload_size is specified.", + "$ref": "#/$defs/int" + }, "max_provisioned_throughput": { "description": "The maximum tokens per second that the endpoint can scale up to.", "$ref": "#/$defs/int" }, + "min_provisioned_concurrency": { + "description": "The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified.", + "$ref": "#/$defs/int" + }, "min_provisioned_throughput": { "description": "The minimum tokens per second that the endpoint can scale down to.", "$ref": "#/$defs/int" @@ -7433,7 +7471,7 @@ "$ref": "#/$defs/bool" }, "workload_size": { - "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.", + "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. 
Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.", "$ref": "#/$defs/string" }, "workload_type": { @@ -7462,10 +7500,18 @@ "description": "ARN of the instance profile that the served entity uses to access AWS resources.", "$ref": "#/$defs/string" }, + "max_provisioned_concurrency": { + "description": "The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified.", + "$ref": "#/$defs/int" + }, "max_provisioned_throughput": { "description": "The maximum tokens per second that the endpoint can scale up to.", "$ref": "#/$defs/int" }, + "min_provisioned_concurrency": { + "description": "The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified.", + "$ref": "#/$defs/int" + }, "min_provisioned_throughput": { "description": "The minimum tokens per second that the endpoint can scale down to.", "$ref": "#/$defs/int" @@ -7489,7 +7535,7 @@ "$ref": "#/$defs/bool" }, "workload_size": { - "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.", + "description": "The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. 
Valid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.", "$ref": "#/$defs/string" }, "workload_type": { diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index e2833263b2..036ea0c449 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -20,66 +20,10 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "log-delivery", - Short: `These APIs manage log delivery configurations for this account.`, - Long: `These APIs manage log delivery configurations for this account. The two - supported log types for this API are _billable usage logs_ and _audit logs_. - This feature is in Public Preview. This feature works with all account ID - types. - - Log delivery works with all account types. However, if your account is on the - E2 version of the platform or on a select custom plan that allows multiple - workspaces per account, you can optionally configure different storage - destinations for each workspace. Log delivery status is also provided to know - the latest status of log delivery attempts. The high-level flow of billable - usage delivery: - - 1. **Create storage**: In AWS, [create a new AWS S3 bucket] with a specific - bucket policy. Using Databricks APIs, call the Account API to create a - [storage configuration object](:method:Storage/Create) that uses the bucket - name. 2. **Create credentials**: In AWS, create the appropriate AWS IAM role. 
- For full details, including the required IAM role policies and trust - relationship, see [Billable usage log delivery]. Using Databricks APIs, call - the Account API to create a [credential configuration - object](:method:Credentials/Create) that uses the IAM role"s ARN. 3. **Create - log delivery configuration**: Using Databricks APIs, call the Account API to - [create a log delivery configuration](:method:LogDelivery/Create) that uses - the credential and storage configuration objects from previous steps. You can - specify if the logs should include all events of that log type in your account - (_Account level_ delivery) or only events for a specific set of workspaces - (_workspace level_ delivery). Account level log delivery applies to all - current and future workspaces plus account level logs, while workspace level - log delivery solely delivers logs related to the specified workspaces. You can - create multiple types of delivery configurations per account. - - For billable usage delivery: * For more information about billable usage logs, - see [Billable usage log delivery]. For the CSV schema, see the [Usage page]. * - The delivery location is //billable-usage/csv/, where - is the name of the optional delivery path prefix you set up during - log delivery configuration. Files are named - workspaceId=-usageMonth=.csv. * All billable usage logs - apply to specific workspaces (_workspace level_ logs). You can aggregate usage - for your entire account by creating an _account level_ delivery configuration - that delivers logs for all current and future workspaces in your account. * - The files are delivered daily by overwriting the month's CSV file for each - workspace. - - For audit log delivery: * For more information about about audit log delivery, - see [Audit log delivery], which includes information about the used JSON - schema. * The delivery location is - //workspaceId=/date=/auditlogs_.json. 
- Files may get overwritten with the same content multiple times to achieve - exactly-once delivery. * If the audit log delivery configuration included - specific workspace IDs, only _workspace-level_ audit logs for those workspaces - are delivered. If the log delivery configuration applies to the entire account - (_account level_ delivery configuration), the audit log delivery includes - workspace-level audit logs for all workspaces in the account as well as - account-level audit logs. See [Audit log delivery] for details. * Auditable - events are typically available in logs within 15 minutes. - - [Audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html - [Billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html - [Usage page]: https://docs.databricks.com/administration-guide/account-settings/usage.html - [create a new AWS S3 bucket]: https://docs.databricks.com/administration-guide/account-api/aws-storage.html`, + Short: `These APIs manage Log delivery configurations for this account.`, + Long: `These APIs manage Log delivery configurations for this account. Log delivery + configs enable you to configure the delivery of the specified type of logs to + your storage account.`, GroupID: "billing", Annotations: map[string]string{ "package": "billing", @@ -119,8 +63,6 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - // TODO: complex arg: log_delivery_configuration - cmd.Use = "create" cmd.Short = `Create a new log delivery configuration.` cmd.Long = `Create a new log delivery configuration. 
@@ -153,11 +95,6 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(0) - return check(cmd, args) - } - cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -174,6 +111,8 @@ func newCreate() *cobra.Command { return err } } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } response, err := a.LogDelivery.Create(ctx, createReq) @@ -219,7 +158,7 @@ func newGet() *cobra.Command { specified by ID. Arguments: - LOG_DELIVERY_CONFIGURATION_ID: Databricks log delivery configuration ID` + LOG_DELIVERY_CONFIGURATION_ID: The log delivery configuration id of customer` cmd.Annotations = make(map[string]string) @@ -236,14 +175,14 @@ func newGet() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Log Delivery drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "Databricks log delivery configuration ID") + id, err := cmdio.Select(ctx, names, "The log delivery configuration id of customer") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have databricks log delivery configuration id") + return fmt.Errorf("expected to have the log delivery configuration id of customer") } getReq.LogDeliveryConfigurationId = args[0] @@ -282,9 +221,10 @@ func newList() *cobra.Command { // TODO: short flags - cmd.Flags().StringVar(&listReq.CredentialsId, "credentials-id", listReq.CredentialsId, `Filter by credential configuration ID.`) - cmd.Flags().Var(&listReq.Status, "status", `Filter by status ENABLED or DISABLED. 
Supported values: [DISABLED, ENABLED]`) - cmd.Flags().StringVar(&listReq.StorageConfigurationId, "storage-configuration-id", listReq.StorageConfigurationId, `Filter by storage configuration ID.`) + cmd.Flags().StringVar(&listReq.CredentialsId, "credentials-id", listReq.CredentialsId, `The Credentials id to filter the search results with.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token received from a previous get all budget configurations call.`) + cmd.Flags().Var(&listReq.Status, "status", `The log delivery status to filter the search results with. Supported values: [DISABLED, ENABLED]`) + cmd.Flags().StringVar(&listReq.StorageConfigurationId, "storage-configuration-id", listReq.StorageConfigurationId, `The Storage Configuration id to filter the search results with.`) cmd.Use = "list" cmd.Short = `Get all log delivery configurations.` @@ -350,7 +290,7 @@ func newPatchStatus() *cobra.Command { [Create log delivery](:method:LogDelivery/Create). Arguments: - LOG_DELIVERY_CONFIGURATION_ID: Databricks log delivery configuration ID + LOG_DELIVERY_CONFIGURATION_ID: The log delivery configuration id of customer STATUS: Status of log delivery configuration. Set to ENABLED (enabled) or DISABLED (disabled). Defaults to ENABLED. You can [enable or disable the configuration](#operation/patch-log-delivery-config-status) later. 
diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index 5b098ed01d..b7682f8780 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -46,7 +46,7 @@ func New() *cobra.Command { cmd.AddCommand(newGetPrivateEndpointRule()) cmd.AddCommand(newListNetworkConnectivityConfigurations()) cmd.AddCommand(newListPrivateEndpointRules()) - cmd.AddCommand(newUpdateNccAzurePrivateEndpointRulePublic()) + cmd.AddCommand(newUpdatePrivateEndpointRule()) // Apply optional overrides to this command. for _, fn := range cmdOverrides { @@ -178,9 +178,12 @@ func newCreatePrivateEndpointRule() *cobra.Command { cmd.Flags().Var(&createPrivateEndpointRuleJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: domain_names - cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.GroupId, "group-id", createPrivateEndpointRuleReq.PrivateEndpointRule.GroupId, `Only used by private endpoints to Azure first-party services.`) + cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.EndpointService, "endpoint-service", createPrivateEndpointRuleReq.PrivateEndpointRule.EndpointService, `The full target AWS endpoint service name that connects to the destination resources of the private endpoint.`) + cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.GroupId, "group-id", createPrivateEndpointRuleReq.PrivateEndpointRule.GroupId, `Not used by customer-managed private endpoint services.`) + cmd.Flags().StringVar(&createPrivateEndpointRuleReq.PrivateEndpointRule.ResourceId, "resource-id", createPrivateEndpointRuleReq.PrivateEndpointRule.ResourceId, `The Azure resource ID of the target resource.`) + // TODO: array: resource_names - cmd.Use = "create-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID RESOURCE_ID" + cmd.Use = "create-private-endpoint-rule 
NETWORK_CONNECTIVITY_CONFIG_ID" cmd.Short = `Create a private endpoint rule.` cmd.Long = `Create a private endpoint rule. @@ -196,20 +199,12 @@ func newCreatePrivateEndpointRule() *cobra.Command { [serverless private link]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security/serverless-private-link Arguments: - NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID. - RESOURCE_ID: The Azure resource ID of the target resource.` + NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(1)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, provide only NETWORK_CONNECTIVITY_CONFIG_ID as positional arguments. Provide 'resource_id' in your JSON input") - } - return nil - } - check := root.ExactArgs(2) + check := root.ExactArgs(1) return check(cmd, args) } @@ -231,9 +226,6 @@ func newCreatePrivateEndpointRule() *cobra.Command { } } createPrivateEndpointRuleReq.NetworkConnectivityConfigId = args[0] - if !cmd.Flags().Changed("json") { - createPrivateEndpointRuleReq.PrivateEndpointRule.ResourceId = args[1] - } response, err := a.NetworkConnectivity.CreatePrivateEndpointRule(ctx, createPrivateEndpointRuleReq) if err != nil { @@ -604,28 +596,30 @@ func newListPrivateEndpointRules() *cobra.Command { return cmd } -// start update-ncc-azure-private-endpoint-rule-public command +// start update-private-endpoint-rule command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. 
-var updateNccAzurePrivateEndpointRulePublicOverrides []func( +var updatePrivateEndpointRuleOverrides []func( *cobra.Command, - *settings.UpdateNccAzurePrivateEndpointRulePublicRequest, + *settings.UpdateNccPrivateEndpointRuleRequest, ) -func newUpdateNccAzurePrivateEndpointRulePublic() *cobra.Command { +func newUpdatePrivateEndpointRule() *cobra.Command { cmd := &cobra.Command{} - var updateNccAzurePrivateEndpointRulePublicReq settings.UpdateNccAzurePrivateEndpointRulePublicRequest - updateNccAzurePrivateEndpointRulePublicReq.PrivateEndpointRule = settings.UpdatePrivateEndpointRule{} - var updateNccAzurePrivateEndpointRulePublicJson flags.JsonFlag + var updatePrivateEndpointRuleReq settings.UpdateNccPrivateEndpointRuleRequest + updatePrivateEndpointRuleReq.PrivateEndpointRule = settings.UpdatePrivateEndpointRule{} + var updatePrivateEndpointRuleJson flags.JsonFlag // TODO: short flags - cmd.Flags().Var(&updateNccAzurePrivateEndpointRulePublicJson, "json", `either inline JSON string or @path/to/file.json with request body`) + cmd.Flags().Var(&updatePrivateEndpointRuleJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: domain_names + cmd.Flags().BoolVar(&updatePrivateEndpointRuleReq.PrivateEndpointRule.Enabled, "enabled", updatePrivateEndpointRuleReq.PrivateEndpointRule.Enabled, `Only used by private endpoints towards an AWS S3 service.`) + // TODO: array: resource_names - cmd.Use = "update-ncc-azure-private-endpoint-rule-public NETWORK_CONNECTIVITY_CONFIG_ID PRIVATE_ENDPOINT_RULE_ID" + cmd.Use = "update-private-endpoint-rule NETWORK_CONNECTIVITY_CONFIG_ID PRIVATE_ENDPOINT_RULE_ID" cmd.Short = `Update a private endpoint rule.` cmd.Long = `Update a private endpoint rule. @@ -633,12 +627,10 @@ func newUpdateNccAzurePrivateEndpointRulePublic() *cobra.Command { customer-managed resources is allowed to be updated. Arguments: - NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectivity Configuration ID. 
+ NETWORK_CONNECTIVITY_CONFIG_ID: The ID of a network connectivity configuration, which is the parent + resource of this private endpoint rule object. PRIVATE_ENDPOINT_RULE_ID: Your private endpoint rule ID.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -652,7 +644,7 @@ func newUpdateNccAzurePrivateEndpointRulePublic() *cobra.Command { a := cmdctx.AccountClient(ctx) if cmd.Flags().Changed("json") { - diags := updateNccAzurePrivateEndpointRulePublicJson.Unmarshal(&updateNccAzurePrivateEndpointRulePublicReq.PrivateEndpointRule) + diags := updatePrivateEndpointRuleJson.Unmarshal(&updatePrivateEndpointRuleReq.PrivateEndpointRule) if diags.HasError() { return diags.Error() } @@ -663,10 +655,10 @@ func newUpdateNccAzurePrivateEndpointRulePublic() *cobra.Command { } } } - updateNccAzurePrivateEndpointRulePublicReq.NetworkConnectivityConfigId = args[0] - updateNccAzurePrivateEndpointRulePublicReq.PrivateEndpointRuleId = args[1] + updatePrivateEndpointRuleReq.NetworkConnectivityConfigId = args[0] + updatePrivateEndpointRuleReq.PrivateEndpointRuleId = args[1] - response, err := a.NetworkConnectivity.UpdateNccAzurePrivateEndpointRulePublic(ctx, updateNccAzurePrivateEndpointRulePublicReq) + response, err := a.NetworkConnectivity.UpdatePrivateEndpointRule(ctx, updatePrivateEndpointRuleReq) if err != nil { return err } @@ -678,8 +670,8 @@ func newUpdateNccAzurePrivateEndpointRulePublic() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. 
- for _, fn := range updateNccAzurePrivateEndpointRulePublicOverrides { - fn(cmd, &updateNccAzurePrivateEndpointRulePublicReq) + for _, fn := range updatePrivateEndpointRuleOverrides { + fn(cmd, &updatePrivateEndpointRuleReq) } return cmd diff --git a/cmd/account/network-policies/network-policies.go b/cmd/account/network-policies/network-policies.go index 00f5a72f71..c22c6576bb 100755 --- a/cmd/account/network-policies/network-policies.go +++ b/cmd/account/network-policies/network-policies.go @@ -30,10 +30,7 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "settings", }, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, + RunE: root.ReportUnknownSubcommand, } // Add methods diff --git a/cmd/account/workspace-network-configuration/workspace-network-configuration.go b/cmd/account/workspace-network-configuration/workspace-network-configuration.go index c94ac5a96f..dee1c53af5 100755 --- a/cmd/account/workspace-network-configuration/workspace-network-configuration.go +++ b/cmd/account/workspace-network-configuration/workspace-network-configuration.go @@ -20,22 +20,19 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "workspace-network-configuration", - Short: `These APIs allow configuration of network settings for Databricks workspaces.`, - Long: `These APIs allow configuration of network settings for Databricks workspaces. - Each workspace is always associated with exactly one network policy that - controls which network destinations can be accessed from the Databricks - environment. By default, workspaces are associated with the 'default-policy' - network policy. 
You cannot create or delete a workspace's network - configuration, only update it to associate the workspace with a different - policy.`, + Short: `These APIs allow configuration of network settings for Databricks workspaces by selecting which network policy to associate with the workspace.`, + Long: `These APIs allow configuration of network settings for Databricks workspaces + by selecting which network policy to associate with the workspace. Each + workspace is always associated with exactly one network policy that controls + which network destinations can be accessed from the Databricks environment. By + default, workspaces are associated with the 'default-policy' network policy. + You cannot create or delete a workspace's network option, only update it to + associate the workspace with a different policy`, GroupID: "settings", Annotations: map[string]string{ "package": "settings", }, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, + RunE: root.ReportUnknownSubcommand, } // Add methods @@ -67,12 +64,12 @@ func newGetWorkspaceNetworkOptionRpc() *cobra.Command { // TODO: short flags cmd.Use = "get-workspace-network-option-rpc WORKSPACE_ID" - cmd.Short = `Get workspace network configuration.` - cmd.Long = `Get workspace network configuration. + cmd.Short = `Get workspace network option.` + cmd.Long = `Get workspace network option. - Gets the network configuration for a workspace. Every workspace has exactly - one network policy binding, with 'default-policy' used if no explicit - assignment exists. + Gets the network option for a workspace. Every workspace has exactly one + network policy binding, with 'default-policy' used if no explicit assignment + exists. 
Arguments: WORKSPACE_ID: The workspace ID.` @@ -136,12 +133,12 @@ func newUpdateWorkspaceNetworkOptionRpc() *cobra.Command { cmd.Flags().Int64Var(&updateWorkspaceNetworkOptionRpcReq.WorkspaceNetworkOption.WorkspaceId, "workspace-id", updateWorkspaceNetworkOptionRpcReq.WorkspaceNetworkOption.WorkspaceId, `The workspace ID.`) cmd.Use = "update-workspace-network-option-rpc WORKSPACE_ID" - cmd.Short = `Update workspace network configuration.` - cmd.Long = `Update workspace network configuration. + cmd.Short = `Update workspace network option.` + cmd.Long = `Update workspace network option. - Updates the network configuration for a workspace. This operation associates - the workspace with the specified network policy. To revert to the default - policy, specify 'default-policy' as the network_policy_id. + Updates the network option for a workspace. This operation associates the + workspace with the specified network policy. To revert to the default policy, + specify 'default-policy' as the network_policy_id. Arguments: WORKSPACE_ID: The workspace ID.` diff --git a/cmd/cmd.go b/cmd/cmd.go index 4f5337fd3c..acd8968df4 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -24,6 +24,41 @@ const ( permissionsGroup = "permissions" ) +// filterGroups returns command groups that have at least one available (non-hidden) command. +// Empty groups or groups with only hidden commands are filtered out from the help output. +// Commands that belong to filtered groups will have their GroupID cleared. 
+func filterGroups(groups []cobra.Group, allCommands []*cobra.Command) []cobra.Group { + var filteredGroups []cobra.Group + + // Create a map to track which groups have available commands + groupHasAvailableCommands := make(map[string]bool) + + // Check each command to see if it belongs to a group and is available + for _, cmd := range allCommands { + if cmd.GroupID != "" && cmd.IsAvailableCommand() { + groupHasAvailableCommands[cmd.GroupID] = true + } + } + + // Collect groups that have available commands + validGroupIDs := make(map[string]bool) + for _, group := range groups { + if groupHasAvailableCommands[group.ID] { + filteredGroups = append(filteredGroups, group) + validGroupIDs[group.ID] = true + } + } + + // Clear GroupID for commands that belong to filtered groups + for _, cmd := range allCommands { + if cmd.GroupID != "" && !validGroupIDs[cmd.GroupID] { + cmd.GroupID = "" + } + } + + return filteredGroups +} + func New(ctx context.Context) *cobra.Command { cli := root.New(ctx) @@ -31,7 +66,8 @@ func New(ctx context.Context) *cobra.Command { cli.AddCommand(account.New()) // Add workspace subcommands. - for _, cmd := range workspace.All() { + workspaceCommands := workspace.All() + for _, cmd := range workspaceCommands { // Built-in groups for the workspace commands. groups := []cobra.Group{ { @@ -60,12 +96,6 @@ func New(ctx context.Context) *cobra.Command { cli.AddCommand(cmd) } - // Add workspace command groups. - groups := workspace.Groups() - for i := range groups { - cli.AddGroup(&groups[i]) - } - // Add other subcommands. cli.AddCommand(api.New()) cli.AddCommand(auth.New()) @@ -77,5 +107,13 @@ func New(ctx context.Context) *cobra.Command { cli.AddCommand(version.New()) cli.AddCommand(selftest.New()) + // Add workspace command groups, filtering out empty groups or groups with only hidden commands. 
+ allGroups := workspace.Groups() + allCommands := cli.Commands() + filteredGroups := filterGroups(allGroups, allCommands) + for i := range filteredGroups { + cli.AddGroup(&filteredGroups[i]) + } + return cli } diff --git a/cmd/workspace/alerts-v2/alerts-v2.go b/cmd/workspace/alerts-v2/alerts-v2.go index 37db596a0e..1e1267fc4e 100755 --- a/cmd/workspace/alerts-v2/alerts-v2.go +++ b/cmd/workspace/alerts-v2/alerts-v2.go @@ -20,16 +20,13 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "alerts-v2", - Short: `TODO: Add description.`, - Long: `TODO: Add description`, + Short: `New version of SQL Alerts.`, + Long: `New version of SQL Alerts`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", }, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, + RunE: root.ReportUnknownSubcommand, } // Add methods diff --git a/cmd/workspace/clean-room-assets/clean-room-assets.go b/cmd/workspace/clean-room-assets/clean-room-assets.go index e8c4b9cb3b..14242dd59b 100755 --- a/cmd/workspace/clean-room-assets/clean-room-assets.go +++ b/cmd/workspace/clean-room-assets/clean-room-assets.go @@ -149,7 +149,7 @@ func newDelete() *cobra.Command { // TODO: short flags - cmd.Use = "delete CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME" + cmd.Use = "delete CLEAN_ROOM_NAME ASSET_TYPE NAME" cmd.Short = `Delete an asset.` cmd.Long = `Delete an asset. @@ -159,7 +159,7 @@ func newDelete() *cobra.Command { CLEAN_ROOM_NAME: Name of the clean room. ASSET_TYPE: The type of the asset. 
Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME] - ASSET_FULL_NAME: The fully qualified name of the asset, it is same as the name field in + NAME: The fully qualified name of the asset, it is same as the name field in CleanRoomAsset.` cmd.Annotations = make(map[string]string) @@ -179,7 +179,7 @@ func newDelete() *cobra.Command { if err != nil { return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) } - deleteReq.AssetFullName = args[2] + deleteReq.Name = args[2] err = w.CleanRoomAssets.Delete(ctx, deleteReq) if err != nil { @@ -216,7 +216,7 @@ func newGet() *cobra.Command { // TODO: short flags - cmd.Use = "get CLEAN_ROOM_NAME ASSET_TYPE ASSET_FULL_NAME" + cmd.Use = "get CLEAN_ROOM_NAME ASSET_TYPE NAME" cmd.Short = `Get an asset.` cmd.Long = `Get an asset. @@ -226,7 +226,7 @@ func newGet() *cobra.Command { CLEAN_ROOM_NAME: Name of the clean room. ASSET_TYPE: The type of the asset. Supported values: [FOREIGN_TABLE, NOTEBOOK_FILE, TABLE, VIEW, VOLUME] - ASSET_FULL_NAME: The fully qualified name of the asset, it is same as the name field in + NAME: The fully qualified name of the asset, it is same as the name field in CleanRoomAsset.` cmd.Annotations = make(map[string]string) @@ -246,7 +246,7 @@ func newGet() *cobra.Command { if err != nil { return fmt.Errorf("invalid ASSET_TYPE: %s", args[1]) } - getReq.AssetFullName = args[2] + getReq.Name = args[2] response, err := w.CleanRoomAssets.Get(ctx, getReq) if err != nil { diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index c496d588bb..545b9c486e 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -24,10 +24,11 @@ import ( credentials "github.com/databricks/cli/cmd/workspace/credentials" credentials_manager "github.com/databricks/cli/cmd/workspace/credentials-manager" current_user "github.com/databricks/cli/cmd/workspace/current-user" + custom_llms "github.com/databricks/cli/cmd/workspace/custom-llms" dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets" 
dashboards "github.com/databricks/cli/cmd/workspace/dashboards" data_sources "github.com/databricks/cli/cmd/workspace/data-sources" - database_instances "github.com/databricks/cli/cmd/workspace/database-instances" + database "github.com/databricks/cli/cmd/workspace/database" experiments "github.com/databricks/cli/cmd/workspace/experiments" external_locations "github.com/databricks/cli/cmd/workspace/external-locations" forecasting "github.com/databricks/cli/cmd/workspace/forecasting" @@ -63,10 +64,10 @@ import ( provider_provider_analytics_dashboards "github.com/databricks/cli/cmd/workspace/provider-provider-analytics-dashboards" provider_providers "github.com/databricks/cli/cmd/workspace/provider-providers" providers "github.com/databricks/cli/cmd/workspace/providers" + quality_monitor_v2 "github.com/databricks/cli/cmd/workspace/quality-monitor-v2" quality_monitors "github.com/databricks/cli/cmd/workspace/quality-monitors" queries "github.com/databricks/cli/cmd/workspace/queries" queries_legacy "github.com/databricks/cli/cmd/workspace/queries-legacy" - query_execution "github.com/databricks/cli/cmd/workspace/query-execution" query_history "github.com/databricks/cli/cmd/workspace/query-history" query_visualizations "github.com/databricks/cli/cmd/workspace/query-visualizations" query_visualizations_legacy "github.com/databricks/cli/cmd/workspace/query-visualizations-legacy" @@ -125,10 +126,11 @@ func All() []*cobra.Command { out = append(out, credentials.New()) out = append(out, credentials_manager.New()) out = append(out, current_user.New()) + out = append(out, custom_llms.New()) out = append(out, dashboard_widgets.New()) out = append(out, dashboards.New()) out = append(out, data_sources.New()) - out = append(out, database_instances.New()) + out = append(out, database.New()) out = append(out, experiments.New()) out = append(out, external_locations.New()) out = append(out, functions.New()) @@ -163,10 +165,10 @@ func All() []*cobra.Command { out = append(out, 
provider_provider_analytics_dashboards.New()) out = append(out, provider_providers.New()) out = append(out, providers.New()) + out = append(out, quality_monitor_v2.New()) out = append(out, quality_monitors.New()) out = append(out, queries.New()) out = append(out, queries_legacy.New()) - out = append(out, query_execution.New()) out = append(out, query_history.New()) out = append(out, query_visualizations.New()) out = append(out, query_visualizations_legacy.New()) diff --git a/cmd/workspace/custom-llms/custom-llms.go b/cmd/workspace/custom-llms/custom-llms.go new file mode 100755 index 0000000000..34ad043881 --- /dev/null +++ b/cmd/workspace/custom-llms/custom-llms.go @@ -0,0 +1,287 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package custom_llms + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/aibuilder" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "custom-llms", + Short: `The Custom LLMs service manages state and powers the UI for the Custom LLM product.`, + Long: `The Custom LLMs service manages state and powers the UI for the Custom LLM + product.`, + GroupID: "aibuilder", + Annotations: map[string]string{ + "package": "aibuilder", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newCancel()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. 
+ for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start cancel command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cancelOverrides []func( + *cobra.Command, + *aibuilder.CancelCustomLlmOptimizationRunRequest, +) + +func newCancel() *cobra.Command { + cmd := &cobra.Command{} + + var cancelReq aibuilder.CancelCustomLlmOptimizationRunRequest + + // TODO: short flags + + cmd.Use = "cancel ID" + cmd.Short = `Cancel a Custom LLM Optimization Run.` + cmd.Long = `Cancel a Custom LLM Optimization Run.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + cancelReq.Id = args[0] + + err = w.CustomLlms.Cancel(ctx, cancelReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range cancelOverrides { + fn(cmd, &cancelReq) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *aibuilder.StartCustomLlmOptimizationRunRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq aibuilder.StartCustomLlmOptimizationRunRequest + + // TODO: short flags + + cmd.Use = "create ID" + cmd.Short = `Start a Custom LLM Optimization Run.` + cmd.Long = `Start a Custom LLM Optimization Run. 
+ + Arguments: + ID: The Id of the tile.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + createReq.Id = args[0] + + response, err := w.CustomLlms.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *aibuilder.GetCustomLlmRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq aibuilder.GetCustomLlmRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get a Custom LLM.` + cmd.Long = `Get a Custom LLM. + + Arguments: + ID: The id of the custom llm` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getReq.Id = args[0] + + response, err := w.CustomLlms.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *aibuilder.UpdateCustomLlmRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq aibuilder.UpdateCustomLlmRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Update a Custom LLM.` + cmd.Long = `Update a Custom LLM. + + Arguments: + ID: The id of the custom llm` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.CustomLlms.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CustomLlms diff --git a/cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go b/cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go new file mode 100755 index 0000000000..0da11badd3 --- /dev/null +++ b/cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go @@ -0,0 +1,218 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package dashboard_email_subscriptions + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "dashboard-email-subscriptions", + Short: `Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can send subscription emails containing PDFs and/or images of the dashboard.`, + Long: `Controls whether schedules or workload tasks for refreshing AI/BI Dashboards + in the workspace can send subscription emails containing PDFs and/or images of + the dashboard. By default, this setting is enabled (set to true)`, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteDashboardEmailSubscriptionsRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteDashboardEmailSubscriptionsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the Dashboard Email Subscriptions setting.` + cmd.Long = `Delete the Dashboard Email Subscriptions setting. + + Reverts the Dashboard Email Subscriptions setting to its default value.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Settings.DashboardEmailSubscriptions().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getOverrides []func( + *cobra.Command, + *settings.GetDashboardEmailSubscriptionsRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetDashboardEmailSubscriptionsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the Dashboard Email Subscriptions setting.` + cmd.Long = `Get the Dashboard Email Subscriptions setting. + + Gets the Dashboard Email Subscriptions setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Settings.DashboardEmailSubscriptions().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *settings.UpdateDashboardEmailSubscriptionsRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateDashboardEmailSubscriptionsRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the Dashboard Email Subscriptions setting.` + cmd.Long = `Update the Dashboard Email Subscriptions setting. + + Updates the Dashboard Email Subscriptions setting.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.DashboardEmailSubscriptions().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service DashboardEmailSubscriptions diff --git a/cmd/workspace/database-instances/database-instances.go b/cmd/workspace/database/database.go similarity index 67% rename from cmd/workspace/database-instances/database-instances.go rename to cmd/workspace/database/database.go index 64fe1d7f82..f955d5953f 100755 --- a/cmd/workspace/database-instances/database-instances.go +++ b/cmd/workspace/database/database.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -package database_instances +package database import ( "fmt" @@ -9,7 +9,7 @@ import ( "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/databricks-sdk-go/service/database" "github.com/spf13/cobra" ) @@ -19,12 +19,12 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "database-instances", + Use: "database", Short: `Database Instances provide access to a database via REST API or direct SQL.`, Long: `Database Instances provide access to a database via REST API or direct SQL.`, - GroupID: "catalog", + GroupID: "database", Annotations: map[string]string{ - "package": "catalog", + "package": "database", }, // This service is being previewed; hide from help output. 
@@ -35,13 +35,17 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newCreateDatabaseCatalog()) cmd.AddCommand(newCreateDatabaseInstance()) + cmd.AddCommand(newCreateDatabaseTable()) cmd.AddCommand(newCreateSyncedDatabaseTable()) cmd.AddCommand(newDeleteDatabaseCatalog()) cmd.AddCommand(newDeleteDatabaseInstance()) + cmd.AddCommand(newDeleteDatabaseTable()) cmd.AddCommand(newDeleteSyncedDatabaseTable()) cmd.AddCommand(newFindDatabaseInstanceByUid()) + cmd.AddCommand(newGenerateDatabaseCredential()) cmd.AddCommand(newGetDatabaseCatalog()) cmd.AddCommand(newGetDatabaseInstance()) + cmd.AddCommand(newGetDatabaseTable()) cmd.AddCommand(newGetSyncedDatabaseTable()) cmd.AddCommand(newListDatabaseInstances()) cmd.AddCommand(newUpdateDatabaseInstance()) @@ -60,14 +64,14 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var createDatabaseCatalogOverrides []func( *cobra.Command, - *catalog.CreateDatabaseCatalogRequest, + *database.CreateDatabaseCatalogRequest, ) func newCreateDatabaseCatalog() *cobra.Command { cmd := &cobra.Command{} - var createDatabaseCatalogReq catalog.CreateDatabaseCatalogRequest - createDatabaseCatalogReq.Catalog = catalog.DatabaseCatalog{} + var createDatabaseCatalogReq database.CreateDatabaseCatalogRequest + createDatabaseCatalogReq.Catalog = database.DatabaseCatalog{} var createDatabaseCatalogJson flags.JsonFlag // TODO: short flags @@ -125,7 +129,7 @@ func newCreateDatabaseCatalog() *cobra.Command { createDatabaseCatalogReq.Catalog.DatabaseName = args[2] } - response, err := w.DatabaseInstances.CreateDatabaseCatalog(ctx, createDatabaseCatalogReq) + response, err := w.Database.CreateDatabaseCatalog(ctx, createDatabaseCatalogReq) if err != nil { return err } @@ -150,21 +154,19 @@ func newCreateDatabaseCatalog() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var createDatabaseInstanceOverrides []func( *cobra.Command, - *catalog.CreateDatabaseInstanceRequest, + *database.CreateDatabaseInstanceRequest, ) func newCreateDatabaseInstance() *cobra.Command { cmd := &cobra.Command{} - var createDatabaseInstanceReq catalog.CreateDatabaseInstanceRequest - createDatabaseInstanceReq.DatabaseInstance = catalog.DatabaseInstance{} + var createDatabaseInstanceReq database.CreateDatabaseInstanceRequest + createDatabaseInstanceReq.DatabaseInstance = database.DatabaseInstance{} var createDatabaseInstanceJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createDatabaseInstanceJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.AdminPassword, "admin-password", createDatabaseInstanceReq.DatabaseInstance.AdminPassword, `Password for admin user to create.`) - cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.AdminRolename, "admin-rolename", createDatabaseInstanceReq.DatabaseInstance.AdminRolename, `Name of the admin role for the instance.`) cmd.Flags().StringVar(&createDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", createDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) cmd.Flags().BoolVar(&createDatabaseInstanceReq.DatabaseInstance.Stopped, "stopped", createDatabaseInstanceReq.DatabaseInstance.Stopped, `Whether the instance is stopped.`) @@ -210,7 +212,7 @@ func newCreateDatabaseInstance() *cobra.Command { createDatabaseInstanceReq.DatabaseInstance.Name = args[0] } - response, err := w.DatabaseInstances.CreateDatabaseInstance(ctx, createDatabaseInstanceReq) + response, err := w.Database.CreateDatabaseInstance(ctx, createDatabaseInstanceReq) if err != nil { return err } @@ -229,20 +231,103 @@ func newCreateDatabaseInstance() *cobra.Command { return cmd } +// start create-database-table command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var createDatabaseTableOverrides []func( + *cobra.Command, + *database.CreateDatabaseTableRequest, +) + +func newCreateDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var createDatabaseTableReq database.CreateDatabaseTableRequest + createDatabaseTableReq.Table = database.DatabaseTable{} + var createDatabaseTableJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createDatabaseTableJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createDatabaseTableReq.Table.DatabaseInstanceName, "database-instance-name", createDatabaseTableReq.Table.DatabaseInstanceName, `Name of the target database instance.`) + cmd.Flags().StringVar(&createDatabaseTableReq.Table.LogicalDatabaseName, "logical-database-name", createDatabaseTableReq.Table.LogicalDatabaseName, `Target Postgres database object (logical database) name for this table.`) + + cmd.Use = "create-database-table NAME" + cmd.Short = `Create a Database Table.` + cmd.Long = `Create a Database Table. + + Arguments: + NAME: Full three-part (catalog, schema, table) name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createDatabaseTableJson.Unmarshal(&createDatabaseTableReq.Table) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createDatabaseTableReq.Table.Name = args[0] + } + + response, err := w.Database.CreateDatabaseTable(ctx, createDatabaseTableReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createDatabaseTableOverrides { + fn(cmd, &createDatabaseTableReq) + } + + return cmd +} + // start create-synced-database-table command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. 
var createSyncedDatabaseTableOverrides []func( *cobra.Command, - *catalog.CreateSyncedDatabaseTableRequest, + *database.CreateSyncedDatabaseTableRequest, ) func newCreateSyncedDatabaseTable() *cobra.Command { cmd := &cobra.Command{} - var createSyncedDatabaseTableReq catalog.CreateSyncedDatabaseTableRequest - createSyncedDatabaseTableReq.SyncedTable = catalog.SyncedDatabaseTable{} + var createSyncedDatabaseTableReq database.CreateSyncedDatabaseTableRequest + createSyncedDatabaseTableReq.SyncedTable = database.SyncedDatabaseTable{} var createSyncedDatabaseTableJson flags.JsonFlag // TODO: short flags @@ -295,7 +380,7 @@ func newCreateSyncedDatabaseTable() *cobra.Command { createSyncedDatabaseTableReq.SyncedTable.Name = args[0] } - response, err := w.DatabaseInstances.CreateSyncedDatabaseTable(ctx, createSyncedDatabaseTableReq) + response, err := w.Database.CreateSyncedDatabaseTable(ctx, createSyncedDatabaseTableReq) if err != nil { return err } @@ -320,13 +405,13 @@ func newCreateSyncedDatabaseTable() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var deleteDatabaseCatalogOverrides []func( *cobra.Command, - *catalog.DeleteDatabaseCatalogRequest, + *database.DeleteDatabaseCatalogRequest, ) func newDeleteDatabaseCatalog() *cobra.Command { cmd := &cobra.Command{} - var deleteDatabaseCatalogReq catalog.DeleteDatabaseCatalogRequest + var deleteDatabaseCatalogReq database.DeleteDatabaseCatalogRequest // TODO: short flags @@ -348,7 +433,7 @@ func newDeleteDatabaseCatalog() *cobra.Command { deleteDatabaseCatalogReq.Name = args[0] - err = w.DatabaseInstances.DeleteDatabaseCatalog(ctx, deleteDatabaseCatalogReq) + err = w.Database.DeleteDatabaseCatalog(ctx, deleteDatabaseCatalogReq) if err != nil { return err } @@ -373,13 +458,13 @@ func newDeleteDatabaseCatalog() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var deleteDatabaseInstanceOverrides []func( *cobra.Command, - *catalog.DeleteDatabaseInstanceRequest, + *database.DeleteDatabaseInstanceRequest, ) func newDeleteDatabaseInstance() *cobra.Command { cmd := &cobra.Command{} - var deleteDatabaseInstanceReq catalog.DeleteDatabaseInstanceRequest + var deleteDatabaseInstanceReq database.DeleteDatabaseInstanceRequest // TODO: short flags @@ -407,7 +492,7 @@ func newDeleteDatabaseInstance() *cobra.Command { deleteDatabaseInstanceReq.Name = args[0] - err = w.DatabaseInstances.DeleteDatabaseInstance(ctx, deleteDatabaseInstanceReq) + err = w.Database.DeleteDatabaseInstance(ctx, deleteDatabaseInstanceReq) if err != nil { return err } @@ -426,19 +511,72 @@ func newDeleteDatabaseInstance() *cobra.Command { return cmd } +// start delete-database-table command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteDatabaseTableOverrides []func( + *cobra.Command, + *database.DeleteDatabaseTableRequest, +) + +func newDeleteDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var deleteDatabaseTableReq database.DeleteDatabaseTableRequest + + // TODO: short flags + + cmd.Use = "delete-database-table NAME" + cmd.Short = `Delete a Database Table.` + cmd.Long = `Delete a Database Table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteDatabaseTableReq.Name = args[0] + + err = w.Database.DeleteDatabaseTable(ctx, deleteDatabaseTableReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteDatabaseTableOverrides { + fn(cmd, &deleteDatabaseTableReq) + } + + return cmd +} + // start delete-synced-database-table command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. var deleteSyncedDatabaseTableOverrides []func( *cobra.Command, - *catalog.DeleteSyncedDatabaseTableRequest, + *database.DeleteSyncedDatabaseTableRequest, ) func newDeleteSyncedDatabaseTable() *cobra.Command { cmd := &cobra.Command{} - var deleteSyncedDatabaseTableReq catalog.DeleteSyncedDatabaseTableRequest + var deleteSyncedDatabaseTableReq database.DeleteSyncedDatabaseTableRequest // TODO: short flags @@ -460,7 +598,7 @@ func newDeleteSyncedDatabaseTable() *cobra.Command { deleteSyncedDatabaseTableReq.Name = args[0] - err = w.DatabaseInstances.DeleteSyncedDatabaseTable(ctx, deleteSyncedDatabaseTableReq) + err = w.Database.DeleteSyncedDatabaseTable(ctx, deleteSyncedDatabaseTableReq) if err != nil { return err } @@ -485,13 +623,13 @@ func newDeleteSyncedDatabaseTable() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var findDatabaseInstanceByUidOverrides []func( *cobra.Command, - *catalog.FindDatabaseInstanceByUidRequest, + *database.FindDatabaseInstanceByUidRequest, ) func newFindDatabaseInstanceByUid() *cobra.Command { cmd := &cobra.Command{} - var findDatabaseInstanceByUidReq catalog.FindDatabaseInstanceByUidRequest + var findDatabaseInstanceByUidReq database.FindDatabaseInstanceByUidRequest // TODO: short flags @@ -513,7 +651,7 @@ func newFindDatabaseInstanceByUid() *cobra.Command { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - response, err := w.DatabaseInstances.FindDatabaseInstanceByUid(ctx, findDatabaseInstanceByUidReq) + response, err := w.Database.FindDatabaseInstanceByUid(ctx, findDatabaseInstanceByUidReq) if err != nil { return err } @@ -532,19 +670,88 @@ func newFindDatabaseInstanceByUid() *cobra.Command { return cmd } +// start generate-database-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var generateDatabaseCredentialOverrides []func( + *cobra.Command, + *database.GenerateDatabaseCredentialRequest, +) + +func newGenerateDatabaseCredential() *cobra.Command { + cmd := &cobra.Command{} + + var generateDatabaseCredentialReq database.GenerateDatabaseCredentialRequest + var generateDatabaseCredentialJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&generateDatabaseCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: instance_names + cmd.Flags().StringVar(&generateDatabaseCredentialReq.RequestId, "request-id", generateDatabaseCredentialReq.RequestId, ``) + + cmd.Use = "generate-database-credential" + cmd.Short = `Generates a credential that can be used to access database instances.` + cmd.Long = `Generates a credential that can be used to access database instances.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := generateDatabaseCredentialJson.Unmarshal(&generateDatabaseCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.Database.GenerateDatabaseCredential(ctx, generateDatabaseCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range generateDatabaseCredentialOverrides { + fn(cmd, &generateDatabaseCredentialReq) + } + + return cmd +} + // start get-database-catalog command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. var getDatabaseCatalogOverrides []func( *cobra.Command, - *catalog.GetDatabaseCatalogRequest, + *database.GetDatabaseCatalogRequest, ) func newGetDatabaseCatalog() *cobra.Command { cmd := &cobra.Command{} - var getDatabaseCatalogReq catalog.GetDatabaseCatalogRequest + var getDatabaseCatalogReq database.GetDatabaseCatalogRequest // TODO: short flags @@ -566,7 +773,7 @@ func newGetDatabaseCatalog() *cobra.Command { getDatabaseCatalogReq.Name = args[0] - response, err := w.DatabaseInstances.GetDatabaseCatalog(ctx, getDatabaseCatalogReq) + response, err := w.Database.GetDatabaseCatalog(ctx, getDatabaseCatalogReq) if err != nil { return err } @@ -591,13 +798,13 @@ func newGetDatabaseCatalog() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getDatabaseInstanceOverrides []func( *cobra.Command, - *catalog.GetDatabaseInstanceRequest, + *database.GetDatabaseInstanceRequest, ) func newGetDatabaseInstance() *cobra.Command { cmd := &cobra.Command{} - var getDatabaseInstanceReq catalog.GetDatabaseInstanceRequest + var getDatabaseInstanceReq database.GetDatabaseInstanceRequest // TODO: short flags @@ -622,7 +829,7 @@ func newGetDatabaseInstance() *cobra.Command { getDatabaseInstanceReq.Name = args[0] - response, err := w.DatabaseInstances.GetDatabaseInstance(ctx, getDatabaseInstanceReq) + response, err := w.Database.GetDatabaseInstance(ctx, getDatabaseInstanceReq) if err != nil { return err } @@ -641,19 +848,72 @@ func newGetDatabaseInstance() *cobra.Command { return cmd } +// start get-database-table command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var getDatabaseTableOverrides []func( + *cobra.Command, + *database.GetDatabaseTableRequest, +) + +func newGetDatabaseTable() *cobra.Command { + cmd := &cobra.Command{} + + var getDatabaseTableReq database.GetDatabaseTableRequest + + // TODO: short flags + + cmd.Use = "get-database-table NAME" + cmd.Short = `Get a Database Table.` + cmd.Long = `Get a Database Table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getDatabaseTableReq.Name = args[0] + + response, err := w.Database.GetDatabaseTable(ctx, getDatabaseTableReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getDatabaseTableOverrides { + fn(cmd, &getDatabaseTableReq) + } + + return cmd +} + // start get-synced-database-table command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. 
var getSyncedDatabaseTableOverrides []func( *cobra.Command, - *catalog.GetSyncedDatabaseTableRequest, + *database.GetSyncedDatabaseTableRequest, ) func newGetSyncedDatabaseTable() *cobra.Command { cmd := &cobra.Command{} - var getSyncedDatabaseTableReq catalog.GetSyncedDatabaseTableRequest + var getSyncedDatabaseTableReq database.GetSyncedDatabaseTableRequest // TODO: short flags @@ -675,7 +935,7 @@ func newGetSyncedDatabaseTable() *cobra.Command { getSyncedDatabaseTableReq.Name = args[0] - response, err := w.DatabaseInstances.GetSyncedDatabaseTable(ctx, getSyncedDatabaseTableReq) + response, err := w.Database.GetSyncedDatabaseTable(ctx, getSyncedDatabaseTableReq) if err != nil { return err } @@ -700,13 +960,13 @@ func newGetSyncedDatabaseTable() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listDatabaseInstancesOverrides []func( *cobra.Command, - *catalog.ListDatabaseInstancesRequest, + *database.ListDatabaseInstancesRequest, ) func newListDatabaseInstances() *cobra.Command { cmd := &cobra.Command{} - var listDatabaseInstancesReq catalog.ListDatabaseInstancesRequest + var listDatabaseInstancesReq database.ListDatabaseInstancesRequest // TODO: short flags @@ -729,7 +989,7 @@ func newListDatabaseInstances() *cobra.Command { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - response := w.DatabaseInstances.ListDatabaseInstances(ctx, listDatabaseInstancesReq) + response := w.Database.ListDatabaseInstances(ctx, listDatabaseInstancesReq) return cmdio.RenderIterator(ctx, response) } @@ -751,21 +1011,19 @@ func newListDatabaseInstances() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var updateDatabaseInstanceOverrides []func( *cobra.Command, - *catalog.UpdateDatabaseInstanceRequest, + *database.UpdateDatabaseInstanceRequest, ) func newUpdateDatabaseInstance() *cobra.Command { cmd := &cobra.Command{} - var updateDatabaseInstanceReq catalog.UpdateDatabaseInstanceRequest - updateDatabaseInstanceReq.DatabaseInstance = catalog.DatabaseInstance{} + var updateDatabaseInstanceReq database.UpdateDatabaseInstanceRequest + updateDatabaseInstanceReq.DatabaseInstance = database.DatabaseInstance{} var updateDatabaseInstanceJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&updateDatabaseInstanceJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.AdminPassword, "admin-password", updateDatabaseInstanceReq.DatabaseInstance.AdminPassword, `Password for admin user to create.`) - cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.AdminRolename, "admin-rolename", updateDatabaseInstanceReq.DatabaseInstance.AdminRolename, `Name of the admin role for the instance.`) cmd.Flags().StringVar(&updateDatabaseInstanceReq.DatabaseInstance.Capacity, "capacity", updateDatabaseInstanceReq.DatabaseInstance.Capacity, `The sku of the instance.`) cmd.Flags().BoolVar(&updateDatabaseInstanceReq.DatabaseInstance.Stopped, "stopped", updateDatabaseInstanceReq.DatabaseInstance.Stopped, `Whether the instance is stopped.`) @@ -802,7 +1060,7 @@ func newUpdateDatabaseInstance() *cobra.Command { } updateDatabaseInstanceReq.Name = args[0] - response, err := w.DatabaseInstances.UpdateDatabaseInstance(ctx, updateDatabaseInstanceReq) + response, err := w.Database.UpdateDatabaseInstance(ctx, updateDatabaseInstanceReq) if err != nil { return err } @@ -821,4 +1079,4 @@ func newUpdateDatabaseInstance() *cobra.Command { return cmd } -// end service DatabaseInstances +// end service Database diff --git a/cmd/workspace/experiments/experiments.go 
b/cmd/workspace/experiments/experiments.go index d4a4738d07..1f94baf770 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -49,8 +49,6 @@ func New() *cobra.Command { cmd.AddCommand(newDeleteTag()) cmd.AddCommand(newFinalizeLoggedModel()) cmd.AddCommand(newGetByName()) - cmd.AddCommand(newGetCredentialsForTraceDataDownload()) - cmd.AddCommand(newGetCredentialsForTraceDataUpload()) cmd.AddCommand(newGetExperiment()) cmd.AddCommand(newGetHistory()) cmd.AddCommand(newGetLoggedModel()) @@ -59,7 +57,6 @@ func New() *cobra.Command { cmd.AddCommand(newGetRun()) cmd.AddCommand(newListArtifacts()) cmd.AddCommand(newListExperiments()) - cmd.AddCommand(newListLoggedModelArtifacts()) cmd.AddCommand(newLogBatch()) cmd.AddCommand(newLogInputs()) cmd.AddCommand(newLogLoggedModelParams()) @@ -209,9 +206,6 @@ func newCreateLoggedModel() *cobra.Command { Arguments: EXPERIMENT_ID: The ID of the experiment that owns the model.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -449,9 +443,6 @@ func newDeleteLoggedModel() *cobra.Command { Arguments: MODEL_ID: The ID of the logged model to delete.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -509,9 +500,6 @@ func newDeleteLoggedModelTag() *cobra.Command { MODEL_ID: The ID of the logged model to delete the tag from. TAG_KEY: The tag key.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -833,12 +821,9 @@ func newFinalizeLoggedModel() *cobra.Command { MODEL_ID: The ID of the logged model to finalize. STATUS: Whether or not the model is ready for use. 
"LOGGED_MODEL_UPLOAD_FAILED" indicates that something went wrong when - logging the model weights / agent code). + logging the model weights / agent code. Supported values: [LOGGED_MODEL_PENDING, LOGGED_MODEL_READY, LOGGED_MODEL_UPLOAD_FAILED]` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -963,124 +948,6 @@ func newGetByName() *cobra.Command { return cmd } -// start get-credentials-for-trace-data-download command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getCredentialsForTraceDataDownloadOverrides []func( - *cobra.Command, - *ml.GetCredentialsForTraceDataDownloadRequest, -) - -func newGetCredentialsForTraceDataDownload() *cobra.Command { - cmd := &cobra.Command{} - - var getCredentialsForTraceDataDownloadReq ml.GetCredentialsForTraceDataDownloadRequest - - // TODO: short flags - - cmd.Use = "get-credentials-for-trace-data-download REQUEST_ID" - cmd.Short = `Get credentials to download trace data.` - cmd.Long = `Get credentials to download trace data. - - Arguments: - REQUEST_ID: The ID of the trace to fetch artifact download credentials for.` - - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - getCredentialsForTraceDataDownloadReq.RequestId = args[0] - - response, err := w.Experiments.GetCredentialsForTraceDataDownload(ctx, getCredentialsForTraceDataDownloadReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getCredentialsForTraceDataDownloadOverrides { - fn(cmd, &getCredentialsForTraceDataDownloadReq) - } - - return cmd -} - -// start get-credentials-for-trace-data-upload command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getCredentialsForTraceDataUploadOverrides []func( - *cobra.Command, - *ml.GetCredentialsForTraceDataUploadRequest, -) - -func newGetCredentialsForTraceDataUpload() *cobra.Command { - cmd := &cobra.Command{} - - var getCredentialsForTraceDataUploadReq ml.GetCredentialsForTraceDataUploadRequest - - // TODO: short flags - - cmd.Use = "get-credentials-for-trace-data-upload REQUEST_ID" - cmd.Short = `Get credentials to upload trace data.` - cmd.Long = `Get credentials to upload trace data. - - Arguments: - REQUEST_ID: The ID of the trace to fetch artifact upload credentials for.` - - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - getCredentialsForTraceDataUploadReq.RequestId = args[0] - - response, err := w.Experiments.GetCredentialsForTraceDataUpload(ctx, getCredentialsForTraceDataUploadReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getCredentialsForTraceDataUploadOverrides { - fn(cmd, &getCredentialsForTraceDataUploadReq) - } - - return cmd -} - // start get-experiment command // Slice with functions to override default command behavior. @@ -1222,9 +1089,6 @@ func newGetLoggedModel() *cobra.Command { Arguments: MODEL_ID: The ID of the logged model to retrieve.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -1554,72 +1418,6 @@ func newListExperiments() *cobra.Command { return cmd } -// start list-logged-model-artifacts command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var listLoggedModelArtifactsOverrides []func( - *cobra.Command, - *ml.ListLoggedModelArtifactsRequest, -) - -func newListLoggedModelArtifacts() *cobra.Command { - cmd := &cobra.Command{} - - var listLoggedModelArtifactsReq ml.ListLoggedModelArtifactsRequest - - // TODO: short flags - - cmd.Flags().StringVar(&listLoggedModelArtifactsReq.ArtifactDirectoryPath, "artifact-directory-path", listLoggedModelArtifactsReq.ArtifactDirectoryPath, `Filter artifacts matching this path (a relative path from the root artifact directory).`) - cmd.Flags().StringVar(&listLoggedModelArtifactsReq.PageToken, "page-token", listLoggedModelArtifactsReq.PageToken, `Token indicating the page of artifact results to fetch.`) - - cmd.Use = "list-logged-model-artifacts MODEL_ID" - cmd.Short = `List artifacts for a logged model.` - cmd.Long = `List artifacts for a logged model. - - List artifacts for a logged model. Takes an optional - artifact_directory_path prefix which if specified, the response contains - only artifacts with the specified prefix. - - Arguments: - MODEL_ID: The ID of the logged model for which to list the artifacts.` - - // This command is being previewed; hide from help output. - cmd.Hidden = true - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - listLoggedModelArtifactsReq.ModelId = args[0] - - response, err := w.Experiments.ListLoggedModelArtifacts(ctx, listLoggedModelArtifactsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. 
- for _, fn := range listLoggedModelArtifactsOverrides { - fn(cmd, &listLoggedModelArtifactsReq) - } - - return cmd -} - // start log-batch command // Slice with functions to override default command behavior. @@ -1854,9 +1652,6 @@ func newLogLoggedModelParams() *cobra.Command { Arguments: MODEL_ID: The ID of the logged model to log params for.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -2110,9 +1905,6 @@ func newLogOutputs() *cobra.Command { Arguments: RUN_ID: The ID of the Run from which to log outputs.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -2625,9 +2417,6 @@ func newSearchLoggedModels() *cobra.Command { Search for Logged Models that satisfy specified search criteria.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -2863,9 +2652,6 @@ func newSetLoggedModelTags() *cobra.Command { Arguments: MODEL_ID: The ID of the logged model to set the tags on.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/genie/genie.go b/cmd/workspace/genie/genie.go index 1b58dbb81b..bc17c02bff 100755 --- a/cmd/workspace/genie/genie.go +++ b/cmd/workspace/genie/genie.go @@ -45,6 +45,7 @@ func New() *cobra.Command { cmd.AddCommand(newGetMessageQueryResult()) cmd.AddCommand(newGetMessageQueryResultByAttachment()) cmd.AddCommand(newGetSpace()) + cmd.AddCommand(newListSpaces()) cmd.AddCommand(newStartConversation()) // Apply optional overrides to this command. 
@@ -766,6 +767,65 @@ func newGetSpace() *cobra.Command { return cmd } +// start list-spaces command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listSpacesOverrides []func( + *cobra.Command, + *dashboards.GenieListSpacesRequest, +) + +func newListSpaces() *cobra.Command { + cmd := &cobra.Command{} + + var listSpacesReq dashboards.GenieListSpacesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listSpacesReq.PageSize, "page-size", listSpacesReq.PageSize, `Maximum number of spaces to return per page.`) + cmd.Flags().StringVar(&listSpacesReq.PageToken, "page-token", listSpacesReq.PageToken, `Pagination token for getting the next page of results.`) + + cmd.Use = "list-spaces" + cmd.Short = `List Genie spaces.` + cmd.Long = `List Genie spaces. + + Get list of Genie Spaces.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Genie.ListSpaces(ctx, listSpacesReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listSpacesOverrides { + fn(cmd, &listSpacesReq) + } + + return cmd +} + // start start-conversation command // Slice with functions to override default command behavior. 
diff --git a/cmd/workspace/grants/grants.go b/cmd/workspace/grants/grants.go index 9abbef1cf1..9fa89b72d5 100755 --- a/cmd/workspace/grants/grants.go +++ b/cmd/workspace/grants/grants.go @@ -3,8 +3,6 @@ package grants import ( - "fmt" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdctx" "github.com/databricks/cli/libs/cmdio" @@ -68,36 +66,18 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().IntVar(&getReq.MaxResults, "max-results", getReq.MaxResults, `Specifies the maximum number of privileges to return (page length).`) + cmd.Flags().StringVar(&getReq.PageToken, "page-token", getReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) cmd.Flags().StringVar(&getReq.Principal, "principal", getReq.Principal, `If provided, only the permissions for the specified principal (user or group) are returned.`) cmd.Use = "get SECURABLE_TYPE FULL_NAME" cmd.Short = `Get permissions.` cmd.Long = `Get permissions. - Gets the permissions for a securable. + Gets the permissions for a securable. Does not include inherited permissions. Arguments: - SECURABLE_TYPE: Type of securable. - Supported values: [ - CATALOG, - CLEAN_ROOM, - CONNECTION, - CREDENTIAL, - EXTERNAL_LOCATION, - EXTERNAL_METADATA, - FUNCTION, - METASTORE, - PIPELINE, - PROVIDER, - RECIPIENT, - SCHEMA, - SHARE, - STAGING_TABLE, - STORAGE_CREDENTIAL, - TABLE, - UNKNOWN_SECURABLE_TYPE, - VOLUME, - ] + SECURABLE_TYPE: Type of securable. 
FULL_NAME: Full name of securable.` cmd.Annotations = make(map[string]string) @@ -112,10 +92,7 @@ func newGet() *cobra.Command { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - _, err = fmt.Sscan(args[0], &getReq.SecurableType) - if err != nil { - return fmt.Errorf("invalid SECURABLE_TYPE: %s", args[0]) - } + getReq.SecurableType = args[0] getReq.FullName = args[1] response, err := w.Grants.Get(ctx, getReq) @@ -153,36 +130,19 @@ func newGetEffective() *cobra.Command { // TODO: short flags + cmd.Flags().IntVar(&getEffectiveReq.MaxResults, "max-results", getEffectiveReq.MaxResults, `Specifies the maximum number of privileges to return (page length).`) + cmd.Flags().StringVar(&getEffectiveReq.PageToken, "page-token", getEffectiveReq.PageToken, `Opaque token for the next page of results (pagination).`) cmd.Flags().StringVar(&getEffectiveReq.Principal, "principal", getEffectiveReq.Principal, `If provided, only the effective permissions for the specified principal (user or group) are returned.`) cmd.Use = "get-effective SECURABLE_TYPE FULL_NAME" cmd.Short = `Get effective permissions.` cmd.Long = `Get effective permissions. - Gets the effective permissions for a securable. + Gets the effective permissions for a securable. Includes inherited permissions + from any parent securables. Arguments: - SECURABLE_TYPE: Type of securable. - Supported values: [ - CATALOG, - CLEAN_ROOM, - CONNECTION, - CREDENTIAL, - EXTERNAL_LOCATION, - EXTERNAL_METADATA, - FUNCTION, - METASTORE, - PIPELINE, - PROVIDER, - RECIPIENT, - SCHEMA, - SHARE, - STAGING_TABLE, - STORAGE_CREDENTIAL, - TABLE, - UNKNOWN_SECURABLE_TYPE, - VOLUME, - ] + SECURABLE_TYPE: Type of securable. 
FULL_NAME: Full name of securable.` cmd.Annotations = make(map[string]string) @@ -197,10 +157,7 @@ func newGetEffective() *cobra.Command { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - _, err = fmt.Sscan(args[0], &getEffectiveReq.SecurableType) - if err != nil { - return fmt.Errorf("invalid SECURABLE_TYPE: %s", args[0]) - } + getEffectiveReq.SecurableType = args[0] getEffectiveReq.FullName = args[1] response, err := w.Grants.GetEffective(ctx, getEffectiveReq) @@ -249,27 +206,7 @@ func newUpdate() *cobra.Command { Updates the permissions for a securable. Arguments: - SECURABLE_TYPE: Type of securable. - Supported values: [ - CATALOG, - CLEAN_ROOM, - CONNECTION, - CREDENTIAL, - EXTERNAL_LOCATION, - EXTERNAL_METADATA, - FUNCTION, - METASTORE, - PIPELINE, - PROVIDER, - RECIPIENT, - SCHEMA, - SHARE, - STAGING_TABLE, - STORAGE_CREDENTIAL, - TABLE, - UNKNOWN_SECURABLE_TYPE, - VOLUME, - ] + SECURABLE_TYPE: Type of securable. FULL_NAME: Full name of securable.` cmd.Annotations = make(map[string]string) @@ -296,10 +233,7 @@ func newUpdate() *cobra.Command { } } } - _, err = fmt.Sscan(args[0], &updateReq.SecurableType) - if err != nil { - return fmt.Errorf("invalid SECURABLE_TYPE: %s", args[0]) - } + updateReq.SecurableType = args[0] updateReq.FullName = args[1] response, err := w.Grants.Update(ctx, updateReq) diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 8827682fa6..817f915345 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -76,5 +76,17 @@ func Groups() []cobra.Group { ID: "cleanrooms", Title: "Clean Rooms", }, + { + ID: "aibuilder", + Title: "AI Builder", + }, + { + ID: "database", + Title: "Database", + }, + { + ID: "qualitymonitorv2", + Title: "Quality Monitor v2", + }, } } diff --git a/cmd/workspace/lakeview-embedded/lakeview-embedded.go b/cmd/workspace/lakeview-embedded/lakeview-embedded.go index 782b4effcf..06ed7f1f73 100755 --- a/cmd/workspace/lakeview-embedded/lakeview-embedded.go +++ 
b/cmd/workspace/lakeview-embedded/lakeview-embedded.go @@ -27,7 +27,6 @@ func New() *cobra.Command { } // Add methods - cmd.AddCommand(newGetPublishedDashboardEmbedded()) cmd.AddCommand(newGetPublishedDashboardTokenInfo()) // Apply optional overrides to this command. @@ -38,67 +37,6 @@ func New() *cobra.Command { return cmd } -// start get-published-dashboard-embedded command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getPublishedDashboardEmbeddedOverrides []func( - *cobra.Command, - *dashboards.GetPublishedDashboardEmbeddedRequest, -) - -func newGetPublishedDashboardEmbedded() *cobra.Command { - cmd := &cobra.Command{} - - var getPublishedDashboardEmbeddedReq dashboards.GetPublishedDashboardEmbeddedRequest - - // TODO: short flags - - cmd.Use = "get-published-dashboard-embedded DASHBOARD_ID" - cmd.Short = `Read a published dashboard in an embedded ui.` - cmd.Long = `Read a published dashboard in an embedded ui. - - Get the current published dashboard within an embedded context. - - Arguments: - DASHBOARD_ID: UUID identifying the published dashboard.` - - // This command is being previewed; hide from help output. - cmd.Hidden = true - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - getPublishedDashboardEmbeddedReq.DashboardId = args[0] - - err = w.LakeviewEmbedded.GetPublishedDashboardEmbedded(ctx, getPublishedDashboardEmbeddedReq) - if err != nil { - return err - } - return nil - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. 
- cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getPublishedDashboardEmbeddedOverrides { - fn(cmd, &getPublishedDashboardEmbeddedReq) - } - - return cmd -} - // start get-published-dashboard-token-info command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index 99dc7cfbb3..fe5bfb5178 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -90,9 +90,9 @@ func newAssign() *cobra.Command { Arguments: WORKSPACE_ID: A workspace ID. METASTORE_ID: The unique ID of the metastore. - DEFAULT_CATALOG_NAME: The name of the default catalog in the metastore. This field is depracted. - Please use "Default Namespace API" to configure the default catalog for a - Databricks workspace.` + DEFAULT_CATALOG_NAME: The name of the default catalog in the metastore. This field is + deprecated. Please use "Default Namespace API" to configure the default + catalog for a Databricks workspace.` cmd.Annotations = make(map[string]string) @@ -314,28 +314,16 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Metastores drop-down." - names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Unique ID of the metastore") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have unique id of the metastore") - } deleteReq.Id = args[0] err = w.Metastores.Delete(ctx, deleteReq) @@ -385,28 +373,16 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Metastores drop-down." - names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Unique ID of the metastore") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have unique id of the metastore") - } getReq.Id = args[0] response, err := w.Metastores.Get(ctx, getReq) @@ -434,11 +410,19 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var listOverrides []func( *cobra.Command, + *catalog.ListMetastoresRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq catalog.ListMetastoresRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of metastores to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "list" cmd.Short = `List metastores.` cmd.Long = `List metastores. @@ -449,11 +433,17 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - response := w.Metastores.List(ctx) + + response := w.Metastores.List(ctx, listReq) return cmdio.RenderIterator(ctx, response) } @@ -463,7 +453,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd @@ -614,6 +604,11 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -631,23 +626,6 @@ func newUpdate() *cobra.Command { } } } - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ID argument specified. Loading names for Metastores drop-down." - names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Metastores drop-down. 
Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Unique ID of the metastore") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have unique id of the metastore") - } updateReq.Id = args[0] response, err := w.Metastores.Update(ctx, updateReq) @@ -704,6 +682,11 @@ func newUpdateAssignment() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -721,23 +704,6 @@ func newUpdateAssignment() *cobra.Command { } } } - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No WORKSPACE_ID argument specified. Loading names for Metastores drop-down." - names, err := w.Metastores.MetastoreInfoNameToMetastoreIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Metastores drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "A workspace ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have a workspace id") - } _, err = fmt.Sscan(args[0], &updateAssignmentReq.WorkspaceId) if err != nil { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) diff --git a/cmd/workspace/metastores/overrides.go b/cmd/workspace/metastores/overrides.go index 3ee6a10714..4f81c5ce5b 100644 --- a/cmd/workspace/metastores/overrides.go +++ b/cmd/workspace/metastores/overrides.go @@ -2,10 +2,11 @@ package metastores import ( "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/spf13/cobra" ) -func listOverride(listCmd *cobra.Command) { +func listOverride(listCmd *cobra.Command, req *catalog.ListMetastoresRequest) { listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` {{header "ID"}} {{header "Name"}} {{"Region"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index 4fbabec8f0..a5a41d7f59 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -585,7 +585,7 @@ func newCreateWebhook() *cobra.Command { cmd.Flags().StringVar(&createWebhookReq.Description, "description", createWebhookReq.Description, `User-specified description for the webhook.`) // TODO: complex arg: http_url_spec // TODO: complex arg: job_spec - cmd.Flags().StringVar(&createWebhookReq.ModelName, "model-name", createWebhookReq.ModelName, `Name of the model whose events would trigger this webhook.`) + cmd.Flags().StringVar(&createWebhookReq.ModelName, "model-name", createWebhookReq.ModelName, `If model name is not specified, a registry-wide webhook is created that listens for the specified events across all versions of all registered models.`) 
cmd.Flags().Var(&createWebhookReq.Status, "status", `Enable or disable triggering the webhook, or put the webhook into test mode. Supported values: [ACTIVE, DISABLED, TEST_MODE]`) cmd.Use = "create-webhook" @@ -657,7 +657,10 @@ func newDeleteComment() *cobra.Command { cmd.Short = `Delete a comment.` cmd.Long = `Delete a comment. - Deletes a comment on a model version.` + Deletes a comment on a model version. + + Arguments: + ID: Unique identifier of an activity` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 3000842482..29a9a7d5fe 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -153,7 +153,8 @@ func newDelete() *cobra.Command { cmd.Short = `Delete a pipeline.` cmd.Long = `Delete a pipeline. - Deletes a pipeline.` + Deletes a pipeline. Deleting a pipeline is a permanent action that stops and + removes the pipeline and its tables. You cannot undo this action.` cmd.Annotations = make(map[string]string) @@ -980,6 +981,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Schema, "schema", updateReq.Schema, `The default schema (database) where tables are read from or published to.`) cmd.Flags().BoolVar(&updateReq.Serverless, "serverless", updateReq.Serverless, `Whether serverless compute is enabled for this pipeline.`) cmd.Flags().StringVar(&updateReq.Storage, "storage", updateReq.Storage, `DBFS root directory for storing checkpoints and tables.`) + // TODO: map via StringToStringVar: tags cmd.Flags().StringVar(&updateReq.Target, "target", updateReq.Target, `Target schema (database) to add tables in this pipeline to.`) // TODO: complex arg: trigger diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index b91638478d..5228982aec 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -79,7 +79,7 @@ func newCreate() *cobra.Command { Arguments: 
NAME: The name of the Provider. AUTHENTICATION_TYPE: The delta sharing authentication type. - Supported values: [DATABRICKS, OAUTH_CLIENT_CREDENTIALS, TOKEN]` + Supported values: [DATABRICKS, OAUTH_CLIENT_CREDENTIALS, OIDC_FEDERATION, TOKEN]` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go b/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go new file mode 100755 index 0000000000..ea0175fdd5 --- /dev/null +++ b/cmd/workspace/quality-monitor-v2/quality-monitor-v2.go @@ -0,0 +1,400 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package quality_monitor_v2 + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/qualitymonitorv2" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "quality-monitor-v2", + Short: `Manage data quality of UC objects (currently support schema).`, + Long: `Manage data quality of UC objects (currently support schema)`, + GroupID: "qualitymonitorv2", + Annotations: map[string]string{ + "package": "qualitymonitorv2", + }, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newCreateQualityMonitor()) + cmd.AddCommand(newDeleteQualityMonitor()) + cmd.AddCommand(newGetQualityMonitor()) + cmd.AddCommand(newListQualityMonitor()) + cmd.AddCommand(newUpdateQualityMonitor()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create-quality-monitor command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var createQualityMonitorOverrides []func( + *cobra.Command, + *qualitymonitorv2.CreateQualityMonitorRequest, +) + +func newCreateQualityMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var createQualityMonitorReq qualitymonitorv2.CreateQualityMonitorRequest + createQualityMonitorReq.QualityMonitor = qualitymonitorv2.QualityMonitor{} + var createQualityMonitorJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createQualityMonitorJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: anomaly_detection_config + + cmd.Use = "create-quality-monitor OBJECT_TYPE OBJECT_ID" + cmd.Short = `Create a quality monitor.` + cmd.Long = `Create a quality monitor. + + Create a quality monitor on UC object + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. + OBJECT_ID: The uuid of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'object_type', 'object_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createQualityMonitorJson.Unmarshal(&createQualityMonitorReq.QualityMonitor) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + if !cmd.Flags().Changed("json") { + createQualityMonitorReq.QualityMonitor.ObjectType = args[0] + } + if !cmd.Flags().Changed("json") { + createQualityMonitorReq.QualityMonitor.ObjectId = args[1] + } + + response, err := w.QualityMonitorV2.CreateQualityMonitor(ctx, createQualityMonitorReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createQualityMonitorOverrides { + fn(cmd, &createQualityMonitorReq) + } + + return cmd +} + +// start delete-quality-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteQualityMonitorOverrides []func( + *cobra.Command, + *qualitymonitorv2.DeleteQualityMonitorRequest, +) + +func newDeleteQualityMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var deleteQualityMonitorReq qualitymonitorv2.DeleteQualityMonitorRequest + + // TODO: short flags + + cmd.Use = "delete-quality-monitor OBJECT_TYPE OBJECT_ID" + cmd.Short = `Delete a quality monitor.` + cmd.Long = `Delete a quality monitor. 
+ + Delete a quality monitor on UC object + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. + OBJECT_ID: The uuid of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + deleteQualityMonitorReq.ObjectType = args[0] + deleteQualityMonitorReq.ObjectId = args[1] + + err = w.QualityMonitorV2.DeleteQualityMonitor(ctx, deleteQualityMonitorReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteQualityMonitorOverrides { + fn(cmd, &deleteQualityMonitorReq) + } + + return cmd +} + +// start get-quality-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getQualityMonitorOverrides []func( + *cobra.Command, + *qualitymonitorv2.GetQualityMonitorRequest, +) + +func newGetQualityMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var getQualityMonitorReq qualitymonitorv2.GetQualityMonitorRequest + + // TODO: short flags + + cmd.Use = "get-quality-monitor OBJECT_TYPE OBJECT_ID" + cmd.Short = `Read a quality monitor.` + cmd.Long = `Read a quality monitor. + + Read a quality monitor on UC object + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. + OBJECT_ID: The uuid of the request object. 
For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + getQualityMonitorReq.ObjectType = args[0] + getQualityMonitorReq.ObjectId = args[1] + + response, err := w.QualityMonitorV2.GetQualityMonitor(ctx, getQualityMonitorReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getQualityMonitorOverrides { + fn(cmd, &getQualityMonitorReq) + } + + return cmd +} + +// start list-quality-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listQualityMonitorOverrides []func( + *cobra.Command, + *qualitymonitorv2.ListQualityMonitorRequest, +) + +func newListQualityMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var listQualityMonitorReq qualitymonitorv2.ListQualityMonitorRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listQualityMonitorReq.PageSize, "page-size", listQualityMonitorReq.PageSize, ``) + cmd.Flags().StringVar(&listQualityMonitorReq.PageToken, "page-token", listQualityMonitorReq.PageToken, ``) + + cmd.Use = "list-quality-monitor" + cmd.Short = `List quality monitors.` + cmd.Long = `List quality monitors. 
+ + (Unimplemented) List quality monitors` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response := w.QualityMonitorV2.ListQualityMonitor(ctx, listQualityMonitorReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listQualityMonitorOverrides { + fn(cmd, &listQualityMonitorReq) + } + + return cmd +} + +// start update-quality-monitor command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateQualityMonitorOverrides []func( + *cobra.Command, + *qualitymonitorv2.UpdateQualityMonitorRequest, +) + +func newUpdateQualityMonitor() *cobra.Command { + cmd := &cobra.Command{} + + var updateQualityMonitorReq qualitymonitorv2.UpdateQualityMonitorRequest + updateQualityMonitorReq.QualityMonitor = qualitymonitorv2.QualityMonitor{} + var updateQualityMonitorJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateQualityMonitorJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: anomaly_detection_config + + cmd.Use = "update-quality-monitor OBJECT_TYPE OBJECT_ID OBJECT_TYPE OBJECT_ID" + cmd.Short = `Update a quality monitor.` + cmd.Long = `Update a quality monitor. + + (Unimplemented) Update a quality monitor on UC object + + Arguments: + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. 
+ OBJECT_ID: The uuid of the request object. For example, schema id. + OBJECT_TYPE: The type of the monitored object. Can be one of the following: schema. + OBJECT_ID: The uuid of the request object. For example, schema id.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(2)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only OBJECT_TYPE, OBJECT_ID as positional arguments. Provide 'object_type', 'object_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(4) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateQualityMonitorJson.Unmarshal(&updateQualityMonitorReq.QualityMonitor) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateQualityMonitorReq.ObjectType = args[0] + updateQualityMonitorReq.ObjectId = args[1] + if !cmd.Flags().Changed("json") { + updateQualityMonitorReq.QualityMonitor.ObjectType = args[2] + } + if !cmd.Flags().Changed("json") { + updateQualityMonitorReq.QualityMonitor.ObjectId = args[3] + } + + response, err := w.QualityMonitorV2.UpdateQualityMonitor(ctx, updateQualityMonitorReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateQualityMonitorOverrides { + fn(cmd, &updateQualityMonitorReq) + } + + return cmd +} + +// end service QualityMonitorV2 diff --git a/cmd/workspace/query-execution/query-execution.go b/cmd/workspace/query-execution/query-execution.go deleted file mode 100755 index 63d57bba3d..0000000000 --- a/cmd/workspace/query-execution/query-execution.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package query_execution - -import ( - "fmt" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdctx" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/dashboards" - "github.com/spf13/cobra" -) - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var cmdOverrides []func(*cobra.Command) - -func New() *cobra.Command { - cmd := &cobra.Command{ - Use: "query-execution", - Short: `Query execution APIs for AI / BI Dashboards.`, - Long: `Query execution APIs for AI / BI Dashboards`, - GroupID: "dashboards", - Annotations: map[string]string{ - "package": "dashboards", - }, - - // This service is being previewed; hide from help output. - Hidden: true, - RunE: root.ReportUnknownSubcommand, - } - - // Add methods - cmd.AddCommand(newCancelPublishedQueryExecution()) - cmd.AddCommand(newExecutePublishedDashboardQuery()) - cmd.AddCommand(newPollPublishedQueryStatus()) - - // Apply optional overrides to this command. - for _, fn := range cmdOverrides { - fn(cmd) - } - - return cmd -} - -// start cancel-published-query-execution command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var cancelPublishedQueryExecutionOverrides []func( - *cobra.Command, - *dashboards.CancelPublishedQueryExecutionRequest, -) - -func newCancelPublishedQueryExecution() *cobra.Command { - cmd := &cobra.Command{} - - var cancelPublishedQueryExecutionReq dashboards.CancelPublishedQueryExecutionRequest - - // TODO: short flags - - // TODO: array: tokens - - cmd.Use = "cancel-published-query-execution DASHBOARD_NAME DASHBOARD_REVISION_ID" - cmd.Short = `Cancel the results for the a query for a published, embedded dashboard.` - cmd.Long = `Cancel the results for the a query for a published, embedded dashboard.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(2) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - cancelPublishedQueryExecutionReq.DashboardName = args[0] - cancelPublishedQueryExecutionReq.DashboardRevisionId = args[1] - - response, err := w.QueryExecution.CancelPublishedQueryExecution(ctx, cancelPublishedQueryExecutionReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range cancelPublishedQueryExecutionOverrides { - fn(cmd, &cancelPublishedQueryExecutionReq) - } - - return cmd -} - -// start execute-published-dashboard-query command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var executePublishedDashboardQueryOverrides []func( - *cobra.Command, - *dashboards.ExecutePublishedDashboardQueryRequest, -) - -func newExecutePublishedDashboardQuery() *cobra.Command { - cmd := &cobra.Command{} - - var executePublishedDashboardQueryReq dashboards.ExecutePublishedDashboardQueryRequest - var executePublishedDashboardQueryJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&executePublishedDashboardQueryJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().StringVar(&executePublishedDashboardQueryReq.OverrideWarehouseId, "override-warehouse-id", executePublishedDashboardQueryReq.OverrideWarehouseId, `A dashboard schedule can override the warehouse used as compute for processing the published dashboard queries.`) - - cmd.Use = "execute-published-dashboard-query DASHBOARD_NAME DASHBOARD_REVISION_ID" - cmd.Short = `Execute a query for a published dashboard.` - cmd.Long = `Execute a query for a published dashboard. - - Arguments: - DASHBOARD_NAME: Dashboard name and revision_id is required to retrieve - PublishedDatasetDataModel which contains the list of datasets, - warehouse_id, and embedded_credentials - DASHBOARD_REVISION_ID: ` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'dashboard_name', 'dashboard_revision_id' in your JSON input") - } - return nil - } - check := root.ExactArgs(2) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := executePublishedDashboardQueryJson.Unmarshal(&executePublishedDashboardQueryReq) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } - if !cmd.Flags().Changed("json") { - executePublishedDashboardQueryReq.DashboardName = args[0] - } - if !cmd.Flags().Changed("json") { - executePublishedDashboardQueryReq.DashboardRevisionId = args[1] - } - - err = w.QueryExecution.ExecutePublishedDashboardQuery(ctx, executePublishedDashboardQueryReq) - if err != nil { - return err - } - return nil - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range executePublishedDashboardQueryOverrides { - fn(cmd, &executePublishedDashboardQueryReq) - } - - return cmd -} - -// start poll-published-query-status command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var pollPublishedQueryStatusOverrides []func( - *cobra.Command, - *dashboards.PollPublishedQueryStatusRequest, -) - -func newPollPublishedQueryStatus() *cobra.Command { - cmd := &cobra.Command{} - - var pollPublishedQueryStatusReq dashboards.PollPublishedQueryStatusRequest - - // TODO: short flags - - // TODO: array: tokens - - cmd.Use = "poll-published-query-status DASHBOARD_NAME DASHBOARD_REVISION_ID" - cmd.Short = `Poll the results for the a query for a published, embedded dashboard.` - cmd.Long = `Poll the results for the a query for a published, embedded dashboard.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(2) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := cmdctx.WorkspaceClient(ctx) - - pollPublishedQueryStatusReq.DashboardName = args[0] - pollPublishedQueryStatusReq.DashboardRevisionId = args[1] - - response, err := w.QueryExecution.PollPublishedQueryStatus(ctx, pollPublishedQueryStatusReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range pollPublishedQueryStatusOverrides { - fn(cmd, &pollPublishedQueryStatusReq) - } - - return cmd -} - -// end service QueryExecution diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 8b036de7af..1864707766 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -99,7 +99,7 @@ func newCreate() *cobra.Command { Arguments: NAME: Name of Recipient. AUTHENTICATION_TYPE: The delta sharing authentication type. 
- Supported values: [DATABRICKS, OAUTH_CLIENT_CREDENTIALS, TOKEN]` + Supported values: [DATABRICKS, OAUTH_CLIENT_CREDENTIALS, OIDC_FEDERATION, TOKEN]` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index bcd3273c66..9c3425d6b1 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -169,28 +169,16 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME argument specified. Loading names for Schemas drop-down." - names, err := w.Schemas.SchemaInfoNameToFullNameMap(ctx, catalog.ListSchemasRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Schemas drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Full name of the schema") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have full name of the schema") - } deleteReq.FullName = args[0] err = w.Schemas.Delete(ctx, deleteReq) @@ -243,28 +231,16 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := cmdctx.WorkspaceClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME argument specified. Loading names for Schemas drop-down." 
- names, err := w.Schemas.SchemaInfoNameToFullNameMap(ctx, catalog.ListSchemasRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Schemas drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Full name of the schema") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have full name of the schema") - } getReq.FullName = args[0] response, err := w.Schemas.Get(ctx, getReq) @@ -368,7 +344,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) - cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `. Supported values: [DISABLE, ENABLE, INHERIT]`) + cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. Supported values: [DISABLE, ENABLE, INHERIT]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the schema.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of schema.`) // TODO: map via StringToStringVar: properties @@ -388,6 +364,11 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -405,23 +386,6 @@ func newUpdate() *cobra.Command { } } } - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME argument specified. 
Loading names for Schemas drop-down." - names, err := w.Schemas.SchemaInfoNameToFullNameMap(ctx, catalog.ListSchemasRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Schemas drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Full name of the schema") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have full name of the schema") - } updateReq.FullName = args[0] response, err := w.Schemas.Update(ctx, updateReq) diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 50519f2adf..2754412a77 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -10,6 +10,7 @@ import ( aibi_dashboard_embedding_approved_domains "github.com/databricks/cli/cmd/workspace/aibi-dashboard-embedding-approved-domains" automatic_cluster_update "github.com/databricks/cli/cmd/workspace/automatic-cluster-update" compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile" + dashboard_email_subscriptions "github.com/databricks/cli/cmd/workspace/dashboard-email-subscriptions" default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace" disable_legacy_access "github.com/databricks/cli/cmd/workspace/disable-legacy-access" disable_legacy_dbfs "github.com/databricks/cli/cmd/workspace/disable-legacy-dbfs" @@ -19,6 +20,7 @@ import ( enhanced_security_monitoring "github.com/databricks/cli/cmd/workspace/enhanced-security-monitoring" llm_proxy_partner_powered_workspace "github.com/databricks/cli/cmd/workspace/llm-proxy-partner-powered-workspace" restrict_workspace_admins "github.com/databricks/cli/cmd/workspace/restrict-workspace-admins" + sql_results_download "github.com/databricks/cli/cmd/workspace/sql-results-download" ) // Slice with functions to override default command behavior. 
@@ -42,6 +44,7 @@ func New() *cobra.Command { cmd.AddCommand(aibi_dashboard_embedding_approved_domains.New()) cmd.AddCommand(automatic_cluster_update.New()) cmd.AddCommand(compliance_security_profile.New()) + cmd.AddCommand(dashboard_email_subscriptions.New()) cmd.AddCommand(default_namespace.New()) cmd.AddCommand(disable_legacy_access.New()) cmd.AddCommand(disable_legacy_dbfs.New()) @@ -51,6 +54,7 @@ func New() *cobra.Command { cmd.AddCommand(enhanced_security_monitoring.New()) cmd.AddCommand(llm_proxy_partner_powered_workspace.New()) cmd.AddCommand(restrict_workspace_admins.New()) + cmd.AddCommand(sql_results_download.New()) // Apply optional overrides to this command. for _, fn := range cmdOverrides { diff --git a/cmd/workspace/sql-results-download/sql-results-download.go b/cmd/workspace/sql-results-download/sql-results-download.go new file mode 100755 index 0000000000..b807a767a2 --- /dev/null +++ b/cmd/workspace/sql-results-download/sql-results-download.go @@ -0,0 +1,218 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package sql_results_download + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdctx" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "sql-results-download", + Short: `Controls whether users within the workspace are allowed to download results from the SQL Editor and AI/BI Dashboards UIs.`, + Long: `Controls whether users within the workspace are allowed to download results + from the SQL Editor and AI/BI Dashboards UIs. 
By default, this setting is + enabled (set to true)`, + RunE: root.ReportUnknownSubcommand, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteSqlResultsDownloadRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteSqlResultsDownloadRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the SQL Results Download setting.` + cmd.Long = `Delete the SQL Results Download setting. + + Reverts the SQL Results Download setting to its default value.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Settings.SqlResultsDownload().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetSqlResultsDownloadRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetSqlResultsDownloadRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the SQL Results Download setting.` + cmd.Long = `Get the SQL Results Download setting. + + Gets the SQL Results Download setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + response, err := w.Settings.SqlResultsDownload().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *settings.UpdateSqlResultsDownloadRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateSqlResultsDownloadRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the SQL Results Download setting.` + cmd.Long = `Update the SQL Results Download setting. + + Updates the SQL Results Download setting.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := cmdctx.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.SqlResultsDownload().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service SqlResultsDownload diff --git a/experimental/python/databricks/bundles/compute/_models/environment.py b/experimental/python/databricks/bundles/compute/_models/environment.py index 16cc9d7140..c8bdee0917 100644 --- a/experimental/python/databricks/bundles/compute/_models/environment.py +++ b/experimental/python/databricks/bundles/compute/_models/environment.py @@ -3,11 +3,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value -from databricks.bundles.core._variable import ( - VariableOr, - VariableOrList, - VariableOrOptional, -) +from databricks.bundles.core._variable import VariableOrList, VariableOrOptional if TYPE_CHECKING: from typing_extensions import Self @@ -20,14 +16,6 @@ class Environment: In this minimal environment spec, only pip dependencies are supported. """ - client: VariableOr[str] - """ - Client version used by the environment - The client is the user-facing environment of the runtime. - Each client comes with a specific set of pre-installed libraries. - The version is a string, consisting of the major client version. - """ - dependencies: VariableOrList[str] = field(default_factory=list) """ List of pip dependencies, as supported by the version of pip in this environment. @@ -35,12 +23,9 @@ class Environment: environment_version: VariableOrOptional[str] = None """ - :meta private: [EXPERIMENTAL] - - We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized - correctly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field - for any other purpose, e.g. notebook storage. - This field is not yet exposed to customers (e.g. in the jobs API). + Required. 
Environment version used by the environment. + Each version comes with a specific Python version and a set of Python packages. + The version is a string, consisting of an integer. """ jar_dependencies: VariableOrList[str] = field(default_factory=list) @@ -61,14 +46,6 @@ def as_dict(self) -> "EnvironmentDict": class EnvironmentDict(TypedDict, total=False): """""" - client: VariableOr[str] - """ - Client version used by the environment - The client is the user-facing environment of the runtime. - Each client comes with a specific set of pre-installed libraries. - The version is a string, consisting of the major client version. - """ - dependencies: VariableOrList[str] """ List of pip dependencies, as supported by the version of pip in this environment. @@ -76,12 +53,9 @@ class EnvironmentDict(TypedDict, total=False): environment_version: VariableOrOptional[str] """ - :meta private: [EXPERIMENTAL] - - We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized - correctly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field - for any other purpose, e.g. notebook storage. - This field is not yet exposed to customers (e.g. in the jobs API). + Required. Environment version used by the environment. + Each version comes with a specific Python version and a set of Python packages. + The version is a string, consisting of an integer. 
""" jar_dependencies: VariableOrList[str] diff --git a/experimental/python/databricks/bundles/jobs/__init__.py b/experimental/python/databricks/bundles/jobs/__init__.py index 32fd0e6599..3eb3d43185 100644 --- a/experimental/python/databricks/bundles/jobs/__init__.py +++ b/experimental/python/databricks/bundles/jobs/__init__.py @@ -53,6 +53,9 @@ "DbfsStorageInfo", "DbfsStorageInfoDict", "DbfsStorageInfoParam", + "DbtCloudTask", + "DbtCloudTaskDict", + "DbtCloudTaskParam", "DbtTask", "DbtTaskDict", "DbtTaskParam", @@ -445,6 +448,11 @@ DashboardTaskDict, DashboardTaskParam, ) +from databricks.bundles.jobs._models.dbt_cloud_task import ( + DbtCloudTask, + DbtCloudTaskDict, + DbtCloudTaskParam, +) from databricks.bundles.jobs._models.dbt_task import DbtTask, DbtTaskDict, DbtTaskParam from databricks.bundles.jobs._models.file_arrival_trigger_configuration import ( FileArrivalTriggerConfiguration, diff --git a/experimental/python/databricks/bundles/jobs/_models/dashboard_task.py b/experimental/python/databricks/bundles/jobs/_models/dashboard_task.py index 6284ca36d3..b42ef0bdd5 100644 --- a/experimental/python/databricks/bundles/jobs/_models/dashboard_task.py +++ b/experimental/python/databricks/bundles/jobs/_models/dashboard_task.py @@ -4,10 +4,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrOptional -from databricks.bundles.jobs._models.subscription import ( - Subscription, - SubscriptionParam, -) +from databricks.bundles.jobs._models.subscription import Subscription, SubscriptionParam if TYPE_CHECKING: from typing_extensions import Self diff --git a/experimental/python/databricks/bundles/jobs/_models/dbt_cloud_task.py b/experimental/python/databricks/bundles/jobs/_models/dbt_cloud_task.py new file mode 100644 index 0000000000..d1d862c7ef --- /dev/null +++ 
b/experimental/python/databricks/bundles/jobs/_models/dbt_cloud_task.py @@ -0,0 +1,50 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOrOptional + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class DbtCloudTask: + """ + :meta private: [EXPERIMENTAL] + """ + + connection_resource_name: VariableOrOptional[str] = None + """ + The resource name of the UC connection that authenticates the dbt Cloud for this task + """ + + dbt_cloud_job_id: VariableOrOptional[int] = None + """ + Id of the dbt Cloud job to be triggered + """ + + @classmethod + def from_dict(cls, value: "DbtCloudTaskDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "DbtCloudTaskDict": + return _transform_to_json_value(self) # type:ignore + + +class DbtCloudTaskDict(TypedDict, total=False): + """""" + + connection_resource_name: VariableOrOptional[str] + """ + The resource name of the UC connection that authenticates the dbt Cloud for this task + """ + + dbt_cloud_job_id: VariableOrOptional[int] + """ + Id of the dbt Cloud job to be triggered + """ + + +DbtCloudTaskParam = DbtCloudTaskDict | DbtCloudTask diff --git a/experimental/python/databricks/bundles/jobs/_models/job.py b/experimental/python/databricks/bundles/jobs/_models/job.py index c72a20a329..ca40311133 100644 --- a/experimental/python/databricks/bundles/jobs/_models/job.py +++ b/experimental/python/databricks/bundles/jobs/_models/job.py @@ -17,10 +17,7 @@ CronSchedule, CronScheduleParam, ) -from databricks.bundles.jobs._models.git_source import ( - GitSource, - GitSourceParam, -) +from databricks.bundles.jobs._models.git_source import GitSource, GitSourceParam from databricks.bundles.jobs._models.job_cluster import JobCluster, JobClusterParam from 
databricks.bundles.jobs._models.job_email_notifications import ( JobEmailNotifications, diff --git a/experimental/python/databricks/bundles/jobs/_models/task.py b/experimental/python/databricks/bundles/jobs/_models/task.py index 7120c970a8..8da07a4ab3 100644 --- a/experimental/python/databricks/bundles/jobs/_models/task.py +++ b/experimental/python/databricks/bundles/jobs/_models/task.py @@ -28,6 +28,10 @@ DashboardTask, DashboardTaskParam, ) +from databricks.bundles.jobs._models.dbt_cloud_task import ( + DbtCloudTask, + DbtCloudTaskParam, +) from databricks.bundles.jobs._models.dbt_task import DbtTask, DbtTaskParam from databricks.bundles.jobs._models.for_each_task import ( ForEachTask, @@ -121,6 +125,13 @@ class Task: The task refreshes a dashboard and sends a snapshot to subscribers. """ + dbt_cloud_task: VariableOrOptional[DbtCloudTask] = None + """ + :meta private: [EXPERIMENTAL] + + Task type for dbt cloud + """ + dbt_task: VariableOrOptional[DbtTask] = None """ The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse. @@ -319,6 +330,13 @@ class TaskDict(TypedDict, total=False): The task refreshes a dashboard and sends a snapshot to subscribers. """ + dbt_cloud_task: VariableOrOptional[DbtCloudTaskParam] + """ + :meta private: [EXPERIMENTAL] + + Task type for dbt cloud + """ + dbt_task: VariableOrOptional[DbtTaskParam] """ The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse. 
diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py index 988227c43e..c452222df9 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_config.py @@ -4,10 +4,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value from databricks.bundles.core._variable import VariableOrOptional -from databricks.bundles.pipelines._models.report_spec import ( - ReportSpec, - ReportSpecParam, -) +from databricks.bundles.pipelines._models.report_spec import ReportSpec, ReportSpecParam from databricks.bundles.pipelines._models.schema_spec import SchemaSpec, SchemaSpecParam from databricks.bundles.pipelines._models.table_spec import TableSpec, TableSpecParam diff --git a/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py b/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py index 50754bee6a..b5ed997cfd 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py +++ b/experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py @@ -13,6 +13,7 @@ class IngestionSourceType(Enum): SERVICENOW = "SERVICENOW" MANAGED_POSTGRESQL = "MANAGED_POSTGRESQL" ORACLE = "ORACLE" + TERADATA = "TERADATA" SHAREPOINT = "SHAREPOINT" DYNAMICS365 = "DYNAMICS365" @@ -29,6 +30,7 @@ class IngestionSourceType(Enum): "SERVICENOW", "MANAGED_POSTGRESQL", "ORACLE", + "TERADATA", "SHAREPOINT", "DYNAMICS365", ] diff --git a/experimental/python/databricks/bundles/pipelines/_models/pipeline.py b/experimental/python/databricks/bundles/pipelines/_models/pipeline.py index 8bf25fd1f1..936842ea92 100644 --- a/experimental/python/databricks/bundles/pipelines/_models/pipeline.py +++ 
b/experimental/python/databricks/bundles/pipelines/_models/pipeline.py @@ -182,6 +182,13 @@ class Pipeline(Resource): DBFS root directory for storing checkpoints and tables. """ + tags: VariableOrDict[str] = field(default_factory=dict) + """ + A map of tags associated with the pipeline. + These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. + A maximum of 25 tags can be added to the pipeline. + """ + target: VariableOrOptional[str] = None """ Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field. @@ -325,6 +332,13 @@ class PipelineDict(TypedDict, total=False): DBFS root directory for storing checkpoints and tables. """ + tags: VariableOrDict[str] + """ + A map of tags associated with the pipeline. + These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations. + A maximum of 25 tags can be added to the pipeline. + """ + target: VariableOrOptional[str] """ Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field. 
diff --git a/go.mod b/go.mod index 499a73557b..bc76833833 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/BurntSushi/toml v1.5.0 // MIT github.com/Masterminds/semver/v3 v3.3.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.71.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.72.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/gorilla/mux v1.8.1 // BSD 3-Clause diff --git a/go.sum b/go.sum index 06323b68bc..b4f563aa45 100644 --- a/go.sum +++ b/go.sum @@ -38,8 +38,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/databricks/databricks-sdk-go v0.71.0 h1:YVNcvQUcgzlKesxDolDXSQPbNcCldubYLvM71hzVmUY= -github.com/databricks/databricks-sdk-go v0.71.0/go.mod h1:xBtjeP9nq+6MgTewZW1EcbRkD7aDY9gZvcRPcwPhZjw= +github.com/databricks/databricks-sdk-go v0.72.0 h1:vNS4zlpvNYiXsy/7/lzV7cuu/yOcT/1xpfuJw3+W3TA= +github.com/databricks/databricks-sdk-go v0.72.0/go.mod h1:xBtjeP9nq+6MgTewZW1EcbRkD7aDY9gZvcRPcwPhZjw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/libs/structwalk/walktype_test.go b/libs/structwalk/walktype_test.go index df665a3f5a..54d43efa2a 100644 --- a/libs/structwalk/walktype_test.go +++ b/libs/structwalk/walktype_test.go @@ -123,7 +123,7 @@ func TestTypeJobSettings(t *testing.T) { func TestTypeRoot(t *testing.T) { testStruct(t, reflect.TypeOf(config.Root{}), - 3400, 
3500, // 3487 at this time + 3500, 3600, // 3516 at this time map[string]any{ ".bundle.target": "", `.variables[*].lookup.dashboard`: "",