2 changes: 1 addition & 1 deletion .codegen/_openapi_sha
@@ -1 +1 @@
2cee201b2e8d656f7306b2f9ec98edfa721e9829
a8f547d3728fba835fbdda301e846829c5cbbef5
2 changes: 1 addition & 1 deletion .codegen/service.go.tmpl
@@ -412,7 +412,7 @@ func new{{.PascalName}}() *cobra.Command {
{{- if $optionalIfJsonIsUsed }}
if !cmd.Flags().Changed("json") {
{{- end }}
{{if and (not $field.Entity.IsString) (not $field.Entity.IsFieldMask) (not $field.Entity.IsTimestamp) (not $field.Entity.IsDuration) -}} {{/* TODO: add support for well known types */}}
{{if and (not $field.Entity.IsString) (not $field.Entity.IsFieldMask) (not $field.Entity.IsTimestamp) (not $field.Entity.IsDuration) -}} {{/* TODO: add support for well known types */ -}}
_, err = fmt.Sscan(args[{{$arg}}], &{{- template "request-body-obj" (dict "Method" $method "Field" $field)}})
if err != nil {
return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}])
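The template change above only adds a trailing trim marker to the comment action. As a minimal, standalone sketch (standard library only, not part of the PR) of why that matters, the following program shows how `text/template` treats a comment closed with ` -}}`: the newline after the action is consumed, so the generated code does not pick up a stray blank line.

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Comment action without a trailing trim marker: the newline after the
	// action is kept, leaving a blank line in the output.
	plain := template.Must(template.New("plain").Parse("a\n{{/* note */}}\nb\n"))

	// Comment action closed with " -}}": the trailing newline is trimmed,
	// mirroring the change to .codegen/service.go.tmpl.
	trimmed := template.Must(template.New("trimmed").Parse("a\n{{/* note */ -}}\nb\n"))

	_ = plain.Execute(os.Stdout, nil)   // prints: a\n\nb\n
	_ = trimmed.Execute(os.Stdout, nil) // prints: a\nb\n
}
```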
7 changes: 5 additions & 2 deletions .gitattributes
@@ -63,10 +63,12 @@ cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true
cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true
cmd/workspace/credentials/credentials.go linguist-generated=true
cmd/workspace/current-user/current-user.go linguist-generated=true
cmd/workspace/custom-llms/custom-llms.go linguist-generated=true
cmd/workspace/dashboard-email-subscriptions/dashboard-email-subscriptions.go linguist-generated=true
cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
cmd/workspace/dashboards/dashboards.go linguist-generated=true
cmd/workspace/data-sources/data-sources.go linguist-generated=true
cmd/workspace/database-instances/database-instances.go linguist-generated=true
cmd/workspace/database/database.go linguist-generated=true
cmd/workspace/default-namespace/default-namespace.go linguist-generated=true
cmd/workspace/disable-legacy-access/disable-legacy-access.go linguist-generated=true
cmd/workspace/disable-legacy-dbfs/disable-legacy-dbfs.go linguist-generated=true
@@ -110,10 +112,10 @@ cmd/workspace/provider-personalization-requests/provider-personalization-request
cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go linguist-generated=true
cmd/workspace/provider-providers/provider-providers.go linguist-generated=true
cmd/workspace/providers/providers.go linguist-generated=true
cmd/workspace/quality-monitor-v2/quality-monitor-v2.go linguist-generated=true
cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true
cmd/workspace/queries-legacy/queries-legacy.go linguist-generated=true
cmd/workspace/queries/queries.go linguist-generated=true
cmd/workspace/query-execution/query-execution.go linguist-generated=true
cmd/workspace/query-history/query-history.go linguist-generated=true
cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go linguist-generated=true
cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true
@@ -131,6 +133,7 @@ cmd/workspace/service-principals/service-principals.go linguist-generated=true
cmd/workspace/serving-endpoints/serving-endpoints.go linguist-generated=true
cmd/workspace/settings/settings.go linguist-generated=true
cmd/workspace/shares/shares.go linguist-generated=true
cmd/workspace/sql-results-download/sql-results-download.go linguist-generated=true
cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true
cmd/workspace/system-schemas/system-schemas.go linguist-generated=true
cmd/workspace/table-constraints/table-constraints.go linguist-generated=true
4 changes: 4 additions & 0 deletions acceptance/help/output.txt
@@ -44,6 +44,7 @@ Identity and Access Management
Databricks SQL
alerts The alerts API can be used to perform CRUD operations on alerts.
alerts-legacy The alerts API can be used to perform CRUD operations on alerts.
alerts-v2 New version of SQL Alerts.
dashboards In general, there is little need to modify dashboards using the API.
data-sources This API is provided to assist you in making new query objects.
queries The queries API can be used to perform CRUD operations on queries.
@@ -123,6 +124,9 @@ Clean Rooms
clean-room-task-runs Clean room task runs are the executions of notebooks in a clean room.
clean-rooms A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other’s data.

Quality Monitor v2
quality-monitor-v2 Manage data quality of UC objects (currently support schema).

Additional Commands:
account Databricks Account Commands
api Perform Databricks API call
23 changes: 21 additions & 2 deletions bundle/config/variable/resolve_metastore.go
@@ -2,20 +2,39 @@ package variable

import (
"context"
"fmt"

"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/service/catalog"
)

type resolveMetastore struct {
name string
}

func (l resolveMetastore) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
entity, err := w.Metastores.GetByName(ctx, l.name)
result, err := w.Metastores.ListAll(ctx, catalog.ListMetastoresRequest{})
if err != nil {
return "", err
}
return entity.MetastoreId, nil

// Collect all metastores with the given name.
var entities []catalog.MetastoreInfo
for _, entity := range result {
if entity.Name == l.name {
entities = append(entities, entity)
}
}

// Return the ID of the first matching metastore.
switch len(entities) {
case 0:
return "", fmt.Errorf("metastoren named %q does not exist", l.name)
case 1:
return entities[0].MetastoreId, nil
default:
return "", fmt.Errorf("there are %d instances of clusters named %q", len(entities), l.name)
}
}

func (l resolveMetastore) String() string {
15 changes: 8 additions & 7 deletions bundle/config/variable/resolve_metastore_test.go
@@ -4,7 +4,6 @@ import (
"context"
"testing"

"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/stretchr/testify/assert"
@@ -17,9 +16,9 @@ func TestResolveMetastore_ResolveSuccess(t *testing.T) {

api := m.GetMockMetastoresAPI()
api.EXPECT().
GetByName(mock.Anything, "metastore").
Return(&catalog.MetastoreInfo{
MetastoreId: "abcd",
ListAll(mock.Anything, mock.Anything).
Return([]catalog.MetastoreInfo{
{MetastoreId: "abcd", Name: "metastore"},
}, nil)

ctx := context.Background()
@@ -34,13 +33,15 @@ func TestResolveMetastore_ResolveNotFound(t *testing.T) {

api := m.GetMockMetastoresAPI()
api.EXPECT().
GetByName(mock.Anything, "metastore").
Return(nil, &apierr.APIError{StatusCode: 404})
ListAll(mock.Anything, mock.Anything).
Return([]catalog.MetastoreInfo{
{MetastoreId: "abcd", Name: "different"},
}, nil)

ctx := context.Background()
l := resolveMetastore{name: "metastore"}
_, err := l.Resolve(ctx, m.WorkspaceClient)
require.ErrorIs(t, err, apierr.ErrNotFound)
require.ErrorContains(t, err, "metastoren named \"metastore\" does not exist")
}

func TestResolveMetastore_String(t *testing.T) {
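The updated tests cover the found and not-found paths. A companion test for the remaining branch (several metastores sharing the same name) could look like the sketch below; it reuses the mock pattern from the existing tests and assumes the same `mocks.NewMockWorkspaceClient` constructor used at the top of those tests, which is not visible in this hunk.

```go
func TestResolveMetastore_ResolveDuplicates(t *testing.T) {
	m := mocks.NewMockWorkspaceClient(t)

	api := m.GetMockMetastoresAPI()
	api.EXPECT().
		ListAll(mock.Anything, mock.Anything).
		Return([]catalog.MetastoreInfo{
			// Two metastores with the same name trigger the ambiguity error.
			{MetastoreId: "abcd", Name: "metastore"},
			{MetastoreId: "efgh", Name: "metastore"},
		}, nil)

	ctx := context.Background()
	l := resolveMetastore{name: "metastore"}
	_, err := l.Resolve(ctx, m.WorkspaceClient)
	require.ErrorContains(t, err, `there are 2 instances of metastores named "metastore"`)
}
```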
56 changes: 41 additions & 15 deletions bundle/internal/schema/annotations_openapi.yml
@@ -534,6 +534,11 @@ github.com/databricks/cli/bundle/config/resources.Pipeline:
"storage":
"description": |-
DBFS root directory for storing checkpoints and tables.
"tags":
"description": |-
A map of tags associated with the pipeline.
These are forwarded to the cluster as cluster tags, and are therefore subject to the same limitations.
A maximum of 25 tags can be added to the pipeline.
"target":
"description": |-
Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated for pipeline creation in favor of the `schema` field.
@@ -1425,24 +1430,19 @@ github.com/databricks/databricks-sdk-go/service/compute.Environment:
In this minimal environment spec, only pip dependencies are supported.
"client":
"description": |-
Client version used by the environment
The client is the user-facing environment of the runtime.
Each client comes with a specific set of pre-installed libraries.
The version is a string, consisting of the major client version.
Use `environment_version` instead.
"deprecation_message": |-
This field is deprecated
"dependencies":
"description": |-
List of pip dependencies, as supported by the version of pip in this environment.
Each dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/
Allowed dependency could be <requirement specifier>, <archive url/path>, <local project path>(WSFS or Volumes in Databricks), <vcs project url>
E.g. dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"]
Each dependency is a valid pip requirements file line per https://pip.pypa.io/en/stable/reference/requirements-file-format/.
Allowed dependencies include a requirement specifier, an archive URL, a local project path (such as WSFS or UC Volumes in Databricks), or a VCS project URL.
"environment_version":
"description": |-
We renamed `client` to `environment_version` in notebook exports. This field is meant solely so that imported notebooks with `environment_version` can be deserialized
correctly, in a backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it will be deserialized correctly). Do NOT use this field
for any other purpose, e.g. notebook storage.
This field is not yet exposed to customers (e.g. in the jobs API).
"x-databricks-preview": |-
PRIVATE
Required. Environment version used by the environment.
Each version comes with a specific Python version and a set of Python packages.
The version is a string, consisting of an integer.
"jar_dependencies":
"description": |-
List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`.
@@ -1787,6 +1787,13 @@ github.com/databricks/databricks-sdk-go/service/jobs.DashboardTask:
"description": |-
Optional: The warehouse id to execute the dashboard with for the schedule.
If not specified, the default warehouse of the dashboard will be used.
github.com/databricks/databricks-sdk-go/service/jobs.DbtCloudTask:
"connection_resource_name":
"description": |-
The resource name of the UC connection that authenticates the dbt Cloud for this task
"dbt_cloud_job_id":
"description": |-
Id of the dbt Cloud job to be triggered
github.com/databricks/databricks-sdk-go/service/jobs.DbtTask:
"catalog":
"description": |-
@@ -2540,6 +2547,11 @@ github.com/databricks/databricks-sdk-go/service/jobs.Task:
"dashboard_task":
"description": |-
The task refreshes a dashboard and sends a snapshot to subscribers.
"dbt_cloud_task":
"description": |-
Task type for dbt cloud
"x-databricks-preview": |-
PRIVATE
"dbt_task":
"description": |-
The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.
@@ -2878,6 +2890,8 @@ github.com/databricks/databricks-sdk-go/service/pipelines.IngestionSourceType:
MANAGED_POSTGRESQL
- |-
ORACLE
- |-
TERADATA
- |-
SHAREPOINT
- |-
@@ -3692,9 +3706,15 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput:
"instance_profile_arn":
"description": |-
ARN of the instance profile that the served entity uses to access AWS resources.
"max_provisioned_concurrency":
"description": |-
The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified.
"max_provisioned_throughput":
"description": |-
The maximum tokens per second that the endpoint can scale up to.
"min_provisioned_concurrency":
"description": |-
The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified.
"min_provisioned_throughput":
"description": |-
The minimum tokens per second that the endpoint can scale down to.
@@ -3709,7 +3729,7 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedEntityInput:
Whether the compute resources for the served entity should scale down to zero.
"workload_size":
"description": |-
The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.
The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.
"workload_type":
"description": |-
The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
@@ -3720,9 +3740,15 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput:
"instance_profile_arn":
"description": |-
ARN of the instance profile that the served entity uses to access AWS resources.
"max_provisioned_concurrency":
"description": |-
The maximum provisioned concurrency that the endpoint can scale up to. Do not use if workload_size is specified.
"max_provisioned_throughput":
"description": |-
The maximum tokens per second that the endpoint can scale up to.
"min_provisioned_concurrency":
"description": |-
The minimum provisioned concurrency that the endpoint can scale down to. Do not use if workload_size is specified.
"min_provisioned_throughput":
"description": |-
The minimum tokens per second that the endpoint can scale down to.
@@ -3739,7 +3765,7 @@ github.com/databricks/databricks-sdk-go/service/serving.ServedModelInput:
Whether the compute resources for the served entity should scale down to zero.
"workload_size":
"description": |-
The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0.
The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.
"workload_type":
"description": |-
The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is "CPU". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others. See the available [GPU types](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).