From 2ce841ebbbe08d40e23322d9c67e528a1eab427b Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Fri, 4 Apr 2025 14:41:09 +0200 Subject: [PATCH] [Python] Update jobs code --- .../databricks/bundles/jobs/__init__.py | 46 +++++-- .../jobs/_models/clean_rooms_notebook_task.py | 74 +++++++++++ .../bundles/jobs/_models/compute_config.py | 62 +++++++++ .../bundles/jobs/_models/condition.py | 4 + .../jobs/_models/gen_ai_compute_task.py | 121 ++++++++++++++++++ .../databricks/bundles/jobs/_models/job.py | 18 ++- .../jobs/_models/job_email_notifications.py | 14 +- .../bundles/jobs/_models/job_permission.py | 60 +++++++++ .../jobs/_models/job_permission_level.py | 14 ++ .../jobs/_models/performance_target.py | 5 +- .../bundles/jobs/_models/spark_jar_task.py | 16 +-- .../table_update_trigger_configuration.py | 4 +- .../databricks/bundles/jobs/_models/task.py | 30 +++++ .../jobs/_models/task_email_notifications.py | 14 +- .../bundles/jobs/_models/trigger_settings.py | 6 + .../databricks_tests/jobs/test_permission.py | 17 +-- 16 files changed, 439 insertions(+), 66 deletions(-) create mode 100644 experimental/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py create mode 100644 experimental/python/databricks/bundles/jobs/_models/compute_config.py create mode 100644 experimental/python/databricks/bundles/jobs/_models/gen_ai_compute_task.py create mode 100644 experimental/python/databricks/bundles/jobs/_models/job_permission.py create mode 100644 experimental/python/databricks/bundles/jobs/_models/job_permission_level.py diff --git a/experimental/python/databricks/bundles/jobs/__init__.py b/experimental/python/databricks/bundles/jobs/__init__.py index 5d7162b7ca..495124c8ec 100644 --- a/experimental/python/databricks/bundles/jobs/__init__.py +++ b/experimental/python/databricks/bundles/jobs/__init__.py @@ -15,6 +15,9 @@ "AzureAttributesParam", "AzureAvailability", "AzureAvailabilityParam", + "CleanRoomsNotebookTask", + "CleanRoomsNotebookTaskDict", + "CleanRoomsNotebookTaskParam", "ClientsTypes", "ClientsTypesDict", "ClientsTypesParam", @@ -24,6 +27,9 @@ "ClusterSpec", "ClusterSpecDict", "ClusterSpecParam", + "ComputeConfig", + "ComputeConfigDict", + "ComputeConfigParam", "Condition", "ConditionParam", "ConditionTask", @@ -70,6 +76,9 @@ "GcsStorageInfo", "GcsStorageInfoDict", "GcsStorageInfoParam", + "GenAiComputeTask", + "GenAiComputeTaskDict", + "GenAiComputeTaskParam", "GitProvider", "GitProviderParam", "GitSource", @@ -96,6 +105,11 @@ "JobParameterDefinition", "JobParameterDefinitionDict", "JobParameterDefinitionParam", + "JobPermission", + "JobPermissionDict", + "JobPermissionLevel", + "JobPermissionLevelParam", + "JobPermissionParam", "JobRunAs", "JobRunAsDict", "JobRunAsParam", @@ -133,9 +147,6 @@ "PeriodicTriggerConfigurationParam", "PeriodicTriggerConfigurationTimeUnit", "PeriodicTriggerConfigurationTimeUnitParam", - "Permission", - "PermissionDict", - "PermissionParam", "PipelineParams", "PipelineParamsDict", "PipelineParamsParam", @@ -373,6 +384,16 @@ WorkspaceStorageInfoDict, WorkspaceStorageInfoParam, ) +from databricks.bundles.jobs._models.clean_rooms_notebook_task import ( + CleanRoomsNotebookTask, + CleanRoomsNotebookTaskDict, + CleanRoomsNotebookTaskParam, +) +from databricks.bundles.jobs._models.compute_config import ( + ComputeConfig, + ComputeConfigDict, + ComputeConfigParam, +) from databricks.bundles.jobs._models.condition import Condition, ConditionParam from databricks.bundles.jobs._models.condition_task import ( ConditionTask, @@ -404,6 +425,11 @@ 
ForEachTaskDict, ForEachTaskParam, ) +from databricks.bundles.jobs._models.gen_ai_compute_task import ( + GenAiComputeTask, + GenAiComputeTaskDict, + GenAiComputeTaskParam, +) from databricks.bundles.jobs._models.git_provider import GitProvider, GitProviderParam from databricks.bundles.jobs._models.git_source import ( GitSource, @@ -436,6 +462,15 @@ JobParameterDefinitionDict, JobParameterDefinitionParam, ) +from databricks.bundles.jobs._models.job_permission import ( + JobPermission, + JobPermissionDict, + JobPermissionParam, +) +from databricks.bundles.jobs._models.job_permission_level import ( + JobPermissionLevel, + JobPermissionLevelParam, +) from databricks.bundles.jobs._models.job_run_as import ( JobRunAs, JobRunAsDict, @@ -478,11 +513,6 @@ PeriodicTriggerConfigurationTimeUnit, PeriodicTriggerConfigurationTimeUnitParam, ) -from databricks.bundles.jobs._models.permission import ( - Permission, - PermissionDict, - PermissionParam, -) from databricks.bundles.jobs._models.pipeline_params import ( PipelineParams, PipelineParamsDict, diff --git a/experimental/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py b/experimental/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py new file mode 100644 index 0000000000..7d30a1bad1 --- /dev/null +++ b/experimental/python/databricks/bundles/jobs/_models/clean_rooms_notebook_task.py @@ -0,0 +1,74 @@ +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import ( + VariableOr, + VariableOrDict, + VariableOrOptional, +) + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class CleanRoomsNotebookTask: + """""" + + clean_room_name: VariableOr[str] + """ + The clean room that the notebook belongs to. + """ + + notebook_name: VariableOr[str] + """ + Name of the notebook being run. + """ + + etag: VariableOrOptional[str] = None + """ + Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version). + It can be fetched by calling the :method:cleanroomassets/get API. + """ + + notebook_base_parameters: VariableOrDict[str] = field(default_factory=dict) + """ + Base parameters to be used for the clean room notebook job. + """ + + @classmethod + def from_dict(cls, value: "CleanRoomsNotebookTaskDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "CleanRoomsNotebookTaskDict": + return _transform_to_json_value(self) # type:ignore + + +class CleanRoomsNotebookTaskDict(TypedDict, total=False): + """""" + + clean_room_name: VariableOr[str] + """ + The clean room that the notebook belongs to. + """ + + notebook_name: VariableOr[str] + """ + Name of the notebook being run. + """ + + etag: VariableOrOptional[str] + """ + Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the latest version). + It can be fetched by calling the :method:cleanroomassets/get API. + """ + + notebook_base_parameters: VariableOrDict[str] + """ + Base parameters to be used for the clean room notebook job. 
+    """
+
+
+CleanRoomsNotebookTaskParam = CleanRoomsNotebookTaskDict | CleanRoomsNotebookTask
diff --git a/experimental/python/databricks/bundles/jobs/_models/compute_config.py b/experimental/python/databricks/bundles/jobs/_models/compute_config.py
new file mode 100644
index 0000000000..72c0e24160
--- /dev/null
+++ b/experimental/python/databricks/bundles/jobs/_models/compute_config.py
@@ -0,0 +1,62 @@
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, TypedDict
+
+from databricks.bundles.core._transform import _transform
+from databricks.bundles.core._transform_to_json import _transform_to_json_value
+from databricks.bundles.core._variable import VariableOr, VariableOrOptional
+
+if TYPE_CHECKING:
+    from typing_extensions import Self
+
+
+@dataclass(kw_only=True)
+class ComputeConfig:
+    """
+    :meta private: [EXPERIMENTAL]
+
+    Next field: 4
+    """
+
+    gpu_node_pool_id: VariableOr[str]
+    """
+    ID of the GPU pool to use.
+    """
+
+    num_gpus: VariableOr[int]
+    """
+    Number of GPUs.
+    """
+
+    gpu_type: VariableOrOptional[str] = None
+    """
+    GPU type.
+    """
+
+    @classmethod
+    def from_dict(cls, value: "ComputeConfigDict") -> "Self":
+        return _transform(cls, value)
+
+    def as_dict(self) -> "ComputeConfigDict":
+        return _transform_to_json_value(self)  # type:ignore
+
+
+class ComputeConfigDict(TypedDict, total=False):
+    """"""
+
+    gpu_node_pool_id: VariableOr[str]
+    """
+    ID of the GPU pool to use.
+    """
+
+    num_gpus: VariableOr[int]
+    """
+    Number of GPUs.
+    """
+
+    gpu_type: VariableOrOptional[str]
+    """
+    GPU type.
+    """
+
+
+ComputeConfigParam = ComputeConfigDict | ComputeConfig
diff --git a/experimental/python/databricks/bundles/jobs/_models/condition.py b/experimental/python/databricks/bundles/jobs/_models/condition.py
index d7c1b25bc7..d1b3566d5d 100644
--- a/experimental/python/databricks/bundles/jobs/_models/condition.py
+++ b/experimental/python/databricks/bundles/jobs/_models/condition.py
@@ -3,6 +3,10 @@
 
 
 class Condition(Enum):
+    """
+    :meta private: [EXPERIMENTAL]
+    """
+
     ANY_UPDATED = "ANY_UPDATED"
     ALL_UPDATED = "ALL_UPDATED"
 
diff --git a/experimental/python/databricks/bundles/jobs/_models/gen_ai_compute_task.py b/experimental/python/databricks/bundles/jobs/_models/gen_ai_compute_task.py
new file mode 100644
index 0000000000..81b8fdb78e
--- /dev/null
+++ b/experimental/python/databricks/bundles/jobs/_models/gen_ai_compute_task.py
@@ -0,0 +1,121 @@
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, TypedDict
+
+from databricks.bundles.core._transform import _transform
+from databricks.bundles.core._transform_to_json import _transform_to_json_value
+from databricks.bundles.core._variable import VariableOr, VariableOrOptional
+from databricks.bundles.jobs._models.compute_config import (
+    ComputeConfig,
+    ComputeConfigParam,
+)
+from databricks.bundles.jobs._models.source import Source, SourceParam
+
+if TYPE_CHECKING:
+    from typing_extensions import Self
+
+
+@dataclass(kw_only=True)
+class GenAiComputeTask:
+    """
+    :meta private: [EXPERIMENTAL]
+
+    Next field: 9
+    """
+
+    dl_runtime_image: VariableOr[str]
+    """
+    Runtime image
+    """
+
+    command: VariableOrOptional[str] = None
+    """
+    Command launcher to run the actual script, e.g. bash, python, etc.
+    """
+
+    compute: VariableOrOptional[ComputeConfig] = None
+
+    mlflow_experiment_name: VariableOrOptional[str] = None
+    """
+    Optional string containing the name of the MLflow experiment to log the run to. If the name is not
+    found, the backend will create the MLflow experiment using the name.
+    """
+
+    source: VariableOrOptional[Source] = None
+    """
+    Optional location type of the training script. When set to `WORKSPACE`, the script will be retrieved from the local Databricks workspace. When set to `GIT`, the script will be retrieved from a Git repository
+    defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.
+    * `WORKSPACE`: Script is located in the Databricks workspace.
+    * `GIT`: Script is located in a cloud Git provider.
+    """
+
+    training_script_path: VariableOrOptional[str] = None
+    """
+    The training script file path to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required.
+    """
+
+    yaml_parameters: VariableOrOptional[str] = None
+    """
+    Optional string containing model parameters passed to the training script in YAML format.
+    If present, then the content in yaml_parameters_file_path will be ignored.
+    """
+
+    yaml_parameters_file_path: VariableOrOptional[str] = None
+    """
+    Optional path to a YAML file containing model parameters passed to the training script.
+    """
+
+    @classmethod
+    def from_dict(cls, value: "GenAiComputeTaskDict") -> "Self":
+        return _transform(cls, value)
+
+    def as_dict(self) -> "GenAiComputeTaskDict":
+        return _transform_to_json_value(self)  # type:ignore
+
+
+class GenAiComputeTaskDict(TypedDict, total=False):
+    """"""
+
+    dl_runtime_image: VariableOr[str]
+    """
+    Runtime image
+    """
+
+    command: VariableOrOptional[str]
+    """
+    Command launcher to run the actual script, e.g. bash, python, etc.
+    """
+
+    compute: VariableOrOptional[ComputeConfigParam]
+
+    mlflow_experiment_name: VariableOrOptional[str]
+    """
+    Optional string containing the name of the MLflow experiment to log the run to. If the name is not
+    found, the backend will create the MLflow experiment using the name.
+    """
+
+    source: VariableOrOptional[SourceParam]
+    """
+    Optional location type of the training script. When set to `WORKSPACE`, the script will be retrieved from the local Databricks workspace. When set to `GIT`, the script will be retrieved from a Git repository
+    defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.
+    * `WORKSPACE`: Script is located in the Databricks workspace.
+    * `GIT`: Script is located in a cloud Git provider.
+    """
+
+    training_script_path: VariableOrOptional[str]
+    """
+    The training script file path to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required.
+    """
+
+    yaml_parameters: VariableOrOptional[str]
+    """
+    Optional string containing model parameters passed to the training script in YAML format.
+    If present, then the content in yaml_parameters_file_path will be ignored.
+    """
+
+    yaml_parameters_file_path: VariableOrOptional[str]
+    """
+    Optional path to a YAML file containing model parameters passed to the training script.
+ """ + + +GenAiComputeTaskParam = GenAiComputeTaskDict | GenAiComputeTask diff --git a/experimental/python/databricks/bundles/jobs/_models/job.py b/experimental/python/databricks/bundles/jobs/_models/job.py index 3fa1496b17..df8e7fe393 100644 --- a/experimental/python/databricks/bundles/jobs/_models/job.py +++ b/experimental/python/databricks/bundles/jobs/_models/job.py @@ -17,10 +17,7 @@ CronSchedule, CronScheduleParam, ) -from databricks.bundles.jobs._models.git_source import ( - GitSource, - GitSourceParam, -) +from databricks.bundles.jobs._models.git_source import GitSource, GitSourceParam from databricks.bundles.jobs._models.job_cluster import JobCluster, JobClusterParam from databricks.bundles.jobs._models.job_email_notifications import ( JobEmailNotifications, @@ -38,6 +35,10 @@ JobParameterDefinition, JobParameterDefinitionParam, ) +from databricks.bundles.jobs._models.job_permission import ( + JobPermission, + JobPermissionParam, +) from databricks.bundles.jobs._models.job_run_as import JobRunAs, JobRunAsParam from databricks.bundles.jobs._models.jobs_health_rules import ( JobsHealthRules, @@ -47,7 +48,6 @@ PerformanceTarget, PerformanceTargetParam, ) -from databricks.bundles.jobs._models.permission import Permission, PermissionParam from databricks.bundles.jobs._models.queue_settings import ( QueueSettings, QueueSettingsParam, @@ -144,10 +144,12 @@ class Job(Resource): performance_target: VariableOrOptional[PerformanceTarget] = None """ + :meta private: [EXPERIMENTAL] + PerformanceTarget defines how performant or cost efficient the execution of run on serverless should be. """ - permissions: VariableOrList[Permission] = field(default_factory=list) + permissions: VariableOrList[JobPermission] = field(default_factory=list) queue: VariableOrOptional[QueueSettings] = None """ @@ -272,10 +274,12 @@ class JobDict(TypedDict, total=False): performance_target: VariableOrOptional[PerformanceTargetParam] """ + :meta private: [EXPERIMENTAL] + PerformanceTarget defines how performant or cost efficient the execution of run on serverless should be. """ - permissions: VariableOrList[PermissionParam] + permissions: VariableOrList[JobPermissionParam] queue: VariableOrOptional[QueueSettingsParam] """ diff --git a/experimental/python/databricks/bundles/jobs/_models/job_email_notifications.py b/experimental/python/databricks/bundles/jobs/_models/job_email_notifications.py index d3070b97be..4cca930023 100644 --- a/experimental/python/databricks/bundles/jobs/_models/job_email_notifications.py +++ b/experimental/python/databricks/bundles/jobs/_models/job_email_notifications.py @@ -3,7 +3,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value -from databricks.bundles.core._variable import VariableOrList, VariableOrOptional +from databricks.bundles.core._variable import VariableOrList if TYPE_CHECKING: from typing_extensions import Self @@ -13,12 +13,6 @@ class JobEmailNotifications: """""" - no_alert_for_skipped_runs: VariableOrOptional[bool] = None - """ - If true, do not send email to recipients specified in `on_failure` if the run is skipped. - This field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field. 
- """ - on_duration_warning_threshold_exceeded: VariableOrList[str] = field( default_factory=list ) @@ -59,12 +53,6 @@ def as_dict(self) -> "JobEmailNotificationsDict": class JobEmailNotificationsDict(TypedDict, total=False): """""" - no_alert_for_skipped_runs: VariableOrOptional[bool] - """ - If true, do not send email to recipients specified in `on_failure` if the run is skipped. - This field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field. - """ - on_duration_warning_threshold_exceeded: VariableOrList[str] """ A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent. diff --git a/experimental/python/databricks/bundles/jobs/_models/job_permission.py b/experimental/python/databricks/bundles/jobs/_models/job_permission.py new file mode 100644 index 0000000000..986b035933 --- /dev/null +++ b/experimental/python/databricks/bundles/jobs/_models/job_permission.py @@ -0,0 +1,60 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, TypedDict + +from databricks.bundles.core._transform import _transform +from databricks.bundles.core._transform_to_json import _transform_to_json_value +from databricks.bundles.core._variable import VariableOr, VariableOrOptional +from databricks.bundles.jobs._models.job_permission_level import ( + JobPermissionLevel, + JobPermissionLevelParam, +) + +if TYPE_CHECKING: + from typing_extensions import Self + + +@dataclass(kw_only=True) +class JobPermission: + """""" + + level: VariableOr[JobPermissionLevel] + + group_name: VariableOrOptional[str] = None + + service_principal_name: VariableOrOptional[str] = None + + user_name: VariableOrOptional[str] = None + + def __post_init__(self): + union_fields = [ + self.user_name, + self.service_principal_name, + self.group_name, + ] + + if sum(f is not None for f in union_fields) != 1: + raise ValueError( + "JobPermission must specify exactly one of 'user_name', 'service_principal_name', 'group_name'" + ) + + @classmethod + def from_dict(cls, value: "JobPermissionDict") -> "Self": + return _transform(cls, value) + + def as_dict(self) -> "JobPermissionDict": + return _transform_to_json_value(self) # type:ignore + + +class JobPermissionDict(TypedDict, total=False): + """""" + + level: VariableOr[JobPermissionLevelParam] + + group_name: VariableOrOptional[str] + + service_principal_name: VariableOrOptional[str] + + user_name: VariableOrOptional[str] + + +JobPermissionParam = JobPermissionDict | JobPermission diff --git a/experimental/python/databricks/bundles/jobs/_models/job_permission_level.py b/experimental/python/databricks/bundles/jobs/_models/job_permission_level.py new file mode 100644 index 0000000000..fc4e5ebac7 --- /dev/null +++ b/experimental/python/databricks/bundles/jobs/_models/job_permission_level.py @@ -0,0 +1,14 @@ +from enum import Enum +from typing import Literal + + +class JobPermissionLevel(Enum): + CAN_MANAGE = "CAN_MANAGE" + CAN_MANAGE_RUN = "CAN_MANAGE_RUN" + CAN_VIEW = "CAN_VIEW" + IS_OWNER = "IS_OWNER" + + +JobPermissionLevelParam = ( + Literal["CAN_MANAGE", "CAN_MANAGE_RUN", "CAN_VIEW", "IS_OWNER"] | JobPermissionLevel +) diff --git a/experimental/python/databricks/bundles/jobs/_models/performance_target.py b/experimental/python/databricks/bundles/jobs/_models/performance_target.py index 925e536386..8fde1dd4c2 100644 --- 
a/experimental/python/databricks/bundles/jobs/_models/performance_target.py +++ b/experimental/python/databricks/bundles/jobs/_models/performance_target.py @@ -4,6 +4,8 @@ class PerformanceTarget(Enum): """ + :meta private: [EXPERIMENTAL] + PerformanceTarget defines how performant (lower latency) or cost efficient the execution of run on serverless compute should be. The performance mode on the job or pipeline should map to a performance setting that is passed to Cluster Manager (see cluster-common PerformanceTarget). @@ -11,8 +13,9 @@ class PerformanceTarget(Enum): PERFORMANCE_OPTIMIZED = "PERFORMANCE_OPTIMIZED" COST_OPTIMIZED = "COST_OPTIMIZED" + BALANCED = "BALANCED" PerformanceTargetParam = ( - Literal["PERFORMANCE_OPTIMIZED", "COST_OPTIMIZED"] | PerformanceTarget + Literal["PERFORMANCE_OPTIMIZED", "COST_OPTIMIZED", "BALANCED"] | PerformanceTarget ) diff --git a/experimental/python/databricks/bundles/jobs/_models/spark_jar_task.py b/experimental/python/databricks/bundles/jobs/_models/spark_jar_task.py index 82b47ffcc8..c4ebcaaf75 100644 --- a/experimental/python/databricks/bundles/jobs/_models/spark_jar_task.py +++ b/experimental/python/databricks/bundles/jobs/_models/spark_jar_task.py @@ -3,11 +3,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value -from databricks.bundles.core._variable import ( - VariableOr, - VariableOrList, - VariableOrOptional, -) +from databricks.bundles.core._variable import VariableOr, VariableOrList if TYPE_CHECKING: from typing_extensions import Self @@ -24,11 +20,6 @@ class SparkJarTask: The code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail. """ - jar_uri: VariableOrOptional[str] = None - """ - Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create. - """ - parameters: VariableOrList[str] = field(default_factory=list) """ Parameters passed to the main method. @@ -54,11 +45,6 @@ class SparkJarTaskDict(TypedDict, total=False): The code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail. """ - jar_uri: VariableOrOptional[str] - """ - Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create. - """ - parameters: VariableOrList[str] """ Parameters passed to the main method. 
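Note for bundle authors: dropping the long-deprecated `jar_uri` field is a breaking change for any bundle still setting it. The removed docstring points at the `libraries` field as the replacement. A minimal migration sketch, assuming `Task` in this package follows the REST API shape and accepts `libraries` entries of the form `{"jar": ...}` (that field is not shown in this patch, so treat it as an assumption; the class name and JAR path are hypothetical):

```python
from databricks.bundles.jobs import Task

# Before: SparkJarTask(jar_uri="dbfs:/jars/etl.jar", ...)  # field removed above
# After: reference the JAR through task libraries instead (assumed field shape).
task = Task.from_dict(
    {
        "task_key": "etl",
        "spark_jar_task": {
            "main_class_name": "com.example.Main",  # hypothetical class
            "parameters": ["--date", "2025-04-04"],
        },
        "libraries": [{"jar": "dbfs:/jars/etl.jar"}],  # hypothetical path
    }
)
```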
diff --git a/experimental/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py b/experimental/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py index aad662874d..c89b0c4011 100644 --- a/experimental/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py +++ b/experimental/python/databricks/bundles/jobs/_models/table_update_trigger_configuration.py @@ -12,7 +12,9 @@ @dataclass(kw_only=True) class TableUpdateTriggerConfiguration: - """""" + """ + :meta private: [EXPERIMENTAL] + """ condition: VariableOrOptional[Condition] = None """ diff --git a/experimental/python/databricks/bundles/jobs/_models/task.py b/experimental/python/databricks/bundles/jobs/_models/task.py index c95b96e464..b9d9886ec3 100644 --- a/experimental/python/databricks/bundles/jobs/_models/task.py +++ b/experimental/python/databricks/bundles/jobs/_models/task.py @@ -16,6 +16,10 @@ VariableOrList, VariableOrOptional, ) +from databricks.bundles.jobs._models.clean_rooms_notebook_task import ( + CleanRoomsNotebookTask, + CleanRoomsNotebookTaskParam, +) from databricks.bundles.jobs._models.condition_task import ( ConditionTask, ConditionTaskParam, @@ -25,6 +29,10 @@ ForEachTask, ForEachTaskParam, ) +from databricks.bundles.jobs._models.gen_ai_compute_task import ( + GenAiComputeTask, + GenAiComputeTaskParam, +) from databricks.bundles.jobs._models.jobs_health_rules import ( JobsHealthRules, JobsHealthRulesParam, @@ -91,6 +99,12 @@ class Task: On Update or Reset, this field is used to reference the tasks to be updated or reset. """ + clean_rooms_notebook_task: VariableOrOptional[CleanRoomsNotebookTask] = None + """ + The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook + when the `clean_rooms_notebook_task` field is present. + """ + condition_task: VariableOrOptional[ConditionTask] = None """ The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present. @@ -141,6 +155,11 @@ class Task: The task executes a nested task for every input provided when the `for_each_task` field is present. """ + gen_ai_compute_task: VariableOrOptional[GenAiComputeTask] = None + """ + :meta private: [EXPERIMENTAL] + """ + health: VariableOrOptional[JobsHealthRules] = None job_cluster_key: VariableOrOptional[str] = None @@ -281,6 +300,12 @@ class TaskDict(TypedDict, total=False): On Update or Reset, this field is used to reference the tasks to be updated or reset. """ + clean_rooms_notebook_task: VariableOrOptional[CleanRoomsNotebookTaskParam] + """ + The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook + when the `clean_rooms_notebook_task` field is present. + """ + condition_task: VariableOrOptional[ConditionTaskParam] """ The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present. @@ -331,6 +356,11 @@ class TaskDict(TypedDict, total=False): The task executes a nested task for every input provided when the `for_each_task` field is present. 
""" + gen_ai_compute_task: VariableOrOptional[GenAiComputeTaskParam] + """ + :meta private: [EXPERIMENTAL] + """ + health: VariableOrOptional[JobsHealthRulesParam] job_cluster_key: VariableOrOptional[str] diff --git a/experimental/python/databricks/bundles/jobs/_models/task_email_notifications.py b/experimental/python/databricks/bundles/jobs/_models/task_email_notifications.py index 4a6e405ab5..a0ead50c37 100644 --- a/experimental/python/databricks/bundles/jobs/_models/task_email_notifications.py +++ b/experimental/python/databricks/bundles/jobs/_models/task_email_notifications.py @@ -3,7 +3,7 @@ from databricks.bundles.core._transform import _transform from databricks.bundles.core._transform_to_json import _transform_to_json_value -from databricks.bundles.core._variable import VariableOrList, VariableOrOptional +from databricks.bundles.core._variable import VariableOrList if TYPE_CHECKING: from typing_extensions import Self @@ -13,12 +13,6 @@ class TaskEmailNotifications: """""" - no_alert_for_skipped_runs: VariableOrOptional[bool] = None - """ - If true, do not send email to recipients specified in `on_failure` if the run is skipped. - This field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field. - """ - on_duration_warning_threshold_exceeded: VariableOrList[str] = field( default_factory=list ) @@ -59,12 +53,6 @@ def as_dict(self) -> "TaskEmailNotificationsDict": class TaskEmailNotificationsDict(TypedDict, total=False): """""" - no_alert_for_skipped_runs: VariableOrOptional[bool] - """ - If true, do not send email to recipients specified in `on_failure` if the run is skipped. - This field is `deprecated`. Please use the `notification_settings.no_alert_for_skipped_runs` field. - """ - on_duration_warning_threshold_exceeded: VariableOrList[str] """ A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent. 
diff --git a/experimental/python/databricks/bundles/jobs/_models/trigger_settings.py b/experimental/python/databricks/bundles/jobs/_models/trigger_settings.py index 6914e257df..5256efce67 100644 --- a/experimental/python/databricks/bundles/jobs/_models/trigger_settings.py +++ b/experimental/python/databricks/bundles/jobs/_models/trigger_settings.py @@ -42,6 +42,9 @@ class TriggerSettings: """ table_update: VariableOrOptional[TableUpdateTriggerConfiguration] = None + """ + :meta private: [EXPERIMENTAL] + """ def __post_init__(self): union_fields = [ @@ -82,6 +85,9 @@ class TriggerSettingsDict(TypedDict, total=False): """ table_update: VariableOrOptional[TableUpdateTriggerConfigurationParam] + """ + :meta private: [EXPERIMENTAL] + """ TriggerSettingsParam = TriggerSettingsDict | TriggerSettings diff --git a/experimental/python/databricks_tests/jobs/test_permission.py b/experimental/python/databricks_tests/jobs/test_permission.py index 9997b61615..d64d6b790a 100644 --- a/experimental/python/databricks_tests/jobs/test_permission.py +++ b/experimental/python/databricks_tests/jobs/test_permission.py @@ -1,11 +1,12 @@ import pytest -from databricks.bundles.jobs import Permission +from databricks.bundles.jobs import JobPermission +from databricks.bundles.jobs._models.job_permission_level import JobPermissionLevel def test_oneof_one(): - permission = Permission( - level="CAN_VIEW", + permission = JobPermission( + level=JobPermissionLevel.CAN_VIEW, user_name="test@example.com", ) @@ -14,23 +15,23 @@ def test_oneof_one(): def test_oneof_none(): with pytest.raises(ValueError) as exc_info: - Permission(level="CAN_VIEW") # FIXME should be enum + JobPermission(level=JobPermissionLevel.CAN_VIEW) assert exc_info.exconly() == ( - "ValueError: Permission must specify exactly one of 'user_name', " + "ValueError: JobPermission must specify exactly one of 'user_name', " "'service_principal_name', 'group_name'" ) def test_oneof_both(): with pytest.raises(ValueError) as exc_info: - Permission( - level="CAN_VIEW", # FIXME should be enum + JobPermission( + level=JobPermissionLevel.CAN_VIEW, user_name="test@example.com", service_principal_name="secret", ) assert exc_info.exconly() == ( - "ValueError: Permission must specify exactly one of 'user_name', " + "ValueError: JobPermission must specify exactly one of 'user_name', " "'service_principal_name', 'group_name'" )
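Taken together, the `Permission` to `JobPermission` rename, the new `JobPermissionLevel` enum, and the `__post_init__` check give job permissions a typed, validated shape. A short usage sketch built only from constructs introduced in this patch (the job name is illustrative):

```python
from databricks.bundles.jobs import Job, JobPermission, JobPermissionLevel

job = Job(
    name="example-job",  # illustrative
    permissions=[
        # Exactly one of user_name / service_principal_name / group_name,
        # enforced by JobPermission.__post_init__ as the tests above exercise.
        JobPermission(
            level=JobPermissionLevel.CAN_MANAGE_RUN,
            user_name="test@example.com",
        ),
        # Dict form also works; JobPermissionLevelParam accepts string literals.
        JobPermission.from_dict({"level": "CAN_VIEW", "group_name": "data-readers"}),
    ],
)
```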