Commit e21d5db

Convert TestBindJobToExistingJob into an acceptance test (#3072)

## Why

One change in the series of changes for converting integration tests into acceptance tests. This will allow for easier testing of various backing solutions for bundle deployment.
1 parent 913840c · commit e21d5db

File tree

6 files changed: +149 −58 lines changed

Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
+bundle:
+  name: test-bundle-$UNIQUE_NAME
+
+workspace:
+  root_path: "~/.bundle/test-bind-job-$UNIQUE_NAME"
+
+resources:
+  jobs:
+    foo:
+      name: test-job-basic-$UNIQUE_NAME
+      tasks:
+        - task_key: my_notebook_task
+          new_cluster:
+            spark_version: $DEFAULT_SPARK_VERSION
+            node_type_id: $NODE_TYPE_ID
+            num_workers: 1
+          spark_python_task:
+            python_file: ./hello_world.py
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+print("Hello World!")
Lines changed: 68 additions & 0 deletions
@@ -0,0 +1,68 @@
+
+=== Create a pre-defined job:
+Created job with ID: [NUMID]
+
+=== Bind job:
+>>> [CLI] bundle deployment bind foo [NUMID] --auto-approve
+Updating deployment state...
+Successfully bound job with an id '[NUMID]'. Run 'bundle deploy' to deploy changes to your workspace
+
+=== Remove .databricks directory to simulate fresh deployment:
+>>> rm -rf .databricks
+
+=== Deploy bundle:
+>>> [CLI] bundle deploy --force-lock --auto-approve
+Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/test-bind-job-[UNIQUE_NAME]/files...
+Deploying resources...
+Updating deployment state...
+Deployment complete!
+
+=== Read the pre-defined job:
+>>> [CLI] jobs get [NUMID]
+{
+  "job_id": [NUMID],
+  "settings": {
+    "name": "test-job-basic-[UNIQUE_NAME]",
+    "tasks": [
+      {
+        "task_key": "my_notebook_task",
+        "spark_python_task": {
+          "python_file": "/Workspace/Users/[USERNAME]/.bundle/test-bind-job-[UNIQUE_NAME]/files/hello_world.py"
+        }
+      }
+    ]
+  }
+}
+
+=== Unbind the job:
+>>> [CLI] bundle deployment unbind foo
+Updating deployment state...
+
+=== Remove .databricks directory to simulate fresh deployment:
+>>> rm -rf .databricks
+
+=== Destroy the bundle:
+>>> [CLI] bundle destroy --auto-approve
+All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/test-bind-job-[UNIQUE_NAME]
+
+Deleting files...
+Destroy complete!
+
+=== Read the pre-defined job again (expecting it still exists):
+>>> [CLI] jobs get [NUMID]
+{
+  "job_id": [NUMID],
+  "settings": {
+    "name": "test-job-basic-[UNIQUE_NAME]",
+    "tasks": [
+      {
+        "task_key": "my_notebook_task",
+        "spark_python_task": {
+          "python_file": "/Workspace/Users/[USERNAME]/.bundle/test-bind-job-[UNIQUE_NAME]/files/hello_world.py"
+        }
+      }
+    ]
+  }
+}
+
+=== Delete the pre-defined job [NUMID]:0
Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
+title "Create a pre-defined job:\n"
+
+PYTHON_FILE="/Workspace/Users/${CURRENT_USER_NAME}/initial_hello_world.py"
+
+JOB_ID=$($CLI jobs create --json '
+{
+  "name": "test-job-bind-'${UNIQUE_NAME}'",
+  "tasks": [
+    {
+      "task_key": "my_notebook_task",
+      "new_cluster": {
+        "spark_version": "'${DEFAULT_SPARK_VERSION}'",
+        "node_type_id": "'${NODE_TYPE_ID}'",
+        "num_workers": 1
+      },
+      "spark_python_task": {
+        "python_file": "'${PYTHON_FILE}'"
+      }
+    }
+  ]
+}' | jq -r '.job_id')
+
+echo "Created job with ID: $JOB_ID"
+
+envsubst < databricks.yml.tmpl > databricks.yml
+
+cleanup() {
+  title "Delete the pre-defined job $JOB_ID:"
+  $CLI jobs delete $JOB_ID
+  echo $?
+}
+trap cleanup EXIT
+
+title "Bind job:"
+trace $CLI bundle deployment bind foo $JOB_ID --auto-approve
+
+title "Remove .databricks directory to simulate fresh deployment:"
+trace rm -rf .databricks
+
+title "Deploy bundle:"
+trace $CLI bundle deploy --force-lock --auto-approve
+
+title "Read the pre-defined job:"
+trace $CLI jobs get $JOB_ID | jq '{job_id, settings: {name: .settings.name, tasks: [.settings.tasks[] | {task_key, spark_python_task: .spark_python_task}]}}'
+
+title "Unbind the job:"
+trace $CLI bundle deployment unbind foo
+
+title "Remove .databricks directory to simulate fresh deployment:"
+trace rm -rf .databricks
+
+title "Destroy the bundle:"
+trace $CLI bundle destroy --auto-approve
+
+title "Read the pre-defined job again (expecting it still exists):"
+trace $CLI jobs get ${JOB_ID} | jq '{job_id, settings: {name: .settings.name, tasks: [.settings.tasks[] | {task_key, spark_python_task: .spark_python_task}]}}'
Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+Local = true
+Cloud = true
+
+Ignore = [
+  "databricks.yml",
+]

integration/bundle/bind_resource_test.go

Lines changed: 0 additions & 58 deletions
@@ -71,64 +71,6 @@ func TestBindSchemaToExistingSchema(t *testing.T) {
 	require.Equal(t, postDestroySchema.SchemaId, predefinedSchema.SchemaId)
 }
 
-func TestBindJobToExistingJob(t *testing.T) {
-	ctx, wt := acc.WorkspaceTest(t)
-	gt := &generateJobTest{T: wt, w: wt.W}
-
-	nodeTypeId := testutil.GetCloud(t).NodeTypeID()
-	uniqueId := uuid.New().String()
-	bundleRoot := initTestTemplate(t, ctx, "basic", map[string]any{
-		"unique_id":     uniqueId,
-		"spark_version": "13.3.x-scala2.12",
-		"node_type_id":  nodeTypeId,
-	})
-
-	jobId := gt.createTestJob(ctx)
-	t.Cleanup(func() {
-		gt.destroyJob(ctx, jobId)
-	})
-
-	ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
-	c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", strconv.FormatInt(jobId, 10), "--auto-approve")
-	_, _, err := c.Run()
-	require.NoError(t, err)
-
-	// Remove .databricks directory to simulate a fresh deployment
-	err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks"))
-	require.NoError(t, err)
-
-	deployBundle(t, ctx, bundleRoot)
-
-	w, err := databricks.NewWorkspaceClient()
-	require.NoError(t, err)
-
-	// Check that job is bound and updated with config from bundle
-	job, err := w.Jobs.Get(ctx, jobs.GetJobRequest{
-		JobId: jobId,
-	})
-	require.NoError(t, err)
-	require.Equal(t, job.Settings.Name, "test-job-basic-"+uniqueId)
-	require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py")
-
-	c = testcli.NewRunner(t, ctx, "bundle", "deployment", "unbind", "foo")
-	_, _, err = c.Run()
-	require.NoError(t, err)
-
-	// Remove .databricks directory to simulate a fresh deployment
-	err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks"))
-	require.NoError(t, err)
-
-	destroyBundle(t, ctx, bundleRoot)
-
-	// Check that job is unbound and exists after bundle is destroyed
-	job, err = w.Jobs.Get(ctx, jobs.GetJobRequest{
-		JobId: jobId,
-	})
-	require.NoError(t, err)
-	require.Equal(t, job.Settings.Name, "test-job-basic-"+uniqueId)
-	require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py")
-}
-
 func TestAbortBind(t *testing.T) {
 	ctx, wt := acc.WorkspaceTest(t)
 	gt := &generateJobTest{T: wt, w: wt.W}