File tree: 1 file changed, +3 −2 lines changed

Original file line number | Diff line number | Diff line change
@@ -313,6 +313,7 @@ def test_delete_partitioned_table_positional_deletes_empty_batch(spark: SparkSession, ...
313 313  @pytest.mark.filterwarnings("ignore:Merge on read is not yet supported, falling back to copy-on-write")
314 314  def test_read_multiple_batches_in_task_with_position_deletes(spark: SparkSession, session_catalog: RestCatalog) -> None:
315 315      identifier = "default.test_read_multiple_batches_in_task_with_position_deletes"
    316 +    multiplier = 10
316 317
317 318      run_spark_commands(
318 319          spark,
@@ -337,7 +338,7 @@ def test_read_multiple_batches_in_task_with_position_deletes(spark: SparkSession
337 338
338 339      arrow_table = pa.Table.from_arrays(
339 340          [
340     -            pa.array(list(range(1, 1001)) * 100),
    341 +            pa.array(list(range(1, 1001)) * multiplier),
341 342          ],
342 343          schema=pa.schema([pa.field("number", pa.int32())]),
343 344      )
@@ -358,7 +359,7 @@ def test_read_multiple_batches_in_task_with_position_deletes(spark: SparkSession
358 359      reader = tbl.scan(row_filter="number <= 50").to_arrow_batch_reader()
359 360      assert isinstance(reader, pa.RecordBatchReader)
360 361      pyiceberg_count = len(reader.read_all())
361     -    expected_count = 46 * 100
    362 +    expected_count = 46 * multiplier
362 363      assert pyiceberg_count == expected_count, f"Failing check. {pyiceberg_count} != {expected_count}"
363 364
364365
You can’t perform that action at this time.
0 commit comments