Commit fed83e8

Test
1 parent 2fc9e78 commit fed83e8

File tree

1 file changed: +52 −52 lines changed


tests/integration/test_deletes.py

Lines changed: 52 additions & 52 deletions
@@ -309,58 +309,58 @@ def test_delete_partitioned_table_positional_deletes_empty_batch(spark: SparkSes
     assert len(reader.read_all()) == 0
 
 
-@pytest.mark.integration
-@pytest.mark.filterwarnings("ignore:Merge on read is not yet supported, falling back to copy-on-write")
-def test_read_multiple_batches_in_task_with_position_deletes(spark: SparkSession, session_catalog: RestCatalog) -> None:
-    identifier = "default.test_read_multiple_batches_in_task_with_position_deletes"
-    multiplier = 10
-
-    run_spark_commands(
-        spark,
-        [
-            f"DROP TABLE IF EXISTS {identifier}",
-            f"""
-            CREATE TABLE {identifier} (
-                number int
-            )
-            USING iceberg
-            TBLPROPERTIES(
-                'format-version' = 2,
-                'write.delete.mode'='merge-on-read',
-                'write.update.mode'='merge-on-read',
-                'write.merge.mode'='merge-on-read'
-            )
-            """,
-        ],
-    )
-
-    tbl = session_catalog.load_table(identifier)
-
-    arrow_table = pa.Table.from_arrays(
-        [
-            pa.array(list(range(1, 1001)) * multiplier),
-        ],
-        schema=pa.schema([pa.field("number", pa.int32())]),
-    )
-
-    tbl.append(arrow_table)
-
-    run_spark_commands(
-        spark,
-        [
-            f"""
-            DELETE FROM {identifier} WHERE number in (1, 2, 3, 4)
-            """,
-        ],
-    )
-
-    tbl.refresh()
-
-    reader = tbl.scan(row_filter="number <= 50").to_arrow_batch_reader()
-    assert isinstance(reader, pa.RecordBatchReader)
-    pyiceberg_count = len(reader.read_all())
-    expected_count = 46 * multiplier
-    assert pyiceberg_count == expected_count, f"Failing check. {pyiceberg_count} != {expected_count}"
+# @pytest.mark.integration
+# @pytest.mark.filterwarnings("ignore:Merge on read is not yet supported, falling back to copy-on-write")
+# def test_read_multiple_batches_in_task_with_position_deletes(spark: SparkSession, session_catalog: RestCatalog) -> None:
+#     identifier = "default.test_read_multiple_batches_in_task_with_position_deletes"
+#     multiplier = 10
+#
+#     run_spark_commands(
+#         spark,
+#         [
+#             f"DROP TABLE IF EXISTS {identifier}",
+#             f"""
+#             CREATE TABLE {identifier} (
+#                 number int
+#             )
+#             USING iceberg
+#             TBLPROPERTIES(
+#                 'format-version' = 2,
+#                 'write.delete.mode'='merge-on-read',
+#                 'write.update.mode'='merge-on-read',
+#                 'write.merge.mode'='merge-on-read'
+#             )
+#             """,
+#         ],
+#     )
+#
+#     tbl = session_catalog.load_table(identifier)
+#
+#     arrow_table = pa.Table.from_arrays(
+#         [
+#             pa.array(list(range(1, 1001)) * multiplier),
+#         ],
+#         schema=pa.schema([pa.field("number", pa.int32())]),
+#     )
+#
+#     tbl.append(arrow_table)
+#
+#     run_spark_commands(
+#         spark,
+#         [
+#             f"""
+#             DELETE FROM {identifier} WHERE number in (1, 2, 3, 4)
+#             """,
+#         ],
+#     )
+#
+#     tbl.refresh()
+#
+#     reader = tbl.scan(row_filter="number <= 50").to_arrow_batch_reader()
+#     assert isinstance(reader, pa.RecordBatchReader)
+#     pyiceberg_count = len(reader.read_all())
+#     expected_count = 46 * multiplier
+#     assert pyiceberg_count == expected_count, f"Failing check. {pyiceberg_count} != {expected_count}"
 
 
 @pytest.mark.integration
