Commit 43bc736 (1 parent: bf36058)

Commit message: format
1 file changed: 16 additions (+), 14 deletions (-)

parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetFileWriter.java

@@ -1200,13 +1200,13 @@ public void writeDataPageV2(
     int compressedSize =
         toIntWithCheck(compressedData.size() + repetitionLevels.size() + definitionLevels.size(), "page");

-    int uncompressedSize =
-        toIntWithCheck(uncompressedDataSize + repetitionLevels.size() + definitionLevels.size(), "page");
+    int uncompressedSize =
+        toIntWithCheck(uncompressedDataSize + repetitionLevels.size() + definitionLevels.size(), "page");

-    long beforeHeader = out.getPos();
-    if (currentChunkFirstDataPage < 0) {
-      currentChunkFirstDataPage = beforeHeader;
-    }
+    long beforeHeader = out.getPos();
+    if (currentChunkFirstDataPage < 0) {
+      currentChunkFirstDataPage = beforeHeader;
+    }

     if (pageWriteChecksumEnabled) {
       crc.reset();
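
Context for the hunk above: the page's compressed and uncompressed sizes are narrowed from long to int because the page header stores sizes as 32-bit values. Below is a minimal sketch of the assumed behavior behind the private toIntWithCheck helper; the class name and exception type are illustrative, not ParquetFileWriter's actual code.

    // Sketch of an overflow-checked long -> int narrowing, assuming
    // toIntWithCheck rejects totals that cannot fit a 32-bit page size.
    public class ToIntWithCheckSketch {
      static int toIntWithCheck(long size, String obj) {
        if (size > Integer.MAX_VALUE) {
          // the real helper throws a Parquet-specific exception; this is illustrative
          throw new IllegalArgumentException(
              "Cannot write " + obj + " larger than " + Integer.MAX_VALUE + " bytes: " + size);
        }
        return (int) size;
      }

      public static void main(String[] args) {
        System.out.println(toIntWithCheck(4096L, "page")); // prints 4096
      }
    }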
@@ -1247,16 +1247,17 @@ public void writeDataPageV2(
           pageHeaderAAD);
     }

-    long headersSize = out.getPos() - beforeHeader;
-    this.uncompressedLength += uncompressedSize + headersSize;
-    this.compressedLength += compressedSize + headersSize;
+    long headersSize = out.getPos() - beforeHeader;
+    this.uncompressedLength += uncompressedSize + headersSize;
+    this.compressedLength += compressedSize + headersSize;

-    mergeColumnStatistics(statistics, sizeStatistics);
+    mergeColumnStatistics(statistics, sizeStatistics);

-    currentEncodings.add(dataEncoding);
-    encodingStatsBuilder.addDataEncoding(dataEncoding);
+    currentEncodings.add(dataEncoding);
+    encodingStatsBuilder.addDataEncoding(dataEncoding);

-    BytesInput.concat(repetitionLevels, definitionLevels, compressedData).writeAllTo(out);
+    BytesInput.concat(repetitionLevels, definitionLevels, compressedData)
+        .writeAllTo(out);

     offsetIndexBuilder.add(
         toIntWithCheck(out.getPos() - beforeHeader, "page"),
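
The concat-then-write statement rewrapped in this hunk reflects the v2 data page body layout: repetition levels, then definition levels, then the (possibly compressed) values, all written after the page header. Since v2 level bytes are not compressed, both size computations add them in unchanged. A minimal runnable sketch using Parquet's BytesInput, with placeholder byte arrays standing in for real level and data buffers:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.parquet.bytes.BytesInput;

    // Sketch only: shows the byte order the writer produces for a v2 page body.
    public class DataPageV2BodySketch {
      public static void main(String[] args) throws IOException {
        // placeholder buffers; real pages carry RLE-encoded levels and encoded values
        BytesInput repetitionLevels = BytesInput.from(new byte[] {0, 1});
        BytesInput definitionLevels = BytesInput.from(new byte[] {1, 1});
        BytesInput compressedData = BytesInput.from(new byte[] {42, 43, 44});

        // v2 keeps level bytes uncompressed, so both page sizes include them as-is
        long compressedSize =
            compressedData.size() + repetitionLevels.size() + definitionLevels.size();

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        BytesInput.concat(repetitionLevels, definitionLevels, compressedData).writeAllTo(out);
        System.out.println("body bytes = " + out.size() + ", expected = " + compressedSize); // 7, 7
      }
    }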
@@ -1367,7 +1368,8 @@ void writeColumnChunk(
     // write bloom filter if one of data pages is not dictionary encoded
     boolean isWriteBloomFilter = false;
     for (Encoding encoding : dataEncodings) {
-      // dictionary encoding: `PLAIN_DICTIONARY` is used in parquet v1, `RLE_DICTIONARY` is used in parquet v2
+      // dictionary encoding: `PLAIN_DICTIONARY` is used in parquet v1, `RLE_DICTIONARY` is used in
+      // parquet v2
       if (encoding != Encoding.PLAIN_DICTIONARY && encoding != Encoding.RLE_DICTIONARY) {
         isWriteBloomFilter = true;
         break;
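
The rewrapped comment above documents the check that skips bloom filter writing when every data page is dictionary encoded, since the dictionary already enumerates the chunk's distinct values. A standalone sketch of that decision follows; the shouldWriteBloomFilter helper is hypothetical, assuming dataEncodings is the set of encodings observed on data pages:

    import java.util.Set;
    import org.apache.parquet.column.Encoding;

    // Hypothetical standalone helper mirroring the loop in writeColumnChunk.
    public class BloomFilterDecisionSketch {
      static boolean shouldWriteBloomFilter(Set<Encoding> dataEncodings) {
        for (Encoding encoding : dataEncodings) {
          // PLAIN_DICTIONARY (format v1) and RLE_DICTIONARY (format v2) both
          // mean the page is dictionary encoded
          if (encoding != Encoding.PLAIN_DICTIONARY && encoding != Encoding.RLE_DICTIONARY) {
            return true; // at least one page fell back to a non-dictionary encoding
          }
        }
        return false; // all pages dictionary encoded: the dictionary covers all values
      }

      public static void main(String[] args) {
        System.out.println(shouldWriteBloomFilter(Set.of(Encoding.RLE_DICTIONARY))); // false
        System.out.println(shouldWriteBloomFilter(Set.of(Encoding.RLE_DICTIONARY, Encoding.PLAIN))); // true
      }
    }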
