Skip to content

Commit 6082e9c

Browse files
[HUDI-6101] Use UnBoundedCompactionStrategy for MDT compactions (#8496)
1 parent ab86512 commit 6082e9c

File tree

1 file changed

+5
-1
lines changed

1 file changed

+5
-1
lines changed

hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@
7272
import org.apache.hudi.exception.HoodieMetadataException;
7373
import org.apache.hudi.hadoop.CachingPath;
7474
import org.apache.hudi.hadoop.SerializablePath;
75-
75+
import org.apache.hudi.table.action.compact.strategy.UnBoundedCompactionStrategy;
7676
import org.apache.avro.specific.SpecificRecordBase;
7777
import org.apache.hadoop.conf.Configuration;
7878
import org.apache.hadoop.fs.FileStatus;
@@ -296,6 +296,10 @@ private HoodieWriteConfig createMetadataWriteConfig(
296296
.withInlineCompaction(false)
297297
.withMaxNumDeltaCommitsBeforeCompaction(writeConfig.getMetadataCompactDeltaCommitMax())
298298
.withEnableOptimizedLogBlocksScan(String.valueOf(writeConfig.enableOptimizedLogBlocksScan()))
299+
// Compaction on metadata table is used as a barrier for archiving on main dataset and for validating the
300+
// delta commits having corresponding completed commits. Therefore, we need to compact all file slices of all
301+
// partitions together requiring UnBoundedCompactionStrategy.
302+
.withCompactionStrategy(new UnBoundedCompactionStrategy())
299303
.build())
300304
.withParallelism(parallelism, parallelism)
301305
.withDeleteParallelism(parallelism)

0 commit comments

Comments
 (0)