Skip to content

Commit eff5c2a

Browse files
committed
Add a new config to assist in bypassing the default partition validation
1 parent b0ad569 commit eff5c2a

3 files changed

Lines changed: 33 additions & 10 deletions

File tree

hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -480,6 +480,13 @@ public class HoodieWriteConfig extends HoodieConfig {
480480
.sinceVersion("0.11.0")
481481
.withDocumentation("Auto adjust lock configurations when metadata table is enabled and for async table services.");
482482

483+
public static final ConfigProperty<Boolean> SKIP_DEFAULT_PARTITION_VALIDATION = ConfigProperty
484+
.key("hoodie.skip.default.partition.validation")
485+
.defaultValue(false)
486+
.sinceVersion("0.12.0")
487+
.withDocumentation("When table is upgraded from pre 0.12 to 0.12, we check for \"default\" partition and fail if found one. " +
488+
"Users are expected to rewrite the data in those partitions. Enabling this config will bypass this validation");
489+
483490
private ConsistencyGuardConfig consistencyGuardConfig;
484491
private FileSystemRetryConfig fileSystemRetryConfig;
485492

@@ -2038,6 +2045,11 @@ public WriteConcurrencyMode getWriteConcurrencyMode() {
20382045
return WriteConcurrencyMode.fromValue(getString(WRITE_CONCURRENCY_MODE));
20392046
}
20402047

2048+
// misc configs
2049+
public Boolean doSkipDefaultPartitionValidation() {
2050+
return getBoolean(SKIP_DEFAULT_PARTITION_VALIDATION);
2051+
}
2052+
20412053
/**
20422054
* Are any table services configured to run inline for both scheduling and execution?
20432055
*
@@ -2517,6 +2529,11 @@ public Builder withAutoAdjustLockConfigs(boolean autoAdjustLockConfigs) {
25172529
return this;
25182530
}
25192531

2532+
public Builder doSkipDefaultPartitionValidation(boolean skipDefaultPartitionValidation) {
2533+
writeConfig.setValue(SKIP_DEFAULT_PARTITION_VALIDATION, String.valueOf(skipDefaultPartitionValidation));
2534+
return this;
2535+
}
2536+
25202537
protected void setDefaults() {
25212538
writeConfig.setDefaultValue(MARKERS_TYPE, getDefaultMarkersType(engineType));
25222539
// Check for mandatory properties

hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FourToFiveUpgradeHandler.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ public class FourToFiveUpgradeHandler implements UpgradeHandler {
4747
public Map<ConfigProperty, String> upgrade(HoodieWriteConfig config, HoodieEngineContext context, String instantTime, SupportsUpgradeDowngrade upgradeDowngradeHelper) {
4848
try {
4949
FileSystem fs = new Path(config.getBasePath()).getFileSystem(context.getHadoopConf().get());
50-
if (fs.exists(new Path(config.getBasePath() + "/" + DEPRECATED_DEFAULT_PARTITION_PATH))) {
50+
if (!config.doSkipDefaultPartitionValidation() && fs.exists(new Path(config.getBasePath() + "/" + DEPRECATED_DEFAULT_PARTITION_PATH))) {
5151
LOG.error(String.format("\"%s\" partition detected. From 0.12, we are changing the default partition in hudi to %s "
5252
+ " Please read and write back the data in \"%s\" partition in hudi to new partition path \"%s\". \"\n"
5353
+ " Sample spark command to use to re-write the data: \n\n"

hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java

Lines changed: 15 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -331,15 +331,20 @@ public void testUpgradeDowngradeBetweenThreeAndCurrentVersion() throws IOExcepti
331331

332332
@Test
333333
public void testUpgradeFourtoFive() throws Exception {
334-
testUpgradeFourToFiveInternal(false);
334+
testUpgradeFourToFiveInternal(false, false);
335335
}
336336

337337
@Test
338338
public void testUpgradeFourtoFiveWithDefaultPartition() throws Exception {
339-
testUpgradeFourToFiveInternal(true);
339+
testUpgradeFourToFiveInternal(true, false);
340340
}
341341

342-
private void testUpgradeFourToFiveInternal(boolean assertDefaultPartition) throws Exception {
342+
@Test
343+
public void testUpgradeFourtoFiveWithDefaultPartitionWithSkipValidation() throws Exception {
344+
testUpgradeFourToFiveInternal(true, true);
345+
}
346+
347+
private void testUpgradeFourToFiveInternal(boolean assertDefaultPartition, boolean skipDefaultPartitionValidation) throws Exception {
343348
String tableName = metaClient.getTableConfig().getTableName();
344349
// clean up and re instantiate meta client w/ right table props
345350
cleanUp();
@@ -354,7 +359,8 @@ private void testUpgradeFourToFiveInternal(boolean assertDefaultPartition) throw
354359

355360
initMetaClient(getTableType(), properties);
356361
// init config, table and client.
357-
HoodieWriteConfig cfg = getConfigBuilder().withAutoCommit(false).withRollbackUsingMarkers(false).withProps(params).build();
362+
HoodieWriteConfig cfg = getConfigBuilder().withAutoCommit(false).withRollbackUsingMarkers(false)
363+
.doSkipDefaultPartitionValidation(skipDefaultPartitionValidation).withProps(params).build();
358364
SparkRDDWriteClient client = getHoodieWriteClient(cfg);
359365
// Write inserts
360366
doInsert(client);
@@ -367,7 +373,11 @@ private void testUpgradeFourToFiveInternal(boolean assertDefaultPartition) throw
367373
downgradeTableConfigsFromFiveToFour(cfg);
368374

369375
// perform upgrade
370-
if (!assertDefaultPartition) {
376+
if (assertDefaultPartition && !skipDefaultPartitionValidation) {
377+
// if "default" partition is present, upgrade should fail
378+
assertThrows(HoodieException.class, () -> new UpgradeDowngrade(metaClient, cfg, context, SparkUpgradeDowngradeHelper.getInstance())
379+
.run(HoodieTableVersion.FIVE, null), "Upgrade from 4 to 5 is expected to fail if \"default\" partition is present.");
380+
} else {
371381
new UpgradeDowngrade(metaClient, cfg, context, SparkUpgradeDowngradeHelper.getInstance())
372382
.run(HoodieTableVersion.FIVE, null);
373383

@@ -378,10 +388,6 @@ private void testUpgradeFourToFiveInternal(boolean assertDefaultPartition) throw
378388

379389
// verify table props
380390
assertTableProps(cfg);
381-
} else {
382-
// if "default" partition is present, upgrade should fail
383-
assertThrows(HoodieException.class, () -> new UpgradeDowngrade(metaClient, cfg, context, SparkUpgradeDowngradeHelper.getInstance())
384-
.run(HoodieTableVersion.FIVE, null), "Upgrade from 4 to 5 is expected to fail if \"default\" partition is present.");
385391
}
386392
}
387393

0 commit comments

Comments (0)