71 | 71 | import org.apache.hudi.common.util.Option; |
72 | 72 | import org.apache.hudi.common.util.StringUtils; |
73 | 73 | import org.apache.hudi.common.util.collection.Pair; |
| 74 | +import org.apache.hudi.config.HoodieArchivalConfig; |
74 | 75 | import org.apache.hudi.config.HoodieCompactionConfig; |
75 | 76 | import org.apache.hudi.config.HoodieIndexConfig; |
76 | 77 | import org.apache.hudi.config.HoodieClusteringConfig; |
@@ -675,6 +676,62 @@ private void testUpsertsInternal(HoodieWriteConfig config, |
675 | 676 | }).collect(); |
676 | 677 | } |
677 | 678 |
| 679 | + @Test |
| 680 | + public void testRestoreWithSavepointBeyondArchival() throws Exception { |
| 681 | + HoodieWriteConfig config = getConfigBuilder().withRollbackUsingMarkers(true).build(); |
| 682 | + HoodieWriteConfig hoodieWriteConfig = getConfigBuilder(EAGER) |
| 683 | + .withRollbackUsingMarkers(true) |
| 684 | + .withArchivalConfig(HoodieArchivalConfig.newBuilder().withArchiveBeyondSavepoint(true).build()) |
| 685 | + .withProps(config.getProps()).withTimelineLayoutVersion( |
| 686 | + VERSION_0).build(); |
| 687 | + |
| 688 | + HoodieTableMetaClient.withPropertyBuilder() |
| 689 | + .fromMetaClient(metaClient) |
| 690 | + .setTimelineLayoutVersion(VERSION_0) |
| 691 | + .setPopulateMetaFields(config.populateMetaFields()) |
| 692 | + .initTable(metaClient.getHadoopConf(), metaClient.getBasePath()); |
| 693 | + |
| 694 | + SparkRDDWriteClient client = getHoodieWriteClient(hoodieWriteConfig); |
| 695 | + |
| 696 | + // Write 1 (only inserts) |
| 697 | + String newCommitTime = "001"; |
| 698 | + String initCommitTime = "000"; |
| 699 | + int numRecords = 200; |
| 700 | + insertFirstBatch(hoodieWriteConfig, client, newCommitTime, initCommitTime, numRecords, SparkRDDWriteClient::insert, |
| 701 | + false, true, numRecords, config.populateMetaFields()); |
| 702 | + |
| 703 | + // Write 2 (updates) |
| 704 | + String prevCommitTime = newCommitTime; |
| 705 | + newCommitTime = "004"; |
| 706 | + numRecords = 100; |
| 707 | + String commitTimeBetweenPrevAndNew = "002"; |
| 708 | + updateBatch(hoodieWriteConfig, client, newCommitTime, prevCommitTime, |
| 709 | + Option.of(Arrays.asList(commitTimeBetweenPrevAndNew)), initCommitTime, numRecords, SparkRDDWriteClient::upsert, false, true, |
| 710 | + numRecords, 200, 2, config.populateMetaFields()); |
| 711 | + |
| 712 | + // Delete 1 |
| 713 | + prevCommitTime = newCommitTime; |
| 714 | + newCommitTime = "005"; |
| 715 | + numRecords = 50; |
| 716 | + |
| 717 | + deleteBatch(hoodieWriteConfig, client, newCommitTime, prevCommitTime, |
| 718 | + initCommitTime, numRecords, SparkRDDWriteClient::delete, false, true, |
| 719 | + 0, 150, config.populateMetaFields()); |
| 720 | + |
| 721 | + HoodieWriteConfig newConfig = getConfigBuilder().withProps(config.getProps()).withTimelineLayoutVersion( |
| 722 | + TimelineLayoutVersion.CURR_VERSION) |
| 723 | + .withArchivalConfig(HoodieArchivalConfig.newBuilder().withArchiveBeyondSavepoint(true).build()).build(); |
| 724 | + client = getHoodieWriteClient(newConfig); |
| 725 | + |
| 726 | + client.savepoint("004", "user1", "comment1"); // savepoint the update commit "004" before attempting a restore to it |
| 727 | + |
| 728 | + // verify that restore fails when "hoodie.archive.beyond.savepoint" is enabled. |
| 729 | + SparkRDDWriteClient finalClient = client; |
| 730 | + assertThrows(IllegalArgumentException.class, () -> { |
| 731 | + finalClient.restoreToSavepoint("004"); |
| 732 | + }, "Restore should not be supported when " + HoodieArchivalConfig.ARCHIVE_BEYOND_SAVEPOINT.key() + " is enabled"); |
| 733 | + } |
| 734 | + |
678 | 735 | /** |
679 | 736 | * Test Insert API for HoodieConcatHandle. |
680 | 737 | */ |