diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java index 65b01bb2545e4..77b79d8d556e0 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java @@ -18,8 +18,6 @@ package org.apache.hudi.cli.commands; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Path; import org.apache.hudi.cli.HoodieCLI; import org.apache.hudi.cli.HoodiePrintHelper; import org.apache.hudi.cli.TableHeader; @@ -35,6 +33,9 @@ import org.apache.hudi.metadata.HoodieBackedTableMetadata; import org.apache.hudi.metadata.HoodieTableMetadata; import org.apache.hudi.metadata.SparkHoodieBackedTableMetadataWriter; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.spark.api.java.JavaSparkContext; @@ -122,7 +123,7 @@ public String create( HoodieCLI.fs.mkdirs(metadataPath); } - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); HoodieWriteConfig writeConfig = getWriteConfig(); initJavaSparkContext(Option.of(master)); SparkHoodieBackedTableMetadataWriter.create(HoodieCLI.conf, writeConfig, new HoodieSparkEngineContext(jsc)); @@ -158,7 +159,7 @@ public String init(@ShellOption(value = "--sparkMaster", defaultValue = SparkUti throw new RuntimeException("Metadata directory (" + metadataPath.toString() + ") does not exist."); } - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); if (!readOnly) { HoodieWriteConfig writeConfig = getWriteConfig(); initJavaSparkContext(Option.of(master)); @@ -206,7 +207,7 @@ public String listPartitions( return "[ERROR] Metadata Table not enabled/initialized\n\n"; } - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); List partitions = metadata.getAllPartitionPaths(); LOG.debug("Took " + timer.endTimer() + " ms"); @@ -239,7 +240,7 @@ public String listFiles( partitionPath = new Path(HoodieCLI.basePath, partition); } - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); FileStatus[] statuses = metaReader.getAllFilesInPartition(partitionPath); LOG.debug("Took " + timer.endTimer() + " ms"); @@ -271,7 +272,7 @@ public String validateFiles( HoodieBackedTableMetadata fsMetaReader = new HoodieBackedTableMetadata( new HoodieLocalEngineContext(HoodieCLI.conf), fsConfig, HoodieCLI.basePath, "/tmp"); - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); List metadataPartitions = metadataReader.getAllPartitionPaths(); LOG.debug("Listing partitions Took " + timer.endTimer() + " ms"); List fsPartitions = fsMetaReader.getAllPartitionPaths(); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndexUtils.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndexUtils.java index 61be856d3662c..d6872276ac3fd 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndexUtils.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndexUtils.java @@ -18,8 +18,6 @@ package org.apache.hudi.index; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; import org.apache.hudi.common.engine.HoodieEngineContext; import 
org.apache.hudi.common.fs.FSUtils; import org.apache.hudi.common.model.FileSlice; @@ -35,6 +33,9 @@ import org.apache.hudi.io.storage.HoodieFileReader; import org.apache.hudi.io.storage.HoodieFileReaderFactory; import org.apache.hudi.table.HoodieTable; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; @@ -152,7 +153,7 @@ public static List filterKeysFromFile(Path filePath, List candid try { // Load all rowKeys from the file, to double-confirm if (!candidateRecordKeys.isEmpty()) { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); HoodieFileReader fileReader = HoodieFileReaderFactory.getFileReader(configuration, filePath); Set fileRowKeys = fileReader.filterRowKeys(new TreeSet<>(candidateRecordKeys)); foundRecordKeys.addAll(fileRowKeys); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java index a38ae7f1f149b..df629b83dcaa3 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java @@ -60,7 +60,7 @@ public HoodieKeyLookupHandle(HoodieWriteConfig config, HoodieTable h private BloomFilter getBloomFilter() { BloomFilter bloomFilter = null; - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); try { if (config.getBloomIndexUseMetadata() && hoodieTable.getMetaClient().getTableConfig().getMetadataPartitions() diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java index abf5c0face155..807f14ca2883c 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java @@ -49,8 +49,8 @@ import java.io.IOException; import java.util.Collections; -import java.util.List; import java.util.HashMap; +import java.util.List; import static org.apache.hudi.common.util.StringUtils.isNullOrEmpty; @@ -124,7 +124,7 @@ protected HoodieWriteHandle(HoodieWriteConfig config, String instantTime, String this.tableSchemaWithMetaFields = HoodieAvroUtils.addMetadataFields(tableSchema, config.allowOperationMetadataField()); this.writeSchema = overriddenSchema.orElseGet(() -> getWriteSchema(config)); this.writeSchemaWithMetaFields = HoodieAvroUtils.addMetadataFields(writeSchema, config.allowOperationMetadataField()); - this.timer = new HoodieTimer().startTimer(); + this.timer = HoodieTimer.start(); this.writeStatus = (WriteStatus) ReflectionUtils.loadClass(config.getWriteStatusClassName(), !hoodieTable.getIndex().isImplicitWithStorage(), config.getWriteStatusFailureFraction()); this.taskContextSupplier = taskContextSupplier; diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java index 7da0a4a57ade7..15759a570f4f9 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java +++ 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java @@ -382,7 +382,7 @@ public void initTableMetadata() { protected void initializeIfNeeded(HoodieTableMetaClient dataMetaClient, Option actionMetadata, Option inflightInstantTimestamp) throws IOException { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); boolean exists = metadataTableExists(dataMetaClient, actionMetadata); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanActionExecutor.java index 56b01ec77b62b..750e687947b8b 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanActionExecutor.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanActionExecutor.java @@ -18,9 +18,6 @@ package org.apache.hudi.table.action.clean; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; - import org.apache.hudi.avro.model.HoodieActionInstant; import org.apache.hudi.avro.model.HoodieCleanMetadata; import org.apache.hudi.avro.model.HoodieCleanerPlan; @@ -43,6 +40,8 @@ import org.apache.hudi.table.HoodieTable; import org.apache.hudi.table.action.BaseActionExecutor; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; @@ -198,8 +197,7 @@ private HoodieCleanMetadata runClean(HoodieTable table, HoodieInstan HoodieInstant inflightInstant = null; try { - final HoodieTimer timer = new HoodieTimer(); - timer.startTimer(); + final HoodieTimer timer = HoodieTimer.start(); if (cleanInstant.isRequested()) { inflightInstant = table.getActiveTimeline().transitionCleanRequestedToInflight(cleanInstant, TimelineMetadataUtils.serializeCleanerPlan(cleanerPlan)); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/RunIndexActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/RunIndexActionExecutor.java index 96d46928e7c3a..43c7ed459d20c 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/RunIndexActionExecutor.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/RunIndexActionExecutor.java @@ -104,8 +104,7 @@ public RunIndexActionExecutor(HoodieEngineContext context, HoodieWriteConfig con @Override public Option execute() { - HoodieTimer indexTimer = new HoodieTimer(); - indexTimer.startTimer(); + HoodieTimer indexTimer = HoodieTimer.start(); HoodieInstant indexInstant = validateAndGetIndexInstant(); // read HoodieIndexPlan diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/restore/BaseRestoreActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/restore/BaseRestoreActionExecutor.java index 62ecbe2a31f8f..8a577021d249f 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/restore/BaseRestoreActionExecutor.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/restore/BaseRestoreActionExecutor.java @@ -68,8 +68,7 @@ public BaseRestoreActionExecutor(HoodieEngineContext context, @Override public HoodieRestoreMetadata execute() { - HoodieTimer restoreTimer = new HoodieTimer(); - restoreTimer.startTimer(); + HoodieTimer 
restoreTimer = HoodieTimer.start(); Option restoreInstant = table.getRestoreTimeline() .filterInflightsAndRequested() diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackActionExecutor.java index 4add51886fe3a..ef2c79051611d 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackActionExecutor.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackActionExecutor.java @@ -104,7 +104,7 @@ private HoodieRollbackMetadata runRollback(HoodieTable table, Hoodie ? table.getActiveTimeline().transitionRollbackRequestedToInflight(rollbackInstant) : rollbackInstant; - HoodieTimer rollbackTimer = new HoodieTimer().startTimer(); + HoodieTimer rollbackTimer = HoodieTimer.start(); List stats = doRollbackAndGetStats(rollbackPlan); HoodieRollbackMetadata rollbackMetadata = TimelineMetadataUtils.convertRollbackMetadata( instantTime, diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/CopyOnWriteRollbackActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/CopyOnWriteRollbackActionExecutor.java index e766dbdc81c09..64b3f483e6486 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/CopyOnWriteRollbackActionExecutor.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/CopyOnWriteRollbackActionExecutor.java @@ -61,8 +61,7 @@ public CopyOnWriteRollbackActionExecutor(HoodieEngineContext context, @Override protected List executeRollback(HoodieRollbackPlan hoodieRollbackPlan) { - HoodieTimer rollbackTimer = new HoodieTimer(); - rollbackTimer.startTimer(); + HoodieTimer rollbackTimer = HoodieTimer.start(); List stats = new ArrayList<>(); HoodieActiveTimeline activeTimeline = table.getActiveTimeline(); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/MergeOnReadRollbackActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/MergeOnReadRollbackActionExecutor.java index 46d4d84ebf21d..097897bd1524a 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/MergeOnReadRollbackActionExecutor.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/MergeOnReadRollbackActionExecutor.java @@ -61,8 +61,7 @@ public MergeOnReadRollbackActionExecutor(HoodieEngineContext context, @Override protected List executeRollback(HoodieRollbackPlan hoodieRollbackPlan) { - HoodieTimer rollbackTimer = new HoodieTimer(); - rollbackTimer.startTimer(); + HoodieTimer rollbackTimer = HoodieTimer.start(); LOG.info("Rolling back instant " + instantToRollback); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/DirectWriteMarkers.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/DirectWriteMarkers.java index e813382079634..f1a7cde432315 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/DirectWriteMarkers.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/DirectWriteMarkers.java @@ -156,7 +156,7 @@ protected Option create(String partitionPath, String dataFileName, IOType } private Option create(Path 
markerPath, boolean checkIfExists) { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); Path dirPath = markerPath.getParent(); try { if (!fs.exists(dirPath)) { diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java index 4879e0bc60c94..2de9c9fdb8df5 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java @@ -129,7 +129,7 @@ public Set allMarkerFilePaths() { @Override protected Option create(String partitionPath, String dataFileName, IOType type, boolean checkIfExists) { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); String markerFileName = getMarkerFileName(dataFileName, type); Map paramsMap = new HashMap<>(); diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java index ec059b23cd97f..231dee2c7d4bc 100644 --- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java +++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java @@ -82,8 +82,7 @@ public HoodieRowDataCreateHandle(HoodieTable table, HoodieWriteConfig writeConfi this.taskEpochId = taskEpochId; this.fileId = fileId; this.preserveHoodieMetadata = preserveHoodieMetadata; - this.currTimer = new HoodieTimer(); - this.currTimer.startTimer(); + this.currTimer = HoodieTimer.start(); this.fs = table.getMetaClient().getFs(); this.path = makeNewPath(partitionPath); this.writeStatus = new HoodieInternalWriteStatus(!table.getIndex().isImplicitWithStorage(), diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/validator/SparkPreCommitValidator.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/validator/SparkPreCommitValidator.java index f08d11b571492..c1f0f1a83b4cb 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/validator/SparkPreCommitValidator.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/validator/SparkPreCommitValidator.java @@ -70,7 +70,7 @@ protected Set getPartitionsModified(HoodieWriteMetadata writeResult) * Throw HoodieValidationException if any unexpected data is written (Example: data files are not readable for some reason). 
*/ public void validate(String instantTime, HoodieWriteMetadata writeResult, Dataset before, Dataset after) throws HoodieValidationException { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); try { validateRecordsBeforeAndAfter(before, after, getPartitionsModified(writeResult)); } finally { diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkDeletePartitionCommitActionExecutor.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkDeletePartitionCommitActionExecutor.java index 149aef03e238a..7cc0efb34efb9 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkDeletePartitionCommitActionExecutor.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkDeletePartitionCommitActionExecutor.java @@ -18,12 +18,6 @@ package org.apache.hudi.table.action.commit; -import java.time.Duration; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import org.apache.hadoop.fs.Path; import org.apache.hudi.avro.model.HoodieRequestedReplaceMetadata; import org.apache.hudi.client.WriteStatus; import org.apache.hudi.common.data.HoodieData; @@ -42,6 +36,14 @@ import org.apache.hudi.table.WorkloadStat; import org.apache.hudi.table.action.HoodieWriteMetadata; +import org.apache.hadoop.fs.Path; + +import java.time.Duration; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import static org.apache.hudi.common.table.timeline.HoodieInstant.State.REQUESTED; import static org.apache.hudi.common.table.timeline.HoodieTimeline.REPLACE_COMMIT_ACTION; @@ -59,7 +61,7 @@ public SparkDeletePartitionCommitActionExecutor(HoodieEngineContext context, @Override public HoodieWriteMetadata> execute() { try { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); context.setJobStatus(this.getClass().getSimpleName(), "Gather all file ids from all deleting partitions."); Map> partitionToReplaceFileIds = HoodieJavaPairRDD.getJavaPairRDD(context.parallelize(partitions).distinct() diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java index 8ea6c2adf895f..a82904a8f4d43 100644 --- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java +++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java @@ -2400,7 +2400,7 @@ private void validateMetadata(SparkRDDWriteClient testClient) throws IOException return; } - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(jsc); // Partitions should match diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/testutils/HoodieClientTestHarness.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/testutils/HoodieClientTestHarness.java index a0c093be16b8a..a41c62cdcee0c 100644 --- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/testutils/HoodieClientTestHarness.java +++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/testutils/HoodieClientTestHarness.java @@ -578,7 +578,7 @@ public void 
validateMetadata(HoodieTestTable testTable, List inflightCom } assertEquals(inflightCommits, testTable.inflightCommits()); - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(jsc); // Partitions should match diff --git a/hudi-common/src/main/java/org/apache/hudi/common/fs/HoodieWrapperFileSystem.java b/hudi-common/src/main/java/org/apache/hudi/common/fs/HoodieWrapperFileSystem.java index 2979696be7157..fba24097fb6fb 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/fs/HoodieWrapperFileSystem.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/fs/HoodieWrapperFileSystem.java @@ -24,6 +24,7 @@ import org.apache.hudi.common.util.Option; import org.apache.hudi.exception.HoodieException; import org.apache.hudi.exception.HoodieIOException; +import org.apache.hudi.hadoop.CachingPath; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; @@ -49,7 +50,6 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; -import org.apache.hudi.hadoop.CachingPath; import java.io.IOException; import java.net.URI; @@ -102,7 +102,7 @@ private static Registry getMetricRegistryForPath(Path p) { } protected static R executeFuncWithTimeMetrics(String metricName, Path p, CheckedFunction func) throws IOException { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); R res = func.get(); Registry registry = getMetricRegistryForPath(p); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java index a614523ba066e..625277a375dbb 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java @@ -132,7 +132,7 @@ protected void refreshTimeline(HoodieTimeline visibleActiveTimeline) { * Adds the provided statuses into the file system view, and also caches it inside this object. */ public List addFilesToView(FileStatus[] statuses) { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); List fileGroups = buildFileGroups(statuses, visibleCommitsAndCompactionTimeline, true); long fgBuildTimeTakenMs = timer.endTimer(); timer.startTimer(); @@ -216,8 +216,7 @@ protected List buildFileGroups(Stream baseFileS * Get replaced instant for each file group by looking at all commit instants. 
*/ private void resetFileGroupsReplaced(HoodieTimeline timeline) { - HoodieTimer hoodieTimer = new HoodieTimer(); - hoodieTimer.startTimer(); + HoodieTimer hoodieTimer = HoodieTimer.start(); // for each REPLACE instant, get map of (partitionPath -> deleteFileGroup) HoodieTimeline replacedTimeline = timeline.getCompletedReplaceTimeline(); Stream> resultStream = replacedTimeline.getInstants().flatMap(instant -> { diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/RocksDBDAO.java b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/RocksDBDAO.java index fe40d98594287..639b5ed4157be 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/RocksDBDAO.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/RocksDBDAO.java @@ -334,8 +334,7 @@ public T get(String columnFamil */ public Stream> prefixSearch(String columnFamilyName, String prefix) { ValidationUtils.checkArgument(!closed); - final HoodieTimer timer = new HoodieTimer(); - timer.startTimer(); + final HoodieTimer timer = HoodieTimer.start(); long timeTakenMicro = 0; List> results = new LinkedList<>(); try (final RocksIterator it = getRocksDB().newIterator(managedHandlesMap.get(columnFamilyName))) { diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java b/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java index 37a209b0a8719..e9474214d436b 100644 --- a/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java +++ b/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java @@ -19,7 +19,6 @@ package org.apache.hudi.metadata; -import org.apache.hadoop.fs.FileSystem; import org.apache.hudi.avro.model.HoodieMetadataBloomFilter; import org.apache.hudi.avro.model.HoodieMetadataColumnStats; import org.apache.hudi.common.bloom.BloomFilter; @@ -42,11 +41,12 @@ import org.apache.hudi.common.util.hash.PartitionIndexID; import org.apache.hudi.exception.HoodieIOException; import org.apache.hudi.exception.HoodieMetadataException; +import org.apache.hudi.hadoop.CachingPath; +import org.apache.hudi.hadoop.SerializablePath; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hudi.hadoop.CachingPath; -import org.apache.hudi.hadoop.SerializablePath; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; @@ -198,7 +198,7 @@ public Map, BloomFilter> getBloomFilters(final List partitionIDFileIDSortedStrings = new TreeSet<>(); Map> fileToKeyMap = new HashMap<>(); partitionNameFileNameList.forEach(partitionNameFileNamePair -> { @@ -258,7 +258,7 @@ public Map, HoodieMetadataColumnStats> getColumnStats(final } List columnStatKeys = new ArrayList<>(sortedKeys); - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); List>>> hoodieRecordList = getRecordsByKeys(columnStatKeys, MetadataPartitionType.COLUMN_STATS.getPartitionPath()); metrics.ifPresent(m -> m.updateMetrics(HoodieMetadataMetrics.LOOKUP_COLUMN_STATS_METADATA_STR, timer.endTimer())); @@ -287,7 +287,7 @@ public Map, HoodieMetadataColumnStats> getColumnStats(final * Returns a list of all partitions. 
*/ protected List fetchAllPartitionPaths() { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); Option> recordOpt = getRecordByKey(RECORDKEY_PARTITION_LIST, MetadataPartitionType.FILES.getPartitionPath()); metrics.ifPresent(m -> m.updateMetrics(HoodieMetadataMetrics.LOOKUP_PARTITIONS_STR, timer.endTimer())); @@ -319,7 +319,7 @@ FileStatus[] fetchAllFilesInPartition(Path partitionPath) throws IOException { String relativePartitionPath = FSUtils.getRelativePartitionPath(dataBasePath.get(), partitionPath); String recordKey = relativePartitionPath.isEmpty() ? NON_PARTITIONED_NAME : relativePartitionPath; - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); Option> recordOpt = getRecordByKey(recordKey, MetadataPartitionType.FILES.getPartitionPath()); metrics.ifPresent(m -> m.updateMetrics(HoodieMetadataMetrics.LOOKUP_FILES_STR, timer.endTimer())); @@ -349,7 +349,7 @@ Map fetchAllFilesInPartitionPaths(List partitionPath }, Function.identity()) ); - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); List>>> partitionIdRecordPairs = getRecordsByKeys(new ArrayList<>(partitionIdToPathMap.keySet()), MetadataPartitionType.FILES.getPartitionPath()); metrics.ifPresent(m -> m.updateMetrics(HoodieMetadataMetrics.LOOKUP_FILES_STR, timer.endTimer())); diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java index bcaf2bcab349f..001c0493373d0 100644 --- a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java +++ b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java @@ -244,8 +244,7 @@ private Map>> readLogRecords( List keys, boolean fullKey, List timings) { - HoodieTimer timer = new HoodieTimer().startTimer(); - timer.startTimer(); + HoodieTimer timer = HoodieTimer.start(); if (logRecordScanner == null) { timings.add(timer.endTimer()); @@ -285,8 +284,7 @@ private List>>> readFrom Map>> logRecords, List timings, String partitionName) throws IOException { - HoodieTimer timer = new HoodieTimer().startTimer(); - timer.startTimer(); + HoodieTimer timer = HoodieTimer.start(); if (baseFileReader == null) { // No base file at all @@ -304,8 +302,7 @@ private List>>> readFrom } } - HoodieTimer readTimer = new HoodieTimer(); - readTimer.startTimer(); + HoodieTimer readTimer = HoodieTimer.start(); Map> records = fetchBaseFileRecordsByKeys(baseFileReader, keys, fullKeys, partitionName); @@ -408,7 +405,7 @@ private Pair getOrCreateR private Pair openReaders(String partitionName, FileSlice slice) { try { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); // Open base file reader Pair baseFileReaderOpenTimePair = getBaseFileReader(slice, timer); HoodieFileReader baseFileReader = baseFileReaderOpenTimePair.getKey(); @@ -472,7 +469,7 @@ private Set getValidInstantTimestamps() { public Pair getLogRecordScanner(List logFiles, String partitionName, Option allowFullScanOverride) { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); List sortedLogFilePaths = logFiles.stream() .sorted(HoodieLogFile.getLogFileComparator()) .map(o -> o.getPath().toString()) diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala 
b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala index db48f224f2c98..e5497d030a755 100644 --- a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala +++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala @@ -18,15 +18,13 @@ package org.apache.hudi import org.apache.avro.Schema -import org.apache.hudi.common.model.{HoodieCommitMetadata, HoodieFileFormat, HoodieRecord, HoodieReplaceCommitMetadata} -import org.apache.hudi.common.table.{HoodieTableMetaClient, TableSchemaResolver} - -import java.util.stream.Collectors import org.apache.hadoop.fs.{GlobPattern, Path} import org.apache.hudi.client.common.HoodieSparkEngineContext import org.apache.hudi.client.utils.SparkInternalSchemaConverter import org.apache.hudi.common.fs.FSUtils +import org.apache.hudi.common.model.{HoodieCommitMetadata, HoodieFileFormat, HoodieRecord, HoodieReplaceCommitMetadata} import org.apache.hudi.common.table.timeline.{HoodieInstant, HoodieTimeline} +import org.apache.hudi.common.table.{HoodieTableMetaClient, TableSchemaResolver} import org.apache.hudi.common.util.{HoodieTimer, InternalSchemaCache} import org.apache.hudi.config.HoodieWriteConfig import org.apache.hudi.exception.HoodieException @@ -41,6 +39,7 @@ import org.apache.spark.sql.sources.{BaseRelation, TableScan} import org.apache.spark.sql.types.StructType import org.apache.spark.sql.{DataFrame, Row, SQLContext} +import java.util.stream.Collectors import scala.collection.JavaConversions._ import scala.collection.mutable @@ -219,7 +218,7 @@ class IncrementalRelation(val sqlContext: SQLContext, if (fallbackToFullTableScan) { val fs = basePath.getFileSystem(sqlContext.sparkContext.hadoopConfiguration); - val timer = new HoodieTimer().startTimer(); + val timer = HoodieTimer.start val allFilesToCheck = filteredMetaBootstrapFullPaths ++ filteredRegularFullPaths val firstNotFoundPath = allFilesToCheck.find(path => !fs.exists(new Path(path))) diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/CreateMetadataTableProcedure.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/CreateMetadataTableProcedure.scala index 3a16d8319a05d..bbed979f5cd46 100644 --- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/CreateMetadataTableProcedure.scala +++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/CreateMetadataTableProcedure.scala @@ -61,7 +61,7 @@ class CreateMetadataTableProcedure extends BaseProcedure with ProcedureBuilder w // Metadata directory does not exist yet metaClient.getFs.mkdirs(metadataPath) } - val timer = new HoodieTimer().startTimer + val timer = HoodieTimer.start val writeConfig = getWriteConfig(basePath) SparkHoodieBackedTableMetadataWriter.create(metaClient.getHadoopConf, writeConfig, new HoodieSparkEngineContext(jsc)) Seq(Row("Created Metadata Table in " + metadataPath + " (duration=" + timer.endTimer / 1000.0 + "secs)")) diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/InitMetadataTableProcedure.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/InitMetadataTableProcedure.scala index 73d1128a98d08..3b875e77ffa17 100644 --- 
a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/InitMetadataTableProcedure.scala +++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/InitMetadataTableProcedure.scala @@ -61,7 +61,7 @@ class InitMetadataTableProcedure extends BaseProcedure with ProcedureBuilder wit throw new RuntimeException("Metadata directory (" + metadataPath.toString + ") does not exist.") } - val timer = new HoodieTimer().startTimer + val timer = HoodieTimer.start if (!readOnly) { val writeConfig = getWriteConfig(basePath) SparkHoodieBackedTableMetadataWriter.create(metaClient.getHadoopConf, writeConfig, new HoodieSparkEngineContext(jsc)) diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RunCompactionProcedure.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RunCompactionProcedure.scala index 3e5a7e29e4022..bd2e863b1ea69 100644 --- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RunCompactionProcedure.scala +++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RunCompactionProcedure.scala @@ -28,7 +28,6 @@ import org.apache.spark.sql.Row import org.apache.spark.sql.types._ import java.util.function.Supplier - import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ @@ -107,8 +106,7 @@ class RunCompactionProcedure extends BaseProcedure with ProcedureBuilder with Sp logInfo(s"No need to compaction on $basePath") } else { logInfo(s"Run compaction at instants: [${willCompactionInstants.mkString(",")}] on $basePath") - val timer = new HoodieTimer - timer.startTimer() + val timer = HoodieTimer.start willCompactionInstants.foreach { compactionInstant => val writeResponse = client.compact(compactionInstant) handleResponse(writeResponse.getCommitMetadata.get()) diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowMetadataTableFilesProcedure.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowMetadataTableFilesProcedure.scala index b30203dc06e8b..edd0439a2bd62 100644 --- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowMetadataTableFilesProcedure.scala +++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowMetadataTableFilesProcedure.scala @@ -65,7 +65,7 @@ class ShowMetadataTableFilesProcedure() extends BaseProcedure with ProcedureBuil partitionPath = new Path(basePath, partition) } - val timer = new HoodieTimer().startTimer + val timer = HoodieTimer.start val statuses = metaReader.getAllFilesInPartition(partitionPath) logDebug("Took " + timer.endTimer + " ms") diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowMetadataTablePartitionsProcedure.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowMetadataTablePartitionsProcedure.scala index f2eaa7ad838fe..f9a676abc9129 100644 --- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowMetadataTablePartitionsProcedure.scala +++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowMetadataTablePartitionsProcedure.scala @@ -57,7 +57,7 @@ class 
ShowMetadataTablePartitionsProcedure() extends BaseProcedure with Procedur throw new HoodieException(s"Metadata Table not enabled/initialized.") } - val timer = new HoodieTimer().startTimer + val timer = HoodieTimer.start val partitions = metadata.getAllPartitionPaths Collections.sort(partitions) logDebug("Took " + timer.endTimer + " ms") diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ValidateMetadataTableFilesProcedure.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ValidateMetadataTableFilesProcedure.scala index 81540d9684665..6d7457772bd6c 100644 --- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ValidateMetadataTableFilesProcedure.scala +++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ValidateMetadataTableFilesProcedure.scala @@ -73,7 +73,7 @@ class ValidateMetadataTableFilesProcedure() extends BaseProcedure with Procedure val fsMetaReader = new HoodieBackedTableMetadata(new HoodieLocalEngineContext(metaClient.getHadoopConf), fsConfig, basePath, "/tmp") - val timer = new HoodieTimer().startTimer + val timer = HoodieTimer.start val metadataPartitions = metadataReader.getAllPartitionPaths logDebug("Listing partitions Took " + timer.endTimer + " ms") val fsPartitions = fsMetaReader.getAllPartitionPaths diff --git a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HiveQueryDDLExecutor.java b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HiveQueryDDLExecutor.java index 90efd2701c793..93ae3cfbf7383 100644 --- a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HiveQueryDDLExecutor.java +++ b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HiveQueryDDLExecutor.java @@ -91,7 +91,7 @@ private List updateHiveSQLs(List sqls) { try { for (String sql : sqls) { if (hiveDriver != null) { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); responses.add(hiveDriver.run(sql)); LOG.info(String.format("Time taken to execute [%s]: %s ms", sql, timer.endTimer())); } diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/RequestHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/RequestHandler.java index b53c2534bc887..5595c2b8481c5 100644 --- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/RequestHandler.java +++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/RequestHandler.java @@ -114,7 +114,7 @@ public RequestHandler(Javalin app, Configuration conf, TimelineService.Config ti public static String jsonifyResult( Context ctx, Object obj, Registry metricsRegistry, ObjectMapper objectMapper, Logger logger) throws JsonProcessingException { - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); boolean prettyPrint = ctx.queryParam("pretty") != null; String result = prettyPrint ? 
objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(obj) diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/BatchedMarkerCreationRunnable.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/BatchedMarkerCreationRunnable.java index 50b9913f5cefe..2416a5590c056 100644 --- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/BatchedMarkerCreationRunnable.java +++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/BatchedMarkerCreationRunnable.java @@ -40,7 +40,7 @@ public BatchedMarkerCreationRunnable(List requestC @Override public void run() { LOG.debug("Start processing create marker requests"); - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); for (BatchedMarkerCreationContext requestContext : requestContextList) { requestContext.getMarkerDirState().processMarkerCreationRequests( diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerCreationFuture.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerCreationFuture.java index d965e56a01cb9..e1deb28527ff7 100644 --- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerCreationFuture.java +++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerCreationFuture.java @@ -39,7 +39,7 @@ public class MarkerCreationFuture extends CompletableFuture { public MarkerCreationFuture(Context context, String markerDirPath, String markerName) { super(); - this.timer = new HoodieTimer().startTimer(); + this.timer = HoodieTimer.start(); this.context = context; this.markerDirPath = markerDirPath; this.markerName = markerName; diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java index 67e850bb7d8fa..f367ec870eb1c 100644 --- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java +++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java @@ -313,7 +313,7 @@ private int parseMarkerFileIndex(String markerFilePathStr) { */ private void flushMarkersToFile(int markerFileIndex) { LOG.debug("Write to " + markerDirPath + "/" + MARKERS_FILENAME_PREFIX + markerFileIndex); - HoodieTimer timer = new HoodieTimer().startTimer(); + HoodieTimer timer = HoodieTimer.start(); Path markersFilePath = new Path(markerDirPath, MARKERS_FILENAME_PREFIX + markerFileIndex); FSDataOutputStream fsDataOutputStream = null; BufferedWriter bufferedWriter = null;
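
Note on the pattern applied throughout this patch: every touched call site collapses the two-step construction idiom "new HoodieTimer().startTimer()" (and its more verbose form "HoodieTimer timer = new HoodieTimer(); timer.startTimer();", which in HoodieBackedTableMetadata even started the timer twice) into the single static factory HoodieTimer.start(). A handful of files additionally regroup the org.apache.hadoop imports after the org.apache.hudi block. The sketch below only illustrates the before/after usage; it assumes, as the surrounding log statements suggest, that start() returns an already-running timer and endTimer() reports elapsed milliseconds. The class name and the Thread.sleep calls are illustrative stand-ins, not part of the patch.

import org.apache.hudi.common.util.HoodieTimer;

public class TimerUsageSketch {
  public static void main(String[] args) throws InterruptedException {
    // Old idiom removed by this patch: construct the timer, then start it.
    // HoodieTimer timer = new HoodieTimer().startTimer();

    // New idiom: a single static factory returning a started timer.
    HoodieTimer timer = HoodieTimer.start();
    Thread.sleep(50); // stand-in for the timed work
    System.out.println("Took " + timer.endTimer() + " ms");

    // The same timer can be restarted to time a second phase, as
    // AbstractTableFileSystemView.addFilesToView does after building file groups.
    timer.startTimer();
    Thread.sleep(20);
    System.out.println("Second phase took " + timer.endTimer() + " ms");
  }
}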