diff --git a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/connectors/hive/HiveSourceFileEnumerator.java b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/connectors/hive/HiveSourceFileEnumerator.java
index 54a2189360313..24829cb83369d 100644
--- a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/connectors/hive/HiveSourceFileEnumerator.java
+++ b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/connectors/hive/HiveSourceFileEnumerator.java
@@ -110,13 +110,10 @@ public static InputSplit[] createMRSplits(
     public static int getNumFiles(List<HiveTablePartition> partitions, JobConf jobConf)
             throws IOException {
         int numFiles = 0;
-        FileSystem fs = null;
         for (HiveTablePartition partition : partitions) {
             StorageDescriptor sd = partition.getStorageDescriptor();
             org.apache.hadoop.fs.Path inputPath = new org.apache.hadoop.fs.Path(sd.getLocation());
-            if (fs == null) {
-                fs = inputPath.getFileSystem(jobConf);
-            }
+            FileSystem fs = inputPath.getFileSystem(jobConf);
             // it's possible a partition exists in metastore but the data has been removed
             if (!fs.exists(inputPath)) {
                 continue;
diff --git a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/connectors/hive/read/HiveTableInputFormat.java b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/connectors/hive/read/HiveTableInputFormat.java
index 073544060672c..c5653878d9199 100644
--- a/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/connectors/hive/read/HiveTableInputFormat.java
+++ b/flink-connectors/flink-connector-hive/src/main/java/org/apache/flink/connectors/hive/read/HiveTableInputFormat.java
@@ -34,9 +34,6 @@
 import org.apache.flink.table.types.DataType;
 import org.apache.flink.table.types.logical.LogicalType;
 
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.mapred.InputSplit;
@@ -342,22 +339,4 @@ public BaseStatistics getStatistics(BaseStatistics cachedStats) {
     public InputSplitAssigner getInputSplitAssigner(HiveTableInputSplit[] inputSplits) {
         return new LocatableInputSplitAssigner(inputSplits);
     }
-
-    public int getNumFiles() throws IOException {
-        int numFiles = 0;
-        FileSystem fs = null;
-        for (HiveTablePartition partition : partitions) {
-            StorageDescriptor sd = partition.getStorageDescriptor();
-            Path inputPath = new Path(sd.getLocation());
-            if (fs == null) {
-                fs = inputPath.getFileSystem(jobConf.conf());
-            }
-            // it's possible a partition exists in metastore but the data has been removed
-            if (!fs.exists(inputPath)) {
-                continue;
-            }
-            numFiles += fs.listStatus(inputPath).length;
-        }
-        return numFiles;
-    }
 }
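
Net effect, for reviewers: both files previously carried their own copy of this file-counting loop, each caching the FileSystem resolved from the first partition location; the duplicate instance method in HiveTableInputFormat is removed, and the static helper in HiveSourceFileEnumerator now resolves the FileSystem per partition (partitions of the same table can point at different file systems). Below is a minimal sketch of the resulting helper, assuming the loop tail continues as in the removed duplicate (counting fs.listStatus entries); it is not copied verbatim from the patched file.

    // Sketch of HiveSourceFileEnumerator#getNumFiles after applying the hunks above.
    public static int getNumFiles(List<HiveTablePartition> partitions, JobConf jobConf)
            throws IOException {
        int numFiles = 0;
        for (HiveTablePartition partition : partitions) {
            StorageDescriptor sd = partition.getStorageDescriptor();
            org.apache.hadoop.fs.Path inputPath = new org.apache.hadoop.fs.Path(sd.getLocation());
            // Resolve the FileSystem from each partition's own location instead of
            // reusing the one cached from the first partition.
            FileSystem fs = inputPath.getFileSystem(jobConf);
            // it's possible a partition exists in metastore but the data has been removed
            if (!fs.exists(inputPath)) {
                continue;
            }
            // Count the files under the partition directory (as in the removed duplicate).
            numFiles += fs.listStatus(inputPath).length;
        }
        return numFiles;
    }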