/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.hudi.command.procedures

import com.codahale.metrics.{Histogram, Snapshot, UniformReservoir}
import com.google.common.collect.{Lists, Maps}
import org.apache.hadoop.fs.Path
import org.apache.hudi.common.fs.FSUtils
import org.apache.hudi.common.table.HoodieTableMetaClient
import org.apache.spark.sql.Row
import org.apache.spark.sql.hudi.command.procedures.StatsFileSizeProcedure.MAX_FILES
import org.apache.spark.sql.types.{DataTypes, Metadata, StructField, StructType}

import java.util.function.Supplier
import scala.collection.JavaConverters.{asScalaBufferConverter, mapAsScalaMapConverter}
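
/**
 * Spark SQL procedure that reports file size statistics for a Hudi table:
 * one row per commit (keyed by the commit time encoded in each data file
 * name) plus a final "ALL" row aggregated across every matched file.
 *
 * A sketch of the intended invocation, assuming the procedure is registered
 * under [[StatsFileSizeProcedure.NAME]]; the table name and partition glob
 * below are placeholders:
 *
 * {{{
 *   CALL stats_filesizes(table => 'h1', partition_path => '2022/*', limit => 10);
 * }}}
 */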
class StatsFileSizeProcedure extends BaseProcedure with ProcedureBuilder {
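
  // Arguments: required table name, optional partition path glob (relative
  // to the table base path), and a cap on the number of rows returned.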
  override def parameters: Array[ProcedureParameter] = Array[ProcedureParameter](
    ProcedureParameter.required(0, "table", DataTypes.StringType, None),
    ProcedureParameter.optional(1, "partition_path", DataTypes.StringType, ""),
    ProcedureParameter.optional(2, "limit", DataTypes.IntegerType, 10)
  )
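
  // One output row per commit: the distribution of sizes of the files written
  // by that commit (min, p10, median, mean, p95, max, file count, stddev).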
  override def outputType: StructType = StructType(Array[StructField](
    StructField("commit_time", DataTypes.StringType, nullable = true, Metadata.empty),
    StructField("min", DataTypes.LongType, nullable = true, Metadata.empty),
    StructField("10th", DataTypes.DoubleType, nullable = true, Metadata.empty),
    StructField("50th", DataTypes.DoubleType, nullable = true, Metadata.empty),
    StructField("avg", DataTypes.DoubleType, nullable = true, Metadata.empty),
    StructField("95th", DataTypes.DoubleType, nullable = true, Metadata.empty),
    StructField("max", DataTypes.LongType, nullable = true, Metadata.empty),
    StructField("num_files", DataTypes.IntegerType, nullable = true, Metadata.empty),
    StructField("stddev", DataTypes.DoubleType, nullable = true, Metadata.empty)
  ))

  override def call(args: ProcedureArgs): Seq[Row] = {
    checkArgs(parameters, args)
    val table = getArgValueOrDefault(args, parameters(0))
    val globRegex = getArgValueOrDefault(args, parameters(1)).get.asInstanceOf[String]
    val limit: Int = getArgValueOrDefault(args, parameters(2)).get.asInstanceOf[Int]
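    // List all data files under the (optional) partition glob, skipping the
    // .hoodie metadata folder.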
    val basePath = getBasePath(table)
    val fs = HoodieTableMetaClient.builder.setConf(jsc.hadoopConfiguration()).setBasePath(basePath).build.getFs
    val globPath = String.format("%s/%s/*", basePath, globRegex)
    val statuses = FSUtils.getGlobStatusExcludingMetaFolder(fs, new Path(globPath))

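    // Maintain one histogram per commit plus a global histogram across all
    // files; UniformReservoir keeps an unbiased sample of up to MAX_FILES
    // observed file sizes.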
    val globalHistogram = new Histogram(new UniformReservoir(MAX_FILES))
    val commitHistogramMap: java.util.Map[String, Histogram] = Maps.newHashMap()
    statuses.asScala.foreach { status =>
      val instantTime = FSUtils.getCommitTime(status.getPath.getName)
      val len = status.getLen
      commitHistogramMap.putIfAbsent(instantTime, new Histogram(new UniformReservoir(MAX_FILES)))
      commitHistogramMap.get(instantTime).update(len)
      globalHistogram.update(len)
    }
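    // Emit one row per commit, then a trailing "ALL" row aggregated over
    // every file. The "ALL" row is appended last, so a small limit can
    // truncate it.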
    val rows: java.util.List[Row] = Lists.newArrayList()
    commitHistogramMap.asScala.foreach { case (instantTime, histogram) =>
      val snapshot = histogram.getSnapshot
      rows.add(printFileSizeHistogram(instantTime, snapshot))
    }
    val snapshot = globalHistogram.getSnapshot
    rows.add(printFileSizeHistogram("ALL", snapshot))
    rows.asScala.take(limit).toList
  }
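
  /**
   * Packs one histogram snapshot into an output row; column order must match
   * [[outputType]]. Min and max are exact long byte sizes, the percentile,
   * mean, and stddev columns are doubles, and size() is the number of files
   * sampled.
   */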
  def printFileSizeHistogram(instantTime: String, snapshot: Snapshot): Row = {
    Row(
      instantTime,
      snapshot.getMin,
      snapshot.getValue(0.1),
      snapshot.getMedian,
      snapshot.getMean,
      snapshot.get95thPercentile,
      snapshot.getMax,
      snapshot.size,
      snapshot.getStdDev
    )
  }

  override def build: Procedure = new StatsFileSizeProcedure
}

object StatsFileSizeProcedure {
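  // Reservoir capacity: beyond this many file sizes per histogram, values
  // are subsampled rather than all stored.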
  val MAX_FILES = 1000000
  val NAME = "stats_filesizes"

  def builder: Supplier[ProcedureBuilder] = new Supplier[ProcedureBuilder] {
    override def get(): ProcedureBuilder = new StatsFileSizeProcedure()
  }
}