
Commit eeafaea

microbearz and zhanshaoxiong authored
[HUDI-3512] Add call procedure for StatsCommand (#5955)
Co-authored-by: zhanshaoxiong <shaoxiong0001@@gmail.com>
1 parent 59978ef commit eeafaea

File tree

4 files changed: +294, -0 lines changed

hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/HoodieProcedures.scala

Lines changed: 2 additions & 0 deletions
```diff
@@ -52,6 +52,8 @@ object HoodieProcedures {
     mapBuilder.put(ShowLatestFileSystemViewProcedure.NAME, ShowLatestFileSystemViewProcedure.builder)
     mapBuilder.put(ShowHoodieLogFileMetadataProcedure.NAME, ShowHoodieLogFileMetadataProcedure.builder)
     mapBuilder.put(ShowHoodieLogFileRecordsProcedure.NAME, ShowHoodieLogFileRecordsProcedure.builder)
+    mapBuilder.put(StatsWriteAmplificationProcedure.NAME, StatsWriteAmplificationProcedure.builder)
+    mapBuilder.put(StatsFileSizeProcedure.NAME, StatsFileSizeProcedure.builder)
     mapBuilder.build
   }
 }
```
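
Registering the two builders in `HoodieProcedures` is what makes the new procedures resolvable by name from Spark SQL's `CALL` syntax. A minimal invocation sketch, assuming a SparkSession with Hudi's SQL extensions enabled and an existing Hudi table named `hudi_tbl` (both hypothetical here; the call syntax itself matches the tests added below):

```scala
// Hypothetical SparkSession `spark` with Hudi's Spark SQL extensions enabled,
// and a hypothetical existing Hudi table `hudi_tbl`.
spark.sql("call stats_wa(table => 'hudi_tbl')").show()
spark.sql("call stats_filesizes(table => 'hudi_tbl', partition_path => '/*', limit => 10)").show()
```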
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/StatsFileSizeProcedure.scala

Lines changed: 108 additions & 0 deletions
```scala
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.hudi.command.procedures

import com.codahale.metrics.{Histogram, Snapshot, UniformReservoir}
import com.google.common.collect.{Lists, Maps}
import org.apache.hadoop.fs.Path
import org.apache.hudi.common.fs.FSUtils
import org.apache.hudi.common.table.HoodieTableMetaClient
import org.apache.spark.sql.Row
import org.apache.spark.sql.hudi.command.procedures.StatsFileSizeProcedure.MAX_FILES
import org.apache.spark.sql.types.{DataTypes, Metadata, StructField, StructType}

import java.util.function.Supplier
import scala.collection.JavaConverters.{asScalaBufferConverter, mapAsScalaMapConverter}

class StatsFileSizeProcedure extends BaseProcedure with ProcedureBuilder {

  override def parameters: Array[ProcedureParameter] = Array[ProcedureParameter](
    ProcedureParameter.required(0, "table", DataTypes.StringType, None),
    ProcedureParameter.optional(1, "partition_path", DataTypes.StringType, ""),
    ProcedureParameter.optional(2, "limit", DataTypes.IntegerType, 10)
  )

  override def outputType: StructType = StructType(Array[StructField](
    StructField("commit_time", DataTypes.StringType, nullable = true, Metadata.empty),
    StructField("min", DataTypes.LongType, nullable = true, Metadata.empty),
    StructField("10th", DataTypes.DoubleType, nullable = true, Metadata.empty),
    StructField("50th", DataTypes.DoubleType, nullable = true, Metadata.empty),
    StructField("avg", DataTypes.DoubleType, nullable = true, Metadata.empty),
    StructField("95th", DataTypes.DoubleType, nullable = true, Metadata.empty),
    StructField("max", DataTypes.LongType, nullable = true, Metadata.empty),
    StructField("num_files", DataTypes.IntegerType, nullable = true, Metadata.empty),
    StructField("stddev", DataTypes.DoubleType, nullable = true, Metadata.empty)
  ))

  override def call(args: ProcedureArgs): Seq[Row] = {
    checkArgs(parameters, args)
    val table = getArgValueOrDefault(args, parameters(0))
    val globRegex = getArgValueOrDefault(args, parameters(1)).get.asInstanceOf[String]
    val limit: Int = getArgValueOrDefault(args, parameters(2)).get.asInstanceOf[Int]
    val basePath = getBasePath(table)
    val fs = HoodieTableMetaClient.builder.setConf(jsc.hadoopConfiguration()).setBasePath(basePath).build.getFs
    val globPath = String.format("%s/%s/*", basePath, globRegex)
    val statuses = FSUtils.getGlobStatusExcludingMetaFolder(fs, new Path(globPath))

    val globalHistogram = new Histogram(new UniformReservoir(MAX_FILES))
    val commitHistogramMap: java.util.Map[String, Histogram] = Maps.newHashMap()
    statuses.asScala.foreach(
      status => {
        val instantTime = FSUtils.getCommitTime(status.getPath.getName)
        val len = status.getLen
        commitHistogramMap.putIfAbsent(instantTime, new Histogram(new UniformReservoir(MAX_FILES)))
        commitHistogramMap.get(instantTime).update(len)
        globalHistogram.update(len)
      }
    )
    val rows: java.util.List[Row] = Lists.newArrayList()
    commitHistogramMap.asScala.foreach {
      case (instantTime, histogram) =>
        val snapshot = histogram.getSnapshot
        rows.add(printFileSizeHistogram(instantTime, snapshot))
    }
    val snapshot = globalHistogram.getSnapshot
    rows.add(printFileSizeHistogram("ALL", snapshot))
    rows.stream().limit(limit).toArray().map(r => r.asInstanceOf[Row]).toList
  }

  def printFileSizeHistogram(instantTime: String, snapshot: Snapshot): Row = {
    Row(
      instantTime,
      snapshot.getMin,
      snapshot.getValue(0.1),
      snapshot.getMedian,
      snapshot.getMean,
      snapshot.get95thPercentile,
      snapshot.getMax,
      snapshot.size,
      snapshot.getStdDev
    )
  }

  override def build: Procedure = new StatsFileSizeProcedure
}

object StatsFileSizeProcedure {
  val MAX_FILES = 1000000
  val NAME = "stats_filesizes"

  def builder: Supplier[ProcedureBuilder] = new Supplier[ProcedureBuilder] {
    override def get(): ProcedureBuilder = new StatsFileSizeProcedure()
  }
}
```
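
Each data file's length feeds one `Histogram` per commit plus a global one, all backed by a `UniformReservoir` capped at `MAX_FILES` samples; a `Snapshot` then yields the min/percentile/max/stddev columns. A self-contained sketch of that aggregation pattern, using only the dropwizard-metrics dependency imported above and hypothetical file lengths:

```scala
import com.codahale.metrics.{Histogram, UniformReservoir}

// Hypothetical file lengths in bytes, fed into a uniform-sampling histogram.
val histogram = new Histogram(new UniformReservoir(1000000))
Seq(4096L, 1048576L, 20971520L).foreach(len => histogram.update(len))

// A Snapshot exposes the same statistics the procedure reports per row.
val snapshot = histogram.getSnapshot
println(s"min=${snapshot.getMin} median=${snapshot.getMedian} " +
  s"p95=${snapshot.get95thPercentile} max=${snapshot.getMax} n=${snapshot.size}")
```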
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/StatsWriteAmplificationProcedure.scala

Lines changed: 85 additions & 0 deletions
```scala
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.hudi.command.procedures

import com.google.common.collect.Lists
import org.apache.hudi.common.model.HoodieCommitMetadata
import org.apache.hudi.common.table.HoodieTableMetaClient
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{DataTypes, Metadata, StructField, StructType}

import java.text.DecimalFormat
import java.util.function.Supplier
import scala.collection.JavaConverters.asScalaIteratorConverter

class StatsWriteAmplificationProcedure extends BaseProcedure with ProcedureBuilder {
  override def parameters: Array[ProcedureParameter] = Array[ProcedureParameter](
    ProcedureParameter.required(0, "table", DataTypes.StringType, None),
    ProcedureParameter.optional(1, "limit", DataTypes.IntegerType, 10)
  )

  override def outputType: StructType = StructType(Array[StructField](
    StructField("commit_time", DataTypes.StringType, nullable = true, Metadata.empty),
    StructField("total_upserted", DataTypes.LongType, nullable = true, Metadata.empty),
    StructField("total_written", DataTypes.LongType, nullable = true, Metadata.empty),
    StructField("write_amplification_factor", DataTypes.StringType, nullable = true, Metadata.empty)
  ))

  override def call(args: ProcedureArgs): Seq[Row] = {
    checkArgs(parameters, args)
    val table = getArgValueOrDefault(args, parameters(0))
    val limit: Int = getArgValueOrDefault(args, parameters(1)).get.asInstanceOf[Int]
    val basePath = getBasePath(table)
    val client = HoodieTableMetaClient.builder.setConf(jsc.hadoopConfiguration()).setBasePath(basePath).build
    val activeTimeline = client.getActiveTimeline
    val timeline = activeTimeline.getCommitTimeline.filterCompletedInstants()

    val rows: java.util.List[Row] = Lists.newArrayList()
    val df = new DecimalFormat("#.00")
    var totalRecordsUpserted = 0L
    var totalRecordsWritten = 0L
    timeline.getInstants.iterator.asScala.foreach(
      instantTime => {
        var waf = "0"
        val commit = HoodieCommitMetadata.fromBytes(activeTimeline.getInstantDetails(instantTime).get(), classOf[HoodieCommitMetadata])
        if (commit.fetchTotalUpdateRecordsWritten() > 0) {
          waf = df.format(commit.fetchTotalRecordsWritten().toFloat / commit.fetchTotalUpdateRecordsWritten())
        }
        rows.add(Row(instantTime.getTimestamp, commit.fetchTotalUpdateRecordsWritten, commit.fetchTotalRecordsWritten, waf))
        totalRecordsUpserted = totalRecordsUpserted + commit.fetchTotalUpdateRecordsWritten()
        totalRecordsWritten = totalRecordsWritten + commit.fetchTotalRecordsWritten()
      }
    )
    var waf = "0"
    if (totalRecordsUpserted > 0) {
      waf = df.format(totalRecordsWritten.toFloat / totalRecordsUpserted)
    }
    rows.add(Row("Total", totalRecordsUpserted, totalRecordsWritten, waf))
    rows.stream().limit(limit).toArray().map(r => r.asInstanceOf[Row]).toList
  }

  override def build: Procedure = new StatsWriteAmplificationProcedure
}

object StatsWriteAmplificationProcedure {
  val NAME = "stats_wa"

  def builder: Supplier[ProcedureBuilder] = new Supplier[ProcedureBuilder] {
    override def get(): ProcedureBuilder = new StatsWriteAmplificationProcedure()
  }
}
```
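
Per commit, the write amplification factor is `fetchTotalRecordsWritten / fetchTotalUpdateRecordsWritten`: how many records were physically written for each record the user actually updated. A toy calculation mirroring that logic, with hypothetical counts:

```scala
import java.text.DecimalFormat

val df = new DecimalFormat("#.00")
val recordsUpserted = 100L  // records the user updated (hypothetical)
val recordsWritten = 5000L  // records rewritten to apply the update (hypothetical)
val waf = if (recordsUpserted > 0) df.format(recordsWritten.toFloat / recordsUpserted) else "0"
println(s"write_amplification_factor = $waf") // prints 50.00 (decimal separator is locale-dependent)
```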
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/procedure/TestStatsProcedure.scala

Lines changed: 99 additions & 0 deletions
```scala
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.spark.sql.hudi.procedure

import org.apache.spark.sql.hudi.HoodieSparkSqlTestBase

class TestStatsProcedure extends HoodieSparkSqlTestBase {
  test("Test Call stats_wa Procedure") {
    withTempDir { tmp =>
      val tableName = generateTableName
      val tablePath = s"${tmp.getCanonicalPath}/$tableName"
      // create table
      spark.sql(
        s"""
           |create table $tableName (
           |  id int,
           |  name string,
           |  price double,
           |  ts long
           |) using hudi
           | partitioned by (ts)
           | location '$tablePath'
           | tblproperties (
           |  primaryKey = 'id',
           |  preCombineField = 'ts'
           | )
       """.stripMargin)
      // insert data to table
      spark.sql(s"insert into $tableName select 1, 'a1', 10, 1000")
      spark.sql(s"insert into $tableName select 2, 'a2', 20, 1500")
      spark.sql(s"update $tableName set name = 'b1', price = 100 where id = 1")

      // Check required fields
      checkExceptionContain(s"""call stats_wa(limit => 10)""")(
        s"Argument: table is required")

      // collect result for table
      val result = spark.sql(
        s"""call stats_wa(table => '$tableName')""".stripMargin).collect()
      assertResult(4) {
        result.length
      }
    }
  }

  test("Test Call stats_filesizes Procedure") {
    withTempDir { tmp =>
      val tableName = generateTableName
      val tablePath = s"${tmp.getCanonicalPath}/$tableName"
      // create table
      spark.sql(
        s"""
           |create table $tableName (
           |  id int,
           |  name string,
           |  price double,
           |  ts long
           |) using hudi
           | partitioned by (ts)
           | location '$tablePath'
           | tblproperties (
           |  primaryKey = 'id',
           |  preCombineField = 'ts'
           | )
       """.stripMargin)
      // insert data to table
      spark.sql(s"insert into $tableName select 1, 'a1', 10, 1000")
      spark.sql(s"insert into $tableName select 2, 'a2', 20, 1500")

      // Check required fields
      checkExceptionContain(s"""call stats_filesizes(limit => 10)""")(
        s"Argument: table is required")

      // collect result for table
      val result = spark.sql(
        s"""call stats_filesizes(table => '$tableName', partition_path => '/*')""".stripMargin).collect()
      assertResult(3) {
        result.length
      }
    }
  }
}
```
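
The expected row counts follow from the aggregate rows each procedure appends: `stats_wa` emits one row per completed commit (two inserts plus one update, so three commits) and a trailing "Total" row, hence 4; `stats_filesizes` emits one row per commit-level histogram (two inserts) plus the global "ALL" row, hence 3.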
