[SPARK-13568] [ML] Create feature transformer to impute missing values #11601
File 1 of 2: `Imputer.scala` (new file, 260 lines)

```scala
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.ml.feature

import org.apache.hadoop.fs.Path

import org.apache.spark.SparkException
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared.{HasInputCols, HasOutputCol}
import org.apache.spark.ml.util._
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._

/**
 * Params for [[Imputer]] and [[ImputerModel]].
 */
private[feature] trait ImputerParams extends Params with HasInputCols with HasOutputCol {

  /**
   * The imputation strategy.
   * If "mean", then replace missing values using the mean value of the feature.
   * If "median", then replace missing values using the approximate median value of the feature.
   * Default: mean
   *
   * @group param
   */
  final val strategy: Param[String] = new Param(this, "strategy", "strategy for imputation. " +
    "If mean, then replace missing values using the mean value of the feature. " +
    "If median, then replace missing values using the median value of the feature.",
    ParamValidators.inArray[String](Imputer.supportedStrategyNames.toArray))

  /** @group getParam */
  def getStrategy: String = $(strategy)

  /**
   * The placeholder for the missing values. All occurrences of missingValue will be imputed.
```
**Member:** Doc: Note that null values are always treated as missing.
```scala
   * Note that null values are always treated as missing.
   * Default: Double.NaN
   *
   * @group param
   */
  final val missingValue: DoubleParam = new DoubleParam(this, "missingValue",
    "The placeholder for the missing values. All occurrences of missingValue will be imputed")

  /** @group getParam */
  def getMissingValue: Double = $(missingValue)

  /**
   * Param for output column names.
   * @group param
   */
  final val outputCols: StringArrayParam = new StringArrayParam(this, "outputCols",
    "output column names")

  /** @group getParam */
  final def getOutputCols: Array[String] = $(outputCols)

  /** Validates and transforms the input schema. */
  protected def validateAndTransformSchema(schema: StructType): StructType = {
    require($(inputCols).length == $(outputCols).length, "inputCols and outputCols should have" +
      " the same length")
    val localInputCols = $(inputCols)
    val localOutputCols = $(outputCols)
    var outputSchema = schema

    $(inputCols).indices.foreach { i =>
      val inputCol = localInputCols(i)
      val outputCol = localOutputCols(i)
      val inputType = schema(inputCol).dataType
      SchemaUtils.checkColumnTypes(schema, inputCol, Seq(DoubleType, FloatType))
      outputSchema = SchemaUtils.appendColumn(outputSchema, outputCol, inputType)
    }
    outputSchema
  }
}

/**
 * :: Experimental ::
 * Imputation estimator for completing missing values, either using the mean or the median
 * of the column in which the missing values are located. The input column should be of
```
**Contributor:** As mentioned above at https://github.com/apache/spark/pull/11601/files#r104403880, you can add the note about relative error here. Something like "For computing median, approxQuantile is used with a relative error of 0.001".

**Contributor (Author):** I didn't add the link as it may break javadoc generation.

**Contributor:** Ah right - perhaps just mention using approxQuantile?
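For reference, this is the call under discussion (a minimal sketch; it assumes an existing `DataFrame` named `df` with a numeric `"value"` column). `approxQuantile(col, probabilities, relativeError)` returns one value per requested probability, so probability `0.5` with relative error `0.001` yields the approximate median used by the `median` strategy:

```scala
// Sketch of the approxQuantile call referenced above; `df` is assumed to be
// an existing DataFrame with a numeric "value" column.
val median: Double = df.stat.approxQuantile("value", Array(0.5), 0.001)(0)
```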
```scala
 * DoubleType or FloatType. Currently Imputer does not support categorical features yet
 * (SPARK-15041) and possibly creates incorrect values for a categorical feature.
 *
```
**Contributor:** Can we document that we only support "Float" and "Double" types for now?

**Contributor (Author):** Cool. Thanks.
```scala
 * Note that the mean/median value is computed after filtering out missing values.
 * All Null values in the input column are treated as missing, and so are also imputed.
 */
@Experimental
class Imputer @Since("2.1.0") (override val uid: String)
  extends Estimator[ImputerModel] with ImputerParams with DefaultParamsWritable {

  @Since("2.1.0")
  def this() = this(Identifiable.randomUID("imputer"))

  /** @group setParam */
```
**Member:** Shall we add Since annotations for the setters?

**Contributor:** I've heard an argument that everything in the class is implicitly since 2.1.0, since the class itself is - unless otherwise stated. Which does make sense. But I do slightly favour being explicit about it (even if it is a bit pedantic), so yeah, let's add the annotation to all the setters.
```scala
  @Since("2.1.0")
  def setInputCols(value: Array[String]): this.type = set(inputCols, value)

  /** @group setParam */
  @Since("2.1.0")
  def setOutputCols(value: Array[String]): this.type = set(outputCols, value)

  /**
   * Imputation strategy. Available options are ["mean", "median"].
   * @group setParam
   */
  @Since("2.1.0")
  def setStrategy(value: String): this.type = set(strategy, value)

  /** @group setParam */
  @Since("2.1.0")
  def setMissingValue(value: Double): this.type = set(missingValue, value)

  setDefault(strategy -> "mean", missingValue -> Double.NaN)

  override def fit(dataset: Dataset[_]): ImputerModel = {
    transformSchema(dataset.schema, logging = true)
    val surrogates = $(inputCols).map { inputCol =>
      val ic = col(inputCol)
      val filtered = dataset.select(ic.cast(DoubleType))
        .filter(ic.isNotNull && ic =!= $(missingValue))
        .filter(!ic.isNaN)
      if (filtered.rdd.isEmpty()) {
        throw new SparkException(s"surrogate cannot be computed. " +
          s"All the values in ${inputCol} are Null, NaN or missingValue ($missingValue)")
      }
      val surrogate = $(strategy) match {
        case "mean" => filtered.select(avg(inputCol)).first().getDouble(0)
        case "median" => filtered.stat.approxQuantile(inputCol, Array(0.5), 0.001)(0)
      }
      surrogate.asInstanceOf[Double]
    }

    import dataset.sparkSession.implicits._
    val surrogateDF = Seq(surrogates).toDF("surrogates")
    copyValues(new ImputerModel(uid, surrogateDF).setParent(this))
  }

  override def transformSchema(schema: StructType): StructType = {
    validateAndTransformSchema(schema)
  }

  override def copy(extra: ParamMap): Imputer = defaultCopy(extra)
}

@Since("2.1.0")
object Imputer extends DefaultParamsReadable[Imputer] {

  /** Set of strategy names that Imputer currently supports. */
  private[ml] val supportedStrategyNames = Set("mean", "median")

  @Since("2.1.0")
  override def load(path: String): Imputer = super.load(path)
}
```
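Read end to end, the estimator is meant to be used roughly like this minimal sketch (the DataFrame and column names are invented for illustration; they are not from the PR):

```scala
// Hypothetical usage of the Imputer defined above. Double.NaN is the default
// missingValue placeholder, so NaN cells (and nulls) get replaced.
val data = spark.createDataFrame(Seq(
  (1.0, Double.NaN),
  (2.0, 3.0),
  (Double.NaN, 4.0)
)).toDF("a", "b")

val model = new Imputer()
  .setInputCols(Array("a", "b"))
  .setOutputCols(Array("a_out", "b_out"))
  .setStrategy("mean")
  .fit(data) // computes one surrogate per input column

model.transform(data).show()
// a_out has NaN replaced by mean(1.0, 2.0) = 1.5;
// b_out has NaN replaced by mean(3.0, 4.0) = 3.5
```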
```scala
/**
 * :: Experimental ::
 * Model fitted by [[Imputer]].
 *
 * @param surrogateDF Value by which missing values in the input columns will be replaced. This
 *                    is stored using a DataFrame with input column names and the corresponding
 *                    surrogates:
 *
 *                    inputCol1  | inputCol2
 *                    -----------|-----------
 *                    surrogate1 | surrogate2
```
**Contributor (on an outdated diff):** `.as[Seq[Double]].head()`

**Contributor (Author):** I changed it to `val surrogates = surrogateDF.select(...)`, which can actually handle different datatypes.

**Contributor (on an outdated diff):** You could do `$(inputCols).zip($(outputCols)).zip(surrogates).map { case ((inputCol, outputCol), icSurrogate) => ... }`
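Sketched out standalone, the suggested pattern pairs each input column with its output column and surrogate in one pass (the column names and surrogate values here are illustrative, not from the PR):

```scala
// Illustrative expansion of the reviewer's zip suggestion. In the real model
// these arrays would come from $(inputCols), $(outputCols) and the fitted
// surrogates; the values below are made up.
val inputCols = Array("height", "weight")
val outputCols = Array("height_out", "weight_out")
val surrogates = Array(170.0, 65.5)

val replacements = inputCols.zip(outputCols).zip(surrogates).map {
  case ((inputCol, outputCol), icSurrogate) =>
    s"impute $inputCol into $outputCol with $icSurrogate"
}
replacements.foreach(println)
```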
File 2 of 2: `ImputerSuite.scala` (new file, 136 lines)

```scala
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.ml.feature

import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.ml.util.DefaultReadWriteTest
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.sql.{DataFrame, Row}

class ImputerSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
```
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We need tests for multiple columns too |
||
|
|
||
| test("Imputer for Double with default missing Value NaN") { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The way these tests are written was pretty confusing to me. It seems like the "mean" column should be the mean of some values but really it is the expected output when "mean" strategy is used. This is minor since it just affects readability and might be more of a personal preference. I think the following is clearer: val df = sqlContext.createDataFrame( Seq(
(0, 1.0),
(1, 1.0),
(2, 3.0),
(3, 4.0),
(4, Double.NaN)
)).toDF("id", "value")
val expectedOutput = Map(
"mean"->Array(1.0, 1.0, 3.0, 4.0, 2.25),
"median" -> Array(1.0, 1.0, 3.0, 4.0, 1.0),
"mode" -> Array(1.0, 1.0, 3.0, 4.0, 1.0))
Seq("mean", "median", "mode").foreach { strategy =>
val imputer = new Imputer().setInputCol("value").setOutputCol("out").setStrategy(strategy)
val model = imputer.fit(df)
val result = model.transform(df).select("out").collect().map(_.getDouble(0))
result.zip(expectedOutput(strategy)).foreach { case (actual, expected) =>
assert(actual ~== expected absTol 1e-5)
}Really, just any way of indicating that the extra columns are expected outputs would be clearer to me.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I get the point, yet data in the format of provides direct correspondence. I've updated the columns names.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Sorry to nitpick, but "expected_mean" seems significantly more clear than "exp_mean" at the expense of only a few extra characters. Only change it if you push more commits since its minor :) |
```scala
  test("Imputer for Double with default missing Value NaN") {
    val df = spark.createDataFrame(Seq(
      (0, 1.0, 1.0, 1.0),
      (1, 1.0, 1.0, 1.0),
      (2, 3.0, 3.0, 3.0),
      (3, 4.0, 4.0, 4.0),
      (4, Double.NaN, 2.25, 1.0)
    )).toDF("id", "value", "expected_mean", "expected_median")
    val imputer = new Imputer().setInputCols(Array("value")).setOutputCols(Array("out"))
    ImputerSuite.iterateStrategyTest(imputer, df)
  }
```
**Contributor:** We need to add tests for the case where the entire column is NaN.

**Contributor:** Good catch, yes - obviously the imputer can't actually do anything useful in that case, but it should either throw a useful error or return the dataset unchanged. I would favor an error in this case: if a user explicitly wants to impute missing data and all their data is missing, rather blow up now than later in the pipeline.

**Contributor:** Yeah, actually this also fails if the entire input column is the missing value as well. We need to beef up the test suite :)
| test("Imputer should handle NaNs when computing surrogate value, if missingValue is not NaN") { | ||
| val df = spark.createDataFrame( Seq( | ||
| (0, 1.0, 1.0, 1.0), | ||
| (1, 3.0, 3.0, 3.0), | ||
| (2, Double.NaN, Double.NaN, Double.NaN), | ||
| (3, -1.0, 2.0, 3.0) | ||
|
**Contributor:** Does approxQuantile always choose the greater of the two middle values as the median? If so, can we add a comment noting that? NumPy computes the median of [1.0, 3.0] as 2.0, for example.

**Contributor (Author):** Actually, it always chooses the smaller one of the two middle values, as I saw in some tests.
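To see which middle value a given Spark version returns, one can probe it directly (a sketch, assuming a running `SparkSession` named `spark`):

```scala
import spark.implicits._

// Probe approxQuantile's behavior on an even-sized column. Unlike NumPy, it
// returns one of the actual values rather than the average of the two middle
// ones; which one can depend on the Spark version and the relative error.
val probe = Seq(1.0, 3.0).toDF("value")
println(probe.stat.approxQuantile("value", Array(0.5), 0.001)(0))
```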
```scala
    )).toDF("id", "value", "expected_mean", "expected_median")
    val imputer = new Imputer().setInputCols(Array("value")).setOutputCols(Array("out"))
      .setMissingValue(-1.0)
    ImputerSuite.iterateStrategyTest(imputer, df)
  }

  test("Imputer for Float with missing Value -1.0") {
    val df = spark.createDataFrame(Seq(
      (0, 1.0F, 1.0F, 1.0F),
      (1, 3.0F, 3.0F, 3.0F),
      (2, 10.0F, 10.0F, 10.0F),
      (3, 10.0F, 10.0F, 10.0F),
      (4, -1.0F, 6.0F, 3.0F)
    )).toDF("id", "value", "expected_mean", "expected_median")
    val imputer = new Imputer().setInputCols(Array("value")).setOutputCols(Array("out"))
      .setMissingValue(-1)
    ImputerSuite.iterateStrategyTest(imputer, df)
  }

  test("Imputer should impute null as well as 'missingValue'") {
    val df = spark.createDataFrame(Seq(
      (0, 4.0, 4.0, 4.0),
      (1, 10.0, 10.0, 10.0),
      (2, 10.0, 10.0, 10.0),
      (3, Double.NaN, 8.0, 10.0),
      (4, -1.0, 8.0, 10.0)
    )).toDF("id", "value", "expected_mean", "expected_median")
    val df2 = df.selectExpr("*", "IF(value=-1.0, null, value) as nullable_value")
    val imputer = new Imputer().setInputCols(Array("nullable_value")).setOutputCols(Array("out"))
    ImputerSuite.iterateStrategyTest(imputer, df2)
  }
```
**Contributor:** we should also have a test for a …

**Contributor (Author):** added
```scala
  test("Imputer throws exception when surrogate cannot be computed") {
    val df = spark.createDataFrame(Seq(
      (0, Double.NaN, 1.0, 1.0),
      (1, Double.NaN, 3.0, 3.0),
      (2, Double.NaN, Double.NaN, Double.NaN)
    )).toDF("id", "value", "expected_mean", "expected_median")
    Seq("mean", "median").foreach { strategy =>
      val imputer = new Imputer().setInputCols(Array("value")).setOutputCols(Array("out"))
        .setStrategy(strategy)
      intercept[SparkException] {
        val model = imputer.fit(df)
      }
    }
  }
```
```scala
  test("Imputer read/write") {
    val t = new Imputer()
      .setInputCols(Array("myInputCol"))
      .setOutputCols(Array("myOutputCol"))
      .setMissingValue(-1.0)
    testDefaultReadWrite(t)
  }

  test("ImputerModel read/write") {
    val spark = this.spark
    import spark.implicits._
    val surrogateDF = Seq(1.234).toDF("myInputCol")
```
**Contributor:** This should be the "surrogate" col name - though I see we don't actually use it in load or transform.

**Contributor (Author):** This happens to be the correct column name for now.

**Contributor:** Ok - we should add a test here to check the column names of … (see the sketch after this test).
```scala
    val instance = new ImputerModel(
      "myImputer", surrogateDF)
      .setInputCols(Array("myInputCol"))
      .setOutputCols(Array("myOutputCol"))
    val newInstance = testDefaultReadWrite(instance)
    assert(newInstance.surrogateDF.collect() === instance.surrogateDF.collect())
  }
```
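One way to write the column-name check the reviewer asks for, as a sketch reusing `instance` and `newInstance` from the test above (the assertion itself is hypothetical, not part of the PR):

```scala
// Hypothetical extra assertion: verify the surrogate DataFrame's column names
// survive the save/load round trip, not just its values.
assert(newInstance.surrogateDF.columns === instance.surrogateDF.columns)
```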
```scala
}

object ImputerSuite {

  /**
   * Fits the imputer with each supported strategy ("mean" and "median") and checks the
   * output column against the corresponding expected column.
   * @param df DataFrame with columns "id", "value", "expected_mean", "expected_median"
   */
  def iterateStrategyTest(imputer: Imputer, df: DataFrame): Unit = {
    Seq("mean", "median").foreach { strategy =>
      imputer.setStrategy(strategy)
      val model = imputer.fit(df)
      model.transform(df).select("expected_" + strategy, "out").collect().foreach {
        case Row(exp: Float, out: Float) =>
          assert((exp.isNaN && out.isNaN) || (exp == out),
            s"Imputed values differ. Expected: $exp, actual: $out")
        case Row(exp: Double, out: Double) =>
          assert((exp.isNaN && out.isNaN) || (exp ~== out absTol 1e-5),
            s"Imputed values differ. Expected: $exp, actual: $out")
      }
    }
  }
}
```
**Reviewer:** unused import

**Author:** Not applicable anymore, as it's used below now.