From b054a54d814b64572d036857181c7c6f5e2a53a3 Mon Sep 17 00:00:00 2001 From: Xusen Yin Date: Wed, 25 Nov 2015 14:08:11 +0800 Subject: [PATCH 1/3] add docs for QuantileDiscretizer --- docs/ml-features.md | 65 +++++++++++++++++ .../ml/JavaQuantileDiscretizerExample.java | 71 +++++++++++++++++++ .../ml/QuantileDiscretizerExample.scala | 49 +++++++++++++ 3 files changed, 185 insertions(+) create mode 100644 examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java create mode 100644 examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala diff --git a/docs/ml-features.md b/docs/ml-features.md index cd1838d6d2882..6ebdc7e44995e 100644 --- a/docs/ml-features.md +++ b/docs/ml-features.md @@ -1705,6 +1705,71 @@ print(output.select("features", "clicked").first()) +## QuantileDiscretizer + +`QuantileDiscretizer` takes a column with continuous features and outputs a column with binned +categorical features. +The bin ranges are chosen by taking a sample of the data and dividing it into roughly equal parts. +The lower and upper bin bounds will be `-Infinity` and `+Infinity`, covering all real values. +This attempts to find numBuckets partitions based on a sample of the given input data, but it may +find fewer depending on the data sample values. + +Note that the result may different every time you run it, since the sample strategy behind it is +non-deterministic. + +**Examples** + +Assume that we have a DataFrame with the columns `id`, `hour`: + +~~~ + id | hour +----|------ + 0 | 18.0 +----|------ + 1 | 19.0 +----|------ + 2 | 8.0 +----|------ + 3 | 5.0 +----|------ + 4 | 22.0 +~~~ + +`hour` is a continuous feature with `Double` type. We want to turn the continuous feature into +categorical one. Given `numBuckets = 3`, we should get the following DataFrame: + +~~~ + id | hour | result +----|------|------ + 0 | 18.0 | 2.0 +----|------|------ + 1 | 19.0 | 2.0 +----|------|------ + 2 | 8.0 | 1.0 +----|------|------ + 3 | 5.0 | 1.0 +----|------|------ + 4 | 22.0 | 0.0 +~~~ + +
+<div class="codetabs">
+<div data-lang="scala" markdown="1">
+
+Refer to the [QuantileDiscretizer Scala docs](api/scala/index.html#org.apache.spark.ml.feature.QuantileDiscretizer)
+for more details on the API.
+
+{% include_example scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala %}
+</div>
+
+<div data-lang="java" markdown="1">
+
+Refer to the [QuantileDiscretizer Java docs](api/java/org/apache/spark/ml/feature/QuantileDiscretizer.html)
+for more details on the API.
+
+{% include_example java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java %}
+</div>
+</div>
+ # Feature Selectors ## VectorSlicer diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java new file mode 100644 index 0000000000000..b857e57721886 --- /dev/null +++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.examples.ml; + +import org.apache.spark.SparkConf; +import org.apache.spark.api.java.JavaSparkContext; +import org.apache.spark.sql.SQLContext; +// $example on$ +import java.util.Arrays; + +import org.apache.spark.api.java.JavaRDD; +import org.apache.spark.ml.feature.QuantileDiscretizer; +import org.apache.spark.sql.DataFrame; +import org.apache.spark.sql.Row; +import org.apache.spark.sql.RowFactory; +import org.apache.spark.sql.types.DataTypes; +import org.apache.spark.sql.types.Metadata; +import org.apache.spark.sql.types.StructField; +import org.apache.spark.sql.types.StructType; +// $example off$ + +public class JavaQuantileDiscretizerExample { + public static void main(String[] args) { + SparkConf conf = new SparkConf().setAppName("JavaQuantileDiscretizerExample"); + JavaSparkContext jsc = new JavaSparkContext(conf); + SQLContext sqlContext = new SQLContext(jsc); + + // $example on$ + JavaRDD jrdd = jsc.parallelize( + Arrays.asList( + RowFactory.create(0, 18.0), + RowFactory.create(1, 19.0), + RowFactory.create(2, 8.0), + RowFactory.create(3, 5.0), + RowFactory.create(4, 22.0) + ) + ); + + StructType schema = new StructType(new StructField[]{ + new StructField("id", DataTypes.IntegerType, false, Metadata.empty()), + new StructField("hour", DataTypes.DoubleType, false, Metadata.empty()) + }); + + DataFrame df = sqlContext.createDataFrame(jrdd, schema); + + QuantileDiscretizer discretizer = new QuantileDiscretizer() + .setInputCol("hour") + .setOutputCol("result") + .setNumBuckets(3); + + DataFrame result = discretizer.fit(df).transform(df); + result.show(); + // $example off$ + jsc.stop(); + } +} diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala new file mode 100644 index 0000000000000..8f29b7eaa6d26 --- /dev/null +++ b/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// scalastyle:off println +package org.apache.spark.examples.ml + +// $example on$ +import org.apache.spark.ml.feature.QuantileDiscretizer +// $example off$ +import org.apache.spark.sql.SQLContext +import org.apache.spark.{SparkConf, SparkContext} + +object QuantileDiscretizerExample { + def main(args: Array[String]) { + val conf = new SparkConf().setAppName("QuantileDiscretizerExample") + val sc = new SparkContext(conf) + val sqlContext = new SQLContext(sc) + import sqlContext.implicits._ + + // $example on$ + val data = Array((0, 18.0), (1, 19.0), (2, 8.0), (3, 5.0), (4, 2.2)) + val df = sc.parallelize(data).toDF("id", "hour") + + val discretizer = new QuantileDiscretizer() + .setInputCol("hour") + .setOutputCol("result") + .setNumBuckets(3) + + val result = discretizer.fit(df).transform(df) + result.show() + // $example off$ + sc.stop() + } +} +// scalastyle:on println From fede71bea84fb8c2b0a71fcd416f62bdbd35ae1a Mon Sep 17 00:00:00 2001 From: Xusen Yin Date: Thu, 3 Dec 2015 09:22:57 +0800 Subject: [PATCH 2/3] fix inconsistency --- docs/ml-features.md | 4 ++-- .../spark/examples/ml/JavaQuantileDiscretizerExample.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/ml-features.md b/docs/ml-features.md index 6ebdc7e44995e..281d2c524b75c 100644 --- a/docs/ml-features.md +++ b/docs/ml-features.md @@ -1732,7 +1732,7 @@ Assume that we have a DataFrame with the columns `id`, `hour`: ----|------ 3 | 5.0 ----|------ - 4 | 22.0 + 4 | 2.2 ~~~ `hour` is a continuous feature with `Double` type. We want to turn the continuous feature into @@ -1749,7 +1749,7 @@ categorical one. Given `numBuckets = 3`, we should get the following DataFrame: ----|------|------ 3 | 5.0 | 1.0 ----|------|------ - 4 | 22.0 | 0.0 + 4 | 2.2 | 0.0 ~~~
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java index b857e57721886..251ae79d9a108 100644 --- a/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java +++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java @@ -47,7 +47,7 @@ public static void main(String[] args) { RowFactory.create(1, 19.0), RowFactory.create(2, 8.0), RowFactory.create(3, 5.0), - RowFactory.create(4, 22.0) + RowFactory.create(4, 2.2) ) ); From 17655aae3c84aefbc1d3104e2b02be72394b0136 Mon Sep 17 00:00:00 2001 From: Xusen Yin Date: Fri, 4 Dec 2015 10:47:16 +0800 Subject: [PATCH 3/3] fix typos --- docs/ml-features.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ml-features.md b/docs/ml-features.md index 281d2c524b75c..b8b78efd22d65 100644 --- a/docs/ml-features.md +++ b/docs/ml-features.md @@ -1711,10 +1711,10 @@ print(output.select("features", "clicked").first()) categorical features. The bin ranges are chosen by taking a sample of the data and dividing it into roughly equal parts. The lower and upper bin bounds will be `-Infinity` and `+Infinity`, covering all real values. -This attempts to find numBuckets partitions based on a sample of the given input data, but it may +This attempts to find `numBuckets` partitions based on a sample of the given input data, but it may find fewer depending on the data sample values. -Note that the result may different every time you run it, since the sample strategy behind it is +Note that the result may be different every time you run it, since the sample strategy behind it is non-deterministic. **Examples**
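
The examples in this patch print only the transformed DataFrame. As a supplementary sketch, not part of the patch itself, the snippet below shows one way to inspect the split points the discretizer actually computes, which makes the "may find fewer buckets" and non-determinism caveats concrete. It assumes the same Spark 1.6 API used in the Scala example above, where `fit` returns a `Bucketizer`; the object name `QuantileDiscretizerSplitsSketch` is invented for illustration.

~~~
import org.apache.spark.ml.feature.QuantileDiscretizer
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

object QuantileDiscretizerSplitsSketch {
  def main(args: Array[String]) {
    val sc = new SparkContext(new SparkConf().setAppName("QuantileDiscretizerSplitsSketch"))
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._

    // Same toy data as the documentation example.
    val df = sc.parallelize(Array((0, 18.0), (1, 19.0), (2, 8.0), (3, 5.0), (4, 2.2)))
      .toDF("id", "hour")

    val discretizer = new QuantileDiscretizer()
      .setInputCol("hour")
      .setOutputCol("result")
      .setNumBuckets(3)

    // fit() produces a Bucketizer; its splits are the learned bin boundaries,
    // bracketed by -Infinity and +Infinity as described in the docs.
    val bucketizer = discretizer.fit(df)
    println(bucketizer.getSplits.mkString("splits: [", ", ", "]"))

    // Because the boundaries are estimated from a sample, fewer than numBuckets
    // buckets may come back: the bucket count is splits.length - 1.
    println(s"buckets actually found: ${bucketizer.getSplits.length - 1}")

    bucketizer.transform(df).show()
    sc.stop()
  }
}
~~~

Comparing the printed splits across runs is a quick way to see why a value near a boundary can land in a different bucket from one run to the next.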