Spark2 feature Bucketizer: discretizing continuous data into specified ranges

import org.apache.spark.ml.feature.Bucketizer
// Double.NegativeInfinity: negative infinity; Double.PositiveInfinity: positive infinity
// Six buckets: [-inf, -100), [-100, -10), [-10, 0), [0, 10), [10, 90), [90, +inf)
val splits = Array(Double.NegativeInfinity, -100, -10, 0.0, 10, 90, Double.PositiveInfinity)
val data: Array[Double] = Array(-180, -160, -100, -50, -70, -20, -8, -5, -3, 0.0, 1, 3, 7, 10, 30, 60, 90, 100, 120, 150)
val dataFrame = spark.createDataFrame(data.map(Tuple1.apply)).toDF("features")
// dataFrame: org.apache.spark.sql.DataFrame = [features: double]
val bucketizer = new Bucketizer()
.setInputCol("features")
.setOutputCol("bucketedFeatures")
.setSplits(splits)
// Transform the raw values into bucket indices
val bucketedData = bucketizer.transform(dataFrame)
// bucketedData: org.apache.spark.sql.DataFrame = [features: double, bucketedFeatures: double]
bucketedData.show(50, truncate = false)
+--------+----------------+
|features|bucketedFeatures|
+--------+----------------+
|-180.0 |0.0 |
|-160.0 |0.0 |
|-100.0 |1.0 |
|-50.0 |1.0 |
|-70.0 |1.0 |
|-20.0 |1.0 |
|-8.0 |2.0 |
|-5.0 |2.0 |
|-3.0 |2.0 |
|0.0 |3.0 |
|1.0 |3.0 |
|3.0 |3.0 |
|7.0 |3.0 |
|10.0 |4.0 |
|30.0 |4.0 |
|60.0 |4.0 |
|90.0 |5.0 |
|100.0 |5.0 |
|120.0 |5.0 |
|150.0 |5.0 |
+--------+----------------+
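As the table shows, bucket i holds values in the range [splits(i), splits(i+1)): -100.0 lands in bucket 1 because each interval is closed on the left and open on the right, while the last bucket also includes its upper bound. The bucketed column is an ordinary double column, so the distribution can be sanity-checked with a plain aggregation, continuing from the bucketedData above:

// Count how many values landed in each bucket
bucketedData.groupBy("bucketedFeatures").count().orderBy("bucketedFeatures").show()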
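Because the splits here run from Double.NegativeInfinity to Double.PositiveInfinity, every finite value is covered; NaN, however, counts as an invalid entry and makes transform fail under the default handleInvalid = "error". A minimal sketch of the "keep" option, which routes NaN into one extra bucket (the data and the three-element splits below are illustrative, not from the original example):

import org.apache.spark.ml.feature.Bucketizer

// NaN is an invalid entry for Bucketizer; "keep" assigns it to an extra bucket
val nanData = Array(-5.0, 42.0, Double.NaN)
val nanDF = spark.createDataFrame(nanData.map(Tuple1.apply)).toDF("features")

val keepBucketizer = new Bucketizer()
  .setInputCol("features")
  .setOutputCol("bucketedFeatures")
  .setSplits(Array(Double.NegativeInfinity, 0.0, Double.PositiveInfinity))
  .setHandleInvalid("keep") // alternatives: "error" (default) or "skip"

keepBucketizer.transform(nanDF).show()

With two regular buckets (indices 0.0 and 1.0), the NaN row is assigned index 2.0, one past the last regular bucket; "skip" would instead filter that row out.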