import org.apache.spark.SparkContext
import scala.util.Random
import org.apache.spark.sql.Row
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.types.DoubleType
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.SparkSession
/**
 * Generates a random classification DataFrame for testing:
 * `dim` Double feature columns named "x0".."x{dim-1}" plus one String
 * label column named "x{dim}" whose values are in [0, numClass).
 *
 * @param sc            SparkContext used to parallelize the generated rows
 * @param numPartitions number of partitions for the underlying RDD
 * @param rows          number of rows to generate
 * @param dim           number of Double feature columns
 * @param numClass      number of distinct class labels
 * @return a DataFrame with `dim` DoubleType columns and a trailing StringType label column
 */
def genClassifiDataFrame(sc: SparkContext,
                         numPartitions: Int,
                         rows: Int,
                         dim: Int,
                         numClass: Int) = {
  val rdd = sc.parallelize(1 to rows, numPartitions).map { _ =>
    // Features: uniform doubles in [0, 10), rounded to 2 decimal places.
    val values: Array[Double] =
      Array.fill(dim)((Random.nextDouble() * 10).formatted("%.2f").toDouble)
    // Label is appended as a String in the last position of the row.
    Row.fromSeq(values :+ Random.nextInt(numClass).toString)
  }
  val sqlc = new SQLContext(sc)
  // NOTE: plain ASCII quotes here — the original used typographic quotes (“x”),
  // which do not compile in Scala.
  val structs = Array.tabulate(dim)(i => StructField("x" + i, DoubleType, true))
  val schema = StructType(structs :+ StructField("x" + dim, StringType, true))
  sqlc.createDataFrame(rdd, schema)
}
// Driver: the input port dataset is read only to obtain the active SparkContext.
val input = getInPortDataset("in")
val sc = input.sparkSession.sparkContext
// 1 partition, 1,000,000 rows, 100 feature columns, 3 classes.
// numClass is passed by name: a positional argument after named arguments
// ("...rows = 1000000, dim = 100, 3") is a compile error in Scala 2.
val output = genClassifiDataFrame(sc, 1, rows = 1000000, dim = 100, numClass = 3)
setOutPortValue("out", output)
// Spark test-data generation code
// (residue from the source blog post; latest recommended article published 2024-03-01 15:50:22)