import java.util.Properties

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Data source: JDBC
 * @param spark the active SparkSession
 */
def testJDBC(spark: SparkSession): Unit = {
  // Read data from the MySQL instance on machine 1
  println("========================First way to read MySQL================================")
  // By default the read produces a single partition
  val url1: String = "jdbc:mysql://127.0.0.1/test?useUnicode=true&characterEncoding=utf-8" // port defaults to 3306 when omitted
  val table1 = "tb_score"
  val properties1: Properties = new Properties()
  properties1.setProperty("user", "root")
  properties1.setProperty("password", "root")
  properties1.setProperty("driver", "com.mysql.jdbc.Driver") // Connector/J 5.x driver class; use com.mysql.cj.jdbc.Driver with Connector/J 8.x
  val jdbcDF1: DataFrame = spark.read.jdbc(url1, table1, properties1)
  jdbcDF1.show()
  val jdbcDF1New: DataFrame = jdbcDF1.union(jdbcDF1) // union the DataFrame with itself
  jdbcDF1New.show()
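  // union does not repartition: the result simply carries the partitions of
  // both inputs, so jdbcDF1New has 1 + 1 = 2 partitions here.
  println("jdbcDF1New partition count: " + jdbcDF1New.rdd.getNumPartitions)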
  logger.info("First MySQL read finished!")
  println("jdbcDF1 partition count: " + jdbcDF1.rdd.getNumPartitions) // check the parallelism
  import org.apache.spark.TaskContext
  jdbcDF1.foreach(row => {
    println("partitionId:" + TaskContext.get.partitionId)
  })
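  // Aside: a minimal sketch of query pushdown. The "table" argument also
  // accepts a parenthesized subquery with an alias; MySQL evaluates it
  // server-side before Spark fetches the rows. (Illustrative names; tb_score's
  // columns are not listed above, hence SELECT *.)
  val subquery = "(SELECT * FROM tb_score LIMIT 5) t"
  val jdbcDFSub: DataFrame = spark.read.jdbc(url1, subquery, properties1)
  jdbcDFSub.show()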
println("========================第二种读取mysql方式(分区)================================")
//自定义partation数量
val url2: String = "jdbc:mysql://127.0.0.1:3306/test?useUnicode=true&characterEncoding=utf-8"
val table2 = "tb_score"
val colName: String = "userid" //分区字段,需要是数值类的(partitionColumn must be a numeric column from the table in question.),经测试,除整型外,float、double、decimal都是可以的
val lowerBound = 1 //下界,必须为整数
val upperBound = 10 //上界,必须为整数
val numPartions = 5 //最大分区数量,必须为整数,当为0或负整数时,实际的分区数为1;并不一定是最终的分区数量,例如“upperBound - lowerBound< numPartitions”时,实际的分区数量是“upperBound - lowerBound”;
//注意:在分区结果中,分区是连续的,虽然查看每条记录的分区,不是顺序的,但是将rdd保存为文件后,可以看出是顺序的。
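  // For reference, with lowerBound = 1, upperBound = 10 and numPartitions = 5,
  // Spark derives a stride of 2 and fetches each partition with its own WHERE
  // clause, roughly (exact SQL varies by Spark version):
  //   partition 0: userid < 3 or userid is null
  //   partition 1: userid >= 3 AND userid < 5
  //   ...
  //   partition 4: userid >= 9
  // The bounds only shape these ranges; rows outside [1, 10] are still read.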
  val properties2: Properties = new Properties()
  properties2.setProperty("user", "root")
  properties2.setProperty("password", "root")
  properties2.setProperty("driver", "com.mysql.jdbc.Driver")
  val jdbcDF2: DataFrame = spark.read.jdbc(url2, table2, colName, lowerBound, upperBound, numPartitions, properties2)
  jdbcDF2.show()
  println("jdbcDF2 partition count: " + jdbcDF2.rdd.getNumPartitions)
  logger.info("Second MySQL read finished!")
}