Spark SQL: Reading Data from a DB via JDBC (Scala)

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.DataTypes
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.StructField
import java.util.ArrayList
import java.util.HashMap
import java.sql.DriverManager

/**
 * @author three
 */
object JDBCDataSource {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("JDBCDataSource").setMaster("local[1]")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // Reuse one options map for both tables; only "dbtable" changes between reads
    val options = new HashMap[String, String]()
    options.put("url", "jdbc:mysql://192.168.5.111:3306/testdb")
    options.put("user", "spark")
    options.put("password", "spark2016")

    options.put("dbtable", "student_info")
    val studentInfosDF = sqlContext.read.format("jdbc").options(options).load()

    options.put("dbtable", "student_score")
    val studentScoresDF = sqlContext.read.format("jdbc").options(options).load()

    // Convert the two DataFrames into pair RDDs keyed on name, then join them
    // (the join requires a shared key)
    val rdd1 = studentInfosDF.map(x => (x.getString(0), x.getInt(1)))
    val rdd2 = studentScoresDF.map(x => (x.getString(0), x.getInt(1)))
    val studentsRDD = rdd1.join(rdd2)

    // Convert the joined pair RDD ((name, (age, score))) into an RDD[Row],
    // keeping only students with a score above 80
    val studentRowsRDD = studentsRDD.map(x => (x._1, x._2._1, x._2._2))
    val filteredStudentRowsRDD = studentRowsRDD.filter(_._3 > 80).map(x => Row(x._1, x._2, x._3))

    // Build an explicit schema and convert the RDD[Row] back into a DataFrame
    val structFields = new ArrayList[StructField]()
    structFields.add(DataTypes.createStructField("name", DataTypes.StringType, true))
    structFields.add(DataTypes.createStructField("age", DataTypes.IntegerType, true))
    structFields.add(DataTypes.createStructField("score", DataTypes.IntegerType, true))

    val structType = DataTypes.createStructType(structFields)
    val studentsDF = sqlContext.createDataFrame(filteredStudentRowsRDD, structType)

    val rows = studentsDF.collect()
    for (row <- rows) {
      println(row)
    }

    // Save the DataFrame back into a MySQL table.
    // Note: this opens one connection per row, which is fine for a demo
    // but expensive at scale (see the alternative after the listing).
    studentsDF.foreach { row =>
      val sql = s"insert into good_student_info values('${row.getString(0)}', ${row.getInt(1)}, ${row.getInt(2)})"
      Class.forName("com.mysql.jdbc.Driver")
      val conn = DriverManager.getConnection("jdbc:mysql://192.168.5.111:3306/testdb", "spark", "spark2016")
      val stat = conn.createStatement()
      stat.executeUpdate(sql)

      if (stat != null) {
        stat.close()
      }
      if (conn != null) {
        conn.close()
      }
    }
  }
}
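
A note on the write path above: opening a JDBC connection per row works for a demo but is expensive on real data. On Spark 1.4+, the DataFrameWriter can push the whole DataFrame to MySQL in one distributed write; the following is a minimal sketch, assuming the same table and credentials as in the example:

import java.util.Properties
import org.apache.spark.sql.SaveMode

// JDBC connection properties for the same MySQL instance used above
val props = new Properties()
props.put("user", "spark")
props.put("password", "spark2016")
props.put("driver", "com.mysql.jdbc.Driver")

// Append the filtered rows to good_student_info in one call,
// letting Spark manage connections per partition
studentsDF.write
  .mode(SaveMode.Append)
  .jdbc("jdbc:mysql://192.168.5.111:3306/testdb", "good_student_info", props)

The read side has a matching shortcut on Spark 1.4+, sqlContext.read.jdbc(url, table, props), which avoids building the options map by hand. If you stay with manual JDBC, prefer a PreparedStatement inside foreachPartition: it reuses one connection per partition and avoids interpolating values directly into the SQL string.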

Reposted from: https://www.cnblogs.com/TendToBigData/p/10501296.html
