RDD interaction with MySQL and HBase

RDD interaction with MySQL:

pom.xml dependency:

<dependency>
     <groupId>mysql</groupId>
     <artifactId>mysql-connector-java</artifactId>
     <version>5.1.27</version>
</dependency>
Inserted rows are not guaranteed to arrive in order, because each partition performs its writes independently on an executor.
Initialize the connection object outside the per-row SQL block and use the partition-wise methods (foreachPartition / mapPartitions), so that each partition opens one connection instead of one per row; a batched sketch of the pattern follows.
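For example, the per-partition pattern with a batched PreparedStatement might look like the following sketch (the df DataFrame, the t_user table and its name/age columns are placeholders; the connection settings mirror the properties shown below):

import java.sql.DriverManager

df.foreachPartition { rows =>
  // initialize the connection once per partition, not once per row
  val conn = DriverManager.getConnection(
    "jdbc:mysql://10.18.2.3:3306/dbname?rewriteBatchedStatements=true",
    "username", "****")
  val ps = conn.prepareStatement("insert into t_user (name, age) values (?, ?)")
  try {
    rows.foreach { r =>
      ps.setString(1, r.getAs[String]("name"))
      ps.setInt(2, r.getAs[Int]("age"))
      ps.addBatch()
    }
    ps.executeBatch() // one batched round trip per partition
  } finally {
    ps.close()
    conn.close()
  }
}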
// Read from the database
import java.util.Properties
val prop = {
    val p = new Properties()
    p.put("driver", "com.mysql.jdbc.Driver")
    p.put("url", "jdbc:mysql://10.18.2.3:3306/dbname?"+
 "useUnicode=true&characterEncoding=utf8&rewriteBatchedStatements=true")
    p.put("user", "username")
    p.put("password", "****")
    p
}
prop.put("dbtable", "tablename")
prop.put("dbtable", "(select * from tablename) tablename_")
import scala.collection.JavaConversions._
spark.read.format("jdbc").options(prop).load()
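For large tables, the standard Spark JDBC options partitionColumn / lowerBound / upperBound / numPartitions split the read across executors; a minimal sketch (the id column and the bounds are placeholders):

val df = spark.read.format("jdbc")
  .options(prop) // reuses the connection properties above
  .option("dbtable", "tablename")
  .option("partitionColumn", "id") // must be a numeric, date or timestamp column
  .option("lowerBound", "1")
  .option("upperBound", "1000000")
  .option("numPartitions", "8")
  .load()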

// Write to the database:
import org.apache.spark.sql.SaveMode
df.write
    .mode(SaveMode.Append)
    .jdbc(prop.getProperty("url"), "tablename", prop)
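The writer also accepts the standard Spark JDBC options; for example, batchsize controls how many rows go into each JDBC batch, and repartitioning caps the number of concurrent connections (the numbers below are placeholders):

df.repartition(8) // at most 8 concurrent connections to MySQL
  .write
  .mode(SaveMode.Append)
  .option("batchsize", "5000") // rows per JDBC batch (default 1000)
  .jdbc(prop.getProperty("url"), "tablename", prop)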

If you are not using the Spark JDBC API, you can write your own utility:

import java.sql.DriverManager
import java.util.Properties

object JDBCUtil extends Serializable {
  def apply(prop: Properties) = {
    new JDBCUtil(prop)
  }
}

class JDBCUtil private (prop: Properties) extends Serializable {
  Class.forName(prop.getProperty("driver"))

  val url = prop.getProperty("url")
  private[this] val user = prop.getProperty("user")
  private[this] val password = prop.getProperty("password")

  def getConnect() = {
    DriverManager.getConnection(url, user, password)
  }

  def getJDBCProp() = {
    val prop_copy = new Properties()
    prop_copy.putAll(prop)
    prop_copy
  }
}

// import org.apache.spark.SparkFiles
object DBUtil extends Serializable {
  private[this] val jdbc_conf = {
    val p = new Properties()
    p.put("driver", "com.mysql.jdbc.Driver")
    p.put("url", "jdbc:mysql://10.18.2.3:3306/dbname?" +
      "useUnicode=true&characterEncoding=utf8&rewriteBatchedStatements=true")
    p.put("user", "username")
    p.put("password", "****")
    p
  }
  //private[this] val jdbc_file = SparkFiles.get("route_analysis.properties")
  private[this] val jdbcUtil = JDBCUtil(jdbc_conf)
  val url = jdbcUtil.url
  def conn = jdbcUtil.getConnect
  def prop = jdbcUtil.getJDBCProp
  import scala.collection.JavaConversions._
  def options: scala.collection.mutable.Map[String, String] = prop
}
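With DBUtil in place, the same properties can also feed the Spark JDBC reader; a minimal sketch (the table name is a placeholder):

val df = spark.read.format("jdbc")
  .options(DBUtil.options) // driver/url/user/password from the shared config
  .option("dbtable", "(select * from tablename) tablename_")
  .load()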

// Usage:
import org.apache.spark.sql.DataFrame

def update(df: DataFrame): Unit = {
  val table = "tablename"
  df.foreachPartition { part =>
    val conn = DBUtil.conn
    part.foreach { r =>
      val col = r.getAs[String]("col")
      // look up the existing record (select id as well, so the update below can use it)
      val sql = s"""select id, col from ${table} where col = '${col}' """
      val stmt_query = conn.createStatement()
      val res = stmt_query.executeQuery(sql)
      val stmt_new = conn.createStatement()
      // if the result set is empty, insert a new record
      if (!res.first) {
        val sql_insert = s"""insert into ${table} (col) values ('${col}') """
        stmt_new.execute(sql_insert)
      } else {
        // if a record exists, decide whether other fields (e.g. ex_station) should be updated
        val id = res.getLong("id")
        val sql_update = s"""update ${table} set col = '${col}' where id = ${id} """
        stmt_new.executeUpdate(sql_update)
      }
      res.close()
      stmt_query.close()
      stmt_new.close()
    }
    conn.close() // close the per-partition connection
  }
  println(s"update of table ${table} finished...")
}
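The string-built SQL above is open to SQL injection and re-parses a statement for every row; a hedged variant of the same per-partition loop using PreparedStatement (same placeholder table and column):

def updateWithPrepared(df: DataFrame): Unit = {
  val table = "tablename"
  df.foreachPartition { part =>
    val conn = DBUtil.conn
    // prepare once per partition, bind the parameters per row
    val query  = conn.prepareStatement(s"select id from ${table} where col = ?")
    val insert = conn.prepareStatement(s"insert into ${table} (col) values (?)")
    try {
      part.foreach { r =>
        val col = r.getAs[String]("col")
        query.setString(1, col)
        val res = query.executeQuery()
        if (!res.first) { // no record yet: insert one
          insert.setString(1, col)
          insert.executeUpdate()
        }
        res.close()
      }
    } finally {
      query.close()
      insert.close()
      conn.close()
    }
  }
}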

Interacting with HBase

Place the hbase-site.xml file under the resources directory, and add these dependencies to pom.xml:

<dependency>
     <groupId>org.apache.hbase</groupId>
     <artifactId>hbase-server</artifactId>
     <version>1.3.1</version>
</dependency>
<dependency>
     <groupId>org.apache.hbase</groupId>
     <artifactId>hbase-client</artifactId>
     <version>1.3.1</version>
</dependency>
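If hbase-site.xml is not on the classpath, the ZooKeeper quorum can also be set programmatically on the Configuration (the host names and port below are placeholders):

import org.apache.hadoop.hbase.HBaseConfiguration

val conf = HBaseConfiguration.create()
conf.set("hbase.zookeeper.quorum", "zk1,zk2,zk3")
conf.set("hbase.zookeeper.property.clientPort", "2181")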
import org.apache.hadoop.hbase._
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.{SparkConf, SparkContext}
object SparkOperateHbase{
    def main(args:Array[String]): Unit ={
        // create the HBase configuration (reads hbase-site.xml from the classpath)
        val conf = HBaseConfiguration.create()
        // set the table to query: student
        conf.set(TableInputFormat.INPUT_TABLE, "student")
        // create an RDD from the student table through the SparkContext
        val sc = new SparkContext(new SparkConf())
        val stuRdd = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
            classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
            classOf[org.apache.hadoop.hbase.client.Result])
        stuRdd.cache() // persist the RDD, since it is used more than once
        // count the rows
        val count = stuRdd.count()
        println("Student rdd count:" + count)
        // iterate over the RDD and print each row
        // each element is a (key, Result) pair; the Result holds the actual row data
        stuRdd.foreach { case (_, result) =>
            // result.getRow returns the row key
            val key = Bytes.toString(result.getRow)
            // result.getValue(columnFamily, column) returns the cell value;
            // getBytes converts the String names into byte arrays
            val name = Bytes.toString(result.getValue("info".getBytes, "name".getBytes))
            val gender = Bytes.toString(result.getValue("info".getBytes, "gender".getBytes))
            val age = Bytes.toString(result.getValue("info".getBytes, "age".getBytes))
            // print the row
            println("Row key:" + key + " Name:" + name + " Gender:" + gender + " Age:" + age)
        }
    }
}
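The (row key, Result) pairs can also be turned into a DataFrame for SQL-style processing; a minimal sketch that reuses stuRdd and the info column family from the example above (assumes a SparkSession named spark):

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().getOrCreate()
import spark.implicits._

val stuDF = stuRdd.map { case (_, result) =>
  val key    = Bytes.toString(result.getRow)
  val name   = Bytes.toString(result.getValue("info".getBytes, "name".getBytes))
  val gender = Bytes.toString(result.getValue("info".getBytes, "gender".getBytes))
  val age    = Bytes.toString(result.getValue("info".getBytes, "age".getBytes))
  (key, name, gender, age)
}.toDF("id", "name", "gender", "age")

stuDF.show()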

Inserting data

import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.{SparkConf, SparkContext}
object HbasePut{
    def main(args:Array[String]): Unit = {
        // create the SparkContext
        val sparkConf = new SparkConf().setAppName("HbasePut").setMaster("local")
        val sc = new SparkContext(sparkConf)
        // connect to the student table in HBase
        val tableName = "student"
        sc.hadoopConfiguration.set(TableOutputFormat.OUTPUT_TABLE, tableName)
        // create the output job (Job.getInstance replaces the deprecated constructor)
        val job = Job.getInstance(sc.hadoopConfiguration)
        // configure the job's output types and format
        job.setOutputKeyClass(classOf[ImmutableBytesWritable])
        job.setOutputValueClass(classOf[Result])
        job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
        // the data to insert; makeRDD is a variant of parallelize
        val indataRdd = sc.makeRDD(Array("3,zhang,M,26", "4,yue,M,27"))
        val rdd = indataRdd.map(_.split(",")).map(arr => {
            val put = new Put(Bytes.toBytes(arr(0))) // the row key
            // add a value to each column of the info column family
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(arr(1)))
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("gender"), Bytes.toBytes(arr(2)))
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("age"), Bytes.toBytes(arr(3)))
            // both elements of the pair are required; the Put carries the data to write
            (new ImmutableBytesWritable, put)
        })
        rdd.saveAsNewAPIHadoopDataset(job.getConfiguration)
    }
} 
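An alternative to the Hadoop output format is to open an HBase connection per partition with the client API, mirroring the foreachPartition pattern used for MySQL above; a hedged sketch against the same student table (assumes a SparkContext named sc):

import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.util.Bytes

val putsRdd = sc.makeRDD(Array("5,li,F,25")).map { line =>
  val arr = line.split(",")
  val put = new Put(Bytes.toBytes(arr(0)))
  put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(arr(1)))
  put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("gender"), Bytes.toBytes(arr(2)))
  put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("age"), Bytes.toBytes(arr(3)))
  put
}

putsRdd.foreachPartition { puts =>
  // one HBase connection and buffered mutator per partition
  val conf = HBaseConfiguration.create()
  val connection = ConnectionFactory.createConnection(conf)
  val mutator = connection.getBufferedMutator(TableName.valueOf("student"))
  try {
    puts.foreach(p => mutator.mutate(p)) // buffered; flushed when the mutator is closed
  } finally {
    mutator.close()
    connection.close()
  }
}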