Inserting into HBase from Spark Streaming

Below is a minimal Spark Streaming job that reads "|"-delimited records from a socket and writes each qualifier/value pair into an HBase table, followed by the sbt build definition.

// java.sql imports are only needed by the commented-out JDBC example at the bottom
import java.sql.{DriverManager, ResultSet}

import org.apache.spark._
import org.apache.spark.streaming._

import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, Put, Table}



object StreamingToHBase {

  // MySQL connection settings (used only by the commented-out JDBC example below)
  val user = "root"
  val password = "root"
  val host = "10.8.8.123"
  val database = "db_1"
  val port = 3306
  val conn_str = "jdbc:mysql://" + host + ":" + port + "/" + database

  // HBase table name, column family, and a default column qualifier
  // (the qualifier actually written comes from the input stream)
  val tablename = "achi"
  val cf = "a"
  val qualified = "name"


  def createTableIfNotExists(conn: Connection, userTable: TableName): Unit = {
    // Obtain an Admin object from the Connection (replaces the old HBaseAdmin)
    val admin = conn.getAdmin

    if (admin.tableExists(userTable)) {
      println("Table exists!")
      //admin.disableTable(userTable)
      //admin.deleteTable(userTable)
    } else {
      val tableDesc = new HTableDescriptor(userTable)
      tableDesc.addFamily(new HColumnDescriptor(cf.getBytes))
      admin.createTable(tableDesc)
      println("Create table success!")
    }
    admin.close()
  }

  def insertHBase(table: Table, cf: String, qualified: String, value: String): Unit = {
    // Note: the row key is hard-coded, so every put writes to the same row "id001"
    val p = new Put("id001".getBytes())
    p.addColumn(cf.getBytes, qualified.getBytes, value.getBytes)
    table.put(p)
  }


  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("Spark Streaming").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(3))

    // Make sure the target table exists before the stream starts
    val hbaseConn = ConnectionFactory.createConnection(HBaseConfiguration.create())
    createTableIfNotExists(hbaseConn, TableName.valueOf(tablename))
    hbaseConn.close()

    // Each input line is expected to look like "qualifier|value", e.g. "name|Tom"
    val lines = ssc.socketTextStream("localhost", 9999)
    val words = lines.map(_.split('|'))

    lines.print()



    words.foreachRDD { rdd =>
      rdd.foreachPartition { pa =>
        // HBase connections are not serializable, so create one per partition
        val conf = HBaseConfiguration.create()
        val conn = ConnectionFactory.createConnection(conf)
        val userTable = TableName.valueOf(tablename)
        val table = conn.getTable(userTable)

        pa.foreach { w =>
          try {
            val beg = System.currentTimeMillis()
            println(w(0) + w(1))
            insertHBase(table, cf, w(0), w(1))
            println("***************************************************************")
            println(" Elapsed: " + (System.currentTimeMillis() - beg) + "ms")
            println("***************************************************************")
          } catch {
            case e: Exception => println("Insert failed: " + e.getMessage)
          }
        }
        table.close()
        conn.close()
      }
    }



    ssc.start()
    ssc.awaitTermination()

  /*
    // Standalone JDBC example (left commented out): read a few rows back from MySQL
    Class.forName("com.mysql.jdbc.Driver").newInstance();
    val conn1 = DriverManager.getConnection(conn_str,user,password)

    try {
      val statement = conn1.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)
      val rs = statement.executeQuery("select * from achi limit 10")
      while (rs.next) {
        println(rs.getString(1))
      }
    }
    catch {
      case _ : Exception => println("===>")
    }
    finally {
      conn1.close
    }

    */
  }
}
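To try the job out locally, one simple approach (a quick sketch, assuming the netcat utility is installed) is to start a socket server on port 9999 and type one qualifier|value pair per line; each 3-second batch is then written to HBase:

nc -lk 9999
name|Tom
name|Jerry

Because the row key is fixed at "id001", every record lands in the same row, so a default scan only shows the most recent value of each cell.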
The sbt build definition (build.sbt):

name := "untitled"

version := "1.0"

scalaVersion := "2.10.6"

libraryDependencies ++= Seq(
  "mysql" % "mysql-connector-java" % "5.1.38",
  "org.apache.spark" %% "spark-core" % "1.5.2",
  "org.apache.spark" %% "spark-streaming" % "1.5.2",
  "org.apache.hbase" % "hbase-client" % "1.1.3",
  "org.apache.hbase" % "hbase-common" % "1.1.3",
  "org.apache.hbase" % "hbase-server" % "1.1.3"
)


resolvers += "OS China" at "http://maven.oschina.net/content/groups/public/"
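After a few batches have been processed, the writes can be verified from the HBase shell (assuming the table was created as above); the scan should show the latest value under column a:name:

hbase shell
scan 'achi'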
