Building a Spark Cluster and Accessing It from Java

Load & Save Functions

  • MySQL integration (add the MySQL driver jar dependency)
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
    <version>5.1.47</version>
</dependency>
  val spark = SparkSession.builder().master("local[2]").appName("readfrommysql").getOrCreate()
    // Read data from the local MySQL database
    spark.read.format("jdbc")
        .option("url","jdbc:mysql://localhost:3306/test")
        .option("dbtable","t_user")
        .option("user","root")
        .option("password","root")
        .load().createTempView("t_user")
    spark.sql("select * from t_user").show()
    spark.sparkContext.setLogLevel("FATAL")
    spark.stop()
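For larger tables, the same jdbc source can split the read across several tasks. The sketch below uses the standard partitionColumn/lowerBound/upperBound/numPartitions options; treating id as the partition column and the bound values shown are assumptions about the t_user table:

    // Parallel JDBC read (sketch) -- the partition column and bounds below are assumptions about t_user
    spark.read.format("jdbc")
        .option("url","jdbc:mysql://localhost:3306/test")
        .option("dbtable","t_user")
        .option("user","root")
        .option("password","root")
        .option("partitionColumn","id")
        .option("lowerBound","1")
        .option("upperBound","1000")
        .option("numPartitions","4")
        .load()
        .show()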
  • Reading CSV data
    // Create the SparkSession
    val spark = SparkSession.builder().master("local[2]").appName("readfromcsv").getOrCreate()
    // Read a CSV file from the local filesystem
    /*    // Option 1
    spark.read.option("inferSchema",true).option("header",true)
      .csv("D://suns/t_user.csv")
      .createTempView("t_user")*/
    // Option 2
    spark.read.format("csv")
      .option("sep",",")
      .option("inferSchema",true)
      .option("header",true)
      .load("D://suns/t_user.csv").createTempView("t_user")
    spark.sql("select * from t_user").show()
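    // Optional check (sketch): print the column types that inferSchema derived from the CSV
    spark.table("t_user").printSchema()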
    spark.sparkContext.setLogLevel("FATAL")
    spark.stop()
  • Reading JSON data
    // Create the SparkSession
    val spark = SparkSession.builder().master("local[2]").appName("readfromjson").getOrCreate()
    // Read a JSON file from the local filesystem
    spark.read.format("json")
        .load("D://suns/person.json")
      // Register a temp view
        .createTempView("t_people")
    spark.sql("select * from t_people").show()
    // Suppress log output
    spark.sparkContext.setLogLevel("FATAL")
    spark.stop()
  • Writing data to MySQL
 val spark = SparkSession.builder().master("local[2]").appName("writetomysql").getOrCreate()
    val personRDD = spark.sparkContext.parallelize(Array("14 tom 19", "15 jerry 18", "16 kitty 20"))
      .map(_.split(" "))
      .map(tokens=>Row(tokens(0).toInt,tokens(1),tokens(2).toInt))
    // Specify each field's schema directly with StructType
    val schema = StructType(
      List(
        StructField("id",IntegerType,true),
        StructField("name",StringType,true),
        StructField("age",IntegerType,true)
      )
    )
    val props = new Properties()
    props.put("user", "root")
    props.put("password", "root")

    spark.createDataFrame(personRDD,schema)
      .write.mode("append")
      .jdbc("jdbc:mysql://localhost:3306/test",
        "t_user",props)
  • Saving as JSON
  val spark = SparkSession.builder()
      .appName("writetojson")
      .master("local[2]")
      .getOrCreate()
    // Prepare the RDD
    val rdd = spark.sparkContext.parallelize(Array("14 tom 9", "15 lisi 7", "16 suns 32"))
      .map(_.split("\\s+"))
      .map(tokens => {
        Row(tokens(0).toInt, tokens(1), tokens(2).toInt)
      })

    // Specify the field types
    val fields = StructField("id", IntegerType) ::
      StructField("name", StringType) ::
      StructField("age", IntegerType) :: Nil
    // Create a DataFrame and save it to the local filesystem as JSON
    spark.createDataFrame(rdd,StructType(fields))
      .write.mode("append")
      .format("json")
      // Note: the "header" option only applies to CSV; the JSON writer ignores it
      .option("header",true)
      .save("D://suns/test")
    spark.sparkContext.setLogLevel("FATAL")
    spark.stop()
  • Saving as CSV
val personRDD = spark.sparkContext.parallelize(Array("14 tom 19", "15 jerry 18", "16 kitty 20"))
.map(_.split(" "))
.map(tokens=>Row(tokens(0).toInt,tokens(1),tokens(2).toInt))
// Specify each field's schema directly with StructType
val schema = StructType(
    List(
        StructField("id",IntegerType,true),
        StructField("name",StringType,true),
        StructField("age",IntegerType,true)
    )
)

spark.createDataFrame(personRDD,schema)
.write.mode("append")
.format("csv")
.option("header", "true")//存储表头
.save("file:///D:/csv")
  • Generating partitioned output files
val spark = SparkSession.builder()
.appName("hellosql")
.master("local[10]")
.getOrCreate()

spark.read.format("csv")
.option("sep", ",")
.option("inferSchema", "true")
.option("header", "true")
.load("file:///D:/t_user.csv").createTempView("t_user")

spark.sql("select * from t_user")
.write.format("json")
.mode(SaveMode.Overwrite)
.partitionBy("id")
.save("file:///D:/partitions")

spark.stop()
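Reading the output back shows what partitionBy produced: one id=<value> subdirectory per distinct id, which Spark's partition discovery turns back into an id column. A minimal sketch, assuming the same local path:

val spark = SparkSession.builder()
.appName("readpartitions")
.master("local[10]")
.getOrCreate()

// Partition discovery (sketch): the id=... directory names come back as an id column
spark.read.format("json")
.load("file:///D:/partitions")
.show()

spark.stop()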

Resolving Jar Dependencies

  • Test case for jar conflicts

    package com.baizhi.demo01
    
    import org.apache.kafka.common.serialization.StringDeserializer
    import org.apache.spark.SparkConf
    import org.apache.spark.streaming.kafka010.ConsumerStrategies._
    import org.apache.spark.streaming.kafka010.{KafkaUtils, LocationStrategies}
    import org.apache.spark.streaming.{Seconds, State, StateSpec, StreamingContext}
    import redis.clients.jedis.Jedis
    
    import scala.collection.JavaConverters._
    
    object WordCountTest {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setMaster("spark://CentOS:7077").setAppName("KafkaStreamWordCount")
        val ssc = new StreamingContext(conf, Seconds(1))
        // Checkpoint directory for state persistence
        ssc.checkpoint("hdfs:///checkpoints01")
        // Kafka consumer parameters
        val kafkaParams = Map[String, Object](
          "bootstrap.servers" -> "CentOS:9092",
          "key.deserializer" -> classOf[StringDeserializer],
          "value.deserializer" -> classOf[StringDeserializer],
          "group.id" -> "group1",
          "enable.auto.commit" -> (false: java.lang.Boolean)
        )
        // Read user data from HDFS
        // e.g. 001 zhangsan
        val cacheRDD = ssc.sparkContext.textFile("hdfs:///userdata")
          // Split on whitespace
          .map(item => (item.split("\\s+")(0), item.split("\\s+")(1)))
          // Deduplicate
          .distinct()
          // Cache for reuse in every batch
          .cache()
        // Read data from Kafka topic01
        // e.g. 001 apple 4.5 2
        KafkaUtils.createDirectStream(ssc,
          LocationStrategies.PreferConsistent,
          Subscribe[String,String](Array("topic01"),kafkaParams))
          .map(record => record.value())
          // Split the record
          .map(value=>{
            val tokens = value.split("\\s+")
            (tokens(0),tokens(2).toDouble * tokens(3).toInt) // (userId, amount = price * quantity)
          }).transform(rdd=>{
          // Join the batch RDD with cacheRDD
          rdd.join(cacheRDD) // 001 -> (amount, username)
          // Update the running total per user
        }).mapWithState(StateSpec.function((k:String,v:Option[(Double,String)],state:State[(String,Double)])=>{
          var username=v.get._2
          var historyCost:(String,Double)=("",0.0)
          if(state.exists()){
            historyCost=state.getOption().getOrElse((username,0.0))
          }
          var currentValue = v.get
          if(currentValue != null){
            state.update((currentValue._2,currentValue._1+historyCost._2))
          }
          (k,username+":"+(currentValue._1+historyCost._2))
        })).foreachRDD(rdd=>{
          // Write the results to Redis, one connection per partition
          rdd.foreachPartition(list=>{
            val jedis = new Jedis("CentOS",6379)
            // Convert the partition iterator to a java.util.Map for the Jedis API
            var jMap= list.toMap.asJava
            // Batch the write with a pipeline
            val pipeline = jedis.pipelined()
            pipeline.hmset("usercost",jMap)
            pipeline.sync()
            // Close the Redis connection
            jedis.close()
          })
        })
    
        ssc.sparkContext.setLogLevel("FATAL") // suppress log output
        ssc.start()
        ssc.awaitTermination()
      }
    
    }
    
    
  • Use the shade plugin to build a fat jar (bundles the Kafka and Jedis classes that the Spark distribution does not ship with)

<plugin>
    <groupId>net.alchim31.maven</groupId>
    <artifactId>scala-maven-plugin</artifactId>
    <version>4.0.1</version>
    <executions>
        <execution>
            <id>scala-compile-first</id>
            <phase>process-resources</phase>
            <goals>
                <goal>add-source</goal>
                <goal>compile</goal>
            </goals>
        </execution>
    </executions>
</plugin>
<plugin>
    <groupId>org.apache.maven.plugins</groupId>
    <artifactId>maven-shade-plugin</artifactId>
    <version>2.4.3</version>
    <executions>
        <execution>
            <phase>package</phase>
            <goals>
                <goal>shade</goal>
            </goals>
            <configuration>
                <filters>
                    <filter>
                        <artifact>*:*</artifact>
                        <excludes>
                            <exclude>META-INF/*.SF</exclude>
                            <exclude>META-INF/*.DSA</exclude>
                            <exclude>META-INF/*.RSA</exclude>
                        </excludes>
                    </filter>
                </filters>
            </configuration>
        </execution>
    </executions>
</plugin>
  • Use --packages to resolve jar dependencies (requires network access)
[root@CentOS spark-2.4.3]# ./bin/spark-submit --master spark://CentOS:7077 --deploy-mode client  --class com.baizhi.demo10parkStreamWordCounts --total-executor-cores 4 --packages 'org.apache.spark:spark-streaming-kafka-0-10_2.11:2.4.3,redis.clients:jedis:2.9.0'  /root/original-sparkstream-1.0-SNAPSHOT.jar

Note that neither of the two approaches above resolves the MySQL driver dependency.

  • Using spark.executor.extraClassPath and spark.driver.extraClassPath does resolve the MySQL dependency
[root@CentOS spark-2.4.3]# ./bin/spark-submit --master spark://CentOS:7077 --deploy-mode client  --class com.baizhi.demo10parkStreamWordCounts --total-executor-cores 4 --packages 'org.apache.spark:spark-streaming-kafka-0-10_2.11:2.4.3,redis.clients:jedis:2.9.0' --conf spark.executor.extraClassPath=/root/mysql-xxx.jar --conf  spark.driver.extraClassPath=/root/mysql-xxx.jar  /root/original-sparkstream-1.0-SNAPSHOT.jar

If passing these on every submit is cumbersome, the parameters can also be set in spark-defaults.conf:

spark.executor.extraClassPath=/root/.ivy2/jars/*
spark.driver.extraClassPath=/root/.ivy2/jars/*

Building a Standalone Cluster

Physical resources: three CentOS 6.5 64-bit hosts (CentOSA/B/C), 2 GB of RAM each

Hostname   IP               Roles
CentOSA    192.168.111.134  Zookeeper, NameNode, DataNode, JournalNode, zkfc, Master (Spark), Worker (Spark), Kafka
CentOSB    192.168.111.135  Zookeeper, NameNode, DataNode, JournalNode, zkfc, Master (Spark), Worker (Spark), Kafka
CentOSC    192.168.111.136  Zookeeper, DataNode, JournalNode, Master (Spark), Worker (Spark), Kafka


Environment Preparation

  • Hostname-to-IP mappings
[root@CentOSX ~]# vi /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.111.134 CentOSA
192.168.111.135 CentOSB
192.168.111.136 CentOSC
  • Raise the CentOS process and open-file limits (reboot required)
[root@CentOS ~]# vi /etc/security/limits.conf
* soft nofile 204800
* hard nofile 204800
* soft nproc 204800
* hard nproc 204800
[root@CentOS ~]# reboot
  • Disable the firewall
[root@CentOSX ~]# service iptables stop
iptables: Setting chains to policy ACCEPT: filter          [  OK  ]
iptables: Flushing firewall rules:                         [  OK  ]
iptables: Unloading modules:                               [  OK  ]
[root@CentOSX ~]# chkconfig iptables off
  • Synchronize the clocks
[root@CentOSX ~]# yum install -y ntp
[root@CentOSX ~]# ntpdate ntp.ubuntu.com
[root@CentOSX ~]# clock -w
  • Passwordless SSH authentication
[root@CentOSX ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
2c:4b:ba:46:66:27:18:74:11:a7:96:f5:e5:96:97:d5 root@CentOSA
The key's randomart image is:
+--[ RSA 2048]----+
|    ooo   .   .. |
|  . .= . o . o  E|
| . .+   . + o    |
|  ..   . . .     |
|   o  o S        |
|  . =o.o         |
|   +.o.          |
|    ..           |
|   ..            |
+-----------------+
[root@CentOSX ~]# ssh-copy-id CentOSA
[root@CentOSX ~]# ssh-copy-id CentOSB
[root@CentOSX ~]# ssh-copy-id CentOSC

Building the ZooKeeper Cluster

  • Install the JDK (jdk-8u191-linux-x64.rpm)
[root@CentOSX ~]# rpm -ivh jdk-8u191-linux-x64.rpm
[root@CentOSX ~]# vi .bashrc
JAVA_HOME=/usr/java/latest
PATH=$PATH:$JAVA_HOME/bin
CLASSPATH=.
export JAVA_HOME
export PATH
export CLASSPATH
[root@CentOSX ~]# source .bashrc
[root@CentOSX ~]# java -version
java version "1.8.0_191"
Java(TM) SE Runtime Environment (build 1.8.0_191-b12)
Java HotSpot(TM) 64-Bit Server VM (build 25.191-b12, mixed mode)
  • Install and configure the ZooKeeper cluster
[root@CentOSX ~]# tar -zxf zookeeper-3.4.6.tar.gz -C /usr/
[root@CentOSX ~]# mkdir /root/zkdata

[root@CentOSA ~]# echo 1 >> /root/zkdata/myid
[root@CentOSB ~]# echo 2 >> /root/zkdata/myid
[root@CentOSC ~]# echo 3 >> /root/zkdata/myid
[root@CentOSX ~]# vi /usr/zookeeper-3.4.6/conf/zoo.cfg
(locate and set the following)
tickTime=2000
dataDir=/root/zkdata
clientPort=2181
initLimit=5
syncLimit=2

server.1=CentOSA:2887:3887
server.2=CentOSB:2887:3887
server.3=CentOSC:2887:3887

[root@CentOSX ~]# /usr/zookeeper-3.4.6/bin/zkServer.sh start zoo.cfg
[root@CentOSX ~]# /usr/zookeeper-3.4.6/bin/zkServer.sh status zoo.cfg
JMX enabled by default
Using config: /usr/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: `follower|leader`

Building the Kafka Cluster

Installation and Configuration

[root@CentOSX ~]# tar -zxf kafka_2.11-2.2.0.tgz -C /usr
[root@CentOSX ~]# vi /usr/kafka_2.11-2.2.0/config/server.properties
############################# Server Basics #############################
broker.id=[0|1|2]
############################# Socket Server Settings #############################
listeners=PLAINTEXT://CentOS[A|B|C]:9092
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/usr/kafka-logs
############################# Zookeeper #############################
zookeeper.connect=CentOSA:2181,CentOSB:2181,CentOSC:2181

Start the Service

[root@CentOSX ~]# cd /usr/kafka_2.11-2.2.0/
[root@CentOSX kafka_2.11-2.2.0]# ./bin/kafka-server-start.sh -daemon config/server.properties

Test the Environment

  • Create topic01
[root@CentOSA kafka_2.11-2.2.0]# ./bin/kafka-topics.sh \
                                --zookeeper CentOSA:2181,CentOSB:2181,CentOSC:2181 \
                                --create \
                                --topic topic01 \
                                --partitions 3 \
                                --replication-factor 3
Created topic topic01.

Here, partitions sets the number of log partitions for the topic, and replication-factor sets how many replicas are kept of each partition's log.

  • Subscribe a consumer to topic01
[root@CentOSA kafka_2.11-2.2.0]# ./bin/kafka-console-consumer.sh \
                                --bootstrap-server CentOSA:9092,CentOSB:9092,CentOSC:9092 \
                                --topic topic01
  • Produce messages
[root@CentOSB kafka_2.11-2.2.0]# ./bin/kafka-console-producer.sh --broker-list CentOSA:9092,CentOSB:9092,CentOSC:9092 --topic topic01
> Hello Kafka

Stopping the Kafka Service

Modify kafka-server-stop.sh so it locates the broker PID via jps:

#!/bin/sh
SIGNAL=${SIGNAL:-TERM}
#PIDS=$(ps ax | grep -i 'kafka\.Kafka' | grep java | grep -v grep | awk '{print $1}')
PIDS=$(jps | grep 'Kafka' | awk '{print $1}')
if [ -z "$PIDS" ]; then
  echo "No kafka server to stop"
  exit 1
else
  kill -s $SIGNAL $PIDS
fi

Hadoop NameNode HA

  • Extract Hadoop and configure HADOOP_HOME
[root@CentOSX ~]# tar -zxf hadoop-2.9.2.tar.gz -C /usr/
[root@CentOSX ~]# vi .bashrc
HADOOP_HOME=/usr/hadoop-2.9.2
JAVA_HOME=/usr/java/latest
PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
CLASSPATH=.
export JAVA_HOME
export PATH
export CLASSPATH
export HADOOP_HOME
[root@CentOSX ~]# source .bashrc
  • Configure core-site.xml
<!-- NameNode nameservice ID -->
<property>		
      <name>fs.defaultFS</name>		
      <value>hdfs://mycluster</value>	
</property>
<property>		
     <name>hadoop.tmp.dir</name>		
     <value>/usr/hadoop-2.9.2/hadoop-${user.name}</value>    
</property>
<property>		
     <name>fs.trash.interval</name>		
     <value>30</value>    
</property>
<!-- Rack-awareness script -->
<property>		
     <name>net.topology.script.file.name</name>		
     <value>/usr/hadoop-2.9.2/etc/hadoop/rack.sh</value>    
</property>
<!-- ZooKeeper quorum used for HA -->
<property>   
	<name>ha.zookeeper.quorum</name>
	<value>CentOSA:2181,CentOSB:2181,CentOSC:2181</value> 
</property>
<!-- Fencing method and SSH private key location -->
<property>
     <name>dfs.ha.fencing.methods</name>
     <value>sshfence</value>
</property>
<property>
     <name>dfs.ha.fencing.ssh.private-key-files</name>
     <value>/root/.ssh/id_rsa</value>
</property>

  • Configure the rack-awareness script
[root@CentOSX ~]# touch /usr/hadoop-2.9.2/etc/hadoop/rack.sh
[root@CentOSX ~]# chmod u+x /usr/hadoop-2.9.2/etc/hadoop/rack.sh
[root@CentOSX ~]# vi /usr/hadoop-2.9.2/etc/hadoop/rack.sh
while [ $# -gt 0 ] ; do
	  nodeArg=$1
	  exec</usr/hadoop-2.9.2/etc/hadoop/topology.data
	  result="" 
	  while read line ; do
		ar=( $line ) 
		if [ "${ar[0]}" = "$nodeArg" ] ; then
		  result="${ar[1]}"
		fi
	  done 
	  shift 
	  if [ -z "$result" ] ; then
		echo -n "/default-rack"
	  else
		echo -n "$result "
	  fi
done
[root@CentOSX ~]# vi /usr/hadoop-2.9.2/etc/hadoop/topology.data
192.168.111.134 /rack01
192.168.111.135 /rack02
192.168.111.136 /rack03
  • Configure hdfs-site.xml
<property>
	<name>dfs.replication</name>
	<value>3</value>
</property> 
<!-- Enable automatic failover -->
<property>
	<name>dfs.ha.automatic-failover.enabled</name>
	<value>true</value>
</property>
<!-- Resolve the mycluster nameservice declared in core-site.xml -->
<property>
	<name>dfs.nameservices</name>
	<value>mycluster</value>
</property>
<property>
	<name>dfs.ha.namenodes.mycluster</name>
	<value>nn1,nn2</value>
</property>
<property>
	<name>dfs.namenode.rpc-address.mycluster.nn1</name>
	<value>CentOSA:9000</value>
</property>
<property>
	 <name>dfs.namenode.rpc-address.mycluster.nn2</name>
	 <value>CentOSB:9000</value>
</property>
<!-- JournalNode (shared edits) locations -->
<property>
  <name>dfs.namenode.shared.edits.dir</name>
  <value>qjournal://CentOSA:8485;CentOSB:8485;CentOSC:8485/mycluster</value>
</property>
<!-- Failover proxy provider class used by clients -->
<property>
	<name>dfs.client.failover.proxy.provider.mycluster</name>
	<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>

  • Configure slaves
CentOSA
CentOSB
CentOSC
  • Start HDFS (first-time cluster initialization)
[root@CentOSX ~]# hadoop-daemon.sh start journalnode   (wait about 10 seconds)
[root@CentOSA ~]# hdfs namenode -format
[root@CentOSA ~]# hadoop-daemon.sh start namenode
[root@CentOSB ~]# hdfs namenode -bootstrapStandby
[root@CentOSB ~]# hadoop-daemon.sh start namenode
# Register the NameNode HA state in ZooKeeper; run this on either CentOSA or CentOSB only
[root@CentOSA|B ~]# hdfs zkfc -formatZK
[root@CentOSA ~]# hadoop-daemon.sh start zkfc
[root@CentOSB ~]# hadoop-daemon.sh start zkfc
[root@CentOSX ~]# hadoop-daemon.sh start datanode

Building Spark Standalone

  • Install and configure Spark
[root@CentOSX ~]# tar -zxf spark-2.4.3-bin-without-hadoop.tgz -C /usr/
[root@CentOSX ~]# mv /usr/spark-2.4.3-bin-without-hadoop/ /usr/spark-2.4.3
[root@CentOSX ~]# cd /usr/spark-2.4.3/conf/
[root@CentOS conf]# mv spark-env.sh.template spark-env.sh
[root@CentOS conf]# mv slaves.template slaves
[root@CentOS conf]# vi slaves
CentOSA
CentOSB
CentOSC
[root@CentOS conf]# vi spark-env.sh
SPARK_WORKER_CORES=4
SPARK_WORKER_MEMORY=2g
LD_LIBRARY_PATH=/usr/hadoop-2.9.2/lib/native
SPARK_DIST_CLASSPATH=$(hadoop classpath)
SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=CentOSA:2181,CentOSB:2181,CentOSC:2181 -Dspark.deploy.zookeeper.dir=/spark"

export SPARK_WORKER_CORES
export SPARK_WORKER_MEMORY
export LD_LIBRARY_PATH
export SPARK_DIST_CLASSPATH
export SPARK_DAEMON_JAVA_OPTS
  • Start the Spark services
[root@CentOSX ~]# cd /usr/spark-2.4.3/
[root@CentOSA spark-2.4.3]# ./sbin/start-all.sh
[root@CentOSB spark-2.4.3]# ./sbin/start-master.sh
[root@CentOSC spark-2.4.3]# ./sbin/start-master.sh

  • Test the Spark environment
[root@CentOSA spark-2.4.3]# ./bin/spark-shell --master spark://CentOSA:7077,CentOSB:7077,CentOSC:7077 --total-executor-cores 6
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
Spark context Web UI available at http://CentOSA:4040
Spark context available as 'sc' (master = spark://CentOSA:7077,CentOSB:7077,CentOSC:7077, app id = app-20190708162400-0000).
Spark session available as 'spark'.
Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/  '_/
   /___/ .__/\_,_/_/ /_/\_\   version 2.4.3
      /_/

Using Scala version 2.11.12 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_191)
Type in expressions to have them evaluated.
Type :help for more information.

scala>
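A quick sanity check from the shell confirms that jobs run across the cluster; a minimal sketch (the result is just 2 × the sum of 1..1000):

scala> sc.parallelize(1 to 1000, 6).map(_ * 2).reduce(_ + _)
res0: Int = 1001000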

