The cluster layout is as follows:
192.168.58.11 spark01
192.168.58.12 spark02
192.168.58.13 spark03
Spark version: spark-2.1.0-bin-hadoop2.7
Flume version: apache-flume-1.7.0-bin
Flume configuration (a1.conf):
# Flume launch command
#bin/flume-ng agent -n a1 -f conf/a1.conf -c conf -Dflume.root.logger=INFO,console
a1.channels = c1
a1.sinks = k1
a1.sources = r1
a1.sources.r1.type = spooldir
a1.sources.r1.spoolDir = /opt/kevin/log
a1.channels.c1.type = memory
a1.channels.c1.capacity = 100000
a1.channels.c1.transactionCapacity = 100000
a1.sinks.k1.type = org.apache.spark.streaming.flume.sink.SparkSink
a1.sinks.k1.hostname = 192.168.58.11
a1.sinks.k1.port = 1234
# Wire source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
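In pull mode the SparkSink does not push data; it buffers events from the channel, and the Spark application connects to it as a client and polls batches. The hostname and port configured on the sink must therefore match the arguments passed to FlumeUtils.createPollingStream, as in this excerpt from the program below:

val flumeDStream = FlumeUtils.createPollingStream(sparkStream, "192.168.58.11", 1234, StorageLevel.MEMORY_AND_DISK_SER_2)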
Spark Streaming program:
package com.kk.sparkstreaming.flume
import org.apache.log4j.Level
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.flume.FlumeUtils
import org.apache.spark.storage.StorageLevel
import java.sql.DriverManager
import java.sql.PreparedStatement
import java.sql.Connection
object FlumePull {
def main(args: Array[String]): Unit = {
// Reduce log output
// Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
//Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
// Create the StreamingContext: StreamingContext(conf: SparkConf, batchDuration: Duration)
// batchDuration is the batch interval
val sparkConf = new SparkConf().setAppName("FlumePull")
val sparkStream = new StreamingContext(sparkConf, Seconds(3)) // one batch every 3 seconds
// Pull mode: Spark connects to the SparkSink and polls it for batches of events
val flumeDStream = FlumeUtils.createPollingStream(sparkStream, "192.168.58.11", 1234, StorageLevel.MEMORY_AND_DISK_SER_2)
// Receive data from Flume:
// e is one event (SparkFlumeEvent) received from Flume
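// e.event exposes the underlying Avro event: getBody returns the payload as a
// java.nio.ByteBuffer, and getHeaders returns the Flume headers of the event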
val data = flumeDStream.map { e =>
  new String(e.event.getBody.array())
}
data.print()
// Assumes comma-separated log lines; field 1 is the value to count
val datas = data.map(line => {
  val fields: Array[String] = line.split(",")
  val con = fields(1)
  (con, 1)
})
datas.print()
datas.foreachRDD(cs => {
  cs.foreachPartition(partition => {
    // Connection and PreparedStatement are not serializable, so they must be
    // created here inside foreachPartition, on the executor, rather than on
    // the driver where the closure is built
    var conn: Connection = null
    var ps: PreparedStatement = null
    try {
      Class.forName("com.mysql.jdbc.Driver")
      conn = DriverManager.getConnection("jdbc:mysql://192.168.58.14:3306/storm?useUnicode=true&characterEncoding=utf8", "root", "kevin")
      ps = conn.prepareStatement("insert into result values(?,?)")
      partition.foreach(s => {
        ps.setString(1, s._1)
        ps.setInt(2, s._2)
        ps.executeUpdate()
      })
    } catch {
      case t: Throwable => t.printStackTrace()
    } finally {
      if (ps != null) ps.close()
      if (conn != null) conn.close()
    }
  })
})
sparkStream.start()
sparkStream.awaitTermination()
}
}
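Note that the program maps each event to (field, 1) but never aggregates, so one row per event is inserted with a count of 1. If per-batch totals are wanted instead, a reduceByKey step could be added before the output; a minimal sketch:

// Sum the counts for identical keys within each 3-second batch
val counts = datas.reduceByKey(_ + _)
counts.print()

With that change, the foreachRDD above would write one row per distinct key per batch rather than one row per event.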
pom.xml:
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<spark.version>2.1.0</spark.version>
<scala.version>2.11.8</scala.version>
</properties>
<dependencies>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${scala.version}</version>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-compiler</artifactId>
<version>${scala.version}</version>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-reflect</artifactId>
<version>${scala.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-flume_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
</dependencies>
Required jar: spark-streaming-flume-sink_2.11-2.1.0.jar (the Scala suffix and version must match the build)
To run on the cluster, copy spark-streaming-flume-sink_2.11-2.1.0.jar into Flume's lib directory, together with the Scala library jar the sink depends on; the MySQL driver jar must also be copied into Spark's jars directory on every machine in the cluster.
Testing:
1. Start Flume first
2. Start the Spark Streaming program
3. Copy a log file into /opt/kevin/log (the spooldir source ingests each new file once and by default renames it with a .COMPLETED suffix)
Notes: mind the serialization issue (JDBC objects must be created inside foreachPartition, as in the code above) and the startup order (in pull mode, the Flume agent hosting the SparkSink must be running before the Spark Streaming program starts polling it).
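A minimal sketch of the pitfall behind the serialization note, using the same JDBC objects as the program above:

// Wrong: the Connection is created on the driver and captured by the closure;
// java.sql.Connection is not serializable, so task serialization fails
// val conn = DriverManager.getConnection(url, user, password)
// datas.foreachRDD(rdd => rdd.foreachPartition(p => p.foreach { ... }))

// Right: create the Connection inside foreachPartition, so it is constructed
// on the executor that actually uses it, as the program above does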