Project layout
pom.xml
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.sid.spark</groupId>
    <artifactId>spark-train</artifactId>
    <version>1.0</version>
    <inceptionYear>2008</inceptionYear>

    <properties>
        <scala.version>2.11.8</scala.version>
        <kafka.version>0.9.0.0</kafka.version>
        <spark.version>2.2.0</spark.version>
        <hadoop.version>2.9.0</hadoop.version>
        <hbase.version>1.4.4</hbase.version>
    </properties>

    <repositories>
        <repository>
            <id>scala-tools.org</id>
            <name>Scala-Tools Maven2 Repository</name>
            <url>http://scala-tools.org/repo-releases</url>
        </repository>
    </repositories>

    <pluginRepositories>
        <pluginRepository>
            <id>scala-tools.org</id>
            <name>Scala-Tools Maven2 Repository</name>
            <url>http://scala-tools.org/repo-releases</url>
        </pluginRepository>
    </pluginRepositories>

    <dependencies>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>${kafka.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
            <exclusions>
                <exclusion>
                    <artifactId>servlet-api</artifactId>
                    <groupId>javax.servlet</groupId>
                </exclusion>
            </exclusions>
        </dependency>
        <!--
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>${hbase.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>${hbase.version}</version>
        </dependency>
        -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-flume_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-flume-sink_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>net.jpountz.lz4</groupId>
            <artifactId>lz4</artifactId>
            <version>1.3.0</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.31</version>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
            <version>3.5</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flume.flume-ng-clients</groupId>
            <artifactId>flume-ng-log4jappender</artifactId>
            <version>1.6.0</version>
        </dependency>
    </dependencies>

    <build>
        <sourceDirectory>src/main/scala</sourceDirectory>
        <testSourceDirectory>src/test/scala</testSourceDirectory>
        <plugins>
            <plugin>
                <groupId>org.scala-tools</groupId>
                <artifactId>maven-scala-plugin</artifactId>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>testCompile</goal>
                        </goals>
                    </execution>
                </executions>
                <configuration>
                    <scalaVersion>${scala.version}</scalaVersion>
                    <args>
                        <!-- Spark 2.2 runs on Java 8; the archetype default jvm-1.5 is obsolete for Scala 2.11 -->
                        <arg>-target:jvm-1.8</arg>
                    </args>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-eclipse-plugin</artifactId>
                <configuration>
                    <downloadSources>true</downloadSources>
                    <buildcommands>
                        <buildcommand>ch.epfl.lamp.sdt.core.scalabuilder</buildcommand>
                    </buildcommands>
                    <additionalProjectnatures>
                        <projectnature>ch.epfl.lamp.sdt.core.scalanature</projectnature>
                    </additionalProjectnatures>
                    <classpathContainers>
                        <classpathContainer>org.eclipse.jdt.launching.JRE_CONTAINER</classpathContainer>
                        <classpathContainer>ch.epfl.lamp.sdt.launching.SCALA_CONTAINER</classpathContainer>
                    </classpathContainers>
                </configuration>
            </plugin>
        </plugins>
    </build>

    <reporting>
        <plugins>
            <plugin>
                <groupId>org.scala-tools</groupId>
                <artifactId>maven-scala-plugin</artifactId>
                <configuration>
                    <scalaVersion>${scala.version}</scalaVersion>
                </configuration>
            </plugin>
        </plugins>
    </reporting>
</project>
1. Generate logs with log4j
Collecting Log4j logs relies on Flume's Log4jAppender, which is documented on the Flume website.
The appender ships Log4j events into a Flume agent through an avro source.
Clients using the appender must have flume-ng-sdk on the classpath (the Flume documentation marks the appender's required properties in bold).
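The pom above already declares flume-ng-log4jappender, which normally pulls the SDK in transitively. If it does not in your setup, a dependency along these lines can be added (the version is an assumption, chosen to match the appender):
<dependency>
    <groupId>org.apache.flume</groupId>
    <artifactId>flume-ng-sdk</artifactId>
    <version>1.6.0</version>
</dependency>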
log4j.properties
log4j.appender.encoding = UTF-8
log4j.rootLogger=INFO,stdout,flume
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} [%t] [%c] [%p] - %m%n
log4j.appender.flume = org.apache.flume.clients.log4jappender.Log4jAppender
log4j.appender.flume.Hostname = node1
log4j.appender.flume.Port = 41414
log4j.appender.flume.UnsafeMode = true
LoggerGenerator.java
import org.apache.log4j.Logger;

/**
 * Created by jy02268879 on 2018/7/19.
 *
 * Simulates log generation: emits one log line per second.
 */
public class LoggerGenerator {

    private static Logger logger = Logger.getLogger(LoggerGenerator.class.getName());

    public static void main(String[] args) throws Exception {
        int index = 0;
        while (true) {
            Thread.sleep(1000);
            logger.info("Current value is :" + index++);
            System.out.println(index);
        }
    }
}
2. Write the flume agent configuration file
On node1, go into the flume conf directory
cd /app/flume/flume/conf
Create the flume agent configuration file
vi test-flume-kafka-streaming.conf
#flume-kafka-streaming
flume-kafka-streaming.sources = avro-source
flume-kafka-streaming.sinks = log-sink
flume-kafka-streaming.channels = logger-channel
flume-kafka-streaming.sources.avro-source.type = avro
flume-kafka-streaming.sources.avro-source.bind = node1
flume-kafka-streaming.sources.avro-source.port = 41414
flume-kafka-streaming.sinks.log-sink.type = logger
flume-kafka-streaming.channels.logger-channel.type = memory
flume-kafka-streaming.sources.avro-source.channels = logger-channel
flume-kafka-streaming.sinks.log-sink.channel = logger-channel
Start the agent on node1
cd /app/flume/flume
bin/flume-ng agent --name flume-kafka-streaming -c conf -f conf/test-flume-kafka-streaming.conf -Dflume.root.logger=INFO,console
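Once the agent reports that it has started, it can help to confirm the avro source is actually listening on 41414 before generating any logs; on most Linux systems something like this will show the agent's process bound to the port:
netstat -tlnp | grep 41414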
3. Run LoggerGenerator.java in IDEA
Check the flume console: the lines produced by LoggerGenerator should appear in the logger-sink output.
At this point, collecting Log4j logs with Flume works end to end.
4. Change the flume agent configuration to a kafka-sink (the log-sink above was just a convenient way to verify that data arrived from Log4j)
cd /app/flume/flume/conf
vi test-flume-kafka-streaming.conf
#flume-kafka-streaming
flume-kafka-streaming.sources = avro-source
flume-kafka-streaming.sinks = kafka-sink
flume-kafka-streaming.channels = logger-channel
flume-kafka-streaming.sources.avro-source.type = avro
flume-kafka-streaming.sources.avro-source.bind = node1
flume-kafka-streaming.sources.avro-source.port = 41414
flume-kafka-streaming.sinks.kafka-sink.type = org.apache.flume.sink.kafka.KafkaSink
flume-kafka-streaming.sinks.kafka-sink.topic = spark_topic
flume-kafka-streaming.sinks.kafka-sink.brokerList = node1:9092
flume-kafka-streaming.sinks.kafka-sink.requiredAcks = 1
flume-kafka-streaming.sinks.kafka-sink.batchSize = 20
flume-kafka-streaming.channels.logger-channel.type = memory
flume-kafka-streaming.sources.avro-source.channels = logger-channel
flume-kafka-streaming.sinks.kafka-sink.channel = logger-channel
Start the zookeeper cluster
cd /app/zookeeper/bin
./zkServer.sh start
Start kafka
cd /app/kafka
bin/kafka-server-start.sh -daemon config/server.properties
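Unless the broker auto-creates topics (auto.create.topics.enable defaults to true), spark_topic also has to exist before the sink writes to it. A minimal creation command for this single-broker setup would be:
cd /app/kafka
bin/kafka-topics.sh --create --zookeeper node1:2181 --replication-factor 1 --partitions 1 --topic spark_topic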
Restart flume.
cd /app/flume/flume
bin/flume-ng agent --name flume-kafka-streaming -c conf -f conf/test-flume-kafka-streaming.conf -Dflume.root.logger=INFO,console
Start a kafka console consumer to check that data comes through.
cd /app/kafka/bin
./kafka-console-consumer.sh --zookeeper node1:2181 --topic spark_topic
Run the log-generating code in IDEA
LoggerGenerator.java
Because flume-kafka-streaming.sinks.kafka-sink.batchSize = 20, flume buffers events and ships them to kafka in batches of 20. So wait until LoggerGenerator.java has produced 20 records before checking the kafka consumer.
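For quicker feedback while testing, the batch size can be lowered so every event is flushed immediately, e.g.:
flume-kafka-streaming.sinks.kafka-sink.batchSize = 1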
At this point the log4j + flume + kafka path is verified.
5. On top of this, integrate Spark Streaming to consume from kafka
KafkaReceiver.scala
package com.sid.spark

import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Created by jy02268879 on 2018/7/19.
 *
 * Spark Streaming consuming Kafka via the Receiver-based approach.
 */
object KafkaReceiver {

  def main(args: Array[String]): Unit = {

    if (args.length != 4) {
      System.err.println("Usage: KafkaReceiver <zkQuorum> <groupId> <topics> <numPartitions>")
      System.exit(1)
    }

    val Array(zkQuorum, groupId, topics, numPartitions) = args

    val sparkConf = new SparkConf().setAppName("KafkaReceiver").setMaster("local[3]")
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    /**
     * Create an input stream that pulls messages from Kafka Brokers.
     * @param ssc StreamingContext object
     * @param zkQuorum Zookeeper quorum (hostname:port,hostname:port,..)
     * @param groupId The group id for this consumer
     * @param topics Map of (topic_name to numPartitions) to consume. Each partition is consumed
     *               in its own thread
     * @param storageLevel Storage level to use for storing the received objects
     *                     (default: StorageLevel.MEMORY_AND_DISK_SER_2)
     * @return DStream of (Kafka message key, Kafka message value)
     */
    val topicMap = topics.split(",").map((_, numPartitions.toInt)).toMap
    val messages = KafkaUtils.createStream(ssc, zkQuorum, groupId, topicMap)

    messages.print()

    // Word count over the message values
    messages.map(_._2).flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).print()

    ssc.start()
    ssc.awaitTermination()
  }
}
Run KafkaReceiver.scala.
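The four arguments have to match the setup above; in the IDEA run configuration, the Program arguments could look like this (the group id test is an arbitrary name, not something fixed by the setup):
node1:2181 test spark_topic 1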
This completes the whole integration.
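As a side note, the spark-streaming-kafka-0-8 dependency in the pom also supports the receiver-less direct approach. A minimal sketch, assuming the same broker node1:9092 and topic spark_topic as above (a reference only, not part of the tested setup):
package com.sid.spark

import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object KafkaDirect {

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setAppName("KafkaDirect").setMaster("local[2]")
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    // Direct approach: no receiver; Spark queries the brokers itself and tracks offsets
    val kafkaParams = Map[String, String]("metadata.broker.list" -> "node1:9092")
    val topics = Set("spark_topic")
    val messages = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)

    // Same word count as in KafkaReceiver
    messages.map(_._2).flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).print()

    ssc.start()
    ssc.awaitTermination()
  }
}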