Starting Spark: ./bin/spark-shell
1. Spark Streaming pulls data out of Kafka and computes on it; the Kafka producer is still the producer.java from the previous post.
2. Spark Streaming pulling the data:
The project is managed with Maven:
(1) The pom.xml file
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>cn.uestc.SparkStreamingKafka</groupId>
  <artifactId>SparkStreamingKafka</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>jar</packaging>
  <name>SparkStreamingKafka</name>
  <url>http://maven.apache.org</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
  </properties>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.3</version>
        <configuration>
          <source>1.8</source>
          <target>1.8</target>
        </configuration>
      </plugin>
    </plugins>
  </build>

  <dependencies>
    <!-- keep the Kafka connector version aligned with spark-core/spark-streaming below -->
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
      <version>2.2.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-clients</artifactId>
      <version>1.1.0</version>
    </dependency>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-api</artifactId>
      <version>1.7.25</version>
    </dependency>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>3.8.1</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.scala-lang</groupId>
      <artifactId>scala-library</artifactId>
      <version>2.11.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-core_2.11</artifactId>
      <version>2.2.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-streaming_2.11</artifactId>
      <version>2.2.0</version>
    </dependency>
    <dependency>
      <groupId>com.fasterxml.jackson.core</groupId>
      <artifactId>jackson-core</artifactId>
      <version>2.6.5</version>
    </dependency>
    <dependency>
      <groupId>com.fasterxml.jackson.core</groupId>
      <artifactId>jackson-databind</artifactId>
      <version>2.6.5</version>
    </dependency>
    <dependency>
      <groupId>com.fasterxml.jackson.core</groupId>
      <artifactId>jackson-annotations</artifactId>
      <version>2.6.5</version>
    </dependency>
  </dependencies>
</project>
One thing to note:
Spark 2.2.0 artifacts are built against Scala 2.11, so the Scala dependency must stay on the 2.11.x line; it cannot be 2.12.x, or you will get errors.
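With this pom, one way to build and run the job (a sketch, assuming a standard Spark installation; adjust paths to your machine) is to package the project with mvn clean package and then submit the class from section (2) below:
./bin/spark-submit --class cn.uestc.SparkStreamingKafka.SparkStreamingKafka.sparkStreamingkafka --packages org.apache.spark:spark-streaming-kafka-0-10_2.11:2.2.0 target/SparkStreamingKafka-0.0.1-SNAPSHOT.jar
The --packages flag matters because the pom configures no shade/assembly plugin, so the Kafka connector is not bundled into the jar.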
(2) sparkStreamingkafka.java
package cn.uestc.SparkStreamingKafka.SparkStreamingKafka;

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition; // used by the commented-out offsets example
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream; // used by the commented-out word count
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import scala.Tuple2; // used by the commented-out word count

public class sparkStreamingkafka {
    public static void main(String[] args) throws InterruptedException {
        String brokers = "192.168.0.129:9092,192.168.0.129:9093,192.168.0.129:9094";
        String topics = "mytest";
        // Without spark.streaming.kafka.consumer.poll.ms this error shows up:
        // "Failed to get records for spark-executor-logGroup mytest 0 1265517 after polling for 512"
        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("streaming word count")
                .set("spark.streaming.kafka.consumer.poll.ms", "60000");
        JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(2));
        Collection<String> topicsSet = new HashSet<>(Arrays.asList(topics.split(","))); // allow a comma-separated topic list

        // Kafka parameters -- required; leaving them out causes errors
        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put("bootstrap.servers", brokers); // replaces the old "metadata.broker.list"
        kafkaParams.put("group.id", "logGroup");
        kafkaParams.put("enable.auto.commit", false); // disable automatic offset commits
        kafkaParams.put("auto.commit.interval.ms", "1000"); // only takes effect if auto-commit is enabled
        kafkaParams.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaParams.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // Pinning fixed starting offsets per partition made the job fail after a few
        // runs (the offsets go out of range), so that approach was dropped:
        // Map<TopicPartition, Long> offsets = new HashMap<>();
        // offsets.put(new TopicPartition("mytest", 0), 2L);

        // Enable checkpointing; the argument is the checkpoint directory,
        // which you need to create by hand beforehand
        ssc.checkpoint("/home/dmcl216/Downloads/checkpoint");

        // Pull data from Kafka via KafkaUtils.createDirectStream(...);
        // the Kafka-related settings come from kafkaParams
        JavaInputDStream<ConsumerRecord<String, String>> lines = KafkaUtils.createDirectStream(
                ssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.<String, String>Subscribe(topicsSet, kafkaParams)
        );

        // From here on it works like the earlier demos; just note that each element
        // of lines is a ConsumerRecord, so call value() to get at the payload
        /* JavaPairDStream<String, Integer> counts =
                lines.flatMap(x -> Arrays.asList(x.value().split(" ")).iterator())
                        .mapToPair(x -> new Tuple2<>(x, 1))
                        .reduceByKey((x, y) -> x + y); */
        JavaDStream<String> counts = lines
                .flatMap(x -> Arrays.asList(x.value().split(",")).iterator()); // split on commas
        counts.print();

        // To inspect the structure of ConsumerRecord, print every record:
        /* lines.foreachRDD(rdd -> {
            rdd.foreach(x -> System.out.println(x));
        }); */

        ssc.start();
        ssc.awaitTermination();
        ssc.close();
    }
}
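One more point about offsets: because enable.auto.commit is set to false above and the fixed-offset map was dropped, this program relies on checkpoints alone to remember its position. The kafka-0-10 integration also lets you commit processed offset ranges back to Kafka yourself; a minimal sketch under that setup (reusing the lines stream from above, with the extra kafka010 imports noted in the comments) could look like this:
// Additional imports needed:
// import org.apache.spark.streaming.kafka010.CanCommitOffsets;
// import org.apache.spark.streaming.kafka010.HasOffsetRanges;
// import org.apache.spark.streaming.kafka010.OffsetRange;
lines.foreachRDD(rdd -> {
    // each RDD from createDirectStream knows which Kafka offsets it covers
    OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
    rdd.foreach(record -> System.out.println(record.value())); // process the batch first
    // then commit the covered ranges back to Kafka (asynchronous, at-least-once)
    ((CanCommitOffsets) lines.inputDStream()).commitAsync(offsetRanges);
});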
Note: both the producer and the consumer code above have to run on the machines hosting the Kafka cluster; otherwise no data gets produced and none can be read.
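The previous post's producer.java is not reproduced here; as a stand-in, a minimal producer in the same spirit might look like the sketch below. The broker list and the topic mytest come from the consumer code above; the class name, loop, and message contents are made up for illustration (note the comma-separated values, matching the split(",") in the consumer).
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Hypothetical stand-in for the previous post's producer.java
public class Producer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // broker list taken from the consumer code above
        props.put("bootstrap.servers", "192.168.0.129:9092,192.168.0.129:9093,192.168.0.129:9094");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        for (int i = 0; i < 10; i++) {
            // comma-separated values, matching the split(",") in the consumer
            producer.send(new ProducerRecord<>("mytest", "hello,spark,streaming," + i));
        }
        producer.close();
    }
}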
This page explains the DStream operator functions very clearly:
https://www.cnblogs.com/yjd_hycf_space/p/7053722.html