Storm + Kafka Integration

pom.xml dependencies
<dependency>
    <groupId>com.google.guava</groupId>
    <artifactId>guava</artifactId>
    <version>21.0</version>
</dependency>

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.12</artifactId>
    <version>2.3.0</version>
</dependency>
<dependency>
    <groupId>org.apache.storm</groupId>
    <artifactId>storm-core</artifactId>
    <version>1.2.3</version>
    <!-- Keep the storm-core version aligned with storm-kafka-client below;
         uncomment the following line for cluster mode -->
    <!--<scope>provided</scope>-->
</dependency>

<!-- storm-kafka is the legacy connector; the code below only uses storm-kafka-client -->
<dependency>
    <groupId>org.apache.storm</groupId>
    <artifactId>storm-kafka</artifactId>
    <version>1.2.3</version>
    <!--<scope>provided</scope>-->
</dependency>

<dependency>
    <groupId>org.apache.storm</groupId>
    <artifactId>storm-kafka-client</artifactId>
    <version>1.2.3</version>
    <!--<scope>provided</scope>-->
</dependency>
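The topology below assumes the topic kafkaStorm1 already exists on the broker. As a convenience, here is a minimal sketch that creates it with the Kafka AdminClient (pulled in by the dependencies above); the class name is hypothetical, and the broker address, partition count, and replication factor are assumptions for a local single-broker demo:

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Collections;
import java.util.Properties;

public class CreateTopic {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // assumed local broker address, matching the spout config below
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "172.31.15.175:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // one partition, replication factor 1 -- enough for a local demo
            NewTopic topic = new NewTopic("kafkaStorm1", 1, (short) 1);
            admin.createTopics(Collections.singleton(topic)).all().get();
        }
    }
}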
KafKaTopic
package com.zbj.storm.kafka;

import com.google.common.collect.Maps;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.kafka.spout.ByTopicRecordTranslator;
import org.apache.storm.kafka.spout.Func;
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * KafKaTopic
 * References:
 * http://blog.itpub.net/31506529/viewspace-2215095/
 * https://www.cnblogs.com/ye-hcj/p/10264092.html
 *
 * @author weigang
 * @create 2019-09-18
 **/
public class KafKaTopic {

    public static final String TOPIC = "kafkaStorm1";

    public static void main(String[] args) throws Exception {

        String spoutId = "kafkaSpout";
        // Configure the Kafka spout that consumes from the topic
        KafkaSpoutConfig.Builder<String, String> builder = new KafkaSpoutConfig.Builder<>(
                "172.31.15.175:9092", TOPIC);

        // Start from the latest offset when there is no previously committed offset
        builder.setFirstPollOffsetStrategy(KafkaSpoutConfig.FirstPollOffsetStrategy.LATEST);

        // The record translator converts each incoming Kafka record into a Storm tuple
        ByTopicRecordTranslator<String, String> recordTranslator = new ByTopicRecordTranslator<>(new Func<ConsumerRecord<String, String>, List<Object>>() {
            @Override
            public List<Object> apply(ConsumerRecord<String, String> record) {
                return new Values(record.value(), record.topic());
            }
        }, new Fields("values", TOPIC));

        builder.setRecordTranslator(recordTranslator);

        // Deserializers and consumer group for the spout's internal Kafka consumer
        Map<String, Object> consumerMap = Maps.newHashMap();
        consumerMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerMap.put(ConsumerConfig.GROUP_ID_CONFIG, "zbj-kafka");
        builder.setProp(consumerMap);

        KafkaSpoutConfig<String, String> spoutConfig = new KafkaSpoutConfig<>(builder);
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        KafkaSpout<String, String> kafkaSpout = new KafkaSpout<>(spoutConfig);
        topologyBuilder.setSpout(spoutId, kafkaSpout);


        topologyBuilder.setBolt("word-split", new WordSpliter()).shuffleGrouping(spoutId);
        // Four parallel WriterBolt executors produce four UUID-named files; fields grouping
        // on "word" guarantees the same word is always handled by the same bolt instance
        topologyBuilder.setBolt("writer", new WriterBolt(), 4).fieldsGrouping("word-split", new Fields("word"));

        Config config = new Config();
        config.setNumWorkers(4);
        config.setNumAckers(0); // no ackers: tuples are not tracked (at-most-once processing)
        config.setDebug(false);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCount", config, topologyBuilder.createTopology());

        // Let the local topology run for a while, then shut it down
        TimeUnit.MINUTES.sleep(10);
        cluster.killTopology("wordCount");
        cluster.shutdown();

        // To run on a Storm cluster, submit the topology instead:
        //StormSubmitter.submitTopology("wordCount", config, topologyBuilder.createTopology());

    }
}
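The KafKaTopic class only consumes; nothing in this example actually produces messages into kafkaStorm1. A minimal producer sketch for feeding the topology test lines; the class name and sample sentence are placeholders, and the broker address simply mirrors the spout config:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class SentenceProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // assumed broker address, same as the spout config
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "172.31.15.175:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // each message is one line of space-separated words for WordSpliter to split
            producer.send(new ProducerRecord<>("kafkaStorm1", "the quick brown fox"));
            producer.flush();
        }
    }
}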
Word splitting
package com.zbj.storm.kafka;

import org.apache.commons.lang.StringUtils;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

/**
 * WordSpliter
 *
 * @author weigang
 * @create 2019-09-18
 **/
public class WordSpliter extends BaseBasicBolt {
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        String line = input.getString(0);
        String[] words = line.split(" ");
        for (String word : words) {
            word = word.trim();
            if (StringUtils.isNotBlank(word)) {
                word = word.toLowerCase();
                collector.emit(new Values(word));
            }
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word"));
    }
}
Writing to a file
package com.zbj.storm.kafka;

import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

import java.io.FileWriter;
import java.util.Map;
import java.util.UUID;

/**
 * WriterBolt
 *
 * @author weigang
 * @create 2019-09-18
 **/
public class WriterBolt extends BaseBasicBolt {

    private FileWriter writer;

    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        try {
            // Each bolt instance writes to its own UUID-named file
            writer = new FileWriter("wordCount-" + UUID.randomUUID().toString().replaceAll("-", "") + ".txt");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        String word = input.getString(0);
        try {
            writer.write(word);
            writer.write("\n");
            writer.flush();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    @Override
    public void cleanup() {
        // Close the file handle when the topology is killed
        try {
            writer.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // This bolt is a sink and declares no output fields
    }
}
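A note on the fields grouping used above: because tuples are routed by a hash of the "word" field, each of the four output files ends up with a disjoint set of words. A conceptual sketch of the routing idea (this is not Storm's actual internal code, and GroupingDemo is a hypothetical class):

public class GroupingDemo {
    public static void main(String[] args) {
        // Conceptual only: fieldsGrouping-style routing picks a task by hashing the field value
        int numTasks = 4;
        for (String word : new String[]{"storm", "kafka", "storm"}) {
            int task = Math.abs(word.hashCode()) % numTasks;
            System.out.println(word + " -> task " + task); // "storm" maps to the same task both times
        }
    }
}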
Sample code
  1. kafka-storm-demo