A small Storm + Kafka WordCount integration example

1. Configure the pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.zpark</groupId>
    <artifactId>HelloStorm</artifactId>
<!--    <packaging>jar</packaging>-->
    <version>1.0-SNAPSHOT</version>


    <dependencies>
        <!-- Kafka integration -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>0.9.0.0</version>
            <exclusions>
                <exclusion>
                    <groupId>org.apache.zookeeper</groupId>
                    <artifactId>zookeeper</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>log4j</groupId>
                    <artifactId>log4j</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.9.0.0</version>
        </dependency>

        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-kafka</artifactId>
            <version>1.2.2</version>
        </dependency>

        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-core</artifactId>
            <!-- keep storm-core on the same version as storm-kafka -->
            <version>1.2.2</version>
            <!-- leave <scope>provided</scope> commented out for local testing; uncomment it when submitting to a cluster -->
            <!--  <scope>provided</scope>-->
        </dependency>


    </dependencies>
</project>
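
After the exclusions above, only storm-core's transitive ZooKeeper should remain on the classpath. An optional sanity check with standard Maven (nothing project-specific assumed):

mvn dependency:tree -Dincludes=org.apache.zookeeper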

2. The bolt:

package com.storm_kafka;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;

import java.util.Map;

public class TestBolt extends BaseRichBolt {

    private TopologyContext context;
    private OutputCollector collector;
    
    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        this.context = context;
    }

    @Override
    public void execute(Tuple tuple) {
        // StringScheme delivers each Kafka message as a single string field
        System.out.println(tuple.getString(0));
        // ack so the spout can record the offset as processed
        collector.ack(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // terminal bolt: nothing is emitted downstream, so no fields are declared
    }
}
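
TestBolt above only prints each message; it does not actually count words. A minimal word-count sketch under the same setup (hypothetical class name WordCountBolt, assuming each Kafka message is a line of whitespace-separated words) could look like this:

package com.storm_kafka;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

import java.util.HashMap;
import java.util.Map;

public class WordCountBolt extends BaseRichBolt {

    private OutputCollector collector;
    // in-memory counts; lost on restart, which is fine for a demo
    private Map<String, Integer> counts;

    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        this.counts = new HashMap<String, Integer>();
    }

    @Override
    public void execute(Tuple tuple) {
        // split the Kafka message into words and update each word's count
        for (String word : tuple.getString(0).split("\\s+")) {
            if (word.isEmpty()) continue;
            Integer count = counts.get(word);
            count = (count == null) ? 1 : count + 1;
            counts.put(word, count);
            System.out.println(word + " : " + count);
            collector.emit(new Values(word, count));
        }
        collector.ack(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word", "count"));
    }
}

To try it, swap it in for TestBolt in step 3. With a single bolt instance, shuffleGrouping is fine; with higher parallelism you would split words in one bolt and count in another, connected by fieldsGrouping on "word" so the same word always reaches the same task.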

3. App (main class):

package com.storm_kafka;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;

import java.util.UUID;

public class TestApp {
    public static void main(String[] args) {
        // ZooKeeper connection string
        String zk = "hdp-1:2181";
        // the brokers are discovered through ZooKeeper
        ZkHosts zkHosts = new ZkHosts(zk);
        // Kafka topic
        String topicName = "kafka_storm";
        /**
         * Prepare the spout config. SpoutConfig takes four arguments:
         * 1. ZkHosts: the ZooKeeper ensemble where the spout records its read progress
         * 2. the Kafka topic
         * 3. the ZooKeeper path under which the progress is stored (may be left as "")
         * 4. an id for the progress record; to have a new spout resume from an earlier
         *    spout's position, give it the same id (it is a unique key). The random UUID
         *    used below means every run starts from scratch.
         */

        SpoutConfig config = new SpoutConfig(zkHosts, topicName, "/" + topicName, UUID.randomUUID().toString());
        // StringScheme deserializes each Kafka message into a single string field
        config.scheme = new SchemeAsMultiScheme(new StringScheme());
        // build the Kafka spout
        KafkaSpout kafkaSpout = new KafkaSpout(config);
        // assemble the topology
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("kafkaspout", kafkaSpout);
        builder.setBolt("outbolt", new TestBolt()).shuffleGrouping("kafkaspout");

        // run in local mode
        LocalCluster localCluster = new LocalCluster();
        Config conf = new Config();
//        conf.setDebug(true);
        localCluster.submitTopology("storm-kafka", conf, builder.createTopology());
    }
}
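
As written, the local cluster keeps running until the process is killed. If you want the demo to stop cleanly on its own, a common pattern is to add the following at the end of main (the 60-second window is an arbitrary choice):

        // let the topology consume for a minute, then tear everything down
        org.apache.storm.utils.Utils.sleep(60 * 1000);
        localCluster.killTopology("storm-kafka");
        localCluster.shutdown();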

4. On the VM, start both ZooKeeper and Kafka (neither can be skipped), then from Kafka's bin directory launch a console producer:

./kafka-console-producer.sh --broker-list hdp-1:9092 --topic kafka_storm
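
If the topic does not exist yet, create it first from the same directory (hdp-1:2181 is the ZooKeeper address already used in the code; one partition and one replica are enough for this demo):

./kafka-topics.sh --create --zookeeper hdp-1:2181 --replication-factor 1 --partitions 1 --topic kafka_storm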

Result: every line typed into the producer console shows up in the topology's console output, printed by TestBolt.