A simple example of integrating Apache Storm with Kafka (storm集成kafka简单使用示例)

KafkaStormSample.java

package kafkaUse.kafkaUse.withStorm;

import java.util.Properties;
import java.util.UUID;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.kafka.bolt.KafkaBolt;
import org.apache.storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
import org.apache.storm.kafka.bolt.selector.DefaultTopicSelector;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;

/**
 * Word-count sample topology wiring Kafka and Storm together:
 * KafkaSpout (topic "test") -> SplitBolt -> CountBolt -> KafkaBolt (topic "topic3").
 *
 * Run with no arguments for an in-process LocalCluster, or pass a topology
 * name as the first argument to submit to a real cluster.
 */
public class KafkaStormSample
{
    public static void main(String[] args) throws Exception
    {
        // Topology-level settings: verbose debug output, and at most one
        // un-acked tuple in flight from the spout at a time.
        Config stormConf = new Config();
        stormConf.setDebug(true);
        stormConf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);

        // --- Kafka spout: reads raw sentences from the "test" topic ---
        String inputTopic = "test";
        BrokerHosts zkHosts = new ZkHosts("192.168.153.233:2181");
        // Zookeeper root path "/<topic>" stores consumer offsets; a random
        // UUID consumer id means offsets are NOT reused across restarts.
        SpoutConfig spoutConf =
                new SpoutConfig(zkHosts, inputTopic, "/" + inputTopic, UUID.randomUUID().toString());
        spoutConf.bufferSizeBytes = 1024 * 1024 * 4;
        spoutConf.fetchSizeBytes = 1024 * 1024 * 4;
        // spoutConf.forceFromStart = true;
        spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());

        // --- Kafka bolt: writes (word, count) pairs back out to "topic3" ---
        Properties producerProps = new Properties();
        producerProps.put("bootstrap.servers", "192.168.153.233:9092");
        producerProps.put("acks", "1");
        producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaBolt<String, String> sinkBolt = new KafkaBolt<String, String>()
                .withProducerProperties(producerProps)
                .withTopicSelector(new DefaultTopicSelector("topic3"))
                .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<String, String>("word", "count"));

        // --- Assemble the topology graph ---
        TopologyBuilder topology = new TopologyBuilder();
        topology.setSpout("kafkaspout", new KafkaSpout(spoutConf));
        topology.setBolt("wordspitter", new SplitBolt()).shuffleGrouping("kafkaspout");
        topology.setBolt("wordcounter", new CountBolt()).shuffleGrouping("wordspitter");
        topology.setBolt("kafkabolt", sinkBolt).shuffleGrouping("wordcounter");

        if (args != null && args.length > 0) {
            // Remote submission: the first CLI argument names the topology.
            stormConf.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], stormConf, topology.createTopology());
        } else {
            // Local mode: run briefly in-process, then tear everything down.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Topo", stormConf, topology.createTopology());
            Utils.sleep(100000);
            cluster.killTopology("Topo");
            cluster.shutdown();
        }
    }
}

CountBolt.java

package kafkaUse.kafkaUse.withStorm;

import java.util.HashMap;
import java.util.Map;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.IRichBolt;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

/**
 * Tallies occurrences of each incoming word and emits ("word", "word : count")
 * downstream. Counts live only in memory and are printed on cleanup().
 *
 * NOTE(review): state is per-executor and lost on worker restart — acceptable
 * for a sample, not for production.
 */
public class CountBolt implements IRichBolt
{
    // word -> running count; created in prepare(), read by a single executor thread.
    Map<String, Integer> counters;
    private OutputCollector collector;

    @Override
    public void cleanup()
    {
        // Dump the final tallies when the topology is torn down (local mode only;
        // cleanup() is not guaranteed on a real cluster).
        for (Map.Entry<String, Integer> entry : counters.entrySet())
        {
            System.out.println(entry.getKey() + " : " + entry.getValue());
        }
    }

    @Override
    public void execute(Tuple input)
    {
        String str = input.getString(0);
        // merge() replaces the containsKey/get/put sequence: insert 1 on first
        // sight, otherwise add 1 to the existing count.
        Integer c = counters.merge(str, 1, Integer::sum);

        String value = str + " : " + String.valueOf(c);
        // Anchor the emit to the input tuple so a downstream failure triggers replay.
        collector.emit(input, new Values(str, value));
        // BUG FIX: the original never acked (the ack call was commented out), so
        // every tuple timed out and was replayed — and with
        // TOPOLOGY_MAX_SPOUT_PENDING=1 the spout stalled after the first tuple.
        collector.ack(input);
    }

    @Override
    public void prepare(Map arg0, TopologyContext arg1, OutputCollector collector)
    {
        this.counters = new HashMap<String, Integer>();
        this.collector = collector;
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer)
    {
        // Field names must match FieldNameBasedTupleToKafkaMapper("word", "count")
        // configured on the downstream KafkaBolt.
        declarer.declare(new Fields("word", "count"));
    }

    @Override
    public Map<String, Object> getComponentConfiguration()
    {
        // No component-specific configuration overrides.
        return null;
    }
}

SplitBolt.java

package kafkaUse.kafkaUse.withStorm;

import java.util.Map;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.IRichBolt;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

/**
 * Splits each incoming sentence on single spaces and emits one lower-cased,
 * trimmed, non-empty token per output tuple on field "word".
 */
public class SplitBolt implements IRichBolt
{
    private OutputCollector collector;

    @Override
    public void cleanup() {
        // No resources to release.
    }

    @Override
    public void execute(Tuple input)
    {
        String sentence = input.getString(0);

        // Tokenize, normalize, and emit; blank tokens (from repeated spaces)
        // are skipped.
        for (String token : sentence.split(" ")) {
            String normalized = token.trim();
            if (normalized.isEmpty()) {
                continue;
            }
            collector.emit(new Values(normalized.toLowerCase()));
        }

        collector.ack(input);
    }

    @Override
    public void prepare(Map arg0, TopologyContext arg1, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer)
    {
        // Single-field tuples consumed by CountBolt.
        declarer.declare(new Fields("word"));
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        // No component-specific configuration overrides.
        return null;
    }
}
  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值