KafkaTopo代码
package com.zhongruan.strom;
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.spout.SchemeAsMultiScheme;
import backtype.storm.topology.TopologyBuilder;
import storm.kafka.BrokerHosts;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.ZkHosts;
public class KafkaTopo {
    /**
     * Entry point: wires a KafkaSpout -> WordBolt -> WriteBolt topology
     * and submits it to an in-process LocalCluster for local testing.
     */
    public static void main(String[] args) {
        // Kafka topic to consume, ZK node for offset storage, and the spout's component id.
        final String topic = "dsj";
        final String zkRoot = "/ks";
        final String spoutId = "kafkaSpout";

        // ZooKeeper ensemble used by storm-kafka to discover the Kafka brokers.
        BrokerHosts zkHosts = new ZkHosts("zjgm01:2181,zjgm02:2181,zjgm03:2181");

        SpoutConfig spoutConf = new SpoutConfig(zkHosts, topic, zkRoot, spoutId);
        spoutConf.forceFromStart = true; // re-read the topic from the beginning on every start
        spoutConf.scheme = new SchemeAsMultiScheme(new MessageScheme());

        // Topology wiring: kafkaSpout -> wordBolt -> writeBolt, shuffle-grouped.
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout(spoutId, new KafkaSpout(spoutConf));
        builder.setBolt("wordBolt", new WordBolt()).shuffleGrouping(spoutId);
        builder.setBolt("writeBolt", new WriteBolt()).shuffleGrouping("wordBolt");

        Config stormConf = new Config();
        stormConf.setNumWorkers(4);
        new LocalCluster().submitTopology("word", stormConf, builder.createTopology());
    }
}
WordBolt代码
package com.zhongruan.strom;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import org.apache.commons.lang.StringUtils;
public class WordBolt extends BaseBasicBolt {
    /**
     * Splits each incoming line into whitespace-separated words and emits one
     * trimmed, lower-cased word per output tuple (single output field "s").
     */
    @Override
    public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
        String line = tuple.getString(0);
        // Guard: the scheme may hand us a null/absent payload.
        if (line == null) {
            return;
        }
        // \s+ collapses runs of any whitespace (tabs, multiple spaces),
        // avoiding the empty tokens that split(" ") produces.
        String[] words = line.split("\\s+");
        for (String w : words) {
            if (StringUtils.isNotBlank(w)) {
                // Locale.ROOT keeps lower-casing locale-independent
                // (plain toLowerCase() misbehaves under e.g. the Turkish locale).
                String s = w.trim().toLowerCase(java.util.Locale.ROOT);
                basicOutputCollector.emit(new Values(s));
            }
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("s"));
    }
}
MessageScheme代码
package com.zhongruan.strom;
import backtype.storm.spout.Scheme;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import java.util.List;
public class MessageScheme implements Scheme {
    /**
     * Decodes a raw Kafka message payload into a single-field tuple ("msg").
     * Decodes explicitly as UTF-8: the original used new String(bytes), which
     * depends on the JVM's platform default charset and corrupts non-ASCII
     * text when producer and consumer JVMs disagree.
     */
    @Override
    public List<Object> deserialize(byte[] bytes) {
        String msg = new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
        return new Values(msg);
    }

    @Override
    public Fields getOutputFields() {
        return new Fields("msg");
    }
}
WriteBolt代码
package com.zhongruan.strom;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Tuple;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Map;
import java.util.UUID;
public class WriteBolt extends BaseBasicBolt {
    // Opened in prepare(); stays null if the output file could not be created
    // (e.g. the d:\storm directory does not exist).
    private FileWriter fileWriter = null;

    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        try {
            // One uniquely named output file per bolt instance.
            fileWriter = new FileWriter("d:\\storm\\" + UUID.randomUUID().toString());
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Appends each incoming word to the output file, one per line.
     */
    @Override
    public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
        // If prepare() failed, the writer is null; skip quietly instead of
        // throwing a NullPointerException on every tuple.
        if (fileWriter == null) {
            return;
        }
        String s = tuple.getString(0);
        try {
            fileWriter.write(s);
            fileWriter.write("\n");
            fileWriter.flush(); // flush per tuple so results are visible immediately
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void cleanup() {
        // Close the file handle when the topology is shut down
        // (the original leaked it — no cleanup override).
        if (fileWriter != null) {
            try {
                fileWriter.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        // Terminal bolt: emits nothing downstream.
    }
}
将zookeeper集群启动
启动storm
在nimbus主机上（nimbus 和 ui 均为前台进程，需分别在单独的终端中运行，或在命令后加 & 放入后台）
./storm nimbus
./storm ui
在supervisor主机上
./storm supervisor
在每一台节点上启动broker
bin/kafka-server-start.sh config/server.properties
D盘新建文件夹storm
结果