目录
Spout包中:
WordSpout:
package com.xnmzdx.storm.spout;
import java.util.Map;
import com.xnmzdx.storm.util.ThreadUitls;
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
public class WordSpout extends BaseRichSpout{
private static final long serialVersionUID = -8586453079574851549L;
private SpoutOutputCollector collector;
private int index = 0;
private String[] setnences = {
"hello storm hello hadoop hello scala",
"i love you storm and hadoop",
"i learn hadoop and scala",
};
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
this.collector = collector;
}
public void nextTuple() {
this.collector.emit(new Values(setnences[index]));
index++;
if(index >= setnences.length) {
index=0;
}
//ThreadUitls.waitForMillis(2);
}
public void declareOutputFields(OutputFieldsDeclarer declarer) {
declarer.declare(new Fields("sentence"));
}
}
Bolt包中:
WordCountBolt:
package com.xnmzdx.storm.bolt;
import java.sql.Types;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.storm.jdbc.common.Column;
import org.apache.storm.jdbc.common.ConnectionProvider;
import org.apache.storm.jdbc.common.HikariCPConnectionProvider;
import org.apache.storm.jdbc.common.JdbcClient;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;
public class WordCountBolt extends BaseRichBolt {

	private static final long serialVersionUID = 7540731929017423732L;

	/** In-memory running count per word (local to this bolt instance). */
	private Map<String, Long> counts;
	/** JDBC client used to persist counts into the "wordcount" table. */
	private JdbcClient jdbcClient;
	/** Connection pool; created in prepare() and released in cleanup(). */
	private ConnectionProvider connectionProvider;

	/**
	 * Initializes the in-memory count map and the HikariCP-backed JDBC client.
	 */
	public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
		this.counts = new HashMap<String, Long>();
		// Connection-pool configuration.
		Map<String, Object> configMap = new HashMap<String, Object>();
		configMap.put("dataSourceClassName", "com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
		configMap.put("dataSource.url", "jdbc:mysql://localhost/world");
		configMap.put("dataSource.user", "root");
		configMap.put("dataSource.password", "mysql");
		// Create and initialize the connection pool.
		connectionProvider = new HikariCPConnectionProvider(configMap);
		connectionProvider.prepare();
		// Second argument is the query timeout in seconds.
		jdbcClient = new JdbcClient(connectionProvider, 30);
	}

	/**
	 * Increments the running count for the incoming word and upserts it into
	 * MySQL. Uses parameterized queries ("?" placeholders with Column params)
	 * instead of string-concatenated SQL, which was vulnerable to SQL
	 * injection and broke on words containing quotes.
	 */
	public void execute(Tuple input) {
		String word = input.getStringByField("word");
		Long count = counts.get(word);
		if (count == null) {
			count = 0L;
		}
		count++;
		counts.put(word, count);

		// Check whether a row for this word already exists.
		List<Column> queryParams = new ArrayList<Column>();
		queryParams.add(new Column("word", word, Types.VARCHAR));
		List<List<Column>> select = jdbcClient.select("select word from wordcount where word = ?", queryParams);

		if (select != null && !select.isEmpty()) {
			// Row exists: update its count.
			List<Column> updateParams = new ArrayList<Column>();
			updateParams.add(new Column("word_count", count, Types.BIGINT));
			updateParams.add(new Column("word", word, Types.VARCHAR));
			List<List<Column>> updateRows = new ArrayList<List<Column>>();
			updateRows.add(updateParams);
			jdbcClient.executeInsertQuery("update wordcount set word_count = ? where word = ?", updateRows);
		} else {
			// No row yet: insert a new one.
			List<Column> insertParams = new ArrayList<Column>();
			insertParams.add(new Column("word", word, Types.VARCHAR));
			insertParams.add(new Column("word_count", count, Types.BIGINT));
			List<List<Column>> insertRows = new ArrayList<List<Column>>();
			insertRows.add(insertParams);
			jdbcClient.executeInsertQuery("insert into wordcount values( ?, ?)", insertRows);
		}
	}

	/** Terminal bolt: emits nothing downstream, so no fields are declared. */
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
	}

	/** Closes the connection pool when the bolt shuts down. */
	public void cleanup() {
		connectionProvider.cleanup();
	}
}
WordSplitBolt:
package com.xnmzdx.storm.bolt;
import java.util.Map;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
public class WordSplitBolt extends BaseRichBolt {

	private static final long serialVersionUID = 2397169994637579625L;

	/** Collector used to emit one tuple per word. */
	private OutputCollector outputCollector;

	/** Stores the collector handed over by Storm at initialization time. */
	public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
		this.outputCollector = collector;
	}

	/**
	 * Splits the incoming "sentence" field on single spaces and emits each
	 * resulting token as its own tuple.
	 */
	public void execute(Tuple input) {
		String[] tokens = input.getStringByField("sentence").split(" ");
		for (int i = 0; i < tokens.length; i++) {
			this.outputCollector.emit(new Values(tokens[i]));
		}
	}

	/** Declares the single output field "word". */
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		declarer.declare(new Fields("word"));
	}
}
Topology包中:
WordTopology:
package com.xnmzdx.storm.topology;
import com.xnmzdx.storm.bolt.WordCountBolt;
import com.xnmzdx.storm.bolt.WordSplitBolt;
import com.xnmzdx.storm.spout.WordSpout;
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;
public class WordTopology {

	// Component and topology identifiers.
	private static final String WORD_SPOUT_ID = "word-spout";
	private static final String SPLIT_BOLT_ID = "split-bolt";
	private static final String COUNT_BOLT_ID = "count-bolt";
	// Renamed from misspelled "TOTOLOGY_NAME"; submitted name is unchanged.
	private static final String TOPOLOGY_NAME = "word-count-topology";

	/**
	 * Wires spout -> split bolt -> count bolt and submits the topology to an
	 * in-process local cluster. Words are fields-grouped so that every
	 * occurrence of the same word reaches the same counter instance.
	 */
	public static void main(String[] args) {
		// Instantiate the topology components.
		WordSpout spout = new WordSpout();
		WordSplitBolt splitBolt = new WordSplitBolt();
		WordCountBolt countBolt = new WordCountBolt();

		// Build the topology: shuffle sentences to splitters, then group by word.
		TopologyBuilder builder = new TopologyBuilder();
		builder.setSpout(WORD_SPOUT_ID, spout);
		builder.setBolt(SPLIT_BOLT_ID, splitBolt, 5).shuffleGrouping(WORD_SPOUT_ID);
		builder.setBolt(COUNT_BOLT_ID, countBolt, 5).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));

		// Submit to an in-process local cluster (development/testing only).
		LocalCluster cluster = new LocalCluster();
		cluster.submitTopology(TOPOLOGY_NAME, new Config(), builder.createTopology());
	}
}
运行WordTopology:
具体如何连接图形化mysql管理软件我就不说了,软件种类太多请自行解决。