Storm学习笔记-集群环境安装部署

安装版本如下:

apache-storm-1.0.1.tar.gz

zookeeper-3.4.8.tar.gz

集群节点如下:

centos.master 192.168.10.10

centos.slave1 192.168.10.11

centos.slave2 192.168.10.12 

修改各节点zookeeper配置文件zoo.cfg内容

tickTime=2000
initLimit=10
syncLimit=5
dataDir=/home/hadoop/software/zookeeper-3.4.8/data
dataLogDir=/home/hadoop/software/zookeeper-3.4.8/logs
clientPort=2181
server.10=192.168.10.10:2888:3888  
server.11=192.168.10.11:2888:3888  
server.12=192.168.10.12:2888:3888 

启动各个节点的zookeeper服务

[hadoop@centos zookeeper-3.4.8]$ bin/zkServer.sh start


修改配置文件storm-env.sh内容

export JAVA_HOME=/usr/software/jdk
export STORM_CONF_DIR=/home/hadoop/software/storm-1.0.1/conf

修改配置文件storm.yaml内容

storm.zookeeper.servers:
    - "centos.master"
    - "centos.slave1"
    - "centos.slave2"
 
nimbus.seeds: ["centos.master"]

storm.local.dir: "/home/hadoop/software/storm-1.0.1/local" 

supervisor.slots.ports:
    - 6700
    - 6701
    - 6702
    - 6703

storm.health.check.dir: "/home/hadoop/software/storm-1.0.1/healthchecks"

storm.health.check.timeout.ms: 5000

启动对应节点上相关的storm服务

[hadoop@centos storm-1.0.1]$ bin/storm nimbus &
[hadoop@centos storm-1.0.1]$ bin/storm supervisor &
[hadoop@centos storm-1.0.1]$ bin/storm ui &

启动成功后可以通过http://centos.master:8080查看集群和拓扑的信息


测试WordCount实例


/**
 * Spout that emits one random sentence per call to {@link #nextTuple()}.
 * Declares a single output field "sentence". The emitted sentence itself is
 * used as the message id, so acks/fails are reported per sentence text.
 * NOTE(review): repeated sentences reuse the same message id — confirm this
 * is acceptable for at-least-once tracking in this demo.
 */
public class SentenceSpout extends BaseRichSpout {
	
	private static final long serialVersionUID = 1L;

	private SpoutOutputCollector collector = null;

	// Reuse one RNG for the lifetime of the spout instead of allocating a new
	// Random on every nextTuple() call (cheaper, and better random quality than
	// repeatedly re-seeding from the clock).
	private final Random random = new Random();

	// Fixed demo corpus; one whitespace-separated sentence is emitted at a time.
	private String[] sentences = { "j2se j2ee j2me", "hibernate spring struts",
			"mybatis springmvc", "hadoop hbase hive pig", "spark mllib sql streming graph",
			"alluxio tachyon hdfs", "mongodb redis memcache cassandra" };

	@SuppressWarnings("rawtypes")
	@Override
	public void open(Map conf, TopologyContext context,
			SpoutOutputCollector collector) {
		// Keep the collector handed to us by the framework for later emits.
		this.collector = collector;
	}
	
	@Override
	public void ack(Object messageId) {
		// messageId is the sentence string passed to emit() in nextTuple().
		System.out.println("ack : " + messageId);
	}

	@Override
	public void activate() {
	}

	@Override
	public void close() {
		
	}

	@Override
	public void deactivate() {
		
	}

	@Override
	public void fail(Object messageId) {
		System.out.println("fail : " + messageId);
	}

	@Override
	public void nextTuple() {
		// Pick a random sentence and emit it anchored by its own text as message id.
		int index = random.nextInt(sentences.length);
		this.collector.emit(new Values(sentences[index]), sentences[index]);
		// Throttle slightly so the topology is not flooded.
		Utils.sleep(1);
	}

	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		declarer.declare(new Fields("sentence"));
	}

}


/**
 * Bolt that splits each incoming "sentence" tuple on single spaces and emits
 * one "word" tuple per token, then acks the input.
 */
public class SentenceSplitBolt extends BaseRichBolt {
	
	private static final long serialVersionUID = 1L;

	private OutputCollector collector = null;

	@SuppressWarnings("rawtypes")
	@Override
	public void prepare(Map stormConf, TopologyContext context,
			OutputCollector collector) {
		// Remember the framework-provided collector for use in execute().
		this.collector = collector;
	}

	@Override
	public void execute(Tuple input) {
		// Tokenize the sentence on single spaces and forward each word downstream.
		String sentence = input.getStringByField("sentence");
		for (String token : sentence.split(" ")) {
			this.collector.emit(new Values(token));
		}
		// Explicitly ack: BaseRichBolt does not ack on our behalf.
		this.collector.ack(input);
	}

	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		declarer.declare(new Fields("word"));
	}

}


public class WordCountBolt extends BaseRichBolt {

	private static final long serialVersionUID = 1L;

	private OutputCollector collector = null;

	private HashMap<String, Long> counts = null;

	@SuppressWarnings("rawtypes")
	@Override
	public void prepare(Map stormConf, TopologyContext context,
			OutputCollector collector) {
		this.collector = collector;
		this.counts = new HashMap<String, Long>();
	}

	@Override
	public void execute(Tuple input) {
		String word = input.getStringByField("word");
		Long count = this.counts.get(word);
		count = null == count ? 0L : count + 1;
		this.counts.put(word, count);
		this.collector.emit(new Values(word, count));
	}

	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		declarer.declare(new Fields("word", "count"));
	}

}


/**
 * Terminal bolt: records the latest count seen for each word and prints a
 * sorted report when the topology is torn down (cleanup() is only reliably
 * invoked in local mode).
 *
 * Fix over the original: execute() never acked the input tuple and prepare()
 * discarded the collector, so tuples would time out and be replayed.
 */
public class WordReportBolt extends BaseRichBolt {
	
	private static final long serialVersionUID = 1L;

	private OutputCollector collector = null;

	// Latest count observed per word; overwritten on each update.
	private HashMap<String, Long> counts = null;

	@SuppressWarnings("rawtypes")
	@Override
	public void prepare(Map stormConf, TopologyContext context,
			OutputCollector collector) {
		// Keep the collector so execute() can ack (original threw it away).
		this.collector = collector;
		this.counts = new HashMap<String, Long>();
	}

	@Override
	public void execute(Tuple input) {
		String word = input.getStringByField("word");
		Long count = input.getLongByField("count");
		this.counts.put(word, count);
		// Ack so the tuple tree completes; BaseRichBolt does not auto-ack.
		this.collector.ack(input);
	}

	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		// Terminal bolt: emits nothing downstream.
	}
	
	@Override
	public void cleanup() {
		// Print the final counts in alphabetical word order.
		System.out.println("--- FINAL WORD COUNT STATISTICS ---");
		List<String> keys = new ArrayList<String>();
		keys.addAll(this.counts.keySet());
		Collections.sort(keys);
		for (String key : keys) {
			System.out.println(key + " : " + this.counts.get(key));
		}
		System.out.println("--------------");
	}

}


/**
 * Wires the WordCount topology: spout -> split (shuffle) -> count (fields by
 * "word") -> report (global), then submits it either to a real cluster
 * (isCluster == true) or to an in-process LocalCluster for a 10s demo run.
 */
public class WordCountTopology {

	private static final String SENTENCE_SPOUT_ID = "sentence-spout";
	private static final String SENTENCE_SPLIT_BOLT_ID = "sentence-split-bolt";
	private static final String WORD_COUNT_BOLT_ID = "word-count-bolt";
	private static final String WORD_REPORT_BOLT_ID = "word-report-bolt";
	private static final String TOPOLOGY_NAME = "word-count-topology";
	// Compile-time switch between cluster submission and local test run.
	private static final boolean isCluster = true;
	
	public static void main(String[] args) throws Exception {
		TopologyBuilder topologyBuilder = new TopologyBuilder();

		// Source of random sentences.
		topologyBuilder.setSpout(SENTENCE_SPOUT_ID, new SentenceSpout());
		// Split: 2 executors over 4 tasks, sentences distributed randomly.
		topologyBuilder
				.setBolt(SENTENCE_SPLIT_BOLT_ID, new SentenceSplitBolt(), 2)
				.setNumTasks(4)
				.shuffleGrouping(SENTENCE_SPOUT_ID);
		// Count: fieldsGrouping pins each word to one task so counts are consistent.
		topologyBuilder
				.setBolt(WORD_COUNT_BOLT_ID, new WordCountBolt(), 2)
				.setNumTasks(4)
				.fieldsGrouping(SENTENCE_SPLIT_BOLT_ID, new Fields("word"));
		// Report: a single consolidated view via globalGrouping.
		topologyBuilder
				.setBolt(WORD_REPORT_BOLT_ID, new WordReportBolt())
				.globalGrouping(WORD_COUNT_BOLT_ID);

		Config config = new Config();
		config.setDebug(true);

		if (!isCluster) {
			// Local smoke test: run in-process for 10 seconds, then tear down.
			LocalCluster localCluster = new LocalCluster();
			localCluster.submitTopology(TOPOLOGY_NAME, config, topologyBuilder.createTopology());
			Utils.sleep(10000);
			localCluster.killTopology(TOPOLOGY_NAME);
			localCluster.shutdown();
		} else {
			// Cluster submission across 3 worker processes.
			config.setNumWorkers(3);
			StormSubmitter.submitTopologyWithProgressBar(
					TOPOLOGY_NAME, config, topologyBuilder.createTopology());
		}
	}

}

WordCount测试用例可以在本地模式环境下直接运行测试,也可以打成jar包在集群环境中通过运行

bin/storm jar example.jar 完整的main类名(含包名) [参数1 参数2 参数3 ...]

测试。










  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值