Maven 依赖配置(pom.xml)
<?xml version="1.0" encoding="UTF-8"?>
<!-- Minimal Maven build for the Storm demo topology (Spout -> Bolt -> App). -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.zpark</groupId>
<artifactId>storm</artifactId>
<packaging>pom</packaging>
<version>1.0-SNAPSHOT</version>
<dependencies>
<!-- Storm core jar (topology API: spouts, bolts, TopologyBuilder) -->
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-core</artifactId>
<version>1.2.2</version>
<!-- Uncomment "provided" when submitting to a real cluster, where the
     Storm runtime already supplies this jar; keep it commented for
     local-mode runs so the classes are on the classpath. -->
<!-- <scope>provided</scope>-->
</dependency>
</dependencies>
</project>
Spout 发送数据(数据源组件)
package com.zpark;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import java.util.Map;
public class Spout extends BaseRichSpout{
private SpoutOutputCollector collector;
int count =1;
public void open(Map map, TopologyContext ag1, SpoutOutputCollector collector) {
System.out.println("open:" + map.get("test"));
this.collector = collector;
}
public void nextTuple() {
if (count <= 2){
System.out.println("第" + count + "次开始发送数据");
this.collector.emit(new Values("hello"));
}
count++;
}
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
outputFieldsDeclarer.declare(new Fields("test"));
}
}
Bolt 接收数据(数据处理组件)
package com.zpark;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import java.util.Map;
public class Bolt extends BaseRichBolt {
int count = 1;
public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
}
public void execute(Tuple tuple) {
String msg = tuple.getStringByField("test");
System.out.println("Bolt第"+count+"接受的消息");
count++;
}
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
outputFieldsDeclarer.declare(new Fields("test"));
}
}
测试类 App(拓扑构建与提交入口)
// NOTE: legacy commented-out version of App, superseded by the active App class below.
//package com.zpark;
//
//import org.apache.storm.Config;
//import org.apache.storm.LocalCluster;
//import org.apache.storm.StormSubmitter;
//import org.apache.storm.generated.AlreadyAliveException;
//import org.apache.storm.generated.AuthorizationException;
//import org.apache.storm.generated.InvalidTopologyException;
//import org.apache.storm.topology.TopologyBuilder;
//
//
//
//public class App {
// public static void main(String[] args){
// TopologyBuilder builder = new TopologyBuilder();
// builder.setSpout("send",new Spout());
// builder.setBolt("deal", new Bolt(),1).setNumTasks(1).shuffleGrouping("send");
// Config conf = new Config();
// conf.put("test","test");
// try {
// if(args != null && args.length > 0){
// System.out.println("远程模式");
//
// try {
// StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
// } catch (AlreadyAliveException e) {
// e.printStackTrace();
// } catch (InvalidTopologyException e) {
// e.printStackTrace();
// } catch (AuthorizationException e) {
// e.printStackTrace();
// }
//
// }else {
// System.out.println("本地模式");
// LocalCluster cluster = new LocalCluster();
// cluster.submitTopology("111", conf, builder.createTopology());
//
// Thread.sleep(10000);
// cluster.shutdown();
// }
// } catch (InterruptedException e) {
// e.printStackTrace();
// }
//
//
// }
// }
package com.zpark;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.topology.TopologyBuilder;
public class App {
    /** Component id of the spout in the topology. */
    private static final String str1="test1";
    /** Component id of the bolt in the topology. */
    private static final String str2="test2";

    /**
     * Builds the Spout -> Bolt topology and submits it: to the cluster when a
     * topology name is passed as args[0], otherwise to an in-process
     * LocalCluster that is shut down after 10 seconds.
     */
    public static void main(String[] args) {
        // Define the topology.
        TopologyBuilder builder = new TopologyBuilder();
        // One executor (thread) for the spout, the default.
        builder.setSpout(str1, new Spout());
        // One executor and one task for the bolt, wired to the spout.
        builder.setBolt(str2, new Bolt(), 1).setNumTasks(1).shuffleGrouping(str1);

        Config conf = new Config();
        // Bug fix: Spout.open reads key "test" (the old key "test1" made it
        // print "open:null"); use the same key the legacy version used.
        conf.put("test", "test");

        try {
            if (args != null && args.length > 0) {
                // Remote mode: submit to the cluster, args[0] is the topology name.
                System.out.println("远程模式");
                StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
            } else {
                // Local mode: run in-process for a fixed window, then shut down.
                System.out.println("本地模式");
                LocalCluster cluster = new LocalCluster();
                cluster.submitTopology("111", conf, builder.createTopology());
                Thread.sleep(10000);
                // Stop the local cluster.
                cluster.shutdown();
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        } catch (Exception e) {
            // Boundary of main: log submission failures and exit.
            e.printStackTrace();
        }
    }
}
106

被折叠的 条评论
为什么被折叠?



