ES-Hadoop学习笔记-Storm交互

elasticsearch-hadoop提供ElasticSearch与Apache Storm的集成支持。从ElasticSearch读取的数据在Storm中以Tuple的形式进行操作和处理。

依赖版本信息:

<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-core</artifactId>
<version>1.0.1</version>
</dependency>

<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-starter</artifactId>
<version>1.0.1</version>
</dependency>

<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-hdfs</artifactId>
<version>1.0.1</version>
</dependency>

<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-kafka</artifactId>
<version>1.0.1</version>
</dependency>

<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.10.0.0</version>
</dependency>

<dependency>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch-hadoop</artifactId>
<version>2.3.2</version>
</dependency>


Storm的extlib目录下需要放置上述依赖的jar包。



import java.util.Map;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

/**
 * Bolt that extracts the expected telecom fields from an incoming tuple,
 * normalizes missing or "null" values to the placeholder "NA", and emits
 * them as a fixed six-field tuple for the downstream HDFS bolt.
 */
public class HandleBolt extends BaseRichBolt {

	private static final long serialVersionUID = 1L;

	// Collector used to emit processed tuples and ack the input.
	private OutputCollector collector = null;

	@SuppressWarnings("rawtypes")
	@Override
	public void prepare(Map stormConf, TopologyContext context,
			OutputCollector collector) {
		this.collector = collector;
	}

	@Override
	public void execute(Tuple input) {
		// All six fields share the same extraction/normalization rule;
		// the original applied the "null"-string check only to four of them.
		String name = fieldOrNA(input, "name");
		String phone = fieldOrNA(input, "phone");
		String rcall = fieldOrNA(input, "rcall");
		String address = fieldOrNA(input, "address");
		String email = fieldOrNA(input, "email");
		String idCard = fieldOrNA(input, "idCard");
		this.collector.emit(new Values(name, phone, rcall, address, email, idCard));
		this.collector.ack(input);
	}

	/**
	 * Returns the string value of {@code field} from the tuple, or "NA"
	 * when the field is absent, null, or the literal string "null".
	 */
	private String fieldOrNA(Tuple input, String field) {
		if (!input.contains(field)) {
			return "NA";
		}
		String value = input.getStringByField(field);
		return null == value || "null".equals(value) ? "NA" : value;
	}

	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		// Field names must match the values emitted in execute(). The original
		// declared "rcal" (typo) instead of "rcall", which breaks any
		// field-name-based grouping or lookup downstream.
		declarer.declare(new Fields("name", "phone", "rcall", "address", "email", "idCard"));
	}

}

import java.util.HashMap;
import java.util.Map;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.hdfs.bolt.HdfsBolt;
import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat;
import org.apache.storm.hdfs.bolt.format.DelimitedRecordFormat;
import org.apache.storm.hdfs.bolt.format.FileNameFormat;
import org.apache.storm.hdfs.bolt.format.RecordFormat;
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy.TimeUnit;
import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy;
import org.apache.storm.hdfs.bolt.sync.SyncPolicy;
import org.apache.storm.starter.bolt.PrinterBolt;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;

/**
 * Topology that reads documents from ElasticSearch via an {@code ESSpout},
 * prints them with a {@code PrinterBolt}, reshapes them with
 * {@code HandleBolt}, and writes the result to HDFS via an {@code HdfsBolt}.
 *
 * Usage: {@code ES2StormTopology <isCluster: true|false>}
 */
public class ES2StormTopology {

	private static final String TOPOLOGY_NAME = "es-storm-topology";

	public static void main(String[] args) {
		if (args.length != 1) {
			// Fail loudly with a usage hint and a non-zero status instead of
			// silently exiting with code 0 as the original did.
			System.err.println("Usage: ES2StormTopology <isCluster: true|false>");
			System.exit(1);
		}
		boolean isCluster = Boolean.parseBoolean(args[0]);

		TopologyBuilder builder = new TopologyBuilder();

		// ES index/type to read from and the match-all query.
		String target = "operator/telecom";
		String query = "?q=*";
		Map<Object, Object> configuration = new HashMap<Object, Object>();
		configuration.put("es.nodes", "192.168.10.20:9200");
		// Fields to pull from ES; must match the spout's declared tuple fields.
		configuration.put("es.read.field.include", "name,phone,rcall,email,idCard,zipCode,address");
		configuration.put("es.storm.spout.fields", "name,phone,rcall,email,idCard,zipCode,address");
		builder.setSpout("es-storm-spout", new ESSpout(target, query, configuration), 1);

		// Debug printer and the real processing bolt both consume the spout.
		builder.setBolt("storm-print-bolt", new PrinterBolt()).shuffleGrouping("es-storm-spout");

		builder.setBolt("storm-handle-bolt", new HandleBolt()).shuffleGrouping("es-storm-spout");

		// HDFS sink: colon-delimited records, sync every 10 tuples,
		// rotate files every minute, named /storm/es_*.log.
		RecordFormat recordFormat = new DelimitedRecordFormat().withFieldDelimiter(":");
		SyncPolicy syncPolicy = new CountSyncPolicy(10);
		FileRotationPolicy fileRotationPolicy = new TimedRotationPolicy(1.0f, TimeUnit.MINUTES);
		FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath("/storm/")
				.withPrefix("es_").withExtension(".log");
		HdfsBolt hdfsBolt = new HdfsBolt().withFsUrl("hdfs://centos.host1:9000")
				.withFileNameFormat(fileNameFormat).withRecordFormat(recordFormat)
				.withRotationPolicy(fileRotationPolicy).withSyncPolicy(syncPolicy);
		builder.setBolt("storm-hdfs-bolt", hdfsBolt).globalGrouping("storm-handle-bolt");

		Config config = new Config();
		config.setDebug(true);
		if (isCluster) {
			try {
				config.setNumWorkers(3);
				StormSubmitter.submitTopologyWithProgressBar(
						TOPOLOGY_NAME, config, builder.createTopology());
			} catch (Exception e) {
				e.printStackTrace();
			}
		} else {
			// Local mode: run briefly, then kill the topology and shut down.
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
			Utils.sleep(100000);
			cluster.killTopology(TOPOLOGY_NAME);
			cluster.shutdown();
		}

	}

}


注意:elasticsearch-hadoop里的EsSpout类用到的Storm版本过低,所以重写了一个ESSpout替换旧版本Storm的API。


$bin/storm jar /home/hadoop/Documents/esstorm-0.0.1-SNAPSHOT.jar org.platform.storm.elasticsearch.ES2StormTopology false


import java.util.HashMap;
import java.util.Map;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.starter.bolt.PrinterBolt;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;
import org.platform.storm.elasticsearch.bolt.ESBolt;
import org.platform.storm.elasticsearch.spout.ESSpout;

/**
 * Topology that reads documents from one ElasticSearch index via an
 * {@code ESSpout}, prints them, and writes them back to another index
 * ({@code data/telecom}) through an {@code ESBolt}.
 *
 * Usage: {@code Storm2ESTopology <isCluster: true|false>}
 */
public class Storm2ESTopology {

	private static final String TOPOLOGY_NAME = "storm-es-topology";

	public static void main(String[] args) {
		if (args.length != 1) {
			// Fail loudly with a usage hint and a non-zero status instead of
			// silently exiting with code 0 as the original did.
			System.err.println("Usage: Storm2ESTopology <isCluster: true|false>");
			System.exit(1);
		}
		boolean isCluster = Boolean.parseBoolean(args[0]);

		TopologyBuilder builder = new TopologyBuilder();

		// ES index/type to read from and the match-all query.
		String target = "operator/telecom";
		String query = "?q=*";
		Map<Object, Object> spoutConf = new HashMap<Object, Object>();
		spoutConf.put("es.nodes", "192.168.10.20:9200");
		// Fields to pull from ES; must match the spout's declared tuple fields.
		spoutConf.put("es.read.field.include", "name,phone,rcall,email,idCard,zipCode,address");
		spoutConf.put("es.storm.spout.fields", "name,phone,rcall,email,idCard,zipCode,address");
		builder.setSpout("es-storm-spout", new ESSpout(target, query, spoutConf), 1);

		builder.setBolt("storm-print-bolt", new PrinterBolt()).shuffleGrouping("es-storm-spout");

		// ES sink: auto-create the target index and serialize tuples with the
		// custom StormTupleBytesConverter.
		Map<Object, Object> boltConf = new HashMap<Object, Object>();
		boltConf.put("es.nodes", "192.168.10.20:9200");
		boltConf.put("es.index.auto.create", "true");
		boltConf.put("es.ser.writer.bytes.class", "org.platform.storm.elasticsearch.bolt.StormTupleBytesConverter");
		//boltConf.put("es.input.json", "true");
		builder.setBolt("storm-es-bolt", new ESBolt("data/telecom", boltConf))
			.globalGrouping("es-storm-spout");

		Config config = new Config();
		config.setDebug(true);
		if (isCluster) {
			try {
				config.setNumWorkers(3);
				StormSubmitter.submitTopologyWithProgressBar(
						TOPOLOGY_NAME, config, builder.createTopology());
			} catch (Exception e) {
				e.printStackTrace();
			}
		} else {
			// Local mode: run briefly, then kill the topology and shut down.
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
			Utils.sleep(100000);
			cluster.killTopology(TOPOLOGY_NAME);
			cluster.shutdown();
		}

	}

}

注意:elasticsearch-hadoop里的EsBolt、StormTupleBytesConverter类用到的Storm版本过低,所以重写了一个ESBolt、StormTupleBytesConverter替换旧版本Storm的API。


$bin/storm jar /home/hadoop/Documents/esstorm-0.0.1-SNAPSHOT.jar org.platform.storm.elasticsearch.Storm2ESTopology false




  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值