Kafka Producer编程一

简单地读取 HBase,然后把 HBase 的记录逐行发送到 Kafka。

import java.io.IOException;
import java.text.MessageFormat;
import java.util.*;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import scala.util.control.Exception.Catch;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.clients.producer.*;

/**
 * Scans every row of the HBase table "t_axes_feeding_secutrity_zero" and
 * publishes each row — all cell values of column family "cf" joined with
 * '#' — as one message to the Kafka topic "jlwang".
 */
public class kafkaProducer {

	/**
	 * Entry point: opens the HBase scan once, then sends one Kafka record per row.
	 *
	 * @param args unused
	 * @throws IOException if the HBase table cannot be opened or scanned
	 */
	public static void main(String[] args) throws IOException {

		Properties props = new Properties();
		// NOTE: the original also set "zk.connect", but that is an old-API
		// property; the new-style KafkaProducer only needs bootstrap.servers
		// and merely logs a warning for unknown configuration keys.
		props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		props.put("bootstrap.servers",
				"namenode01.isesol.com:9092,namenode02.isesol.com:9092,datanode03.isesol.com:9092");
		KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);

		// BUG FIX: the original called getResult() inside the loop condition
		// AND in the loop body, opening a brand-new scanner each time — so
		// hasNext() always saw a fresh scanner (the loop never terminated on a
		// non-empty table) and next() always returned the first row. Open the
		// scanner exactly once and iterate over it.
		ResultScanner scanner = getResult();
		try {
			for (Result result : scanner) {
				// BUG FIX: builder is now created per row; the original reused
				// one builder without clearing it, so every message repeated
				// the contents of all previous rows.
				StringBuilder builder = new StringBuilder();
				for (int i = 0; i < result.listCells().size(); i++) {
					byte[] qualifier = result.listCells().get(i).getQualifier();
					// Bytes.toString decodes as UTF-8 explicitly; the original
					// new String(byte[]) used the platform default charset.
					String value = Bytes.toString(result.getValue(Bytes.toBytes("cf"), qualifier));
					builder.append(value).append("#");
					System.out.println(value);
				}

				System.out.println("the data is " + builder.toString());
				ProducerRecord<String, String> record =
						new ProducerRecord<String, String>("jlwang", builder.toString());

				producer.send(record, new Callback() {

					@Override
					public void onCompletion(RecordMetadata metadata, Exception e) {
						// BUG FIX: metadata is null when the send failed; the
						// original dereferenced it unconditionally after
						// printing the stack trace, guaranteeing an NPE on
						// every failed send. (Also dropped the stray slf4j-style
						// "{}" placeholders that were concatenated literally.)
						if (e != null) {
							e.printStackTrace();
						} else {
							System.out.println("offset: " + metadata.offset()
									+ " partition: " + metadata.partition()
									+ " topic: " + metadata.topic());
						}
					}
				});
			}
		} finally {
			// Release the scanner and flush/close the producer even if the
			// scan throws; the original leaked the scanner and skipped
			// producer.close() on error.
			scanner.close();
			producer.close();
			System.out.println("finish to put data into kafka");
		}
	}

	/**
	 * Opens a full-table scan over "t_axes_feeding_secutrity_zero".
	 *
	 * <p>The caller is responsible for closing the returned scanner. The
	 * HTable is intentionally left open for the scanner's lifetime.
	 *
	 * @return an open scanner positioned at the first row of the table
	 * @throws IOException if the HBase connection or table open fails
	 */
	public static ResultScanner getResult() throws IOException {

		Configuration hbaseconf = HBaseConfiguration.create();
		hbaseconf.set("hbase.zookeeper.quorum",
				"datanode01.isesol.com,datanode02.isesol.com,datanode03.isesol.com,datanode04.isesol.com,cmserver.isesol.com");
		hbaseconf.set("hbase.zookeeper.property.clientPort", "2181");
		hbaseconf.set("user", "hdfs");
		HTable htable = new HTable(hbaseconf, "t_axes_feeding_secutrity_zero");
		Scan scan = new Scan();
		// Fetch 300 rows per RPC to cut round-trips during the full scan.
		scan.setCaching(300);
		return htable.getScanner(scan);
	}
}

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

tom_fans

谢谢打赏

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值