Storm整合Kafka+redis+hdfs实践和遇到的问题

1、Kafka+storm+redis+hdfs需要的pom文件

<dependency>
	<groupId>org.apache.storm</groupId>
	<artifactId>storm-core</artifactId>
	<version>1.1.1</version>
</dependency>
<dependency>
	<groupId>org.apache.storm</groupId>
	<artifactId>storm-kafka</artifactId>
	<version>1.1.1</version>
</dependency>
<dependency>
	<groupId>org.apache.storm</groupId>
	<artifactId>storm-redis</artifactId>
	<version>1.1.1</version>
</dependency>
<dependency>
	<groupId>org.apache.kafka</groupId>
	<artifactId>kafka_2.9.2</artifactId>
	<version>0.8.1.1</version>
	<exclusions>
		<exclusion>
			<groupId>org.apache.zookeeper</groupId>
			<artifactId>zookeeper</artifactId>
		</exclusion>
		<exclusion>
			<groupId>log4j</groupId>
			<artifactId>log4j</artifactId>
		</exclusion>
	</exclusions>
</dependency>
<dependency>
	<groupId>jdk.tools</groupId>
	<artifactId>jdk.tools</artifactId>
	<version>1.8</version>
	<scope>system</scope>
	<systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
</dependency>
<dependency>
	<groupId>org.apache.storm</groupId>
	<artifactId>storm-hdfs</artifactId>
	<version>1.1.1</version> <!-- 与 storm-core/storm-kafka/storm-redis 保持同一版本,避免运行时二进制不兼容 -->
</dependency>

<!-- logback -->
<dependency>
	<groupId>ch.qos.logback</groupId>
	<artifactId>logback-classic</artifactId>
	<version>1.1.2</version>
</dependency>
<dependency>
	<groupId>ch.qos.logback</groupId>
	<artifactId>logback-core</artifactId>
	<version>1.1.2</version>
</dependency>

2、KafkaSpout配置实现Storm的Spout组件

 String topic = "gwReport"; // Kafka topic to consume from
    String zkRoot = "/kafka-storm";// ZooKeeper root path where the spout stores consumer offsets
    String spoutId = "kafkaSpout";// id of this Storm Spout component (also the offset sub-node name)
    String zkHosts = "192.168.0.128:2181"; // ZooKeeper host:port used by the Kafka cluster
    /* Build the SpoutConfig for the KafkaSpout */
    BrokerHosts brokerHosts = new ZkHosts(zkHosts);
    SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, topic, zkRoot, spoutId);
    spoutConfig.stateUpdateIntervalMs = 1000; // how often (ms) consumed offsets are written back to ZooKeeper
    spoutConfig.startOffsetTime = OffsetRequest.LatestTime(); // start from the latest offset (ignore history); use EarliestTime() to replay
    spoutConfig.scheme = new SchemeAsMultiScheme(new MessageScheme()); // deserialize raw Kafka bytes via MessageScheme below


    /* Create the KafkaSpout that feeds the topology */
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

MessageScheme类的实现(目的是为了把kafka发来的数据进行格式转换)

package cn.fwyun.monitorPlatform.spout;

import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.StandardCharsets;
import java.util.List;

import org.apache.storm.spout.Scheme;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.alibaba.fastjson.JSONObject;
import com.fwyun.api.domain.GateWayReport;

/**
 * Storm {@link Scheme} that converts raw Kafka message bytes into a single-field
 * tuple ("kafkaReport") containing a {@link GateWayReport} parsed from the
 * message's UTF-8 JSON payload.
 */
public class MessageScheme implements Scheme {

  private static final Logger logger = LoggerFactory.getLogger(MessageScheme.class);

  private static final long serialVersionUID = 1L;

  /**
   * Deserializes one Kafka message.
   *
   * @param byteBuffer raw message bytes as handed over by the KafkaSpout
   * @return a one-element {@link Values} holding the parsed {@link GateWayReport},
   *         or {@code null} if decoding/parsing fails (Storm treats a null
   *         deserialization result as a failed message)
   */
  public List<Object> deserialize(ByteBuffer byteBuffer) {
    try {
      String kafkaMsg = getString(byteBuffer);
      // Parameterized logging: never pass message content as the format string.
      logger.info("received kafka message: {}", kafkaMsg);
      GateWayReport gateWayReport = JSONObject.parseObject(kafkaMsg, GateWayReport.class);
      logger.info("parsed report for fwPin: {}", gateWayReport.getFwPin());
      return new Values(gateWayReport);
    } catch (Exception e) {
      // Log with the exception attached instead of printStackTrace(), so the
      // failure shows up in the worker's log files with full context.
      logger.error("failed to deserialize kafka message", e);
    }
    return null;
  }

  /** Declares the single output field emitted per message. */
  public Fields getOutputFields() {
    return new Fields("kafkaReport");
  }

  /**
   * Decodes the buffer's remaining bytes as UTF-8 without consuming the buffer.
   *
   * <p>Decoding a read-only duplicate leaves the original buffer's position
   * untouched, so repeated reads still see the data (decoding the buffer
   * directly would advance its position and make a second read return empty).
   *
   * @param buffer the byte buffer to decode
   * @return the decoded string, or the empty string if the bytes are not valid UTF-8
   */
  public static String getString(ByteBuffer buffer) {
    try {
      return StandardCharsets.UTF_8.newDecoder()
          .decode(buffer.asReadOnlyBuffer())
          .toString();
    } catch (CharacterCodingException ex) {
      logger.error("failed to decode kafka message as UTF-8", ex);
      return "";
    }
  }

}

3、Storm整合Redis数据库

    3.1、需要的jar文件

<dependency>
	<groupId>org.apache.storm</groupId>
	<artifactId>storm-redis</artifactId>
	<version>1.1.1</version>
</dependency>
  • 2
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值