1、Kafka+storm+redis+hdfs需要的pom文件
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-core</artifactId>
<version>1.1.1</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-kafka</artifactId>
<version>1.1.1</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-redis</artifactId>
<version>1.1.1</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.9.2</artifactId>
<version>0.8.1.1</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>jdk.tools</groupId>
<artifactId>jdk.tools</artifactId>
<version>1.8</version>
<scope>system</scope>
<systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-hdfs</artifactId>
<version>1.1.1</version><!-- keep all storm-* artifacts on the same version to avoid classpath conflicts -->
</dependency>
<!-- logback -->
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>1.1.2</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>1.1.2</version>
</dependency>
2、KafkaSpout配置实现Storm的Spout组件
String topic = "gwReport"; // Kafka topic to consume from
String zkRoot = "/kafka-storm";// ZooKeeper root path where the spout stores consumer offsets
String spoutId = "kafkaSpout";// id of this Storm spout component (also used as the offset sub-path under zkRoot)
String zkHosts = "192.168.0.128:2181"; // ZooKeeper host:port used by the Kafka cluster
/* Build the SpoutConfig that drives the KafkaSpout */
BrokerHosts brokerHosts = new ZkHosts(zkHosts);
SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, topic, zkRoot, spoutId);
spoutConfig.stateUpdateIntervalMs = 1000; // commit consumed offsets to ZooKeeper every second
spoutConfig.startOffsetTime = OffsetRequest.LatestTime(); // start from the latest offset (skip history) when no stored offset exists
spoutConfig.scheme = new SchemeAsMultiScheme(new MessageScheme()); // custom scheme that turns raw bytes into GateWayReport tuples
/* Create the KafkaSpout to be wired into the topology */
KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
MessageScheme类的实现(目的是为了把kafka发来的数据进行格式转换)
package cn.fwyun.monitorPlatform.spout;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.StandardCharsets;
import java.util.List;

import org.apache.storm.spout.Scheme;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.alibaba.fastjson.JSONObject;
import com.fwyun.api.domain.GateWayReport;
public class MessageScheme implements Scheme {
private static final Logger logger = LoggerFactory.getLogger(MessageScheme.class);
/**
*
*/
private static final long serialVersionUID = 1L;
public List<Object> deserialize(ByteBuffer byteBuffer) {
try {
String kafkaMsg = getString(byteBuffer);
logger.info(kafkaMsg);
GateWayReport gateWayReport = JSONObject.parseObject(kafkaMsg, GateWayReport.class);
logger.info(gateWayReport.getFwPin());
return new Values(gateWayReport);
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
public Fields getOutputFields() {
return new Fields("kafkaReport");
}
public static String getString(ByteBuffer buffer) {
Charset charset = null;
CharsetDecoder decoder = null;
CharBuffer charBuffer = null;
try {
charset = Charset.forName("UTF-8");
decoder = charset.newDecoder();
// charBuffer = decoder.decode(buffer);//用这个的话,只能输出来一次结果,第二次显示为空
charBuffer = decoder.decode(buffer.asReadOnlyBuffer());
return charBuffer.toString();
} catch (Exception ex) {
ex.printStackTrace();
return "";
}
}
}
3、Storm整合Redis数据库
3.1、需要的jar文件
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-redis</artifactId>
<version>1.1.1</version>
</dependency>