[8] Storm + Kafka integration


Reference: the official documentation for the 0.8.x Kafka integration (the storm-kafka module).

Maven pom file

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>com.sid.bigdata</groupId>
  <artifactId>storm</artifactId>
  <version>0.0.1</version>
  <packaging>jar</packaging>

  <name>storm</name>
  <url>http://maven.apache.org</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <storm.version>1.1.1</storm.version>
    <hadoop.version>2.6.0</hadoop.version>
  </properties>

  <dependencies>

    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-core</artifactId>
      <version>${storm.version}</version>
      <exclusions>
        <exclusion>
          <groupId>org.slf4j</groupId>
          <artifactId>log4j-over-slf4j</artifactId>
        </exclusion>
      </exclusions>
    </dependency>

    <dependency>
      <groupId>commons-io</groupId>
      <artifactId>commons-io</artifactId>
      <version>2.4</version>
    </dependency>

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>${hadoop.version}</version>
      <exclusions>
        <exclusion>
          <groupId>log4j</groupId>
          <artifactId>log4j</artifactId>
        </exclusion>
      </exclusions>
    </dependency>

    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-redis</artifactId>
      <version>${storm.version}</version>
    </dependency>

    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-jdbc</artifactId>
      <version>${storm.version}</version>
    </dependency>

    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-hdfs</artifactId>
      <version>${storm.version}</version>
      <exclusions>
        <exclusion>
          <groupId>org.apache.hadoop</groupId>
          <artifactId>hadoop-client</artifactId>
        </exclusion>
        <exclusion>
          <groupId>org.apache.hadoop</groupId>
          <artifactId>hadoop-auth</artifactId>
        </exclusion>
      </exclusions>
    </dependency>

    <dependency>
      <groupId>mysql</groupId>
      <artifactId>mysql-connector-java</artifactId>
      <version>5.1.31</version>
    </dependency>

    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-kafka</artifactId>
      <version>${storm.version}</version>
    </dependency>

    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.11</artifactId>
      <version>0.9.0.0</version>
    </dependency>

    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-clients</artifactId>
      <version>0.9.0.0</version>
    </dependency>

    <dependency>
      <groupId>org.apache.curator</groupId>
      <artifactId>curator-client</artifactId>
      <version>2.12.0</version>
    </dependency>

  </dependencies>
</project>
 

Project structure (screenshot not reproduced here)


Topology code

package integration.kafka;

import java.util.UUID;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;

/**
 * @author liyijie
 * @date 2018-07-09 20:21:23
 * @email 37024760@qq.com
 * @remark
 * @version
 */
public class StormKafkaTopology {
    public static void main(String[] args) {
        TopologyBuilder tb = new TopologyBuilder();

        // ZooKeeper address used by Kafka
        BrokerHosts hosts = new ZkHosts("node1:2181");

        // 2nd argument: the Kafka topic; 3rd argument: a ZK root path under which
        // the KafkaSpout stores its read position (offsets); 4th argument: a spout id
        SpoutConfig spoutConfig = new SpoutConfig(hosts, "storm_topic", "/" + "storm_topic", UUID.randomUUID().toString());
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        String spoutId = KafkaSpout.class.getName();
        tb.setSpout(spoutId, kafkaSpout);
        tb.setBolt("LogProcessBolt", new LogProcessBolt()).shuffleGrouping(spoutId);

        // Run in a local in-process cluster for testing
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("StormKafkaTopology", new Config(), tb.createTopology());
    }
}
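
The topology above runs in a LocalCluster, which is enough to reproduce the errors discussed below on a developer machine. To run on a real Storm cluster you would package the project as a jar and replace the two LocalCluster lines with StormSubmitter. A minimal sketch, assuming the topology name comes in as args[0] and using an arbitrary worker count (neither appears in the original code):

        // Sketch only: drop-in replacement for the LocalCluster lines in main()
        // when submitting with the storm CLI (e.g. storm jar storm-0.0.1.jar integration.kafka.StormKafkaTopology myTopology).
        Config config = new Config();
        config.setNumWorkers(2); // arbitrary example value
        try {
            // requires: import org.apache.storm.StormSubmitter;
            StormSubmitter.submitTopology(args[0], config, tb.createTopology());
        } catch (Exception e) {
            e.printStackTrace();
        }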

Bolt code

package integration.kafka;

import java.util.Map;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;

/**
 * @author liyijie
 * @date 2018-07-09 22:28:44
 * @email 37024760@qq.com
 * @remark
 * @version
 */
public class LogProcessBolt extends BaseRichBolt {

    private OutputCollector collector;

    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    public void execute(Tuple input) {
        try {
            // StringScheme emits a single string field, so the message body is at index 0
            String value = input.getString(0);

            System.out.println("rec value is " + value);

            this.collector.ack(input);
        } catch (Exception e) {
            this.collector.fail(input);
        }
    }

    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // This bolt only prints the message, so it declares no output fields
    }
}
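
To verify the pipeline end to end, you can push a few test messages into storm_topic with the kafka-clients producer that is already on the classpath. A minimal sketch; the class name, the broker address "node1:9092", and the message count are assumptions not taken from the original post:

package integration.kafka;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Hypothetical helper for testing the topology; adjust bootstrap.servers to your cluster.
public class TestKafkaProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node1:9092"); // assumed broker address
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 0; i < 10; i++) {
            // Plain string values; the StringScheme in the spout decodes them back to text
            producer.send(new ProducerRecord<String, String>("storm_topic", "test message " + i));
        }
        producer.close();
    }
}

With the topology running locally, each message should appear in the console as "rec value is test message N".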

Errors hit while running the Storm + Kafka integration

1.

java.lang.NoClassDefFoundError: org/apache/curator/shaded/com/google/common/cache/CacheBuilder
	at org.apache.curator.framework.imps.NamespaceWatcherMap.<init>(NamespaceWatcherMap.java:31) ~[curator-framework-2.12.0.jar:?]
	at org.apache.curator.framework.imps.CuratorFrameworkImpl.<init>(CuratorFrameworkImpl.java:81) ~[curator-framework-2.12.0.jar:?]
	at org.apache.curator.framework.CuratorFrameworkFactory$Builder.build(CuratorFrameworkFactory.java:145) ~[curator-framework-2.12.0.jar:?]
	at org.apache.curator.framework.CuratorFrameworkFactory.newClient(CuratorFrameworkFactory.java:100) ~[curator-framework-2.12.0.jar:?]
	at org.apache.storm.kafka.ZkState.newCurator(ZkState.java:45) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.ZkState.<init>(ZkState.java:61) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.KafkaSpout.open(KafkaSpout.java:76) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.daemon.executor$fn__4962$fn__4977.invoke(executor.clj:602) ~[storm-core-1.1.1.jar:1.1.1]
	at org.apache.storm.util$async_loop$fn__557.invoke(util.clj:482) [storm-core-1.1.1.jar:1.1.1]
	at clojure.lang.AFn.run(AFn.java:22) [clojure-1.7.0.jar:?]
	at java.lang.Thread.run(Thread.java:745) [?:1.8.0_91]
Caused by: java.lang.ClassNotFoundException: org.apache.curator.shaded.com.google.common.cache.CacheBuilder
	at java.net.URLClassLoader.findClass(URLClassLoader.java:381) ~[?:1.8.0_91]
	at java.lang.ClassLoader.loadClass(ClassLoader.java:424) ~[?:1.8.0_91]
	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:331) ~[?:1.8.0_91]
	at java.lang.ClassLoader.loadClass(ClassLoader.java:357) ~[?:1.8.0_91]
	... 11 more

Solution

This is caused by a missing jar; add the following dependency to the pom:

  <dependency>
    <groupId>org.apache.curator</groupId>
    <artifactId>curator-client</artifactId>
    <version>2.12.0</version>
  </dependency>

2.

SLF4J: Detected both log4j-over-slf4j.jar AND slf4j-log4j12.jar on the class path, preempting StackOverflowError. 
SLF4J: See also http://www.slf4j.org/codes.html#log4jDelegationLoop for more details.
82691 [Thread-20-org.apache.storm.kafka.KafkaSpout-executor[3 3]] ERROR o.a.s.util - Async loop died!
java.lang.NoClassDefFoundError: Could not initialize class org.apache.log4j.Log4jLoggerFactory
	at org.apache.log4j.Logger.getLogger(Logger.java:39) ~[log4j-over-slf4j-1.6.6.jar:1.6.6]
	at kafka.utils.Logging$class.logger(Logging.scala:24) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.consumer.SimpleConsumer.logger$lzycompute(SimpleConsumer.scala:35) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.consumer.SimpleConsumer.logger(SimpleConsumer.scala:35) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.utils.Logging$class.info(Logging.scala:67) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.consumer.SimpleConsumer.info(SimpleConsumer.scala:35) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.consumer.SimpleConsumer.liftedTree1$1(SimpleConsumer.scala:94) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.consumer.SimpleConsumer.kafka$consumer$SimpleConsumer$$sendRequest(SimpleConsumer.scala:83) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.consumer.SimpleConsumer.getOffsetsBefore(SimpleConsumer.scala:149) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.javaapi.consumer.SimpleConsumer.getOffsetsBefore(SimpleConsumer.scala:79) ~[kafka_2.11-0.9.0.0.jar:?]
	at org.apache.storm.kafka.KafkaUtils.getOffset(KafkaUtils.java:81) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.KafkaUtils.getOffset(KafkaUtils.java:71) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.PartitionManager.<init>(PartitionManager.java:135) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.ZkCoordinator.refresh(ZkCoordinator.java:108) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.ZkCoordinator.getMyManagedPartitions(ZkCoordinator.java:69) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.KafkaSpout.nextTuple(KafkaSpout.java:130) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.daemon.executor$fn__4962$fn__4977$fn__5008.invoke(executor.clj:646) ~[storm-core-1.1.1.jar:1.1.1]
	at org.apache.storm.util$async_loop$fn__557.invoke(util.clj:484) [storm-core-1.1.1.jar:1.1.1]
	at clojure.lang.AFn.run(AFn.java:22) [clojure-1.7.0.jar:?]
	at java.lang.Thread.run(Thread.java:745) [?:1.8.0_91]
82692 [Thread-20-org.apache.storm.kafka.KafkaSpout-executor[3 3]] ERROR o.a.s.d.executor - 

Solution

This is a conflict between log4j and slf4j. Fix it in the pom by excluding log4j-over-slf4j from storm-core and log4j from hadoop-client, as already shown in the full pom at the top of this post.


3.

java.lang.NoSuchMethodError: org.apache.kafka.common.network.NetworkSend.<init>(Ljava/lang/String;[Ljava/nio/ByteBuffer;)V
	at kafka.network.RequestOrResponseSend.<init>(RequestOrResponseSend.scala:41) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.network.RequestOrResponseSend.<init>(RequestOrResponseSend.scala:44) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.network.BlockingChannel.send(BlockingChannel.scala:112) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.consumer.SimpleConsumer.liftedTree1$1(SimpleConsumer.scala:98) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.consumer.SimpleConsumer.kafka$consumer$SimpleConsumer$$sendRequest(SimpleConsumer.scala:83) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.consumer.SimpleConsumer.getOffsetsBefore(SimpleConsumer.scala:149) ~[kafka_2.11-0.9.0.0.jar:?]
	at kafka.javaapi.consumer.SimpleConsumer.getOffsetsBefore(SimpleConsumer.scala:79) ~[kafka_2.11-0.9.0.0.jar:?]
	at org.apache.storm.kafka.KafkaUtils.getOffset(KafkaUtils.java:81) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.KafkaUtils.getOffset(KafkaUtils.java:71) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.PartitionManager.<init>(PartitionManager.java:135) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.ZkCoordinator.refresh(ZkCoordinator.java:108) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.ZkCoordinator.getMyManagedPartitions(ZkCoordinator.java:69) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.kafka.KafkaSpout.nextTuple(KafkaSpout.java:130) ~[storm-kafka-1.1.1.jar:1.1.1]
	at org.apache.storm.daemon.executor$fn__4962$fn__4977$fn__5008.invoke(executor.clj:646) ~[storm-core-1.1.1.jar:1.1.1]
	at org.apache.storm.util$async_loop$fn__557.invoke(util.clj:484) [storm-core-1.1.1.jar:1.1.1]
	at clojure.lang.AFn.run(AFn.java:22) [clojure-1.7.0.jar:?]
	at java.lang.Thread.run(Thread.java:745) [?:1.8.0_91]

Solution: add the kafka-clients dependency:

  <dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.9.0.0</version>
  </dependency>
