Storm provides integration adapters under /root/training/apache-storm-1.0.3/external
1. Redis
(*) Requires storm-redis-1.0.3.jar (plus the Jedis client jar it depends on)
package testStorm;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.hive.bolt.HiveBolt;
import org.apache.storm.hive.bolt.mapper.DelimitedRecordHiveMapper;
import org.apache.storm.hive.common.HiveOptions;
import org.apache.storm.jdbc.common.ConnectionProvider;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.spout.Scheme;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.*;
import org.apache.storm.tuple.Fields;
import org.apache.storm.hdfs.bolt.HdfsBolt;
import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat;
import org.apache.storm.hdfs.bolt.format.DelimitedRecordFormat;
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units;
import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy;
import org.apache.storm.redis.bolt.RedisStoreBolt;
import org.apache.storm.redis.common.config.JedisPoolConfig;
import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
import org.apache.storm.redis.common.mapper.RedisStoreMapper;
import org.apache.storm.topology.IRichBolt;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.ITuple;
import org.apache.storm.jdbc.bolt.JdbcInsertBolt;
import org.apache.storm.jdbc.mapper.JdbcMapper;
import org.apache.storm.jdbc.mapper.SimpleJdbcMapper;
import org.apache.storm.jdbc.bolt.JdbcLookupBolt;
import org.apache.storm.jdbc.common.Column;
import org.apache.storm.jdbc.mapper.SimpleJdbcLookupMapper;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.tuple.Values;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.List;
import java.util.UUID;
public class WordCountTopology {
public static void main(String[] args) throws Exception {
//Main program
TopologyBuilder builder = new TopologyBuilder();
//Set the spout that collects the data
builder.setSpout("mywordcount_spout", new WordCountSpout());
//Set the first bolt; the grouping from the upstream component is shuffle grouping
builder.setBolt("mywordcount_split", new WordCountSplitBolt()).shuffleGrouping("mywordcount_spout");
//Set the second bolt; the grouping from the upstream component is fields grouping
builder.setBolt("mywordcount_total", new WordCountTotalBolt()).fieldsGrouping("mywordcount_split",
new Fields("word"));
//Option 1: third bolt saves the result to Redis
builder.setBolt("mywordcount_redisbolt", createRedisBolt()).shuffleGrouping("mywordcount_total");
//Option 2: third bolt saves the result to HDFS
builder.setBolt("mywordcount_hdfsbolt", createHDFSBolt()).shuffleGrouping("mywordcount_total");
//Option 3: third bolt writes the result to HBase
builder.setBolt("mywordcount_hbasebolt", new WordCountHBaseBolt()).shuffleGrouping("mywordcount_total");
//Option 4: third bolt writes the result to MySQL
builder.setBolt("mywordcount_jdbcbolt", createJDBCBolt()).shuffleGrouping("mywordcount_total");
//Spout that collects data from Kafka (not tested yet)
builder.setSpout("mywordcount_kafkaspout", createKafkaSpout());
//Create the topology
StormTopology wc = builder.createTopology();
//Configuration
Config conf = new Config();
/*
 * There are two ways to run a Storm topology:
 * 1. Local mode: no need to submit it to a Storm cluster
 * 2. Cluster mode: package it as a jar and run it on the Storm cluster
 */
//1. Local mode: no need to submit it to a Storm cluster
LocalCluster localcluster = new LocalCluster();
localcluster.submitTopology("mywordcountdemo", conf, wc);
//2. Cluster mode: package it as a jar and run it on the Storm cluster
//StormSubmitter.submitTopology("mywordcountdemo", conf, wc);
}
//Save the result to MySQL
public static IRichBolt createJDBCBolt(){
//Create the JDBC connection provider
ConnectionProvider connectionProvider=new MyConnectionProvider();
JdbcMapper simpleJdbcMapper=new SimpleJdbcMapper("result",connectionProvider);
//new JdbcInsertBolt(connectionProvider,simpleJdbcMapper).withTableName("result").withInsertQuery("");
return new JdbcInsertBolt(connectionProvider,simpleJdbcMapper).withTableName("result").withQueryTimeoutSecs(30);
}
private static IRichBolt createHDFSBolt() {
// Create an HdfsBolt that writes the data to HDFS
HdfsBolt bolt = new HdfsBolt();
//HDFS location, i.e. the NameNode address
bolt.withFsUrl("hdfs://192.168.157.111:9000");
//Target directory on HDFS
bolt.withFileNameFormat(new DefaultFileNameFormat().withPath("/stormdata"));
//Field delimiter; the output looks like: Beijing|10
bolt.withRecordFormat(new DelimitedRecordFormat().withFieldDelimiter("|"));
//Data arrives continuously, so rotate to a new file every 5 MB
bolt.withRotationPolicy(new FileSizeRotationPolicy(5.0f, Units.MB));
//Sync policy: flush to HDFS every 100 tuples
bolt.withSyncPolicy(new CountSyncPolicy(100));
return bolt;
}
// Return a Redis bolt that inserts the result into Redis
private static IRichBolt createRedisBolt() {
//Create a Redis connection pool configuration (poolConfig)
JedisPoolConfig.Builder builder = new JedisPoolConfig.Builder();
builder.setHost("39.108.176.119");
builder.setPort(6479);
JedisPoolConfig poolConfig = builder.build();
//storeMapper: describes how the tuple is stored in Redis
return new RedisStoreBolt(poolConfig, new RedisStoreMapper(){
@Override
public String getKeyFromTuple(ITuple tuple) {
// Key received from the upstream component
return tuple.getStringByField("word");
}
@Override
public String getValueFromTuple(ITuple tuple) {
// Value received from the upstream component
return String.valueOf(tuple.getIntegerByField("total"));
}
@Override
public RedisDataTypeDescription getDataTypeDescription() {
// Data structure used in Redis: a hash
return new RedisDataTypeDescription(RedisDataTypeDescription.RedisDataType.HASH, //data type
"wordcount"); //name of the hash key
}
});
}
//Write the result into Hive
private static IRichBolt createHiveBolt() {
//Set the environment variable so that winutils.exe can be found (when running on Windows)
System.setProperty("hadoop.home.dir","E:\\hadoop-2.7.3");
//Maps the result tuples produced by the upstream bolt into the Hive table
DelimitedRecordHiveMapper mapper=new DelimitedRecordHiveMapper().withColumnFields(new Fields("word","total"));
//Hive connection options
HiveOptions options=new HiveOptions("thrift://hadoop111:9083",// Hive metastore URI
"default",//Hive database name
"wordcount",//target table
mapper).withTxnsPerBatch(10)
.withBatchSize(1000).withIdleTimeout(10);
//Create a HiveBolt that stores the word-count result in Hive
HiveBolt bolt=new HiveBolt(options);
return bolt;
}
//Spout that collects data from Kafka
private static IRichSpout createKafkaSpout(){
//ZooKeeper address
BrokerHosts hosts=new ZkHosts("39.108.176.119:2181");
//Topic configuration: topic name, ZK root path, consumer id
SpoutConfig spoutConfig=new SpoutConfig(hosts,"mydemo2","/mydemo2", UUID.randomUUID().toString());
spoutConfig.scheme=new SchemeAsMultiScheme(new Scheme() {
@Override
public List<Object> deserialize(ByteBuffer byteBuffer) {
try {
//Decode the Kafka message as a UTF-8 string and emit it as the "sentence" field
String msg = Charset.forName("UTF-8").decode(byteBuffer).toString();
return new Values(msg);
}catch (Exception e){
e.printStackTrace();
}
return null;
}
@Override
public Fields getOutputFields() {
return new Fields("sentence");
}
});
return new KafkaSpout(spoutConfig);
}
}
package testStorm;
import org.apache.storm.jdbc.common.ConnectionProvider;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
/**
 * @author Administrator
 * @Package_name testStorm
 * @Date 2019/4/22 2:43 PM
 * @Description JDBC connection provider for the MySQL "demo" database
 */
public class MyConnectionProvider implements ConnectionProvider {
private static String driver ="com.mysql.jdbc.Driver";
private static String url="jdbc:mysql://localhost:3306/demo";
private static String user="root";
private static String password ="123456";
static{
try {
Class.forName(driver);
} catch (ClassNotFoundException e) {
e.printStackTrace();
}
}
@Override
public Connection getConnection() {
try {
return DriverManager.getConnection(url,user,password);
}catch (Exception e){
e.printStackTrace();
}
return null;
}
@Override
public void cleanup() {
}
@Override
public void prepare() {
}
}
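SimpleJdbcMapper("result", connectionProvider) reads the column metadata of the result table, so the table must exist in MySQL before the topology starts. A minimal sketch of creating it through this connection provider (the word/total column names match the tuple fields emitted by WordCountTotalBolt; the varchar length and the class name are assumptions):
package testStorm;
import java.sql.Connection;
import java.sql.Statement;
public class CreateMySQLResultTable {
public static void main(String[] args) throws Exception {
MyConnectionProvider provider = new MyConnectionProvider();
provider.prepare();
Connection conn = provider.getConnection();
Statement st = conn.createStatement();
// Column names match the tuple fields ("word", "total"); types are assumptions
st.executeUpdate("create table if not exists result(word varchar(50), total int)");
st.close();
conn.close();
provider.cleanup();
}
}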
Note: Jedis's JedisPoolConfig has no setMaxActive or setMaxWait methods.
While building the pool configuration, setMaxActive and setMaxWait could not be found. Looking at the source, JedisPoolConfig extends GenericObjectPoolConfig, and indeed its parent class has no maxActive property either. Why?
The reason: commons-pool2 renamed these properties as follows:
maxActive ==> maxTotal
maxWait ==> maxWaitMillis
==================== Utility class ==========================
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
public class RedisUtils {
private static JedisPool jedisPool=null;
static {
JedisPoolConfig config=new JedisPoolConfig();
config.setMaxTotal(1024);
config.setMaxIdle(200);
config.setMaxWaitMillis(10000);
config.setTestOnBorrow(true);
jedisPool=new JedisPool(config,"192.168.220.128",6379);
}
public synchronized static Jedis getJedis() {
try {
if(jedisPool !=null) return jedisPool.getResource();
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
public static void returnResource(final Jedis jedis) {
if(jedis!=null) jedisPool.returnResource(jedis);
}
}
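The utility class can be used to read the word-count result back out of Redis. A minimal sketch (assuming the topology has already run and that RedisUtils points at the same Redis instance the RedisStoreBolt writes to; the class name RedisReadBack is just for illustration):
import java.util.Map;
import redis.clients.jedis.Jedis;
public class RedisReadBack {
public static void main(String[] args) {
Jedis jedis = RedisUtils.getJedis();
if (jedis == null) return;
try {
// Read the whole "wordcount" hash written by the RedisStoreBolt
Map<String, String> wordcount = jedis.hgetAll("wordcount");
for (Map.Entry<String, String> e : wordcount.entrySet()) {
System.out.println(e.getKey() + " = " + e.getValue());
}
} finally {
RedisUtils.returnResource(jedis);
}
}
}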
package testStorm;
import java.util.Map;
import java.util.Random;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;
//Responsible for collecting data
public class WordCountSpout extends BaseRichSpout{
//Simulated input data
private String[] data = {"I love Beijing","I love China","Beijing is the capital of China"};
//Output collector of the spout
private SpoutOutputCollector collector;
@Override
public void nextTuple() {
//Collect data every 3 seconds
Utils.sleep(3000);
//Called by the Storm framework to receive external data
//Pick a random sentence from the simulated data
int random = (new Random()).nextInt(3);
String sentence = data[random];
System.out.println("Sentence collected by the spout: " + sentence);
//Send it to the next component for processing
this.collector.emit(new Values(sentence));
}
@Override
public void open(Map arg0, TopologyContext arg1, SpoutOutputCollector collector) {
//SpoutOutputCollector collector: used to emit the data produced by this spout
//Initialization
this.collector = collector;
}
@Override
public void declareOutputFields(OutputFieldsDeclarer declare) {
// Declare the schema of the tuples emitted by this spout
declare.declare(new Fields("sentence"));
}
}
package testStorm;
import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
//Responsible for splitting sentences into words
public class WordCountSplitBolt extends BaseRichBolt{
//Output collector
private OutputCollector collector;
@Override
public void execute(Tuple tuple) {
//Process the data received from the upstream component
//Take out the sentence
String data = tuple.getStringByField("sentence");
//Split it into words
String[] words = data.split(" ");
//Emit each word with a count of 1
for(String w:words){
this.collector.emit(new Values(w,1));
}
}
@Override
public void prepare(Map arg0, TopologyContext arg1, OutputCollector collector) {
//Initialize the output collector of this bolt
this.collector = collector;
}
@Override
public void declareOutputFields(OutputFieldsDeclarer declare) {
// Declare the schema of the tuples emitted by this bolt
declare.declare(new Fields("word","number"));
}
}
package testStorm;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;
//Receives data from the upstream bolt and saves the result to HBase
public class WordCountHBaseBolt extends BaseRichBolt {
//HBase table client
private HTable table = null;
@Override
public void execute(Tuple tuple) {
// Receive data from the upstream component and insert it into HBase
//Take out the incoming data
String word = tuple.getStringByField("word");
int total = tuple.getIntegerByField("total");
//Build a Put object; the word is used as the rowkey
Put put = new Put(Bytes.toBytes(word));
put.add(Bytes.toBytes("info"), Bytes.toBytes("word"), Bytes.toBytes(word));
put.add(Bytes.toBytes("info"), Bytes.toBytes("total"), Bytes.toBytes(String.valueOf(total)));
//Insert the data
try{
table.put(put);
}catch(Exception ex){
ex.printStackTrace();
}
}
@Override
public void prepare(Map arg0, TopologyContext arg1, OutputCollector arg2) {
//prepare: do the initialization work here
//Get an HBase table client
//Reminder: the HBase client only needs the ZooKeeper quorum address
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum", "192.168.157.111");
//Create the HBase client
try{
table = new HTable(conf, "result");
}catch(Exception ex){
ex.printStackTrace();
}
}
@Override
public void declareOutputFields(OutputFieldsDeclarer arg0) {
}
}
package testStorm;
import java.util.HashMap;
import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
//Responsible for counting the words
public class WordCountTotalBolt extends BaseRichBolt {
private OutputCollector collector;
//A Map that holds the running totals
private Map<String, Integer> result = new HashMap<>();
@Override
public void execute(Tuple tuple) {
// Take out the data sent by the upstream component
String word = tuple.getStringByField("word");
int number = tuple.getIntegerByField("number");
//Count
if(result.containsKey(word)){
//The word already exists in the result map: accumulate
int total = result.get(word);
result.put(word, total + number);
}else{
//First occurrence of the word
result.put(word, number);
}
//Print the current result to the console
System.out.println("Current result: " + result);
//Send the result (word, total frequency) to the next component
this.collector.emit(new Values(word,result.get(word)));
}
@Override
public void prepare(Map arg0, TopologyContext arg1, OutputCollector collector) {
this.collector = collector;
}
@Override
public void declareOutputFields(OutputFieldsDeclarer declare) {
// Example tuple: Beijing 3
declare.declare(new Fields("word","total"));
}
}
Because the result is stored as a Redis hash, inspect it with: hgetall wordcount
2. HDFS
(*) storm-hdfs-1.0.3.jar
(*) The HDFS client jars are also required
3. HBase
(*) Not recommended: storm-hbase-1.0.3.jar (you can try it, but it is more trouble than it is worth)
(*) Use the HBase client jar directly and insert the data with Put (see WordCountHBaseBolt.java)
(*) Create a table in HBase first:
create 'result','info'
Remember to start the HBase server first.
(*) The HBase client jars are required
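The same table can also be created from Java with the HBase admin API. A minimal sketch (assuming the same hbase.zookeeper.quorum as in WordCountHBaseBolt; the class name is just for illustration):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
public class CreateHBaseResultTable {
public static void main(String[] args) throws Exception {
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum", "192.168.157.111");
HBaseAdmin admin = new HBaseAdmin(conf);
try {
// Equivalent of the shell command: create 'result','info'
if (!admin.tableExists("result")) {
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("result"));
desc.addFamily(new HColumnDescriptor("info"));
admin.createTable(desc);
}
} finally {
admin.close();
}
}
}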
4. Integration with other components (Hive)
Hive configuration (hive-site.xml):
<property>
<name>hive.in.test</name>
<value>true</value>
</property>
create table wordcount(word string,total int) clustered by (word) into 10 buckets stored as orc TBLPROPERTIES('transactional'='true');
The statement above creates an ACID-enabled table, stored in ORC format.
Testing the Hive Java API requires the Hive jars and the Hadoop jars:
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>1.2.0</version>
<type>jar</type>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-common</artifactId>
<version>1.2.0</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-metastore</artifactId>
<version>1.2.0</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-service</artifactId>
<version>1.2.0</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>2.7.1</version>
</dependency>
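With those jars on the classpath, the wordcount table can be queried over HiveServer2 JDBC. A minimal sketch (the URL jdbc:hive2://hadoop111:10000/default, the user, and the class name are assumptions; adjust them to your environment):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
public class HiveWordCountQuery {
public static void main(String[] args) throws Exception {
Class.forName("org.apache.hive.jdbc.HiveDriver");
// HiveServer2 address is an assumption based on the metastore host used above
Connection conn = DriverManager.getConnection("jdbc:hive2://hadoop111:10000/default", "root", "");
Statement st = conn.createStatement();
ResultSet rs = st.executeQuery("select word, total from wordcount");
while (rs.next()) {
System.out.println(rs.getString("word") + "\t" + rs.getInt("total"));
}
rs.close();
st.close();
conn.close();
}
}
Note that selecting from the ACID table also requires the transaction-manager settings mentioned in the "Problems" section below.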
Dependencies for integrating Storm with Jedis and the other big-data components:
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>2.7.0</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-core</artifactId>
<version>1.0.3</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-hive</artifactId>
<version>1.0.3</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-redis</artifactId>
<version>1.0.3</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-hdfs</artifactId>
<version>1.0.3</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-hbase</artifactId>
<version>1.0.3</version>
</dependency>
=========== Full dependency list ============
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
<scope>test</scope>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-core
The hadoop-core artifact used here was still at version 1.2.1, which no longer supports Hadoop 2.9.0.
Because Hadoop's packaging changed, this cannot be fixed simply by changing the hadoop-core version.
The correct approach is to replace hadoop-core with the hadoop-client and hadoop-hdfs dependencies, configured as follows: -->
<!-- <dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
<version>0.20.2</version>
</dependency>-->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>2.7.3</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>2.7.3</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>2.7.3</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
<version>2.7.3</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.mrunit/mrunit -->
<dependency>
<groupId>org.apache.mrunit</groupId>
<artifactId>mrunit</artifactId>
<version>1.1.0</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>0.96.2-hadoop2</version>
</dependency>
<dependency>
<groupId>jdk.tools</groupId>
<artifactId>jdk.tools</artifactId>
<version>1.8</version>
<scope>system</scope>
<systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
</dependency>
<!--storm相关jar -->
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-core</artifactId>
<version>1.0.3</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-jdbc</artifactId>
<version>1.0.3</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-hive</artifactId>
<version>1.0.3</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-hbase</artifactId>
<version>1.0.3</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-hdfs</artifactId>
<version>1.0.3</version>
</dependency>
<!-- storm-mongodb -->
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-mongodb</artifactId>
<version>1.0.3</version>
</dependency>
<!-- zookeeper -->
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>3.4.6</version>
</dependency>
<!-- mongodb bson -->
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>bson</artifactId>
<version>3.10.1</version>
</dependency>
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-kafka-client</artifactId>
<version>1.0.3</version>
</dependency>
<!-- mongo-java-driver -->
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>mongo-java-driver</artifactId>
<version>3.9.0</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.31</version>
</dependency>
</dependencies>
Problems
select * from wordcount;
FAILED: SemanticException [Error 10265]: This command is not allowed on an ACID table default.wordcount with a non-ACID transaction manager. Failed command: select * from wordcount
Cause: reading an ACID table requires the ACID transaction manager. Setting hive.support.concurrency=true and hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager (in hive-site.xml) should let the query run.
Problem 1: log4j conflict caused by KafkaSpout
Problem description:
SLF4J: Detected both log4j-over-slf4j.jar AND slf4j-log4j12.jar on the class path, preempting StackOverflowError.
SLF4J: See also http://www.slf4j.org/codes.html#log4jDelegationLoop for more details
Cause: the KafkaSpout code (storm.kafka.KafkaSpout) uses the slf4j packages, while Kafka itself (kafka.consumer.SimpleConsumer) uses the Apache log4j packages.
Solution: exclude the conflicting packages in the dependency declaration:
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.10.1.1</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>