因为 Storm 整合 Redis 不止有普通整合,还有高级整合,所以分开讲解。
pom.xml
<!-- Storm core and the storm-redis integration module (both 1.1.1), plus
     Spring Boot's configuration-metadata processor so the
     @ConfigurationProperties class below gets IDE metadata.
     NOTE(review): in a real topology storm-core is usually <scope>provided</scope>
     when submitting to a cluster - confirm against the deployment setup. -->
<dependency>
    <groupId>org.apache.storm</groupId>
    <artifactId>storm-core</artifactId>
    <version>1.1.1</version>
</dependency>
<dependency>
    <groupId>org.apache.storm</groupId>
    <artifactId>storm-redis</artifactId>
    <version>1.1.1</version>
    <type>jar</type>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-configuration-processor</artifactId>
    <optional>true</optional>
</dependency>
package com.yjp.stormredis.redis;

import org.springframework.boot.context.properties.ConfigurationProperties;

import java.io.Serializable;

/**
 * Redis cluster settings bound from the {@code spring.storm.redis} prefix of
 * the application configuration. Consumed by {@code RedisAutoConfiguration}
 * to build the JedisCluster configuration.
 */
@ConfigurationProperties(prefix = "spring.storm.redis")
public class RedisClusterProperties implements Serializable {
    /**
     * Maximum number of redirections between cluster nodes
     * (effectively a retry count).
     */
    private int maxRedirections;
    /**
     * Redis cluster nodes as {@code ip:port} entries, multiple entries
     * separated by commas.
     */
    private String nodes;
    /**
     * Connection timeout (presumably milliseconds, as passed straight to
     * JedisClusterConfig — confirm against the Jedis API).
     */
    private int timeout;
    /**
     * Maximum number of idle connections.
     */
    private int maxIdle;
    /**
     * Minimum number of idle connections.
     */
    private int minIdle;
    /**
     * Maximum total number of connections.
     * NOTE(review): declared as String while the sibling pool settings are
     * int — confirm whether this is intentional before changing the type
     * (changing it would break existing binders/callers).
     */
    private String maxTotal;

    public int getMaxRedirections() {
        return maxRedirections;
    }

    public void setMaxRedirections(int maxRedirections) {
        this.maxRedirections = maxRedirections;
    }

    public String getNodes() {
        return nodes;
    }

    public void setNodes(String nodes) {
        this.nodes = nodes;
    }

    public int getTimeout() {
        return timeout;
    }

    public void setTimeout(int timeout) {
        this.timeout = timeout;
    }

    public int getMaxIdle() {
        return maxIdle;
    }

    public void setMaxIdle(int maxIdle) {
        this.maxIdle = maxIdle;
    }

    public int getMinIdle() {
        return minIdle;
    }

    public void setMinIdle(int minIdle) {
        this.minIdle = minIdle;
    }

    public String getMaxTotal() {
        return maxTotal;
    }

    public void setMaxTotal(String maxTotal) {
        this.maxTotal = maxTotal;
    }
}
RedisAutoConfiguration:Redis 的自动配置类
package com.yjp.stormredis.redis;

import org.apache.storm.redis.common.config.JedisClusterConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.io.Serializable;
import java.net.InetSocketAddress;
import java.util.LinkedHashSet;
import java.util.Set;

/**
 * Auto-configuration that turns the bound {@link RedisClusterProperties}
 * into a {@link RedisStormFactory} bean for creating Storm Redis bolts.
 */
@Configuration
@EnableConfigurationProperties({RedisClusterProperties.class})
public class RedisAutoConfiguration implements Serializable {

    @Autowired
    private RedisClusterProperties properties;

    /**
     * Builds the JedisCluster configuration from the bound properties
     * (node addresses, connection timeout, max redirections).
     *
     * @return the cluster configuration handed to the bolt factory
     */
    private JedisClusterConfig jedisClusterConfig() {
        return new JedisClusterConfig(nodes(), properties.getTimeout(),
                properties.getMaxRedirections());
    }

    /**
     * Parses the configured {@code ip:port,ip:port,...} string into socket
     * addresses.
     *
     * @return one address per configured cluster node, in configuration order
     * @throws IllegalArgumentException if an entry is not of the form ip:port
     */
    private Set<InetSocketAddress> nodes() {
        // LinkedHashSet: the set is built and consumed on a single thread, so
        // the concurrent set used previously bought nothing; insertion order
        // is kept for predictable iteration.
        Set<InetSocketAddress> nodes = new LinkedHashSet<>();
        for (String node : properties.getNodes().split(",")) {
            String[] parts = node.trim().split(":");
            if (parts.length != 2) {
                // Fail fast with a clear message instead of an opaque
                // ArrayIndexOutOfBoundsException on a malformed entry.
                throw new IllegalArgumentException(
                        "Invalid redis node entry (expected ip:port): " + node);
            }
            nodes.add(new InetSocketAddress(parts[0], Integer.parseInt(parts[1].trim())));
        }
        return nodes;
    }

    /**
     * Exposes the factory used to create the Redis lookup/store bolts.
     *
     * @return a factory wired with the cluster configuration
     */
    @Bean
    public RedisStormFactory redisStormFactory() {
        return new RedisStormFactory(jedisClusterConfig());
    }
}Factory工厂类:
package com.yjp.stormredis.redis;

import org.apache.storm.redis.bolt.RedisLookupBolt;
import org.apache.storm.redis.bolt.RedisStoreBolt;
import org.apache.storm.redis.common.config.JedisClusterConfig;

import java.io.Serializable;

/**
 * Factory for the Storm Redis bolts used by this project. Every bolt
 * produced here shares the single JedisClusterConfig supplied at
 * construction time.
 */
public class RedisStormFactory implements Serializable {

    /** Cluster configuration handed to each created bolt. */
    private final JedisClusterConfig jedisClusterConfig;

    public RedisStormFactory(JedisClusterConfig jedisClusterConfig) {
        this.jedisClusterConfig = jedisClusterConfig;
    }

    /**
     * Creates the lookup (read) bolt, whose tuple/Redis mapping is defined
     * by {@link WordCountRedisLookupMapper}.
     *
     * @return a new RedisLookupBolt
     */
    public RedisLookupBolt createRedisLookupBolt() {
        WordCountRedisLookupMapper lookupMapper = new WordCountRedisLookupMapper();
        return new RedisLookupBolt(jedisClusterConfig, lookupMapper);
    }

    /**
     * Creates the store (write) bolt, whose tuple/Redis mapping is defined
     * by {@link WordCountStoreMapper}.
     *
     * @return a new RedisStoreBolt
     */
    public RedisStoreBolt createRedisStoreBolt() {
        WordCountStoreMapper storeMapper = new WordCountStoreMapper();
        return new RedisStoreBolt(jedisClusterConfig, storeMapper);
    }
}查询bolt的mapper
package com.yjp.stormredis.redis;

import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
import org.apache.storm.redis.common.mapper.RedisLookupMapper;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.ITuple;
import org.apache.storm.tuple.Values;

import java.util.ArrayList;
import java.util.List;

/**
 * Storm provides several standard Redis bolts:
 * RedisLookupBolt (query), RedisStoreBolt (store), AbstractRedisBolt (custom).
 * <p>
 * This lookup mapper tells RedisLookupBolt how to map between tuples and the
 * Redis data structure: which key to read, and how to turn the looked-up
 * value back into an output tuple.
 *
 * @Author : WenBao
 * Date : 14:11 2018/1/23
 */
public class WordCountRedisLookupMapper implements RedisLookupMapper {

    private final RedisDataTypeDescription description;
    private final String hashKey = "wordCount";

    public WordCountRedisLookupMapper() {
        // Data is stored as a Redis HASH rooted at hashKey; the bolt then
        // resolves each tuple's key against that hash:
        //   lookupValue = jedisCommand.hget(additionalKey, key);
        description = new RedisDataTypeDescription(RedisDataTypeDescription.RedisDataType.HASH, hashKey);
    }

    /**
     * Converts a looked-up Redis value back into output tuples.
     *
     * @param input the incoming tuple the key was taken from
     * @param value the value just read from Redis for that key
     * @return a single (word, count) Values entry
     */
    @Override
    public List<Values> toTuple(ITuple input, Object value) {
        String member = getKeyFromTuple(input);
        // Plain java.util.ArrayList instead of Storm's *shaded* Guava
        // (org.apache.storm.shade...Lists) - shaded internals are not a
        // public API and can disappear between Storm releases.
        List<Values> values = new ArrayList<>();
        values.add(new Values(member, value));
        return values;
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("wordName", "count"));
    }

    @Override
    public RedisDataTypeDescription getDataTypeDescription() {
        return description;
    }

    /**
     * Tells RedisLookupBolt which key to extract from the input tuple for
     * the Redis query.
     *
     * @param tuple the incoming tuple
     * @return the value of the tuple's "word" field
     */
    @Override
    public String getKeyFromTuple(ITuple tuple) {
        return tuple.getStringByField("word");
    }

    /**
     * Unused on the lookup side - values come from Redis, not the tuple.
     */
    @Override
    public String getValueFromTuple(ITuple tuple) {
        return null;
    }
}看这些方法其实有点懵,但是这个类只是我们所需要RedisLookupBolt类的一部分
直接进入源码,看 RedisLookupBolt 最重要的 process 方法:
// Upstream source of RedisLookupBolt.process, quoted for reference - it shows
// how the mapper's overridden methods are used by the bolt.
public void process(Tuple input) {
    // Key to query, produced by our overridden getKeyFromTuple.
    String key = lookupMapper.getKeyFromTuple(input);
    Object lookupValue;
    JedisCommands jedisCommand = null;
    try {
        jedisCommand = getInstance();
        switch (dataType) {
            case STRING:
                lookupValue = jedisCommand.get(key);
                break;
            case LIST:
                lookupValue = jedisCommand.lpop(key);
                break;
            case HASH:
                // Our case: read the value for `key` inside the hash rooted
                // at additionalKey (the hashKey we configured).
                lookupValue = jedisCommand.hget(additionalKey, key);
                break;
            case SET:
                lookupValue = jedisCommand.scard(key);
                break;
            case SORTED_SET:
                lookupValue = jedisCommand.zscore(additionalKey, key);
                break;
            case HYPER_LOG_LOG:
                lookupValue = jedisCommand.pfcount(key);
                break;
            case GEO:
                lookupValue = jedisCommand.geopos(additionalKey, key);
                break;
            default:
                throw new IllegalArgumentException("Cannot process such data type: " + dataType);
        }
        // Our overridden toTuple: each element pairs the key taken from the
        // tuple with the value just looked up in Redis.
        List<Values> values = lookupMapper.toTuple(input, lookupValue);
        for (Values value : values) {
            collector.emit(input, value);
        }
        collector.ack(input);
    } catch (Exception e) {
        this.collector.reportError(e);
        this.collector.fail(input);
    } finally {
        returnInstance(jedisCommand);
    }
}储存bolt的mapper
package com.yjp.stormredis.redis;

import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
import org.apache.storm.redis.common.mapper.RedisStoreMapper;
import org.apache.storm.tuple.ITuple;

/**
 * Store-side mapper: tells RedisStoreBolt to write word counts into the
 * Redis hash "wordCount", using each tuple's "word" field as the hash field
 * and its "count" field as the stored value.
 *
 * @Author : WenBao
 * Date : 15:26 2018/1/23
 */
public class WordCountStoreMapper implements RedisStoreMapper {

    /** Root key of the Redis hash all counts are written under. */
    private static final String HASH_KEY = "wordCount";

    private final RedisDataTypeDescription description =
            new RedisDataTypeDescription(RedisDataTypeDescription.RedisDataType.HASH, HASH_KEY);

    @Override
    public RedisDataTypeDescription getDataTypeDescription() {
        return description;
    }

    /** Hash field to store under: the tuple's "word" field. */
    @Override
    public String getKeyFromTuple(ITuple tuple) {
        return tuple.getStringByField("word");
    }

    /** Value to store: the tuple's "count" field. */
    @Override
    public String getValueFromTuple(ITuple tuple) {
        return tuple.getStringByField("count");
    }
}进入RedisStoreBolt的源码 同样的方法:
// Upstream source of RedisStoreBolt.process, quoted for reference - the
// store-side counterpart of the lookup bolt above.
@Override
public void process(Tuple input) {
    // Key under which to store, taken from the tuple via our overridden
    // getKeyFromTuple (it could also be a fixed key of our choosing).
    String key = storeMapper.getKeyFromTuple(input);
    // Value to store, taken from the tuple's field.
    String value = storeMapper.getValueFromTuple(input);
    JedisCommands jedisCommand = null;
    try {
        jedisCommand = getInstance();
        switch (dataType) {
            case STRING:
                jedisCommand.set(key, value);
                break;
            case LIST:
                jedisCommand.rpush(key, value);
                break;
            case HASH:
                // Our case: additionalKey is the root hash key we configured;
                // key/value were just extracted from the tuple.
                jedisCommand.hset(additionalKey, key, value);
                break;
            case SET:
                jedisCommand.sadd(key, value);
                break;
            case SORTED_SET:
                jedisCommand.zadd(additionalKey, Double.valueOf(value), key);
                break;
            case HYPER_LOG_LOG:
                jedisCommand.pfadd(key, value);
                break;
            case GEO:
                String[] array = value.split(":");
                if (array.length != 2) {
                    throw new IllegalArgumentException("value structure should be longitude:latitude");
                }
                double longitude = Double.valueOf(array[0]);
                double latitude = Double.valueOf(array[1]);
                jedisCommand.geoadd(additionalKey, longitude, latitude, key);
                break;
            default:
                throw new IllegalArgumentException("Cannot process such data type: " + dataType);
        }
        collector.ack(input);
    } catch (Exception e) {
        this.collector.reportError(e);
        this.collector.fail(input);
    } finally {
        returnInstance(jedisCommand);
    }
}整合到此结束,将bolt放入topology中就可以直接使用了,注意字段就可以了
努力吧,皮卡丘