storm-redis 详解

storm-redis 详解

2018年05月30日 17:11:58 万琛 阅读数:404 标签: Stormredis 更多

个人分类: Storm

 版权声明:转载请声明原地址 愿君一同勤奋 https://blog.csdn.net/qq_40570699/article/details/80512165

                                    多的不说,先来代码分析,再贴我自己写的代码。如果代码有错误,求更正。。

导入两个关键包,其他项目需要的包,大家自己导入了,我pom下的包太多,不好一下扔上来。

 
  1. <dependency>

  2. <groupId>org.apache.storm</groupId>

  3. <artifactId>storm-redis</artifactId>

  4. <version>${storm.version}</version>

  5. </dependency>

  6. <dependency>

  7. <groupId>redis.clients</groupId>

  8. <artifactId>jedis</artifactId>

  9. <version>2.9.0</version>

  10. </dependency>

 

我是连接的linux上的redis,所以要对redis进行配置,不然会出现拒绝连接的错误。

 
  1. redis部署在linux时,java远程连接需要修改配置:

  2. 修改redis.conf文件

  3. 1.将bind 127.0.0.1加上注释,(#bind 127.0.0.1),允许出本机外的IP访问redis

  4. 2.将protected-mode yes,修改为protected-mode no;不保护redis

  5. 3.将daemonize no,修改为daemonize yes;允许redis服务后台运行

  6. 修改防火墙端口号

  7. 1.将redis默认的6379注册到防火墙中

  8. /sbin/iptables -I INPUT -p tcp --dport 6379 -j ACCEPT

  9. 2.保存防火墙端口号表

  10. /etc/rc.d/init.d/iptables save

  11. 3.重启防火墙

  12. /etc/rc.d/init.d/iptables restart

  13. 4.查看防火墙状态

  14. /etc/rc.d/init.d/iptables status

 

使用测试类连接一下,看能不能连通:

 
  1. import java.util.Iterator;

  2. import java.util.Set;

  3. import redis.clients.jedis.Jedis;

  4. /**

  5. * @author cwc

  6. * @date 2018年5月30日

  7. * @description:

  8. * @version 1.0.0

  9. */

  10. public class RedisTest {

  11.  
  12. public static void main(String[]args){

  13.  
  14. //连接本地的 Redis 服务

  15. Jedis jedis = new Jedis("xxx.xx.xxx.xx");

  16. System.out.println("连接成功");

  17. //查看服务是否运行

  18. System.out.println("服务正在运行: "+jedis.ping());

  19. // 获取数据并输出

  20. Set<String> keys = jedis.keys("*");

  21. Iterator<String> it=keys.iterator() ;

  22. while(it.hasNext()){

  23. String key = it.next();

  24. System.out.println(key);

  25. }

  26. }

  27. }

准备就绪,先说说storm向redis写入:

        官方给的写入API:

 
  1. class WordCountStoreMapper implements RedisStoreMapper {

  2. private RedisDataTypeDescription description;

  3. private final String hashKey = "wordCount";

  4.  
  5. public WordCountStoreMapper() {

  6. description = new RedisDataTypeDescription(

  7. RedisDataTypeDescription.RedisDataType.HASH, hashKey);

  8. }

  9.  
  10. @Override

  11. public RedisDataTypeDescription getDataTypeDescription() {

  12. return description;

  13. }

  14.  
  15. @Override

  16. public String getKeyFromTuple(ITuple tuple) {

  17. return tuple.getStringByField("word");

  18. }

  19.  
  20. @Override

  21. public String getValueFromTuple(ITuple tuple) {

  22. return tuple.getStringByField("count");

  23. }

  24. }

 
// Wiring snippet: builds the Redis connection-pool config and the store bolt;
// used when assembling the topology with TopologyBuilder.
JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
        .setHost(host).setPort(port).build();
RedisStoreMapper storeMapper = new WordCountStoreMapper();
RedisStoreBolt storeBolt = new RedisStoreBolt(poolConfig, storeMapper);

    我反正刚刚看的时候一脸懵逼,之后研究了很久才明白,下面贴我自己的代码:

 
  1. import java.util.HashMap;

  2. import java.util.Map;

  3. import java.util.Random;

  4.  
  5. import org.apache.storm.spout.SpoutOutputCollector;

  6. import org.apache.storm.task.TopologyContext;

  7. import org.apache.storm.topology.OutputFieldsDeclarer;

  8. import org.apache.storm.topology.base.BaseRichSpout;

  9. import org.apache.storm.tuple.Fields;

  10. import org.apache.storm.tuple.Values;

  11.  
  12. /**

  13. * @author cwc

  14. * @date 2018年5月29日

  15. * @description:这是给的假的数据源

  16. * @version 1.0.0

  17. */

  18. public class RedisWriteSpout extends BaseRichSpout {

  19. private static final long serialVersionUID = 1L;

  20. private SpoutOutputCollector spoutOutputCollector;

  21.  
  22. /**

  23. * 作为字段word输出

  24. */

  25. private static final Map<Integer, String> LASTNAME = new HashMap<Integer, String>();

  26. static {

  27. LASTNAME.put(0, "anderson");

  28. LASTNAME.put(1, "watson");

  29. LASTNAME.put(2, "ponting");

  30. LASTNAME.put(3, "dravid");

  31. LASTNAME.put(4, "lara");

  32. }

  33. /**

  34. * 作为字段myValues输出

  35. */

  36. private static final Map<Integer, String> COMPANYNAME = new HashMap<Integer, String>();

  37. static {

  38. COMPANYNAME.put(0, "abc");

  39. COMPANYNAME.put(1, "dfg");

  40. COMPANYNAME.put(2, "pqr");

  41. COMPANYNAME.put(3, "ecd");

  42. COMPANYNAME.put(4, "awe");

  43. }

  44.  
  45. public void open(Map conf, TopologyContext context,

  46. SpoutOutputCollector spoutOutputCollector) {

  47. this.spoutOutputCollector = spoutOutputCollector;

  48. }

  49.  
  50. public void nextTuple() {

  51. final Random rand = new Random();

  52. int randomNumber = rand.nextInt(5);

  53. try {

  54. Thread.sleep(100);

  55. } catch (InterruptedException e) {

  56. // TODO Auto-generated catch block

  57. e.printStackTrace();

  58. }

  59. spoutOutputCollector.emit (new Values(LASTNAME.get(randomNumber),COMPANYNAME.get(randomNumber)));

  60. System.out.println("数据来袭!!!!!!");

  61. }

  62.  
  63. public void declareOutputFields(OutputFieldsDeclarer declarer) {

  64. // emit the field site.

  65. declarer.declare(new Fields("word","myValues"));

  66. }

  67. }

 
  1. import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;

  2. import org.apache.storm.redis.common.mapper.RedisStoreMapper;

  3. import org.apache.storm.tuple.ITuple;

  4.  
  5. /**

  6. * @author cwc

  7. * @date 2018年5月30日

  8. * @description:

  9. * @version 1.0.0

  10. */

  11. public class RedisWriteMapper implements RedisStoreMapper{

  12. private static final long serialVersionUID = 1L;

  13. private RedisDataTypeDescription description;

  14. //这里的key是redis中的key

  15. private final String hashKey = "mykey";

  16.  
  17. public RedisWriteMapper() {

  18. description = new RedisDataTypeDescription(RedisDataTypeDescription.RedisDataType.HASH, hashKey);

  19. }

  20.  
  21. @Override

  22. public String getKeyFromTuple(ITuple ituple) {

  23. //这个代表redis中,hash中的字段名

  24. return ituple.getStringByField("word");

  25. }

  26.  
  27. @Override

  28. public String getValueFromTuple(ITuple ituple) {

  29. //这个代表redis中,hash中的字段名对应的值

  30. return ituple.getStringByField("myValues");

  31. }

  32.  
  33. @Override

  34. public RedisDataTypeDescription getDataTypeDescription() {

  35. return description;

  36. }

  37.  
  38.  
  39. }

storm读取redis数据:

        官方给的API:

 
  1. class WordCountRedisLookupMapper implements RedisLookupMapper {

  2. private RedisDataTypeDescription description;

  3. private final String hashKey = "wordCount";

  4.  
  5. public WordCountRedisLookupMapper() {

  6. description = new RedisDataTypeDescription(

  7. RedisDataTypeDescription.RedisDataType.HASH, hashKey);

  8. }

  9.  
  10. @Override

  11. public List<Values> toTuple(ITuple input, Object value) {

  12. String member = getKeyFromTuple(input);

  13. List<Values> values = Lists.newArrayList();

  14. values.add(new Values(member, value));

  15. return values;

  16. }

  17.  
  18. @Override

  19. public void declareOutputFields(OutputFieldsDeclarer declarer) {

  20. declarer.declare(new Fields("wordName", "count"));

  21. }

  22.  
  23. @Override

  24. public RedisDataTypeDescription getDataTypeDescription() {

  25. return description;

  26. }

  27.  
  28. @Override

  29. public String getKeyFromTuple(ITuple tuple) {

  30. return tuple.getStringByField("word");

  31. }

  32.  
  33. @Override

  34. public String getValueFromTuple(ITuple tuple) {

  35. return null;

  36. }

  37. }

 
// Wiring snippet: the pool config plus a lookup mapper produce the
// RedisLookupBolt that is wired into the topology.
JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()
        .setHost(host).setPort(port).build();
RedisLookupMapper lookupMapper = new WordCountRedisLookupMapper();
RedisLookupBolt lookupBolt = new RedisLookupBolt(poolConfig, lookupMapper);

    自己代码:

 
  1. import java.util.HashMap;

  2. import java.util.Map;

  3. import java.util.Random;

  4.  
  5. import org.apache.storm.spout.SpoutOutputCollector;

  6. import org.apache.storm.task.TopologyContext;

  7. import org.apache.storm.topology.OutputFieldsDeclarer;

  8. import org.apache.storm.topology.base.BaseRichSpout;

  9. import org.apache.storm.tuple.Fields;

  10. import org.apache.storm.tuple.Values;

  11.  
  12. /**

  13. * @author cwc

  14. * @date 2018年5月30日

  15. * @description:

  16. * @version 1.0.0

  17. */

  18. public class RedisReadSpout extends BaseRichSpout {

  19. private static final long serialVersionUID = 1L;

  20. private SpoutOutputCollector spoutOutputCollector;

  21.  
  22. /**

  23. * 这是刚刚作为word写入的数据,要通过他获取我们存的值

  24. */

  25. private static final Map<Integer, String> LASTNAME = new HashMap<Integer, String>();

  26. static {

  27. LASTNAME.put(0, "anderson");

  28. LASTNAME.put(1, "watson");

  29. LASTNAME.put(2, "ponting");

  30. LASTNAME.put(3, "dravid");

  31. LASTNAME.put(4, "lara");

  32. }

  33.  
  34. public void open(Map conf, TopologyContext context,

  35. SpoutOutputCollector spoutOutputCollector) {

  36. this.spoutOutputCollector = spoutOutputCollector;

  37. }

  38.  
  39. public void nextTuple() {

  40. final Random rand = new Random();

  41. int randomNumber = rand.nextInt(5);

  42. try {

  43. Thread.sleep(100);

  44. } catch (InterruptedException e) {

  45. // TODO Auto-generated catch block

  46. e.printStackTrace();

  47. }

  48. spoutOutputCollector.emit (new Values(LASTNAME.get(randomNumber)));

  49. System.out.println("读数据来袭!!!!!!");

  50. }

  51.  
  52. public void declareOutputFields(OutputFieldsDeclarer declarer) {

  53. // emit the field site.

  54. declarer.declare(new Fields("word"));

  55. }

  56. }

 
  1. import java.util.List;

  2.  
  3. import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;

  4. import org.apache.storm.redis.common.mapper.RedisLookupMapper;

  5. import org.apache.storm.topology.OutputFieldsDeclarer;

  6. import org.apache.storm.tuple.Fields;

  7. import org.apache.storm.tuple.ITuple;

  8. import org.apache.storm.tuple.Values;

  9. import com.google.common.collect.Lists;

  10.  
  11. /**

  12. * @author cwc

  13. * @date 2018年5月30日

  14. * @description:

  15. * @version 1.0.0

  16. */

  17. public class RedisReadMapper implements RedisLookupMapper {

  18. private static final long serialVersionUID = 1L;

  19. //对redis的所支持的种类进行了初始化

  20. private RedisDataTypeDescription description;

  21. //你想要读取的hash表中的key,这里使用的是刚刚存储的key字段名

  22. private final String hashKey="mykey";

  23. /**

  24. * redis中储存结构为hash hashKey为根key 然后在通过getKeyFromTuple 获得的key找到相对于的value

  25. * key1-key2[]-value key2中的每一个key对应一个value

  26. * lookupValue = jedisCommand.hget(additionalKey, key);

  27. */

  28. public RedisReadMapper() {

  29. description = new RedisDataTypeDescription(RedisDataTypeDescription.RedisDataType.HASH, hashKey);

  30. }

  31. @Override

  32. public String getKeyFromTuple(ITuple tuple) {

  33. //获取传过来的字段名

  34. return tuple.getStringByField("word");

  35. }

  36. @Override

  37. public String getValueFromTuple(ITuple tuple) {

  38. return null;

  39. }

  40. @Override

  41. public RedisDataTypeDescription getDataTypeDescription() {

  42. return description;

  43. }

  44. @Override

  45. public void declareOutputFields(OutputFieldsDeclarer declarer) {

  46. //从redis中hash通过上面的key下面找到制定的word中的字段名下的值,有点想hbase中row:cf:val一样

  47. declarer.declare(new Fields("word","values"));

  48. }

  49. @Override

  50. /**

  51. * 将拿到的数据装进集合并且返回

  52. */

  53. public List<Values> toTuple(ITuple input, Object value) {

  54. String member =getKeyFromTuple(input);

  55. List<Values> values =Lists.newArrayList();

  56. //将拿到的数据存进集合,下面时将两个值返回的,所以向下游传值时需要定义两个名字。

  57. values.add(new Values(member,value));

  58. return values;

  59. }

  60. }

 
  1. import java.util.Map;

  2.  
  3. import org.apache.storm.task.OutputCollector;

  4. import org.apache.storm.task.TopologyContext;

  5. import org.apache.storm.topology.OutputFieldsDeclarer;

  6. import org.apache.storm.topology.base.BaseRichBolt;

  7. import org.apache.storm.tuple.Fields;

  8. import org.apache.storm.tuple.Tuple;

  9.  
  10. /**

  11. * @author cwc

  12. * @date 2018年5月30日

  13. * @description:打印获取的数据

  14. * @version 1.0.0

  15. */

  16. public class RedisOutBolt extends BaseRichBolt{

  17.  
  18. private OutputCollector collector;

  19. @Override

  20. public void execute(Tuple tuple) {

  21. // String str =tuple.getString(0);

  22. String strs =tuple.getString(1);

  23. System.out.println(strs);

  24.  
  25. }

  26.  
  27. @Override

  28. public void prepare(Map arg0, TopologyContext arg1, OutputCollector collector) {

  29. // TODO Auto-generated method stub

  30. this.collector=collector;

  31. }

  32.  
  33. @Override

  34. public void declareOutputFields(OutputFieldsDeclarer declarer) {

  35. declarer.declare(new Fields("RedisOutBolt"));

  36. }

  37. }

  接下来是  RedisMain,测试读写方法:

 
  1. import org.apache.storm.Config;

  2. import org.apache.storm.LocalCluster;

  3. import org.apache.storm.redis.bolt.RedisLookupBolt;

  4. import org.apache.storm.redis.bolt.RedisStoreBolt;

  5. import org.apache.storm.redis.common.config.JedisPoolConfig;

  6. import org.apache.storm.redis.common.mapper.RedisLookupMapper;

  7. import org.apache.storm.redis.common.mapper.RedisStoreMapper;

  8. import org.apache.storm.topology.TopologyBuilder;

  9.  
  10. public class RedisMain {

  11. public static void main(String[] args) throws Exception {

  12. // writeRedis();

  13. readRedis();

  14. }

  15. /**

  16. * 写redis

  17. */

  18. public static void writeRedis(){

  19. JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()

  20. .setHost("xxx.xx.xx.xx").setPort(6379).build();

  21. System.out.println("连接成功!!!");

  22. RedisStoreMapper storeMapper = new RedisWriteMapper();

  23. RedisStoreBolt storeBolt = new RedisStoreBolt(poolConfig, storeMapper);

  24.  
  25. TopologyBuilder builder = new TopologyBuilder();

  26. builder.setSpout("RedisWriteSpout", new RedisWriteSpout(), 2);

  27. builder.setBolt("to-save", storeBolt, 1).shuffleGrouping("RedisWriteSpout");

  28.  
  29. Config conf = new Config();

  30. LocalCluster cluster = new LocalCluster();

  31. cluster.submitTopology("test", conf, builder.createTopology());

  32. System.err.println("写入完成!!!!!");

  33. try {

  34. Thread.sleep(10000);

  35. //等待6s之后关闭集群

  36. cluster.killTopology("test");

  37. //关闭集群

  38. cluster.shutdown();

  39. } catch (InterruptedException e) {

  40. // TODO Auto-generated catch block

  41. e.printStackTrace();

  42. }

  43. }

  44. /**

  45. * 读redis

  46. */

  47. public static void readRedis(){

  48. JedisPoolConfig poolConfig = new JedisPoolConfig.Builder()

  49. .setHost("xxx.xx.xxx.xx").setPort(6379).build();

  50. RedisLookupMapper lookupMapper = new RedisReadMapper();

  51. RedisLookupBolt lookupBolt = new RedisLookupBolt(poolConfig, lookupMapper);

  52.  
  53. TopologyBuilder builder = new TopologyBuilder();

  54. builder.setSpout("RedisReadSpout-reader", new RedisReadSpout(), 2);

  55. builder.setBolt("to-lookupBolt", lookupBolt, 1).shuffleGrouping("RedisReadSpout-reader");

  56. builder.setBolt("to-out",new RedisOutBolt(), 1).shuffleGrouping("to-lookupBolt");

  57. Config conf = new Config();

  58. LocalCluster cluster = new LocalCluster();

  59. cluster.submitTopology("test", conf, builder.createTopology());

  60. try {

  61. Thread.sleep(100000);

  62. //等待6s之后关闭集群

  63. cluster.killTopology("test");

  64. //关闭集群

  65. cluster.shutdown();

  66. } catch (InterruptedException e) {

  67. // TODO Auto-generated catch block

  68. e.printStackTrace();

  69. }

  70. }

  71.  
  72. }

    很多解释都写在了代码注释中,其中也有不少问题是在写注释的过程中发现的,认真看下代码,祝大家零BUG哦~~

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值