Notes on migrating Redis data with multiple threads

Background:
 As part of a project overhaul, we needed to migrate the data held on 10 existing Redis servers into the company's new storage.
 The industry's common Redis migration approaches are three: AOF (append-only log file), RDB (snapshot file), and replication (master/slave sync).
 See http://www.bitstech.net/2016/03/03/redis-migration/
 However, this migration had special requirements, so we had to copy the data ourselves according to our own key rules. And since the 10 servers sit behind twemproxy, we had to read from each of the 10 servers individually and write the results into the new storage.
Problems faced:
  1. We query data by matching our key patterns with the KEYS command (Jedis's keys method). Think this through carefully: KEYS scans the whole keyspace, and it definitely will not work against a cluster. (A small Jedis sketch follows the reference links below.)
  2. With 10 servers, every invocation of the method starts 10 threads, so size the thread pool accordingly. The Redis connection limit is not a concern; maxclients defaults to 10,000.
  3. When going through the Twemproxy proxy you cannot issue KEYS via a Jedis connection; Twemproxy does not support that command and the call will fail.
  Twemproxy references:
http://blog.jiguang.cn/redis-twemproxy-benchmark
http://blog.csdn.net/luyee2010/article/details/17452733

http://www.cnblogs.com/chenny7/p/4467036.html
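
To make point 1 concrete, here is a minimal Jedis sketch. The host, port, and key pattern are placeholders (the "h*"-prefixed pattern mirrors the hour-level keys used later in this post); it connects to one backend node directly, since neither Twemproxy nor a cluster will accept KEYS:

import redis.clients.jedis.Jedis;

import java.util.Set;

public class KeysDemo {
    public static void main(String[] args) {
        // Connect to ONE backend node directly; KEYS must bypass Twemproxy.
        try (Jedis jedis = new Jedis("127.0.0.1", 6379)) {
            // Glob-style pattern match, e.g. all hour-level keys for one timestamp.
            Set<String> keys = jedis.keys("h*1477843200000");
            System.out.println("matched " + keys.size() + " keys");
        }
    }
}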

The code is as follows. First, the Spring async executor configuration; the pool size and queue capacity are externalized as properties:

<task:executor id="asyncTaskExecutor"
               pool-size="${executor.pool.size}"
               queue-capacity="${executor.queue.capacity}"
               keep-alive="500"/>
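
If you prefer Java config over the XML task namespace, the equivalent is roughly the following. This is a sketch; the concrete pool and queue numbers are assumptions standing in for the ${executor.pool.size} and ${executor.queue.capacity} properties:

import org.springframework.core.task.AsyncTaskExecutor;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

public class ExecutorConfig {
    // Equivalent of <task:executor .../>: a ThreadPoolTaskExecutor,
    // which implements AsyncTaskExecutor.
    public static AsyncTaskExecutor asyncTaskExecutor() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(10);       // assumed: one thread per Redis node
        executor.setMaxPoolSize(10);        // assumed: single-value pool-size means core == max
        executor.setQueueCapacity(100);     // assumed
        executor.setKeepAliveSeconds(500);  // matches keep-alive="500" above
        executor.initialize();
        return executor;
    }
}

The controller itself follows.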

package com.jd.noah.storm.report.controller;

import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.task.AsyncTaskExecutor;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import org.springframework.util.CollectionUtils;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;

import javax.servlet.http.HttpServletRequest;
import java.util.*;


@RestController
public class FlushDataController extends BaseController {
    private static final Logger LOG = LoggerFactory.getLogger(FlushDataController.class);
    // Cluster and Cache are company-internal clients (not shown here): Cluster
    // talks to the new JimDB storage, Cache wraps the old Twemproxy-fronted Redis.
    private Cluster newJimDBcache;
    private Cache redisCache;
    private AsyncTaskExecutor asyncTaskExecutor;

    @ResponseBody
    @RequestMapping("/testRedis")
    public String testRedis(String key, String value, String method, String source) {
        // Smoke-test endpoint: source "new" targets the JimDB cluster,
        // anything else targets the old Twemproxy-fronted Redis.
        if ("new".equals(source)) {
            if ("set".equals(method)) {
                try {
                    newJimDBcache.set(key, value);
                } catch (Exception e) {
                    LOG.error("set failed", e);
                }
                return "success";
            } else if ("get".equals(method)) {
                try {
                    String result = newJimDBcache.get(key);
                    System.out.println(result);
                    return result;
                } catch (Exception e) {
                    LOG.error("get failed", e);
                }
            } else if ("del".equals(method)) {
                newJimDBcache.del(key);
                return "delete success";
            }
        } else {
            if ("set".equals(method)) {
                try {
                    redisCache.set(key, value, 0);
                } catch (Exception e) {
                    LOG.error("set failed", e);
                }
                return "success";
            } else if ("get".equals(method)) {
                try {
                    String result = redisCache.get(key);
                    System.out.println(result);
                    return result;
                } catch (Exception e) {
                    LOG.error("get failed", e);
                }
            } else if ("del".equals(method)) {
                redisCache.delete(key);
                return "delete success";
            } else if ("keys".equals(method)) {
                return redisCache.keys(key).toString();
            }
        }

        return "0";
    }

    /**
     * Walk the dates between the user-supplied begin/end dates to derive keys,
     * then pattern-match those keys in Redis to fetch the values. The two dates
     * must be at least one day apart.
     * Approach 1 (used here): loop over the Redis servers one by one, passing
     * each IP and port in turn; simple. Approach 2: loop and match by different
     * key rules; clumsy and troublesome.
     * @param request the incoming HTTP request
     * @param beginDate start date
     * @param endDate end date
     */
    @RequestMapping("/flushData")
    public String  flushData(HttpServletRequest request,
                     @RequestParam(value="beginDate")  String beginDate,
                     @RequestParam(value="endDate")  String endDate) {
        // Placeholders: fill in the real host:port of each of the 10 backend nodes.
        List<String> hostAndPorts = Lists.newArrayList("ip:port", "ip:port", "ip:port", "ip:port"
                , "ip:port", "ip:port", "ip:port", "ip:port", "ip:port", "ip:port");

        for (String hostAndPort : hostAndPorts) {
            asyncTaskExecutor.submit(() -> {
                // One template per backend node; named nodeTemplate so it does
                // not shadow the redisCache field above.
                StringRedisTemplate nodeTemplate = initRedisTemplates(hostAndPort);
                Date beginDateInfo = DateUtils.getDayEndTime(beginDate, DateUtils.SHORT_TO_DD_PATTERN);
                long mmStart = DateUtils.getCurrentDateTimeLong(beginDateInfo, DateUtils.SHORT_TO_MM_PATTERN);
                long hhStart = DateUtils.getCurrentDateTimeLong(beginDateInfo, DateUtils.SHORT_TO_HH_PATTERN);
                long ddStart = DateUtils.getCurrentDateTimeLong(beginDateInfo, DateUtils.SHORT_TO_DD_PATTERN);
                Date endDateInfo = DateUtils.getDayEndTime(endDate, DateUtils.SHORT_TO_DD_PATTERN);
                long mmEnd = DateUtils.getCurrentDateTimeLong(endDateInfo, DateUtils.SHORT_TO_MM_PATTERN);
                long hhEnd = DateUtils.getCurrentDateTimeLong(endDateInfo, DateUtils.SHORT_TO_HH_PATTERN);
                long ddEnd = DateUtils.getCurrentDateTimeLong(endDateInfo, DateUtils.SHORT_TO_DD_PATTERN);
                // PV is bucketed by day, hour, minute and second; see AnalyseTimePeriod
                // and PvMonitorAjaxController.querySecondIndex. Second-level keys carry
                // millisecond timestamps, which makes them fiddly to handle.
                //LOG.error("minute pass: mmStart=" + mmStart + ", mmEnd=" + mmEnd);
            /*    while (mmStart < mmEnd) {
                   // LOG.error("processing date: " + DateUtils.getCurrentDate(mmStart, DateUtils.SHORT_TO_YYYYMMDD_PATTERN));
                   // LOG.error("minute pass: mmStart=" + mmStart + ", mmEnd=" + mmEnd);
                    Set<String> pvMinuteKeys = nodeTemplate.keys("m*" + mmStart);
                   // LOG.error("minute key batch size==" + pvMinuteKeys.size());
                   //LOG.error("the pvMinuteKeys is " + pvMinuteKeys.toString());
                    if (pvMinuteKeys != null && pvMinuteKeys.size() > 0) {
                        newJimDBcache.mSetString(mget(nodeTemplate, pvMinuteKeys));
                    }
                  *//*  pvMinuteKeys.parallelStream().forEach(key -> {
                        newJimDBcache.set(key, nodeTemplate.opsForValue().get(key));
                    });*//*
                    mmStart = mmStart + 60 * 1000; // advance one minute
                }*/
                //LOG.error("finished minute-level data =========");
                LOG.error("hour pass: hhStart=" + hhStart + ", hhEnd=" + hhEnd);
                while (hhStart < hhEnd) {
                    //LOG.error("processing date: " + DateUtils.getCurrentDate(hhStart, DateUtils.SHORT_TO_YYYYMMDD_PATTERN));
                    //LOG.error("hour pass: hhStart=" + hhStart + ", hhEnd=" + hhEnd);
                    Set<String> pvHourKeys = nodeTemplate.keys("h*" + hhStart); // same key scheme as the category keys
                    //LOG.error("hour key batch size==" + pvHourKeys.size());
                    //LOG.error("the pvHourKeys is " + pvHourKeys.toString());
                    if (pvHourKeys != null && pvHourKeys.size() > 0) {
                        newJimDBcache.mSetString(mget(nodeTemplate, pvHourKeys));
                    }
               /*     pvHourKeys.parallelStream().forEach(key -> {
                        newJimDBcache.set(key, nodeTemplate.opsForValue().get(key));
                    });*/
                    hhStart = hhStart + 60 * 60 * 1000; // advance one hour
                }
                LOG.error("finished hour-level data ======");
                LOG.error("day pass: ddStart=" + ddStart + ", ddEnd=" + ddEnd);
                while (ddStart < ddEnd) {
                    LOG.error("processing date: " + DateUtils.getCurrentDate(ddStart, DateUtils.SHORT_TO_YYYYMMDD_PATTERN));
                    // Minute keys for this day: match on the leading digits of the
                    // millisecond timestamp.
                    Set<String> pvMinuteKeys = nodeTemplate.keys("m*" + String.valueOf(ddStart).substring(0, 5) + "*");
                    if (pvMinuteKeys != null && pvMinuteKeys.size() > 0) {
                        newJimDBcache.mSetString(mget(nodeTemplate, pvMinuteKeys));
                    }
                    //LOG.error("day pass: ddStart=" + ddStart + ", ddEnd=" + ddEnd);
                    Set<String> pvDayKeys = nodeTemplate.keys("d*" + ddStart); // PV, UV and product-category keys share this scheme
                    //LOG.error("day key batch size==" + pvDayKeys.size());
                    //LOG.error("the pvDayKeys is " + pvDayKeys.toString());
                    if (pvDayKeys != null && pvDayKeys.size() > 0) {
                        newJimDBcache.mSetString(mget(nodeTemplate, pvDayKeys));
                    }
                  /*  pvDayKeys.parallelStream().forEach(key -> {
                        newJimDBcache.set(key, nodeTemplate.opsForValue().get(key));
                    });*/
                    //LOG.error("second pass key==" + DateUtils.getCurrentDate(ddStart, DateUtils.SHORT_TO_YYYYMMDD_PATTERN));
                    Set<String> secondPvKeys = nodeTemplate.keys("ss*" + DateUtils.getCurrentDate(ddStart, DateUtils.SHORT_TO_YYYYMMDD_PATTERN) + "*");
                    //LOG.error("second key batch size==" + secondPvKeys.size());
                    //LOG.error("the secondPvKeys is " + secondPvKeys.toString());
                    if (secondPvKeys != null && secondPvKeys.size() > 0) {
                        newJimDBcache.mSetString(mget(nodeTemplate, secondPvKeys));
                    }
                /*    secondPvKeys.parallelStream().forEach(key -> {
                        newJimDBcache.set(key, nodeTemplate.opsForValue().get(key));
                    });*/
                    ddStart = ddStart + 24 * 60 * 60 * 1000; // advance one day
                }
                LOG.error("migration finished ==============");
            });
        }
        return "success";
    }

    /**
     * Bulk-read the given keys with MGET and pair each key with its value.
     * Redis returns MGET results in request order, so walking the key
     * collection with an index lines the two up.
     */
    private Map<String, String> mget(StringRedisTemplate redisTemplate, Collection<String> keys) {
        List<String> list = redisTemplate.opsForValue().multiGet(keys);
        if (CollectionUtils.isEmpty(list)) {
            return Collections.emptyMap();
        }

        Map<String, String> result = new HashMap<String, String>();
        int i = 0;
        for (String key : keys) {
            result.put(key, list.get(i++));
        }
        return result;
    }



    /**
     * Build a StringRedisTemplate pointed at a single backend node so that
     * KEYS/MGET hit that node directly instead of going through Twemproxy.
     * PoolJedisConnectionFactory is a company-internal wrapper around the
     * Jedis connection pool.
     */
    private StringRedisTemplate initRedisTemplates(String hostAndPort) {
        PoolJedisConnectionFactory connectionFactory = new PoolJedisConnectionFactory();
        connectionFactory.setMaxTotal(100);
        connectionFactory.setMaxIdle(10);
        connectionFactory.setMaxWaitMillis(1000);
        connectionFactory.setHostAndPort(hostAndPort);
        connectionFactory.setTimeout(20000);
        connectionFactory.setTestOnBorrow(true);
        connectionFactory.afterPropertiesSet();
        StringRedisTemplate template = new StringRedisTemplate();
        template.setValueSerializer(new StringRedisSerializer());
        template.setConnectionFactory(connectionFactory);
        template.afterPropertiesSet();
        return template;
    }

    public void setRedisCache(Cache redisCache) {
        this.redisCache = redisCache;
    }

    public void setNewJimDBcache(Cluster newJimDBcache) {
        this.newJimDBcache = newJimDBcache;
    }

    public void setAsyncTaskExecutor(AsyncTaskExecutor asyncTaskExecutor) {
        this.asyncTaskExecutor = asyncTaskExecutor;
    }


    public static void main(String[] args) {
        // Quick check of the timestamp helpers used above.
        String date1 = "2016-10-30";
        Date date1Info = DateUtils.getDayEndTime(date1, DateUtils.SHORT_TO_DD_PATTERN);
        System.out.println("day-level timestamp==" + DateUtils.getCurrentDateTimeLong(date1Info, DateUtils.SHORT_TO_DD_PATTERN));
        System.out.println(String.valueOf(DateUtils.getCurrentDateTimeLong(date1Info, DateUtils.SHORT_TO_DD_PATTERN)).substring(0, 5));
    }
}
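
One caveat about flushData above: every nodeTemplate.keys(...) call is a blocking KEYS scan on a live node. If that is a concern, SCAN is the non-blocking alternative. Below is a minimal sketch against the same StringRedisTemplate, assuming Spring Data Redis 1.4+; the ScanHelper class name and the count of 500 are my own choices, not part of the original code:

import org.springframework.data.redis.core.Cursor;
import org.springframework.data.redis.core.RedisCallback;
import org.springframework.data.redis.core.ScanOptions;
import org.springframework.data.redis.core.StringRedisTemplate;

import java.util.HashSet;
import java.util.Set;

public class ScanHelper {
    /**
     * Collect the keys matching a glob pattern with SCAN instead of KEYS.
     * SCAN walks the keyspace in small batches, so it does not block the
     * server the way KEYS does on a large dataset.
     */
    public static Set<String> scanKeys(StringRedisTemplate template, String pattern) {
        final Set<String> keys = new HashSet<String>();
        template.execute((RedisCallback<Void>) connection -> {
            Cursor<byte[]> cursor = connection.scan(
                    ScanOptions.scanOptions().match(pattern).count(500).build());
            while (cursor.hasNext()) {
                keys.add(new String(cursor.next()));
            }
            return null;
        });
        return keys;
    }
}

Swapping scanKeys(nodeTemplate, "h*" + hhStart) in for the keys(...) calls would keep the source nodes responsive while the migration runs.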

