redis性能测试

前段时间测试了写文件的性能,结果并不理想,于是改为测试 redis 的写入性能。

多线程写入:

package com.sides.redis;

import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;

import org.apache.log4j.Logger;

import net.sf.json.JSONArray;
import net.sf.json.JSONObject;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;

import com.sides.file.DataFactory;

/**
 * A benchmark task that serializes a list of rows to JSON and pushes them onto
 * a Redis list ({@code tableId}) via a single pipeline, syncing once at the end.
 */
public class WriteRedisThread implements Callable<Boolean> {

    private static final Logger logger = Logger.getLogger(WriteRedisThread.class);

    /** Default number of times the whole data set is pushed; preserves the original hard-coded loop bound. */
    private static final int DEFAULT_REPEAT_COUNT = 9;

    private final List<Map<String, Object>> dataList;
    private final String tableId;
    private final int repeatCount;

    /**
     * Creates a task that pushes {@code dataList} onto the Redis list
     * {@code tableId} the default number of times (9, as in the original benchmark).
     *
     * @param dataList rows to serialize to JSON and push
     * @param tableId  Redis key (list) to push into
     */
    public WriteRedisThread(List<Map<String, Object>> dataList, String tableId) {
        this(dataList, tableId, DEFAULT_REPEAT_COUNT);
    }

    /**
     * Creates a task with an explicit repeat count, generalizing the previously
     * hard-coded loop bound of 9.
     *
     * @param dataList    rows to serialize to JSON and push
     * @param tableId     Redis key (list) to push into
     * @param repeatCount how many times the whole data set is pushed
     */
    public WriteRedisThread(List<Map<String, Object>> dataList, String tableId, int repeatCount) {
        this.dataList = dataList;
        this.tableId = tableId;
        this.repeatCount = repeatCount;
    }

    /**
     * Serializes the rows once, then LPUSHes each row {@code repeatCount} times
     * through one pipeline and syncs once, minimizing round-trips.
     *
     * @return {@code true} on success, {@code false} if any exception occurred
     */
    @Override
    public Boolean call() throws Exception {
        Jedis client = null;
        try {
            JSONArray dataArr = JSONArray.fromObject(dataList, DataFactory.jsonConfig);
            client = RedisDao.getClient();
            Pipeline line = client.pipelined();
            for (int i = 0; i < repeatCount; i++) {
                // net.sf.json exposes a raw Iterator; elements are the JSONObjects built above.
                Iterator<JSONObject> iter = dataArr.iterator();
                while (iter.hasNext()) {
                    line.lpush(tableId, iter.next().toString());
                }
            }
            line.sync();
        } catch (Exception e) {
            logger.warn("when insert data into " + tableId + " exception .", e);
            return false;
        } finally {
            // Always return the borrowed connection to the pool.
            RedisDao.closeClient(client);
        }
        return true;
    }
}



调用:

package com.sides.redis;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.apache.log4j.Logger;

import redis.clients.jedis.Jedis;

import com.sides.file.DataFactory;

/**
 * Benchmark driver: submits 8000 write tasks (one key per task) to a 200-thread
 * pool, blocks until every task finishes, then reports elapsed time and the
 * number of keys present in Redis.
 */
public class WriteMain {

    private static final Logger logger = Logger.getLogger(WriteMain.class);

    public static void main(String[] args) throws InterruptedException, ExecutionException {
        List<Map<String, Object>> list = DataFactory.dataList;
        ExecutorService service = Executors.newFixedThreadPool(200);

        long t0 = System.nanoTime();
        List<Future<Boolean>> results = new ArrayList<Future<Boolean>>();
        for (int i = 0; i < 8000; i++) {
            Future<Boolean> future = service.submit(new WriteRedisThread(list, "V" + i + "TRIP_DTL"));
            results.add(future);
        }
        logger.info("所有线程已启动,开始最后的阻塞同步");
        for (Future<Boolean> f : results) {
            f.get(); // force synchronization: block until this task completes
        }
        long t1 = System.nanoTime();
        logger.info("耗时:" + (t1 - t0) / 1000000);

        // BUG FIX: the pool was never shut down, so its non-daemon worker
        // threads kept the JVM alive after main() returned.
        service.shutdown();

        Jedis client = RedisDao.getClient();
        try {
            Set<String> set = client.keys("*");
            for (String s : set) {
                logger.info(s);
            }
            logger.info("数据库中KEY数目:" + client.dbSize());
        } finally {
            // Return the connection even if key listing throws.
            RedisDao.closeClient(client);
        }

        RedisDao.destroyPool();
    }
}


redis客户端jedis

package com.sides.redis;

import org.apache.log4j.Logger;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;

/**
 * Thin static wrapper around a shared {@link JedisPool}: borrow, return, and
 * destroy. The pool is created once when the class loads.
 */
public class RedisDao {

    private static final Logger logger = Logger.getLogger(RedisDao.class);

    /** Shared connection pool; assigned exactly once in the static initializer. */
    public static final JedisPool clientPool;

    static {
        JedisPoolConfig config = new JedisPoolConfig();
        // config.setMaxActive(1500);
        config.setMaxIdle(3600);
        clientPool = new JedisPool(config, "10.10.10.21", 6379);
    }

    /**
     * Borrows a connection from the pool. JedisPool is itself thread-safe, so
     * the former {@code synchronized} modifier only serialized all benchmark
     * threads on a class-wide lock for no benefit; it has been removed.
     */
    public static Jedis getClient() {
        return clientPool.getResource();
    }

    /**
     * Returns a borrowed connection to the pool; safe to call with {@code null}.
     */
    public static void closeClient(Jedis client) {
        if (client != null) {
            clientPool.returnResource(client);
        }
    }

    /** Destroys the pool; call once when the application shuts down. */
    public static void destroyPool() {
        clientPool.destroy();
    }
}


测试数据生成工厂:

package com.sides.file;

import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import net.sf.json.JsonConfig;

import org.apache.commons.lang.math.RandomUtils;

/****************************************************************************
* com.sides.file DataFactory.java Created on 2013-12-3
* @Author: linfenliang
* @Description:
* @Version: 1.0
***************************************************************************/
public class DataFactory {

    // Only used while this class initializes (single-threaded class loading),
    // so the non-thread-safe SimpleDateFormat is safe here. Do not expose it.
    private static final DateFormat dformat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

    /** Ten pre-built rows of simulated vehicle telemetry, shared by every test run. */
    public static final List<Map<String, Object>> dataList = initData();

    /** JSON config that serializes java.util/java.sql Date values as formatted strings (was non-final and publicly mutable). */
    public static final JsonConfig jsonConfig = initjsonconfig();

    /**
     * Builds the 10-row test data set with randomized sensor values.
     * All rows share one RECEIVED_TIME stamped 10 seconds in the future.
     */
    private static final List<Map<String, Object>> initData() {
        // Renamed from "dataList": the original local shadowed the static field.
        List<Map<String, Object>> rows = new ArrayList<Map<String, Object>>();
        String recvDate = dformat.format(new Date().getTime() + 1000 * 10);
        for (int i = 0; i < 10; i++) {
            Map<String, Object> map = new HashMap<String, Object>();
            map.put("GATHERTIME", dformat.format(new Date().getTime() + i * 1000));
            map.put("ACCSTATUS", "0");
            map.put("OBD_SPEED", RandomUtils.nextInt(255));
            map.put("MAF", nextRandomDouble(0, 20, 2));
            map.put("BASE_STATION", "1823,4207,460,0,37;18431,4207,460,0,25;1822,4207,460,0,25;");
            map.put("BABV", nextRandomDouble(9.6, 13.6, 2));
            map.put("MILAGE", nextRandomDouble(0, 20, 2));
            map.put("FUEL", nextRandomDouble(10, 20, 2));
            map.put("G_FORCE", nextRandomDouble(-1, 1, 2));
            map.put("PID_010C", "");
            map.put("PID_010B", "");
            map.put("PID_010F", "");
            map.put("RECEIVED_TIME", recvDate);
            map.put("REMARK", nextInt(0, 65535));
            rows.add(map);
        }
        return rows;
    }

    /**
     * Returns a uniformly random double in [startIndex, endIndex), rounded to
     * {@code scale} decimal places (half-up via the floor(x + 0.5) trick).
     */
    private static final double nextRandomDouble(double startIndex, double endIndex, int scale) {
        double source = RandomUtils.nextFloat();
        double differ = endIndex - startIndex;
        double value = source * differ + startIndex;
        double factor = Math.pow(10, scale);
        return Math.floor(value * factor + 0.5) / factor;
    }

    /**
     * Returns a uniformly random int in [startIndex, endIndex) — note the upper
     * bound is exclusive because the random source is in [0, 1).
     */
    private static final int nextInt(int startIndex, int endIndex) {
        double source = RandomUtils.nextDouble();
        int differ = endIndex - startIndex;
        return (int) (source * differ + startIndex);
    }

    /**
     * Builds the JsonConfig, registering the date processor for both
     * java.util.Date and java.sql.Date so they serialize as formatted strings.
     */
    private static final JsonConfig initjsonconfig() {
        JsonConfig jsonConfig = new JsonConfig();
        jsonConfig.registerJsonValueProcessor(java.util.Date.class, new JsonDateValueProcessor());
        jsonConfig.registerJsonValueProcessor(java.sql.Date.class, new JsonDateValueProcessor());
        return jsonConfig;
    }

    /**
     * Ad-hoc manual test entry point; commented-out scratch code removed.
     *
     * @param args unused
     * @date 2013-12-3
     * @version V1.0.0
     * @author linfenliang
     */
    public static void main(String[] args) {
        // intentionally empty
    }
}

一个JSON日期处理类:

package com.sides.file;

import java.text.DateFormat;
import java.text.SimpleDateFormat;

import net.sf.json.JsonConfig;
import net.sf.json.processors.JsonValueProcessor;

/****************************************************************************
* com.sides.pub.util JsonDateValueProcessor.java Created on 2013-9-16
* @Author: linfenliang
* @Description: 该类主要将日期类型(java.util.Date与java.sql.Date)格式化为yyyy-MM-dd HH:mm:ss字符串
* @Version: 1.0
***************************************************************************/
/****************************************************************************
 * com.sides.pub.util JsonDateValueProcessor.java Created on 2013-9-16
 * @Author: linfenliang
 * @Description: Formats date values (java.util.Date and java.sql.Date) as
 *               "yyyy-MM-dd HH:mm:ss" strings during JSON serialization.
 * @Version: 1.0
 ***************************************************************************/
public class JsonDateValueProcessor implements JsonValueProcessor {

    /**
     * BUG FIX: SimpleDateFormat is NOT thread-safe, and processor instances are
     * shared across all writer threads via DataFactory.jsonConfig, so the
     * previous single static formatter could silently corrupt output under the
     * 200-thread benchmark. A ThreadLocal gives each thread its own formatter
     * while keeping the output format identical.
     */
    private static final ThreadLocal<DateFormat> DATE_FORMAT = new ThreadLocal<DateFormat>() {
        @Override
        protected DateFormat initialValue() {
            return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        }
    };

    /**
     * Formats date values (java.util.Date and java.sql.Date) as
     * "yyyy-MM-dd HH:mm:ss" strings.
     * @author linfenliang
     */
    public JsonDateValueProcessor() {
        super();
    }

    @Override
    public Object processArrayValue(Object arg0, JsonConfig arg1) {
        return process(arg0);
    }

    @Override
    public Object processObjectValue(String arg0, Object arg1, JsonConfig arg2) {
        return process(arg1);
    }

    /**
     * Dates become formatted strings; null becomes ""; anything else becomes
     * its toString(). Any formatting failure degrades to "" rather than
     * aborting serialization.
     */
    private Object process(Object value) {
        try {
            if (value instanceof java.util.Date || value instanceof java.sql.Date) {
                return DATE_FORMAT.get().format(value);
            }
            return value == null ? "" : value.toString();
        } catch (Exception e) {
            return "";
        }
    }
}


测试结果:

| 测试次数 | 耗时(s) | 方式 | 单KEY插入记录数 | 单KEY每次插入记录数 | KEY个数 | 线程数 | 内存消耗 | CPU消耗 |
|---|---|---|---|---|---|---|---|---|
| 1 | 706.232 | Jedis客户端 | 7200 | 10 | 100 | 200 | 约248MB | <30% |
| 2 | 231.693 | Jedis客户端管道方式 | 7200 | 10 | 100 | 200 | 约248MB | <30% |
| 3 | 178.745 | Jedis客户端管道方式 | 7200 | 7200 | 100 | 200 | 约248MB | <30% |
| 4 | 180.466 | Jedis客户端管道方式 | 72000 | 72000 | 10 | 200 | 约248MB | <30% |
| 5 | 179.186 | Jedis客户端管道方式 | 720 | 720 | 1000 | 200 | 约248MB | <30% |
| 6 | 180.310 | Jedis客户端管道方式 | 90 | 90 | 8000 | 200 | 约248MB | <30% |

由于本次测试是在家连接公司linux测试环境机器测试的,不是局域网,可能测试结果不是很准确,测试机器内存2GB,4CPU,公司网速不知。另外此处全部采用 lpush 方式写入数据,执行阻塞式强制同步。

测试结果还是比较满意的,但是内存占用稍有些大,可能与KEY的设计和写入方式有关,稍后会测试hset方式写入,优化下KEY的长度。

这是第二次测试结果,是在公司中测试的,局域网内
| 测试次数 | 耗时(s) | 方式 | 单KEY插入记录数 | 单KEY每次插入记录数 | KEY个数 | 线程数 | 内存消耗 | CPU消耗 |
|---|---|---|---|---|---|---|---|---|
| 1 | 122.488 | Jedis客户端 | 7200 | 10 | 100 | 200 | 约248MB | <30% |
| 2 | 23.306 | Jedis客户端管道方式 | 7200 | 10 | 100 | 200 | 约248MB | <30% |
| 3 | 22.004 | Jedis客户端管道方式 | 7200 | 7200 | 100 | 200 | 约248MB | <30% |
| 4 | 21.689 | Jedis客户端管道方式 | 72000 | 72000 | 10 | 200 | 约248MB | <30% |
| 5 | 22.305 | Jedis客户端管道方式 | 720 | 720 | 1000 | 200 | 约248MB | <30% |
| 6 | 22.004 | Jedis客户端管道方式 | 90 | 90 | 8000 | 200 | 约248MB | <30% |


该测试结果令我很满意。
最后一条测试结果为 模拟8000并发,每个终端的数据量为90秒的数据,结果耗时22s。

在实际应用中,目标是10000并发,每个终端数据量为10条,测试耗时为:3.774秒,并且并无数据丢失情况,CPU消耗也不高。
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值