参考链接:http://www.blogjava.net/masfay/archive/2012/07/03/382080.html
Pipeline
官方的说明是:starts a pipeline,which is a very efficient way to send lots of command and read all the responses when you finish sending them。简单点说pipeline适用于批处理。当有大量的操作需要一次性执行的时候,可以用管道。
示例:
Pipeline p = jedis.pipelined();
p.set(key,value); //每个操作只是先缓存在客户端,并不立即发送给redis-server
p.get(key); //get 只接收 key 参数,返回 Response 对象,sync 之后才能取值
![](http://www.blogjava.net/Images/dot.gif)
p.sync(); // 这段代码获取所有的response
这里我进行了20w次连续操作(10w读,10w写),不用pipeline耗时:187242ms,用pipeline耗时:1188ms,可见使用管道后的性能上了一个台阶。看了代码了解到,管道通过一次性写入请求,然后一次性读取响应。也就是说jedis是:request response,request response,...;pipeline则是:request request... response response的方式。这样无需每次请求都等待server端的响应。
原文如下:http://www.blogjava.net/masfay/archive/2012/07/03/382080.html
JedisPipeline.java 类 用于生成管道。
package com.eastcom.first.spark.data.redis.pipeline;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
import redis.clients.util.Pool;
@SuppressWarnings("deprecation")
public class JedisPipeline {

    // Pool the connections are borrowed from (protected so subclasses can reach it).
    protected Pool<Jedis> jedisPool;
    // Currently borrowed connection; null until open() has been called.
    private Jedis connection;
    // Pipeline bound to the current connection.
    private Pipeline pipe;

    public JedisPipeline(Pool<Jedis> jedisPool) {
        this.jedisPool = jedisPool;
    }

    /** Borrows a connection from the pool and starts a pipeline on it. */
    public void open() {
        connection = jedisPool.getResource();
        pipe = connection.pipelined();
    }

    /**
     * Sends every buffered command and reads the replies, then returns the
     * current connection to the pool and immediately borrows a fresh one with
     * a new pipeline so callers can keep queueing commands.
     */
    public void syncAndChange() {
        pipe.sync();
        jedisPool.returnResource(connection);
        connection = jedisPool.getResource();
        pipe = connection.pipelined();
    }

    /**
     * Flushes pending commands (only when {@code size > 0}) and hands the
     * connection back to the pool.
     *
     * @param size number of commands still buffered; 0 skips the final sync
     */
    public void close(int size) {
        if (size > 0) {
            pipe.sync();
        }
        jedisPool.returnResource(connection);
    }

    /** Discards the connection as broken instead of returning it for reuse. */
    public void broken() {
        jedisPool.returnBrokenResource(connection);
    }

    public Jedis getJedis() {
        return connection;
    }

    public Pipeline getPipeline() {
        return pipe;
    }
}
使用管道pipeline批处理类RedisBatchExector.java
package com.eastcom.first.spark.data.redis.pipeline;
import java.io.Closeable;
import redis.clients.jedis.Jedis;
import redis.clients.util.Pool;
/**
* 操作redis的基本方法
*
*/
/**
 * Batches redis commands through a pipeline, automatically syncing every
 * {@code batchSize} queued operations. Call {@link #open()} before use and
 * {@link #close()} (or try-with-resources) when done.
 */
public class RedisBatchExector implements Closeable {

    private JedisPipeline jp;
    // Number of commands queued since the last sync.
    private int pSize = 0;
    // How many commands to buffer before an automatic sync.
    private int batchSize = 1000;

    public RedisBatchExector(Pool<Jedis> jedisPool) {
        jp = new JedisPipeline(jedisPool);
    }

    public RedisBatchExector(Pool<Jedis> jedisPool, int batchSize) {
        this(jedisPool); // chain to avoid duplicating the initialization
        this.batchSize = batchSize;
    }

    /** Borrows a connection and opens the underlying pipeline. */
    public void open() {
        jp.open();
    }

    public void setBatchSize(int batchSize) {
        this.batchSize = batchSize;
    }

    /*
     * keys
     */
    public void expire(String key, int seconds) {
        jp.getPipeline().expire(key, seconds);
        incr();
    }

    public void expire(byte[] key, int seconds) {
        jp.getPipeline().expire(key, seconds);
        incr();
    }

    public void del(String key) {
        jp.getPipeline().del(key);
        incr();
    }

    public void del(byte[] key) {
        jp.getPipeline().del(key);
        incr();
    }

    public void set(String key, String value) {
        jp.getPipeline().set(key, value);
        incr();
    }

    public void set(byte[] key, byte[] value) {
        jp.getPipeline().set(key, value);
        incr();
    }

    /*
     * hashes
     */
    public void hdel(String key, String... field) {
        jp.getPipeline().hdel(key, field);
        incr();
    }

    public void hset(byte[] key, byte[] field, byte[] value) {
        jp.getPipeline().hset(key, field, value);
        incr();
    }

    public void hset(String key, String field, String value) {
        jp.getPipeline().hset(key, field, value);
        incr();
    }

    /**
     * Adds {@code value} to the given field of the hash stored at {@code key}.
     *
     * @param key   the assembled hash key
     * @param field the metric name
     * @param value the increment to apply
     */
    public void hincrBy(String key, String field, long value) {
        jp.getPipeline().hincrBy(key, field, value);
        incr();
    }

    /*
     * zset
     */
    public void zincrBy(String key, double score, String member) {
        jp.getPipeline().zincrby(key, score, member);
        incr();
    }

    public void zadd(byte[] key, double score, byte[] member) {
        jp.getPipeline().zadd(key, score, member);
        incr();
    }

    public void zadd(String key, double score, String member) {
        jp.getPipeline().zadd(key, score, member);
        incr();
    }

    /*
     * list
     */
    public void lpush(String key, String... values) {
        jp.getPipeline().lpush(key, values);
        incr();
    }

    public void lpush(byte[] key, byte[]... values) {
        jp.getPipeline().lpush(key, values);
        incr();
    }

    /**
     * Adds the given members to the set stored at {@code key}.
     */
    public void sadd(String key, String... member) {
        jp.getPipeline().sadd(key, member);
        incr();
    }

    /** Syncs any remaining commands and returns the connection to the pool. */
    @Override
    public void close() {
        jp.close(pSize);
        pSize = 0; // nothing is buffered after the final sync
    }

    /** Discards the underlying connection as broken. */
    public void broken() {
        jp.broken();
    }

    /** Forces a sync of buffered commands without closing the executor. */
    public void flush() {
        if (pSize > 0) {
            jp.syncAndChange();
            // Bug fix: the counter must restart after a manual flush;
            // otherwise the next batch syncs early and close() re-syncs
            // an already-flushed pipeline.
            pSize = 0;
        }
    }

    // Counts one queued command and auto-syncs when the batch is full.
    private void incr() {
        pSize++;
        if (pSize >= batchSize) {
            jp.syncAndChange();
            pSize = 0;
        }
    }
}
操作测试类RedisPipelineClient.java
package com.eastcom.first.spark.data.redis.pipeline;
import com.eastcom.first.spark.data.redis.JedisClientPool;
import redis.clients.jedis.JedisPool;
/** Small demo driver for {@link RedisBatchExector}. */
public class RedisPipelineClient {

    public static void main(String[] args) {
        JedisPool jedisPool = JedisClientPool.getInstance().getJedisPool();
        // try-with-resources guarantees the final sync and the return of the
        // pooled connection even when a queued command throws. (The previous
        // version also created an unused JedisPipeline — removed.)
        try (RedisBatchExector redisBatchExector = new RedisBatchExector(jedisPool)) {
            redisBatchExector.open();
            redisBatchExector.set("we", "class01");
            redisBatchExector.lpush("list", "hello", "world", "are", "you", "ok");
        }
    }
}
其中
JedisPool jedisPool = JedisClientPool.getInstance().getJedisPool();
的jedisPool的生成参考此文 《redis连接池实例》
运行结果:
127.0.0.1:6301> lrange list 0 -1
1) "ok"
2) "you"
3) "are"
4) "world"
5) "hello"
127.0.0.1:6301> keys *
1) "list"
2) "we"
127.0.0.1:6301> get we
"class01"
127.0.0.1:6301>
over