官网地址:http://redis.io/download
一、环境安装
1.1 linux安装
安装环境:gcc
yum install gcc
下载redis并解压
cd /opt
wget http://download.redis.io/releases/redis-5.0.3.tar.gz
tar -zxvf redis-5.0.3.tar.gz
cd redis-5.0.3
# 进入到解压好的redis-5.0.3目录下,进行编译与安装
make
#修改redis.conf中的配置信息
daemonize yes #后台启动
protected-mode no #关闭保护模式,开启的话,只有本机才可以访问redis
# 需要注释掉bind
#bind 127.0.0.1(bind绑定的是自己机器网卡的ip,如果有多块网卡可以配多个ip,代表允许客户
#端通过机器的哪些网卡ip去访问,内网一般可以不配置bind,注释掉即可)
启动服务:
src/redis-server redis.conf
#验证是否成功
ps -ef | grep redis
#进入redis客户端
src/redis-cli
[root@k8s-master01 redis-5.0.3]# src/redis-cli
127.0.0.1:6379> set zengqingfa 100
OK
127.0.0.1:6379> get zengqingfa
"100"
127.0.0.1:6379>
# 退出客户端
quit
# 退出redis服务
1) pkill redis-server
[root@k8s-master01 redis-5.0.3]# ps -ef|grep redis
root 6357 1 0 20:58 ? 00:00:00 src/redis-server *:6379
root 6881 26636 0 21:08 pts/1 00:00:00 grep --color=auto redis
[root@k8s-master01 redis-5.0.3]# pkill redis-server
[root@k8s-master01 redis-5.0.3]# ps -ef|grep redis
root 6893 26636 0 21:08 pts/1 00:00:00 grep --color=auto redis
2) kill 进程号
[root@k8s-master01 redis-5.0.3]# src/redis-server redis.conf
6921:C 20 Nov 2021 21:09:23.447 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
6921:C 20 Nov 2021 21:09:23.447 # Redis version=5.0.3, bits=64, commit=00000000, modified=0, pid=6921, just started
6921:C 20 Nov 2021 21:09:23.447 # Configuration loaded
[root@k8s-master01 redis-5.0.3]# ps -ef|grep redis
root 6922 1 0 21:09 ? 00:00:00 src/redis-server *:6379
root 6930 26636 0 21:09 pts/1 00:00:00 grep --color=auto redis
[root@k8s-master01 redis-5.0.3]# kill 6922
[root@k8s-master01 redis-5.0.3]# ps -ef|grep redis
root 6939 26636 0 21:09 pts/1 00:00:00 grep --color=auto redis
3) src/redis-cli shutdown
[root@k8s-master01 redis-5.0.3]# src/redis-server redis.conf
6959:C 20 Nov 2021 21:09:57.734 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
6959:C 20 Nov 2021 21:09:57.734 # Redis version=5.0.3, bits=64, commit=00000000, modified=0, pid=6959, just started
6959:C 20 Nov 2021 21:09:57.734 # Configuration loaded
[root@k8s-master01 redis-5.0.3]# ps -ef|grep redis
root 6960 1 0 21:09 ? 00:00:00 src/redis-server *:6379
root 6974 26636 0 21:10 pts/1 00:00:00 grep --color=auto redis
[root@k8s-master01 redis-5.0.3]# src/redis-cli shutdown
[root@k8s-master01 redis-5.0.3]# ps -ef|grep redis
root 6987 26636 0 21:10 pts/1 00:00:00 grep --color=auto redis
1.2 windows安装
windows版本下载地址:https://github.com/tporadowski/redis/releases/
二、常用的数据结构
127.0.0.1:6379> help @string
APPEND key value
summary: Append a value to a key
since: 2.0.0
BITCOUNT key [start end]
summary: Count set bits in a string
since: 2.6.0
BITFIELD key [GET type offset] [SET type offset value] [INCRBY type offset increment] [OVERFLOW WRAP|SAT|FAIL]
summary: Perform arbitrary bitfield integer operations on strings
since: 3.2.0
BITOP operation destkey key [key ...]
summary: Perform bitwise operations between strings
since: 2.6.0
BITPOS key bit [start] [end]
summary: Find first bit set or clear in a string
since: 2.8.7
DECR key
summary: Decrement the integer value of a key by one
since: 1.0.0
DECRBY key decrement
summary: Decrement the integer value of a key by the given number
since: 1.0.0
GET key
summary: Get the value of a key
since: 1.0.0
GETBIT key offset
summary: Returns the bit value at offset in the string value stored at key
since: 2.2.0
GETRANGE key start end
summary: Get a substring of the string stored at a key
since: 2.4.0
GETSET key value
summary: Set the string value of a key and return its old value
since: 1.0.0
INCR key
summary: Increment the integer value of a key by one
since: 1.0.0
INCRBY key increment
summary: Increment the integer value of a key by the given amount
since: 1.0.0
INCRBYFLOAT key increment
summary: Increment the float value of a key by the given amount
since: 2.6.0
MGET key [key ...]
summary: Get the values of all the given keys
since: 1.0.0
MSET key value [key value ...]
summary: Set multiple keys to multiple values
since: 1.0.1
MSETNX key value [key value ...]
summary: Set multiple keys to multiple values, only if none of the keys exist
since: 1.0.1
PSETEX key milliseconds value
summary: Set the value and expiration in milliseconds of a key
since: 2.6.0
SET key value [expiration EX seconds|PX milliseconds] [NX|XX]
summary: Set the string value of a key
since: 1.0.0
SETBIT key offset value
summary: Sets or clears the bit at offset in the string value stored at key
since: 2.2.0
SETEX key seconds value
summary: Set the value and expiration of a key
since: 2.0.0
SETNX key value
summary: Set the value of a key, only if the key does not exist
since: 1.0.0
SETRANGE key offset value
summary: Overwrite part of a string at key starting at the specified offset
since: 2.2.0
STRLEN key
summary: Get the length of the value stored in a key
since: 2.2.0
2.1 String
常用操作
//字符串常用操作
SET key value //存入字符串键值对
MSET key value [key value ...] //批量存储字符串键值对
SETNX key value //存入一个不存在的字符串键值对
GET key //获取一个字符串键值
MGET key [key ...] //批量获取字符串键值
DEL key [key ...] //删除一个键
EXPIRE key seconds //设置一个键的过期时间(秒)
//原子加减
INCR key //将key中储存的数字值加1
DECR key //将key中储存的数字值减1
INCRBY key increment //将key所储存的值加上increment
DECRBY key decrement //将key所储存的值减去decrement
示例:
127.0.0.1:6379> mset name zengqingfa age 22
OK
127.0.0.1:6379> setnx student laishuihui
(integer) 1
127.0.0.1:6379> get name
"zengqingfa"
127.0.0.1:6379> mget name age
1) "zengqingfa"
2) "22"
127.0.0.1:6379> del name age
(integer) 2
127.0.0.1:6379> mget name age
1) (nil)
2) (nil)
127.0.0.1:6379> set name zengqingfa
OK
127.0.0.1:6379> expire name 5
(integer) 1
127.0.0.1:6379> ttl name
(integer) 3
127.0.0.1:6379> ttl name
(integer) -2
127.0.0.1:6379> get name
(nil)
127.0.0.1:6379> set id 1
OK
127.0.0.1:6379> incr id
(integer) 2
127.0.0.1:6379> incr id
(integer) 3
127.0.0.1:6379> decr id
(integer) 2
127.0.0.1:6379> decr id
(integer) 1
127.0.0.1:6379> get id
"1"
127.0.0.1:6379>
127.0.0.1:6379>
127.0.0.1:6379> incrby id 3
(integer) 4
127.0.0.1:6379> decrby id 2
(integer) 2
应用场景
单值缓存
SET key value
GET key
举例:某个商品的库存值和锁定值
#账面库存
stc:inv:stocklock:{warehouseCode}:{itemCode}:{stockType}
stc:inv:stock:{warehouseCode}:{itemCode}:{stockType}
#销售库存
stc:inv:saleslock:{warehouseCode}:{itemCode}:{partitionCode}
stc:inv:sales:{warehouseCode}:{itemCode}:{partitionCode}
对象缓存
SET user:1 value(json格式数据)
MSET user:1:name zhangsan user:1:balance 1888
MGET user:1:name user:1:balance
举例:基础信息的缓存:仓库、货主、商品等
stc:warehouse:{warehouseCode}
stc:owner:{ownerCode}
stc:item:{itemCode}
stc:warehouse:{warehouseId}
stc:owner:{ownerId}
分布式锁
SETNX product:10001 true //返回1代表获取锁成功
SETNX product:10001 true //返回0代表获取锁失败
。。。执行业务操作
DEL product:10001 //执行完业务释放锁
SET product:10001 true ex 10 nx //防止程序意外终止导致死锁
计数器
INCR article:readcount:{文章id}
GET article:readcount:{文章id}
举例:微信公众号文章访问量,csdn博客访问量
127.0.0.1:6379> incr article:readcount:1
(integer) 1
127.0.0.1:6379> incr article:readcount:1
(integer) 2
127.0.0.1:6379> incr article:readcount:1
(integer) 3
web集群session共享
spring session + redis实现session共享
分布式系统全局序列号
INCRBY orderId 1000 //redis批量生成序列号提升性能,在内存中进行++,如果服务器宕机,会存在丢失,不连续
127.0.0.1:6379> incr orderId
(integer) 1
127.0.0.1:6379> incr orderId
(integer) 2
127.0.0.1:6379> incr orderId
(integer) 3
2.2 Hash
常用操作
HSET key field value //存储一个哈希表key的键值
HSETNX key field value //存储一个不存在的哈希表key的键值
HMSET key field value [field value ...] //在一个哈希表key中存储多个键值对
HGET key field //获取哈希表key对应的field键值
HMGET key field [field ...] //批量获取哈希表key中多个field键值
HDEL key field [field ...] //删除哈希表key中的field键值
HLEN key //返回哈希表key中field的数量
HGETALL key //返回哈希表key中所有的键值
HINCRBY key field increment //为哈希表key中field键的值加上增量increment
示例:
127.0.0.1:6379> hset user name zengqingfa
(integer) 1
127.0.0.1:6379> hsetnx user:1 name laishuhui
(integer) 1
127.0.0.1:6379> hmset user age 22 sex 1
OK
127.0.0.1:6379> hmget user age sex
1) "22"
2) "1"
127.0.0.1:6379> hlen user
(integer) 3
127.0.0.1:6379> hgetall user
1) "name"
2) "zengqingfa"
3) "age"
4) "22"
5) "sex"
6) "1"
127.0.0.1:6379> hset user score 1
(integer) 1
127.0.0.1:6379> hincrby user score 1
(integer) 2
127.0.0.1:6379> hdel user score
(integer) 1
应用场景
对象缓存
HMSET user {userId}:name zhangsan {userId}:balance 1888
HMSET user 1:name zhangsan 1:balance 1888
HMGET user 1:name 1:balance
问题:如果用户数据量大的情况下,会存在大key(big key,指key的value值很大),如果想把用户的所有数据取出来,会阻塞其他redis的操作,需要做分段存储。
电商购物车
1)以用户id为key
2)商品id为field
3)商品数量为value
购物车操作
添加商品hset cart:1001 10088 1
增加数量hincrby cart:1001 10088 1
商品总数hlen cart:1001
删除商品hdel cart:1001 10088
获取购物车所有商品hgetall cart:1001
数据落库还是在数据库中,这个是提升服务的性能。
优点
1)同类数据归类整合储存,方便数据管理
2)相比string操作消耗内存与cpu更小(与底层数据结构相关)
3)相比string储存更节省空间(与底层数据结构相关)
缺点
1)过期功能不能使用在field上,只能用在key上
2)Redis集群架构下不适合大规模使用
redis集群架构
2.3 List
常用操作
LPUSH key value [value ...] //将一个或多个值value插入到key列表的表头(最左边)
RPUSH key value [value ...] //将一个或多个值value插入到key列表的表尾(最右边)
LPOP key //移除并返回key列表的头元素
RPOP key //移除并返回key列表的尾元素
LRANGE key start stop //返回列表key中指定区间内的元素,区间以偏移量start和stop指定
BLPOP key [key ...] timeout //从key列表表头弹出一个元素,若列表中没有元素,阻塞等待 timeout秒,如果timeout=0,一直阻塞等待
BRPOP key [key ...] timeout //从key列表表尾弹出一个元素,若列表中没有元素,阻塞等待 timeout秒,如果timeout=0,一直阻塞等待
127.0.0.1:6379> lpush letter a
(integer) 1
127.0.0.1:6379> lpush letter b
(integer) 2
127.0.0.1:6379> lpush letter c
(integer) 3
127.0.0.1:6379> rpush letter d
(integer) 4
127.0.0.1:6379> rpush letter e
(integer) 5
127.0.0.1:6379> lrange letter 0 6
1) "c"
2) "b"
3) "a"
4) "d"
5) "e"
127.0.0.1:6379> lpop letter
"c"
127.0.0.1:6379> rpop letter
"e"
127.0.0.1:6379>
应用场景
常用数据结构
Stack(栈) = LPUSH + LPOP
Queue(队列)= LPUSH + RPOP
Blocking MQ(阻塞队列)= LPUSH + BRPOP
在分布式场景下,jdk提供的栈和队列已经不适合在分布式的环境下使用
微博和微信公号消息流
按时间的先后顺序排序进行展示
张三老师关注了匠心零度,好好学java等大V,假设公众号是一般公众号,粉丝量几千
1)匠心零度发公众号文章,消息ID为10018
LPUSH msg:{老师-ID} 10018
2)好好学java发公众号文章,消息ID为10086
LPUSH msg:{老师-ID} 10086
3)查看最新公众号文章
LRANGE msg:{老师-ID} 0 4
2.4 set
常用操作
//Set常用操作
SADD key member [member ...] //往集合key中存入元素,元素存在则忽略,
若key不存在则新建
SREM key member [member ...] //从集合key中删除元素
SMEMBERS key //获取集合key中所有元素
SCARD key //获取集合key的元素个数
SISMEMBER key member //判断member元素是否存在于集合key中
SRANDMEMBER key [count] //从集合key中选出count个元素,元素不从key中删除
SPOP key [count] //从集合key中选出count个元素,元素从key中删除
//Set运算操作
SINTER key [key ...] //交集运算
SINTERSTORE destination key [key ..] //将交集结果存入新集合destination中
SUNION key [key ..] //并集运算
SUNIONSTORE destination key [key ...] //将并集结果存入新集合destination中
SDIFF key [key ...] //差集运算
SDIFFSTORE destination key [key ...] //将差集结果存入新集合destination中
示例:
127.0.0.1:6379> sadd awared 1
(integer) 1
127.0.0.1:6379> sadd awared 2
(integer) 1
127.0.0.1:6379> sadd awared 3
(integer) 1
127.0.0.1:6379> smembers awared
1) "1"
2) "2"
3) "3"
127.0.0.1:6379> srem awared 1
(integer) 1
127.0.0.1:6379> smembers awared
1) "2"
2) "3"
127.0.0.1:6379> scard awared
(integer) 2
127.0.0.1:6379> sismember awared 1
(integer) 0
127.0.0.1:6379> srandmember awared 1
1) "3"
127.0.0.1:6379> srandmember awared 1
1) "2"
127.0.0.1:6379> srandmember awared 1
1) "2"
127.0.0.1:6379> spop awared 1
1) "3"
127.0.0.1:6379> spop awared 1
1) "2"
127.0.0.1:6379> spop awared 1
(empty list or set)
127.0.0.1:6379>
应用场景
微信抽奖小程序
1)点击参与抽奖加入集合
SADD key {userID}
2)查看参与抽奖所有用户
SMEMBERS key
3)抽取count名中奖者
SRANDMEMBER key [count] //选出元素,不会从集合中删除元素,适合抽一次
SPOP key [count] //选出元素,会从集合中删除,适合抽多次奖,抽过的人不能参加下一次抽奖
微信微博点赞,收藏,标签
1) 点赞
SADD like:{消息ID} {用户ID}
2) 取消点赞
SREM like:{消息ID} {用户ID}
3) 检查用户是否点过赞
SISMEMBER like:{消息ID} {用户ID}
4) 获取点赞的用户列表
SMEMBERS like:{消息ID}
5) 获取点赞用户数
SCARD like:{消息ID}
这些重要的场景,适合使用redis吗?必须要做redis的高可用下,如果做不到,则不适合
集合操作
127.0.0.1:6379> sadd set1 a b c
(integer) 3
127.0.0.1:6379> sadd set2 b c d
(integer) 3
127.0.0.1:6379> sadd set3 c d e
(integer) 3
127.0.0.1:6379> sinter set1 set2 set3
1) "c"
127.0.0.1:6379> sunion set1 set2 set3
1) "e"
2) "c"
3) "a"
4) "d"
5) "b"
127.0.0.1:6379> sdiff set1 set2 set3
1) "a"
127.0.0.1:6379> sdiff set2 set1 set3
(empty list or set)
127.0.0.1:6379> sdiff set3 set1 set2
1) "e"
127.0.0.1:6379>
集合操作实现微博微信关注模型
1) 张三关注的人:
zhangsanSet-> {lisi, wangwu}
2) 李四关注的人:
lisiSet--> {zhangsan, wangwu, zhaoliu,tianqi}
3) 王五关注的人:
wangwuSet-> {zhangsan, lisi, zhaoliu, tianqi, sunba}
4) 张三和李四共同关注:
SINTER zhangsanSet lisiSet--> {wangwu}
5) 我(张三)关注的人也关注他(李四):
SISMEMBER wangwuSet lisi
6) 我(张三)可能认识的人: 需要去掉本人zhangsan
SDIFF lisiSet zhangsanSet-> {zhangsan, zhaoliu,tianqi}
示例:
127.0.0.1:6379> sadd zhangsanSet lisi wangwu
(integer) 2
127.0.0.1:6379> sadd lisiSet zhangsan wangwu zhaoliu tianqi
(integer) 4
127.0.0.1:6379> sadd wangwuSet zhangsan lisi zhaoliu tianqi sunba
(integer) 5
127.0.0.1:6379> sinter zhangsanSet lisiSet
1) "wangwu"
127.0.0.1:6379> sismember wangwuSet lisi
(integer) 1
127.0.0.1:6379> sdiff lisiSet zhangsanSet
1) "zhaoliu"
2) "tianqi"
3) "zhangsan"
127.0.0.1:6379>
如果"我关注的人也关注他"的数量很大,前台只展示前几条并分页,可以通过SCAN命令渐进式分批获取
集合操作实现电商商品筛选
SADD brand:huawei P40
SADD brand:xiaomi mi-10
SADD brand:iPhone iphone12
SADD os:android P40 mi-10
SADD cpu:brand:intel P40 mi-10
SADD ram:8G P40 mi-10 iphone12
SINTER os:android cpu:brand:intel ram:8G --> {P40, mi-10}
2.5 Zset
常用操作
// ZSet常用操作
ZADD key score member [[score member]…] //往有序集合key中加入带分值元素
ZREM key member [member …] //从有序集合key中删除元素
ZSCORE key member //返回有序集合key中元素member的分值
ZINCRBY key increment member //为有序集合key中元素member的分值加上increment
ZCARD key //返回有序集合key中元素个数
ZRANGE key start stop [WITHSCORES] //正序获取有序集合key从start下标到stop下标的元素
ZREVRANGE key start stop [WITHSCORES] //倒序获取有序集合key从start下标到stop下标的元素
// Zset集合操作
ZUNIONSTORE destkey numkeys key [key ...] //并集计算
ZINTERSTORE destkey numkeys key [key …] //交集计算
应用场景
Zset集合操作实现排行榜
1)点击新闻
ZINCRBY hotNews:20211121 1 十九届六中全会
2)展示当日排行前十
ZREVRANGE hotNews:20211121 0 9 WITHSCORES
3)七日搜索榜单计算
ZUNIONSTORE hotNews:20211114-20211121 7
hotNews:20211114 hotNews:20211115... hotNews:20211121
4)展示七日排行前十
ZREVRANGE hotNews:20211114-20211121 0 9 WITHSCORES
三、Redis的单线程和高性能
Redis是单线程吗?
**Redis 的单线程主要是指 Redis 的网络 IO 和键值对读写是由一个线程来完成的,这也是 Redis 对外 提供键值存储服务的主要流程。**但 Redis 的其他功能,比如持久化、异步删除、集群数据同步等,其实是由额外的线程执行的。
Redis 单线程为什么还能这么快?
因为它所有的数据都在内存中,所有的运算都是内存级别的运算,而且单线程避免了多线程的切换性 能损耗问题。正因为 Redis 是单线程,所以要小心使用 Redis 指令,对于那些耗时的指令(比如 keys),一定要谨慎使用,一不小心就可能会导致 Redis 卡顿。
[root@k8s-master01 redis-5.0.3]# src/redis-benchmark get
====== get ======
100000 requests completed in 1.58 seconds
50 parallel clients
3 bytes payload
keep alive: 1
99.09% <= 1 milliseconds
99.42% <= 2 milliseconds
99.50% <= 3 milliseconds
99.52% <= 4 milliseconds
99.75% <= 5 milliseconds
99.96% <= 6 milliseconds
100.00% <= 6 milliseconds
63411.54 requests per second
[root@k8s-master01 redis-5.0.3]# src/redis-benchmark set
====== set ======
100000 requests completed in 1.69 seconds
50 parallel clients
3 bytes payload
keep alive: 1
97.79% <= 1 milliseconds
98.50% <= 2 milliseconds
98.72% <= 3 milliseconds
98.83% <= 4 milliseconds
99.29% <= 5 milliseconds
99.60% <= 6 milliseconds
99.85% <= 7 milliseconds
99.87% <= 8 milliseconds
99.90% <= 18 milliseconds
99.95% <= 21 milliseconds
100.00% <= 21 milliseconds
59171.59 requests per second
Redis 单线程如何处理那么多的并发客户端连接?
Redis的IO多路复用:redis利用epoll来实现IO多路复用,将连接信息和事件放到队列中,依次放到文件事件分派器,事件分派器将事件分发给事件处理器。
# 查看redis支持的最大连接数,在redis.conf文件中可修改,# maxclients 10000
127.0.0.1:6379> CONFIG GET maxclients
1) "maxclients"
2) "10000"
四、其他高级命令
4.1 keys
keys:全量遍历键,用来列出所有满足特定正则字符串规则的key,当redis数据量比较大时, 性能比较差,要避免使用
127.0.0.1:6379> keys *
1) "k3"
2) "set3"
3) "k5"
4) "k6"
5) "backup2"
6) "k1"
7) "set2"
8) "set1"
9) "wangwuSet"
10) "backup3"
11) "zhangsanSet"
12) "k2"
13) "lisiSet"
14) "backup4"
15) "backup1"
16) "k7"
17) "k4"
127.0.0.1:6379> keys k*
1) "k3"
2) "k5"
3) "k6"
4) "k1"
5) "k2"
6) "k7"
7) "k4"
127.0.0.1:6379>
4.2 scan:渐进式遍历键
SCAN cursor [MATCH pattern] [COUNT count] scan 参数提供了三个参数,
第一个是 cursor 整数值(hash桶的索引值),
第二个是 key 的正则模式,
第三个是一次遍历的key的数量(参考值,底层遍历的数量不一定,扫全量key的数量,是一个大概值),并不是符合条件的结果数量。
第一次遍历时,cursor 值为0,然后将返回结果中第一个整数值作为下一次遍历的 cursor。一直遍历到返回的 cursor值为0时结束。
注意:但是scan并非完美无瑕, 如果在scan的过程中如果有键的变化(增加、 删除、 修改) ,那么遍历效果可能会碰到如下问题: 新增的键可能没有遍历到, 遍历出了重复的键等情况, 也就是说 scan并不能保证完整的遍历出来所有的键, 这些是我们在开发时需要考虑的。
127.0.0.1:6379> keys *
1) "k3"
2) "set3"
3) "k5"
4) "k6"
5) "backup2"
6) "k1"
7) "set2"
8) "set1"
9) "wangwuSet"
10) "backup3"
11) "zhangsanSet"
12) "k2"
13) "lisiSet"
14) "backup4"
15) "backup1"
16) "k7"
17) "k4"
127.0.0.1:6379> keys k*
1) "k3"
2) "k5"
3) "k6"
4) "k1"
5) "k2"
6) "k7"
7) "k4"
127.0.0.1:6379> scan 0 k* count 4
(error) ERR syntax error
127.0.0.1:6379> scan 0 match k* count 4
1) "26"
2) 1) "k1"
2) "k7"
127.0.0.1:6379> scan 26 match k* count 4
1) "14"
2) 1) "k5"
2) "k6"
3) "k2"
127.0.0.1:6379> scan 14 match k* count 4
1) "23"
2) 1) "k3"
127.0.0.1:6379> scan 23 match k* count 4
1) "0"
2) 1) "k4"
127.0.0.1:6379>
4.3 Info
查看redis服务运行信息,分为 9 大块,每个块都有非常多的参数,这 9 个块分别是:
Server 服务器运行的环境参数
Clients 客户端相关信息
Memory 服务器运行内存统计数据
Persistence 持久化信息
Stats 通用统计数据
Replication 主从复制相关信息
CPU CPU 使用情况
Cluster 集群信息
KeySpace 键值对统计数量信息
127.0.0.1:6379> info
# Server
redis_version:5.0.3
redis_git_sha1:00000000
redis_git_dirty:0
redis_build_id:36c5a99f34c31162
redis_mode:standalone
os:Linux 3.10.0-957.21.3.el7.x86_64 x86_64
arch_bits:64
multiplexing_api:epoll
atomicvar_api:atomic-builtin
gcc_version:4.8.5
process_id:7541
run_id:cbdfe78a9e7d1c2bb8edff7bb3ae0dac80413a4f
tcp_port:6379
uptime_in_seconds:47179
uptime_in_days:0
hz:10
configured_hz:10
lru_clock:10071677
executable:/opt/redis-5.0.3/src/redis-server
config_file:/opt/redis-5.0.3/redis.conf
# Clients
connected_clients:1 # 正在连接的客户端数量
client_recent_max_input_buffer:2
client_recent_max_output_buffer:0
blocked_clients:0
# Memory
used_memory:857216 # Redis分配的内存总量(byte),包含redis进程内部的开销和数据占用的内存
used_memory_human:837.12K # Redis分配的内存总量(Kb,human会展示出单位)
used_memory_rss:3833856
used_memory_rss_human:3.66M # 向操作系统申请的内存大小(Mb)
(这个值一般是大于used_memory的,因为Redis的内存分配策略会产生内存碎片)
used_memory_peak:3886872 # redis的内存消耗峰值(byte)
used_memory_peak_human:3.71M # redis的内存消耗峰值(human会自动换算并展示单位)
used_memory_peak_perc:22.05%
used_memory_overhead:841598
used_memory_startup:790968
used_memory_dataset:15618
used_memory_dataset_perc:23.58%
allocator_allocated:1284112
allocator_active:1642496
allocator_resident:8368128
total_system_memory:1927405568
total_system_memory_human:1.80G
used_memory_lua:37888
used_memory_lua_human:37.00K
used_memory_scripts:0
used_memory_scripts_human:0B
number_of_cached_scripts:0
maxmemory:0 # 配置中设置的最大可使用内存值(byte),默认0,不限制
maxmemory_human:0B # 配置中设置的最大可使用内存值
maxmemory_policy:noeviction # 当达到maxmemory时的淘汰策略
allocator_frag_ratio:1.28
allocator_frag_bytes:358384
allocator_rss_ratio:5.09
allocator_rss_bytes:6725632
rss_overhead_ratio:0.46
rss_overhead_bytes:-4534272
mem_fragmentation_ratio:4.70
mem_fragmentation_bytes:3018856
mem_not_counted_for_evict:0
mem_replication_backlog:0
mem_clients_slaves:0
mem_clients_normal:49694
mem_aof_buffer:0
mem_allocator:jemalloc-5.1.0
active_defrag_running:0
lazyfree_pending_objects:0
# Persistence
loading:0
rdb_changes_since_last_save:6
rdb_bgsave_in_progress:0
rdb_last_save_time:1637461015
rdb_last_bgsave_status:ok
rdb_last_bgsave_time_sec:0
rdb_current_bgsave_time_sec:-1
rdb_last_cow_size:335872
aof_enabled:0
aof_rewrite_in_progress:0
aof_rewrite_scheduled:0
aof_last_rewrite_time_sec:-1
aof_current_rewrite_time_sec:-1
aof_last_bgrewrite_status:ok
aof_last_write_status:ok
aof_last_cow_size:0
# Stats
total_connections_received:112
total_commands_processed:131
instantaneous_ops_per_sec:0 # 每秒执行多少次指令
total_net_input_bytes:2605386
total_net_output_bytes:10070588
instantaneous_input_kbps:0.00
instantaneous_output_kbps:0.00
rejected_connections:0
sync_full:0
sync_partial_ok:0
sync_partial_err:0
expired_keys:1
expired_stale_perc:0.00
expired_time_cap_reached_count:0
evicted_keys:0
keyspace_hits:40
keyspace_misses:7
pubsub_channels:0
pubsub_patterns:0
latest_fork_usec:286
migrate_cached_sockets:0
slave_expires_tracked_keys:0
active_defrag_hits:0
active_defrag_misses:0
active_defrag_key_hits:0
active_defrag_key_misses:0
# Replication
role:master
connected_slaves:0
master_replid:37536bdff58aa0f15af3f67a15683cbf805243d5
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:0
second_repl_offset:-1
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
# CPU
used_cpu_sys:28.679198
used_cpu_user:33.083586
used_cpu_sys_children:0.008181
used_cpu_user_children:0.003379
# Cluster
cluster_enabled:0
# Keyspace
db0:keys=17,expires=0,avg_ttl=0
127.0.0.1:6379>
rejected_connections:0
sync_full:0
sync_partial_ok:0
sync_partial_err:0
expired_keys:1
expired_stale_perc:0.00
expired_time_cap_reached_count:0
evicted_keys:0
keyspace_hits:40
keyspace_misses:7
pubsub_channels:0
pubsub_patterns:0
latest_fork_usec:286
migrate_cached_sockets:0
slave_expires_tracked_keys:0
active_defrag_hits:0
active_defrag_misses:0
active_defrag_key_hits:0
active_defrag_key_misses:0
# Replication
role:master
connected_slaves:0
master_replid:37536bdff58aa0f15af3f67a15683cbf805243d5
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:0
second_repl_offset:-1
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
# CPU
used_cpu_sys:28.679198
used_cpu_user:33.083586
used_cpu_sys_children:0.008181
used_cpu_user_children:0.003379
# Cluster
cluster_enabled:0
# Keyspace
db0:keys=17,expires=0,avg_ttl=0
127.0.0.1:6379>