1. Set up 3 Redis servers
1.1 Configure the C:\Windows\System32\drivers\etc\hosts file to map the host IPs
1.2 Clone the host and configure xsync
[cevent@hadoop213 bin]$ ll
total 15464
-rwxr-xr-x. 1 cevent cevent 4589179 Jul  1 17:56 redis-benchmark
-rwxr-xr-x. 1 cevent cevent   22225 Jul  1 17:56 redis-check-aof
-rwxr-xr-x. 1 cevent cevent   45443 Jul  1 17:56 redis-check-dump
-rwxr-xr-x. 1 cevent cevent 4693138 Jul  1 17:56 redis-cli
lrwxrwxrwx. 1 cevent cevent      12 Jul  1 17:56 redis-sentinel -> redis-server
-rwxr-xr-x. 1 cevent cevent 6466413 Jul  1 17:56 redis-server
-rwxrwxrwx. 1 cevent cevent     316 Jul  1 13:36 xcall
-rwxrwxrwx. 1 cevent cevent     842 Jul  1 13:33 xsync
[cevent@hadoop213 bin]$ vim xsync
#!/bin/bash
# 1. Get the number of arguments ($#); if none were given, exit immediately
pcount=$#
if((pcount==0)); then
# no arguments: print "no args" and quit
echo no args;
exit;
fi
# 2. Get the file name: $1 is the first argument; basename strips the path and keeps the last component
p1=$1
fname=`basename $p1`
echo fname=$fname
# 3. Resolve the parent directory to an absolute path
# dirname $p1 gives the directory part of the path
# cd -P follows symlinks; pwd then prints the absolute path
pdir=`cd -P $(dirname $p1); pwd`
echo pdir=$pdir
# 4. Get the current user name
user=`whoami`
# 5. Loop over the hosts
for((host=213; host<216; host++)); do
#echo $pdir/$fname $user@hadoop$host:$pdir
echo --------------- hadoop$host.cevent.com ----------------
# assemble destination as user@host:path and sync with rsync
rsync -rvl $pdir/$fname $user@hadoop$host.cevent.com:$pdir
done
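A quick sanity check of the script (a sketch, assuming xsync lives in ~/bin as the prompt above suggests, is executable and on the PATH, and that passwordless SSH to hadoop213-215 is configured):
xsync ~/bin/xcall                  # push a single file to the same path on every host
xsync /opt/module/redis-3.0.4/     # push a whole directory (rsync -rvl recurses into it)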
1.3 Configure xcall
[cevent@hadoop213 bin]$ vim xcall
#!/bin/bash
# $# gives the number of arguments
# $@ expands to all arguments
pcount=$#
if((pcount==0));then
echo no args;
exit;
fi
echo -------------localhost.cevent.com----------
$@
for((host=213; host<216; host++)); do
echo ----------hadoop$host.cevent.com---------
ssh hadoop$host.cevent.com $@
done
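A minimal test of xcall (a sketch; hostname and ls are only illustrative commands, and passwordless SSH to the hosts mapped in 1.1 is assumed):
xcall hostname              # run hostname locally and on hadoop213-215
xcall ls /opt/module        # list the module directory on every node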
1.4 Copy multiple redis.conf files
[cevent@hadoop213 redis-3.0.4]$ cp redis.conf redis6379.conf
[cevent@hadoop213 redis-3.0.4]$ cp redis.conf redis6380.conf
[cevent@hadoop213 redis-3.0.4]$ cp redis.conf redis6381.conf
[cevent@hadoop213 redis-3.0.4]$ ll
total 288
-rw-rw-r--.  1 cevent cevent 31391 Sep  8  2015 00-RELEASENOTES
-rw-r--r--.  1 cevent cevent  1079 Jul  3 22:36 appendonly.aof
-rw-rw-r--.  1 cevent cevent    53 Sep  8  2015 BUGS
-rw-rw-r--.  1 cevent cevent  1439 Sep  8  2015 CONTRIBUTING
-rw-rw-r--.  1 cevent cevent  1487 Sep  8  2015 COPYING
drwxrwxr-x.  6 cevent cevent  4096 Jul  1 17:51 deps
-rw-rw-r--.  1 cevent cevent    41 Jul  3 22:36 dump.rdb
-rw-rw-r--.  1 cevent cevent    11 Sep  8  2015 INSTALL
-rw-rw-r--.  1 cevent cevent   151 Sep  8  2015 Makefile
-rw-rw-r--.  1 cevent cevent  4223 Sep  8  2015 MANIFESTO
-rw-rw-r--.  1 cevent cevent  5201 Sep  8  2015 README
-rw-rw-r--.  1 cevent cevent 41405 Jul  4 09:37 redis6379.conf
-rw-rw-r--.  1 cevent cevent 41405 Jul  4 09:37 redis6380.conf
-rw-rw-r--.  1 cevent cevent 41405 Jul  4 09:38 redis6381.conf
-rw-rw-r--.  1 cevent cevent 41405 Jul  3 15:03 redis.conf
-rwxrwxr-x.  1 cevent cevent   271 Sep  8  2015 runtest
-rwxrwxr-x.  1 cevent cevent   280 Sep  8  2015 runtest-cluster
-rwxrwxr-x.  1 cevent cevent   281 Sep  8  2015 runtest-sentinel
-rw-rw-r--.  1 cevent cevent  7109 Sep  8  2015 sentinel.conf
drwxrwxr-x.  2 cevent cevent  4096 Jul  1 17:52 src
drwxrwxr-x. 10 cevent cevent  4096 Sep  8  2015 tests
drwxrwxr-x.  5 cevent cevent  4096 Sep  8  2015 utils
1.5 redis.conf (GENERAL settings)
################################ GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis6379.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379
# TCP listen() backlog.
#
# In high requests-per-second environments you need an high backlog in order
# to avoid slow clients connections issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511
# By default Redis listens for connections from all the network interfaces
# available on the server. It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 700
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
#    equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile "6379.log"
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
1.6 redis.conf (SNAPSHOTTING settings)
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
#   save <seconds> <changes>
#
#   Will save the DB if both the given number of seconds and the given
#   number of write operations against the DB occurred.
#
#   In the example below the behaviour will be to save:
#   after 900 sec (15 min) if at least 1 key changed
#   after 300 sec (5 min) if at least 10 keys changed
#   after 60 sec if at least 10000 keys changed
#
#   Note: you can disable saving completely by commenting out all "save" lines.
#
#   It is also possible to remove all the previously configured save
#   points by adding a save directive with a single empty string argument
#   like in the following example:
#
#   save ""
save 900 1
save 120 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dump .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump6379.rdb
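Once an instance has been started with this file, the values can be confirmed from redis-cli (a sketch, assuming the 6379 instance started later in section 6):
redis-cli -p 6379 CONFIG GET save          # expect "900 1 120 10 60 10000"
redis-cli -p 6379 CONFIG GET dbfilename    # expect "dump6379.rdb"
redis-cli -p 6379 CONFIG GET logfile       # expect "6379.log"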
1.7 Modify hadoop214
################################ GENERAL #####################################
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis6380.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6380
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile "6380.log"
################################ SNAPSHOTTING ################################
# The filename where to dump the DB
dbfilename dump6380.rdb
1.8 Modify hadoop215
################################ GENERAL #####################################
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis6381.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6381
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile "6381.log"
################################ SNAPSHOTTING ################################
# The filename where to dump the DB
dbfilename dump6381.rdb
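Since the three files differ only in the port embedded in pidfile, port, logfile and dbfilename, the 6380/6381 variants can also be generated from redis6379.conf in one pass instead of being edited by hand (a sketch; the sed patterns only assume the values shown above):
cd /opt/module/redis-3.0.4
for p in 6380 6381; do
  # rewrite every 6379-specific value for the new port
  sed -e "s/redis6379\.pid/redis$p.pid/" \
      -e "s/^port 6379/port $p/" \
      -e "s/\"6379\.log\"/\"$p.log\"/" \
      -e "s/dump6379\.rdb/dump$p.rdb/" \
      redis6379.conf > redis$p.conf
done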
1.9 Run the sync
[cevent@hadoop213 module]$ xsync redis-3.0.4/
2. Prepare the yum repository configuration
[cevent@hadoop215 ~]$ sudo vim /etc/yum.repos.d/CentOS-Base.repo
[sudo] password for cevent:
# CentOS-Base.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client. You should use this for CentOS updates
# unless you are manually picking other mirrors.
#
# If the mirrorlist= does not work for you, as a fall back you can try the
# remarked out baseurl= line instead.
#
#
[base]
name=CentOS-$releasever - Base - 163.com
baseurl=http://mirrors.163.com/centos/$releasever/os/$basearch/
#mirrorlist=file:///mnt/cdrom
gpgcheck=1
enabled=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
#released updates
[updates]
name=CentOS-$releasever - Updates - 163.com
baseurl=http://mirrors.163.com/centos/$releasever/updates/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
#additional packages that may be useful
[extras]
name=CentOS-$releasever - Extras - 163.com
baseurl=http://mirrors.163.com/centos/$releasever/extras/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-$releasever - Plus - 163.com
baseurl=http://mirrors.163.com/centos/$releasever/centosplus/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus
gpgcheck=1
enabled=0
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
#contrib - packages by Centos Users
[contrib]
name=CentOS-$releasever - Contrib - 163.com
baseurl=http://mirrors.163.com/centos/$releasever/contrib/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=contrib
gpgcheck=1
enabled=0
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
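After saving the repo file, it is worth rebuilding the metadata cache so the new mirrors actually take effect (the same commands reappear in section 3):
sudo yum clean all
sudo yum makecache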
3. A yum install was cancelled part-way; re-running the install then reports an error
[cevent@hadoop215 ~]$ sudo yum -y install gcc
Loaded plugins: fastestmirror, refresh-packagekit, security
Setting up Install Process
Loading mirror speeds from cached hostfile
Error: database disk image is malformed
[cevent@hadoop215 ~]$ yum clean all
Loaded plugins: fastestmirror, refresh-packagekit, security
Cleaning repos: base extras updates
Cleaning up Everything
Cannot remove rpmdb file /var/lib/yum/rpmdb-indexes/version
[cevent@hadoop215 ~]$ sudo yum -y install gcc
Loaded plugins: fastestmirror, refresh-packagekit, security
Setting up Install Process
Loading mirror speeds from cached hostfile
Error: database disk image is malformed
[cevent@hadoop215 ~]$ sudo vim /etc/yum/pluginconf.d/fastestmirror.conf    (edit the fastestmirror plugin settings)
[main]
enabled=0    (set to 0: the fastestmirror plugin is disabled)
verbose=0
always_print_best_host = true
socket_timeout=3
# Relative paths are relative to the cachedir (and so works for users as well
# as root).
hostfilepath=timedhosts.txt
maxhostfileage=10
maxthreads=15
#exclude=.gov, facebook
#include_only=.nl,.de,.uk,.ie
[cevent@hadoop215 ~]$ sudo vim /etc/yum.conf    (edit the main yum configuration)
[main]
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
debuglevel=2
logfile=/var/log/yum.log
exactarch=1
obsoletes=1
gpgcheck=1
plugins=0    (set to 0: plugins disabled)
installonly_limit=5
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=19&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release
[cevent@hadoop215 ~]$ yum clean all    (clean the yum caches)
Cleaning repos: base extras updates
Cleaning up Everything
Cannot remove rpmdb file /var/lib/yum/rpmdb-indexes/version
[cevent@hadoop215 ~]$ yum makecache    (rebuild the metadata cache)
base                       | 3.7 kB   00:00
base/group_gz              | 242 kB   00:00
base/filelists_db          | 6.4 MB   00:01
base/other_db              | 2.8 MB   00:00
extras                     | 3.4 kB   00:00
extras/prestodelta         | 2.2 kB   00:00
extras/other_db            |  14 kB   00:00
updates                    | 3.4 kB   00:00
updates/filelists_db       | 7.1 MB   00:02
updates/prestodelta        | 333 kB   00:00
updates/other_db           | 435 kB   00:00
Metadata Cache Created
[cevent@hadoop215 ~]$ sudo yum -y install gcc
Setting up Install Process
base                       | 3.7 kB   00:00
extras                     | 3.4 kB   00:00
updates                    | 3.4 kB   00:00
Resolving Dependencies
--> Running transaction check
---> Package gcc.x86_64 0:4.4.7-23.el6 will be installed
[cevent@hadoop215 ~]$ sudo yum -y install gcc-c++
Setting up Install Process
Resolving Dependencies
--> Running transaction check
---> Package gcc-c++.x86_64 0:4.4.7-23.el6 will be installed
--> Processing Dependency: libstdc++-devel = 4.4.7-23.el6 for package: gcc-c++-4.4.7-23.el6.x86_64
--> Processing Dependency: libstdc++ = 4.4.7-23.el6 for package: gcc-c++-4.4.7-23.el6.x86_64
--> Running transaction check
---> Package libstdc++.x86_64 0:4.4.7-17.el6 will be updated
---> Package libstdc++.x86_64 0:4.4.7-23.el6 will be an update
---> Package libstdc++-devel.x86_64 0:4.4.7-23.el6 will be installed
--> Finished Dependency Resolution
4. Fixing "Permission denied" during make install (make[1]: Leaving directory `/opt/module/redis-3.0.4/src')
[cevent@hadoop214 redis-3.0.4]$ gcc -v
[cevent@hadoop214 redis-3.0.4]$ make distclean    (clean previous build output)
[cevent@hadoop214 redis-3.0.4]$ make install
cd src && make install
make[1]: Entering directory `/opt/module/redis-3.0.4/src'
Hint: It's a good idea to run 'make test' ;)
    INSTALL install
install: cannot create regular file `/usr/local/bin/redis-server': Permission denied
make[1]: *** [install] Error 1
make[1]: Leaving directory `/opt/module/redis-3.0.4/src'
make: *** [install] Error 2
[cevent@hadoop214 redis-3.0.4]$ sudo chown -R cevent:cevent /opt/
[cevent@hadoop214 redis-3.0.4]$ sudo chown -R cevent:cevent /usr/local/bin/
[sudo] password for cevent:
[cevent@hadoop214 redis-3.0.4]$ make install
cd src && make install
make[1]: Entering directory `/opt/module/redis-3.0.4/src'
Hint: It's a good idea to run 'make test' ;)
    INSTALL install
    INSTALL install
    INSTALL install
    INSTALL install
    INSTALL install
make[1]: Leaving directory `/opt/module/redis-3.0.4/src'
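A quick check that the binaries really landed in /usr/local/bin (a sketch):
which redis-server           # expect /usr/local/bin/redis-server
redis-server --version       # expect Redis server v=3.0.4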
5. Install on hadoop215
[cevent@hadoop215 ~]$ sudo yum -y install gcc
Setting up Install Process
base                       | 3.7 kB   00:00
extras                     | 3.4 kB   00:00
updates                    | 3.4 kB   00:00
Package gcc-4.4.7-23.el6.x86_64 already installed and latest version
Nothing to do
[cevent@hadoop215 ~]$ sudo yum -y install gcc-c++
Setting up Install Process
Package gcc-c++-4.4.7-23.el6.x86_64 already installed and latest version
Nothing to do
[cevent@hadoop215 ~]$ sudo chown -R cevent:cevent /opt/
[cevent@hadoop215 ~]$ sudo chown -R cevent:cevent /usr/local/bin/
[cevent@hadoop215 redis-3.0.4]$ gcc -v
[cevent@hadoop215 redis-3.0.4]$ make distclean
[cevent@hadoop215 redis-3.0.4]$ make install
6. Master-slave replication: one master, two slaves
6.1 Start the Redis master
[cevent@hadoop213 module]$ cd redis-3.0.4/
[cevent@hadoop213 redis-3.0.4]$ ll
total 288
-rw-rw-r--.  1 cevent cevent 31391 Sep  8  2015 00-RELEASENOTES
-rw-r--r--.  1 cevent cevent  1079 Jul  3 22:36 appendonly.aof
-rw-rw-r--.  1 cevent cevent    53 Sep  8  2015 BUGS
-rw-rw-r--.  1 cevent cevent  1439 Sep  8  2015 CONTRIBUTING
-rw-rw-r--.  1 cevent cevent  1487 Sep  8  2015 COPYING
drwxrwxr-x.  6 cevent cevent  4096 Jul  1 17:51 deps
-rw-rw-r--.  1 cevent cevent    41 Jul  3 22:36 dump.rdb
-rw-rw-r--.  1 cevent cevent    11 Sep  8  2015 INSTALL
-rw-rw-r--.  1 cevent cevent   151 Sep  8  2015 Makefile
-rw-rw-r--.  1 cevent cevent  4223 Sep  8  2015 MANIFESTO
-rw-rw-r--.  1 cevent cevent  5201 Sep  8  2015 README
-rw-rw-r--.  1 cevent cevent 41421 Jul  4 09:47 redis6379.conf
-rw-rw-r--.  1 cevent cevent 41421 Jul  4 09:54 redis6380.conf
-rw-rw-r--.  1 cevent cevent 41421 Jul  4 09:55 redis6381.conf
-rw-rw-r--.  1 cevent cevent 41405 Jul  3 15:03 redis.conf
-rwxrwxr-x.  1 cevent cevent   271 Sep  8  2015 runtest
-rwxrwxr-x.  1 cevent cevent   280 Sep  8  2015 runtest-cluster
-rwxrwxr-x.  1 cevent cevent   281 Sep  8  2015 runtest-sentinel
-rw-rw-r--.  1 cevent cevent  7109 Sep  8  2015 sentinel.conf
drwxrwxr-x.  2 cevent cevent  4096 Jul  1 17:52 src
drwxrwxr-x. 10 cevent cevent  4096 Sep  8  2015 tests
drwxrwxr-x.  5 cevent cevent  4096 Sep  8  2015 utils
[cevent@hadoop213 redis-3.0.4]$ redis-server redis6379.conf
[cevent@hadoop213 redis-3.0.4]$ redis-cli -p 6379
127.0.0.1:6379> ping
PONG
127.0.0.1:6379> keys *
1) "debt"
2) "balance"
6.2 Files generated from redis6379.conf (6379.log and dump6379.rdb)
[cevent@hadoop213 bin]$ cd /opt/module/redis-3.0.4/
[cevent@hadoop213 redis-3.0.4]$ ll
total 296
-rw-rw-r--.  1 cevent cevent 31391 Sep  8  2015 00-RELEASENOTES
-rw-rw-r--.  1 cevent cevent  2997 Jul  4 10:05 6379.log
-rw-r--r--.  1 cevent cevent  1079 Jul  3 22:36 appendonly.aof
-rw-rw-r--.  1 cevent cevent    53 Sep  8  2015 BUGS
-rw-rw-r--.  1 cevent cevent  1439 Sep  8  2015 CONTRIBUTING
-rw-rw-r--.  1 cevent cevent  1487 Sep  8  2015 COPYING
drwxrwxr-x.  6 cevent cevent  4096 Jul  1 17:51 deps
-rw-rw-r--.  1 cevent cevent    41 Jul  4 10:05 dump6379.rdb
-rw-rw-r--.  1 cevent cevent    41 Jul  3 22:36 dump.rdb
-rw-rw-r--.  1 cevent cevent    11 Sep  8  2015 INSTALL
-rw-rw-r--.  1 cevent cevent   151 Sep  8  2015 Makefile
-rw-rw-r--.  1 cevent cevent  4223 Sep  8  2015 MANIFESTO
-rw-rw-r--.  1 cevent cevent  5201 Sep  8  2015 README
-rw-rw-r--.  1 cevent cevent 41421 Jul  4 09:47 redis6379.conf
-rw-rw-r--.  1 cevent cevent 41421 Jul  4 09:54 redis6380.conf
-rw-rw-r--.  1 cevent cevent 41421 Jul  4 09:55 redis6381.conf
-rw-rw-r--.  1 cevent cevent 41405 Jul  3 15:03 redis.conf
-rwxrwxr-x.  1 cevent cevent   271 Sep  8  2015 runtest
-rwxrwxr-x.  1 cevent cevent   280 Sep  8  2015 runtest-cluster
-rwxrwxr-x.  1 cevent cevent   281 Sep  8  2015 runtest-sentinel
-rw-rw-r--.  1 cevent cevent  7109 Sep  8  2015 sentinel.conf
drwxrwxr-x.  2 cevent cevent  4096 Jul  1 17:52 src
drwxrwxr-x. 10 cevent cevent  4096 Sep  8  2015 tests
drwxrwxr-x.  5 cevent cevent  4096 Sep  8  2015 utils
[cevent@hadoop213 redis-3.0.4]$ cat 6379.log    (view the Redis log)
3537:M 04 Jul 10:03:06.309 # You requested maxclients of 10000 requiring at least 10032 max file descriptors.
3537:M 04 Jul 10:03:06.309 # Redis can't set maximum open files to 10032 because of OS error: Operation not permitted.
3537:M 04 Jul 10:03:06.309 # Current maximum open files is 4096. maxclients has been reduced to 4064 to compensate for low ulimit. If you need higher maxclients increase 'ulimit -n'.
(Redis ASCII-art startup banner: Redis 3.0.4 (00000000/0) 64 bit, running in standalone mode, Port: 6379, PID: 3537, http://redis.io)
3537:M 04 Jul 10:03:06.317 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
3537:M 04 Jul 10:03:06.317 # Server started, Redis version 3.0.4
3537:M 04 Jul 10:03:06.318 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.
3537:M 04 Jul 10:03:06.318 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.
3537:M 04 Jul 10:03:06.336 * DB saved on disk
3537:M 04 Jul 10:03:06.336 * DB loaded from append only file: 0.018 seconds
3537:M 04 Jul 10:03:06.336 * The server is now ready to accept connections on port 6379
3537:M 04 Jul 10:05:07.086 * 10 changes in 120 seconds. Saving...
3537:M 04 Jul 10:05:07.096 * Background saving started by pid 3551
3551:C 04 Jul 10:05:07.107 * DB saved on disk
3551:C 04 Jul 10:05:07.108 * RDB: 4 MB of memory used by copy-on-write
3537:M 04 Jul 10:05:07.198 * Background saving terminated with success
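The warnings above can be addressed exactly as the log suggests; a root-shell sketch for this CentOS 6 host (commands follow the messages in the log; persist them via /etc/sysctl.conf and /etc/rc.local as the log notes):
ulimit -n 10032                                              # allow maxclients 10000 in the shell that starts Redis
sysctl -w net.core.somaxconn=511                             # honour tcp-backlog 511
sysctl -w vm.overcommit_memory=1                             # let background saves succeed under low memory
echo never > /sys/kernel/mm/transparent_hugepage/enabled     # disable THP, then restart Redis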
6.3 Start three Redis instances on a single machine
[cevent@hadoop213 redis-3.0.4]$ redis-server redis6379.conf
[cevent@hadoop213 redis-3.0.4]$ redis-cli -p 6379
127.0.0.1:6379> ping
PONG
127.0.0.1:6379> keys *
1) "debt"
2) "balance"
127.0.0.1:6379> ping
PONG
[cevent@hadoop213 redis-3.0.4]$ redis-server redis6380.conf
[cevent@hadoop213 redis-3.0.4]$ redis-cli -p 6380
127.0.0.1:6380> ping
PONG
127.0.0.1:6380> keys *
1) "debt"
2) "balance"
[cevent@hadoop213 redis-3.0.4]$ redis-server redis6381.conf
[cevent@hadoop213 redis-3.0.4]$ redis-cli -p 6381
127.0.0.1:6381> ping
PONG
127.0.0.1:6381> keys *
1) "debt"
2) "balance"
[cevent@hadoop213 ~]$ cd /opt/module/redis-3.0.4/
[cevent@hadoop213 redis-3.0.4]$ ps -ef | grep redis    (list the Redis processes)
cevent   3537     1  0 10:03 ?     00:00:17 redis-server *:6379
cevent   3540  3409  0 10:03 pts/1 00:00:00 redis-cli -p 6379
cevent   4568     1  0 11:59 ?     00:00:12 redis-server *:6380
cevent   4572  4539  0 11:59 pts/2 00:00:00 redis-cli -p 6380
cevent   4611     1  0 12:00 ?     00:00:12 redis-server *:6381
cevent   4615  4579  0 12:00 pts/3 00:00:00 redis-cli -p 6381
cevent   6842  6814  0 17:50 pts/4 00:00:00 grep redis
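All three instances can also be brought up and pinged in one small loop; a sketch assuming the conf files from section 1 and daemonize yes:
cd /opt/module/redis-3.0.4
for port in 6379 6380 6381; do
  redis-server redis$port.conf      # daemonize yes sends it to the background
  redis-cli -p $port ping           # expect PONG once the instance is up
done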
6.4 Establish the master-slave relationship
6379
127.0.0.1:6379> set k1 v1
OK
127.0.0.1:6379> set k2 v2
OK
127.0.0.1:6379> set k3 v3
OK
127.0.0.1:6379> keys *
1) "debt"
2) "k1"
3) "balance"
4) "k3"
5) "k2"
127.0.0.1:6379> set k4 k4
OK
127.0.0.1:6379> info replication    (show replication info; before set k4 the other ports had already run slaveof 127.0.0.1 6379 to follow this master)
# Replication
role:master    (this node is the master)
connected_slaves:2    (two slaves connected)
slave0:ip=127.0.0.1,port=6380,state=online,offset=263,lag=1
slave1:ip=127.0.0.1,port=6381,state=online,offset=263,lag=1
master_repl_offset:263
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:262
6380
127.0.0.1:6380> slaveof 127.0.0.1 6379    (start following the master)
OK
127.0.0.1:6380> get k4
"k4"
127.0.0.1:6380> get k1
"v1"
127.0.0.1:6380> info replication    (show replication info)
# Replication
role:slave    (this node is a slave)
master_host:127.0.0.1    (master IP)
master_port:6379    (master port)
master_link_status:up    (link to the master is up)
master_last_io_seconds_ago:1
master_sync_in_progress:0
slave_repl_offset:333
slave_priority:100
slave_read_only:1
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
6381
127.0.0.1:6381> slaveof 127.0.0.1 6379    (start following the master)
OK
127.0.0.1:6381> get k4
"k4"
127.0.0.1:6381> get k2
"v2"
127.0.0.1:6381> info replication    (replication info)
# Replication
role:slave
master_host:127.0.0.1
master_port:6379
master_link_status:up
master_last_io_seconds_ago:8
master_sync_in_progress:0
slave_repl_offset:333
slave_priority:100
slave_read_only:1
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
7. Demonstrating master-slave pitfalls
7.1 With master-slave replication only the master can write; the slaves are read-only, so a key set on the master cannot be written from a slave
127.0.0.1:6379> set k6 v6
OK
127.0.0.1:6380> set k6 v99    (write on the slave fails)
(error) READONLY You can't write against a read only slave.
7.2 No failover is configured: when the master dies, the slaves remain slaves
127.0.0.1:6379> shutdown    (shut down the master)
not connected> exit
127.0.0.1:6380> info replication    (check the slave's replication state)
# Replication
role:slave
master_host:127.0.0.1
master_port:6379
master_link_status:down
master_last_io_seconds_ago:-1
master_sync_in_progress:0
slave_repl_offset:1226
master_link_down_since_seconds:8
slave_priority:100
slave_read_only:1
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
127.0.0.1:6381> info replication
# Replication
role:slave
master_host:127.0.0.1
master_port:6379
master_link_status:down
master_last_io_seconds_ago:-1
master_sync_in_progress:0
slave_repl_offset:1226
master_link_down_since_seconds:19
slave_priority:100
slave_read_only:1
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
7.3 When the master comes back up, it is still the master and the slaves are still its slaves
[cevent@hadoop213 redis-3.0.4]$ redis-server redis6379.conf
[cevent@hadoop213 redis-3.0.4]$ redis-cli -p 6379
127.0.0.1:6379> keys *
1) "balance"
2) "k1"
3) "k2"
4) "k5"
5) "k3"
6) "k4"
7) "debt"
8) "k6"
127.0.0.1:6379> set k6 v6
OK
127.0.0.1:6379> info replication
# Replication
role:master
connected_slaves:2
slave0:ip=127.0.0.1,port=6380,state=online,offset=109,lag=1
slave1:ip=127.0.0.1,port=6381,state=online,offset=109,lag=0
master_repl_offset:109
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:108
127.0.0.1:6380> get k6    (slave reads k6 written on the master)
"v6"
127.0.0.1:6380> info replication
# Replication
role:slave
master_host:127.0.0.1
master_port:6379
master_link_status:up
master_last_io_seconds_ago:1
master_sync_in_progress:0
slave_repl_offset:123
slave_priority:100
slave_read_only:1
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
7.4 When a slave dies and restarts, it no longer has a master-slave relationship with the master (unless the relationship is written into its config file, in which case it keeps receiving the master's data)
127.0.0.1:6380> shutdown    (stop the slave)
not connected> exit
127.0.0.1:6379> set k7 v7    (create k7 on the master while 6380 is down)
OK
[cevent@hadoop213 redis-3.0.4]$ redis-server redis6380.conf    (restart the former slave)
[cevent@hadoop213 redis-3.0.4]$ redis-cli -p 6380
127.0.0.1:6380> keys *    (k7, just created on the master, is present)
1) "k6"
2) "k7"
3) "k4"
4) "k2"
5) "debt"
6) "k1"
7) "k3"
8) "balance"
9) "k5"
127.0.0.1:6380> get k7
"v7"
127.0.0.1:6380> info replication    (check the replication state)
# Replication
role:master    (no longer tied to the old master)
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
127.0.0.1:6379> info replication    (check the master's state)
# Replication
role:master
connected_slaves:1    (6380 is no longer attached)
slave0:ip=127.0.0.1,port=6381,state=online,offset=586,lag=0
master_repl_offset:586
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:585
127.0.0.1:6379> set k8 v8    (create another key)
OK
127.0.0.1:6380> get k8    (the restarted 6380 no longer receives it)
(nil)
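To make a slave's relationship survive its own restart (the case noted in 7.4), the follow can be written into the slave's config file instead of being typed at the prompt; a sketch for redis6380.conf using the standard REPLICATION directive:
################################# REPLICATION #################################
# follow the 6379 master automatically at startup
slaveof 127.0.0.1 6379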
7.5 After a slave has gone offline and rejoined, it must run slaveof 127.0.0.1 6379 again
127.0.0.1:6380> slaveof 127.0.0.1 6379
OK
127.0.0.1:6380> get k8
"v8"
127.0.0.1:6380> info replication
# Replication
role:slave
master_host:127.0.0.1
master_port:6379
master_link_status:up
master_last_io_seconds_ago:8
master_sync_in_progress:0
slave_repl_offset:1077
slave_priority:100
slave_read_only:1
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
127.0.0.1:6379> info replication    (master's view of its slaves)
# Replication
role:master
connected_slaves:2
slave0:ip=127.0.0.1,port=6381,state=online,offset=1133,lag=1
slave1:ip=127.0.0.1,port=6380,state=online,offset=1133,lag=1
master_repl_offset:1133
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:1132
8. Chained replication ("passing the torch"): a slave serves as master for the next slave
8.1 Master 6379 stays unchanged
[cevent@hadoop213 redis-3.0.4]$ redis-server redis6379.conf
[cevent@hadoop213 redis-3.0.4]$ redis-cli -p 6379
127.0.0.1:6379> info replication
# Replication
role:master
connected_slaves:1
slave0:ip=127.0.0.1,port=6380,state=online,offset=477,lag=0
master_repl_offset:477
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:476
127.0.0.1:6379> keys *
 1) "k3"
 2) "k2"
 3) "k5"
 4) "k6"
 5) "k1"
 6) "balance"
 7) "k4"
 8) "k7"
 9) "k8"
10) "debt"
127.0.0.1:6379> set k9 v9    (create a key)
OK
8.2 Slave 6380 follows 6379
[cevent@hadoop213 redis-3.0.4]$ redis-server redis6380.conf
[cevent@hadoop213 redis-3.0.4]$ redis-cli -p 6380
127.0.0.1:6380> slaveof 127.0.0.1 6379
OK
127.0.0.1:6380> get k9
"v9"
127.0.0.1:6380> info replication
# Replication
role:slave    (this node is a slave)
master_host:127.0.0.1
master_port:6379
master_link_status:up
master_last_io_seconds_ago:1
master_sync_in_progress:0
slave_repl_offset:1481
slave_priority:100
slave_read_only:1
connected_slaves:1    (one downstream slave is connected to this slave)
slave0:ip=127.0.0.1,port=6381,state=online,offset=1019,lag=0    (the slave it relays to)
master_repl_offset:1019
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:1018
8.3 Slave 6381 follows 6380
[cevent@hadoop213 redis-3.0.4]$ redis-server redis6381.conf
[cevent@hadoop213 redis-3.0.4]$ redis-cli -p 6381
127.0.0.1:6381> slaveof 127.0.0.1 6380
OK
127.0.0.1:6381> info replication
# Replication
role:slave
master_host:127.0.0.1
master_port:6380    (the "master" here is slave 6380, which this node follows via slaveof)
master_link_status:up
master_last_io_seconds_ago:4
master_sync_in_progress:0
slave_repl_offset:15
slave_priority:100
slave_read_only:1    (read-only)
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
127.0.0.1:6381> get k9
"v9"
8.4 Promoting a slave to master (slaveof no one)
The master goes down:
127.0.0.1:6379> shutdown
not connected> exit
Slave 1 takes over and becomes the master:
127.0.0.1:6380> slaveof no one    (stop following any master)
OK
127.0.0.1:6380> info replication    (now promoted to master)
# Replication
role:master
connected_slaves:0
master_repl_offset:2307
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:2306
127.0.0.1:6380> set key10 value10
OK
127.0.0.1:6380> get key10
"value10"
Slave 2 now follows the new master and stays in service:
127.0.0.1:6381> slaveof 127.0.0.1 6380    (repoint to the new master)
OK
127.0.0.1:6381> get key10
"value10"
127.0.0.1:6381> info replication
# Replication
role:slave
master_host:127.0.0.1
master_port:6380
master_link_status:up
master_last_io_seconds_ago:1
master_sync_in_progress:0
slave_repl_offset:2535
slave_priority:100
slave_read_only:1
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0