一、理解Docker0
清空所有环境
# DANGER: wipes the whole environment — force-removes ALL containers, then ALL images.
docker rm -f $(docker ps -aq)
docker rmi -f $(docker images -aq)
# Start a tomcat container, mapping host port 8080 to the container's port 8080.
docker run -d -p 8080:8080 --name tomcat01 tomcat
# 查看容器的内部网络地址 ip addr , 发现容器启动的时候会得到一个 eth0@if9 ip地址,docker分配的
root@vm:~# docker exec -it tomcat01 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
# 思考:linux能不能 ping 通容器内部
root@vm:~# ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.094 ms
64 bytes from 172.17.0.2: icmp_seq=2 ttl=64 time=0.068 ms
64 bytes from 172.17.0.2: icmp_seq=3 ttl=64 time=0.065 ms
64 bytes from 172.17.0.2: icmp_seq=4 ttl=64 time=0.057 ms
# linux可以 ping 通 docker 容器内部
原理
1、 我们每启动一个docker容器,docker就会给docker容器分配一个ip,我们只要安装了docker,就会有一个docker0桥接模式,使用的技术是veth-pair技术!
#我们发现这个容器带来网卡,都是一对对的
# veth-pair 就是一对的虚拟设备接口,他们都是成对出现的,一端连着协议,一端彼此相连
# 正因为有这个特性 veth-pair 充当一个桥梁,连接各种虚拟网络设备的
# OpenStack,Docker容器之间的连接,OVS的连接,都是使用veth-pair技术
docker run -d -p 8081:8080 --name tomcat02 tomcat
结论 :
tomcat01 和 tomcat02 是公用一个路由器,docker0。
所有的容器不指定网络的情况下,都是通过docker0路由的,docker会给我们的容器分配一个默认的可用IP
--link
root@vm:~# docker exec -it tomcat02 ping tomcat01
ping: tomcat01: Name or service not known
# ping不通
# 如何解决呢?
# 通过 --link 就可以解决了
root@vm:~# docker run -d -p 8082:8080 --name tomcat03 --link tomcat02 tomcat
73f844c6d50215cf88feb7768fed4d3779d220a39b8d8a83ec928581a5db3fab
root@vm:~# docker exec -it tomcat03 ping tomcat02
PING tomcat02 (172.17.0.3) 56(84) bytes of data.
64 bytes from tomcat02 (172.17.0.3): icmp_seq=1 ttl=64 time=0.117 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=2 ttl=64 time=0.078 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=3 ttl=64 time=0.104 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=4 ttl=64 time=0.090 ms
# 用tomcat02 ping tomcat03 ping不通
root@vm:~# docker exec -it tomcat02 ping tomcat03
ping: tomcat03: Name or service not known
查看tomcat03就是在本地配置了tomcat02的配置
# 查看hosts 配置,在这里原理发现
root@vm:~# docker exec -it tomcat03 cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.3 tomcat02 bbe470799f87
172.17.0.4 73f844c6d502
--link 本质就是在hosts配置中添加映射
现在使用Docker已经不建议使用--link了!
自定义网络,不使用docker0!
docker0问题:不支持容器名连接访问!
2、自定义网络
root@vm:~# docker network ls
NETWORK ID NAME DRIVER SCOPE
3eff373f788a bridge bridge local
6cfecc1a2b6f host host local
e5079482515f none null local
网络模式
-
bridge :桥接 docker(默认,自己创建也是用bridge模式)
-
none :不配置网络,一般不用
-
host :和宿主机共享网络
-
container :容器网络连通(用得少!局限很大)
测试
# 我们直接启动容器的命令,默认带有 --net bridge 参数,而这个 bridge 就是我们的docker0
docker run -d -P --name tomcat01 --net bridge tomcat
# docker0特点:默认,域名不能访问,--link可以打通连接
# 我们可以自定义一个网络
# --driver bridge
# --subnet 192.168.0.0/16 子网
# --gateway 192.168.0.1 网关
docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 mynet
docker network ls
root@vm:~# docker network ls
NETWORK ID NAME DRIVER SCOPE
3eff373f788a bridge bridge local
6cfecc1a2b6f host host local
d4b3bb9b3b23 mynet bridge local
e5079482515f none null local
#查看自己的网络
#自己的网络就创建好了
docker network inspect mynet
root@vm:~# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "d4b3bb9b3b238815fa088f46237f596525ca0e46ad069a837c65516d7cac39e3",
"Created": "2021-05-20T20:38:00.13534475+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]
#创建两个容器,使用自定义网络
root@vm:~# docker run -d -P --name tomcat-net-01 --net mynet tomcat
3cb700e0f0de0125b213c1a2e7efbd1c6f11be9ab8608d328888db3af3a61bbf
root@vm:~# docker run -d -P --name tomcat-net-02 --net mynet tomcat
14937f255671ddee30d446a305ad5cf1ea3339af95acecf062ad0687a0766d92
root@vm:~# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "d4b3bb9b3b238815fa088f46237f596525ca0e46ad069a837c65516d7cac39e3",
"Created": "2021-05-20T20:38:00.13534475+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"14937f255671ddee30d446a305ad5cf1ea3339af95acecf062ad0687a0766d92": {
"Name": "tomcat-net-02",
"EndpointID": "b80b5094dd4f81e92eefb95627b372f4d9b35f7b4ea050055743ed8722989a51",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
},
"3cb700e0f0de0125b213c1a2e7efbd1c6f11be9ab8608d328888db3af3a61bbf": {
"Name": "tomcat-net-01",
"EndpointID": "67ad264600870a5b2008f0a4ac117a0be90a70c43fa22ffa9ededdd83890d7a1",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
#在01容器ping02能ping通,反之也可以
root@vm:~# docker exec -it tomcat-net-01 ping 192.168.0.3
PING 192.168.0.3 (192.168.0.3) 56(84) bytes of data.
64 bytes from 192.168.0.3: icmp_seq=1 ttl=64 time=0.168 ms
64 bytes from 192.168.0.3: icmp_seq=2 ttl=64 time=0.083 ms
64 bytes from 192.168.0.3: icmp_seq=3 ttl=64 time=0.089 ms
64 bytes from 192.168.0.3: icmp_seq=4 ttl=64 time=0.083 ms
root@vm:~# docker exec -it tomcat-net-02 ping 192.168.0.2
PING 192.168.0.2 (192.168.0.2) 56(84) bytes of data.
64 bytes from 192.168.0.2: icmp_seq=1 ttl=64 time=0.106 ms
64 bytes from 192.168.0.2: icmp_seq=2 ttl=64 time=0.081 ms
64 bytes from 192.168.0.2: icmp_seq=3 ttl=64 time=0.082 ms
64 bytes from 192.168.0.2: icmp_seq=4 ttl=64 time=0.138 ms
root@vm:~# docker exec -it tomcat-net-02 ping tomcat-net-01
PING tomcat-net-01 (192.168.0.2) 56(84) bytes of data.
64 bytes from tomcat-net-01.mynet (192.168.0.2): icmp_seq=1 ttl=64 time=0.055 ms
64 bytes from tomcat-net-01.mynet (192.168.0.2): icmp_seq=2 ttl=64 time=0.085 ms
64 bytes from tomcat-net-01.mynet (192.168.0.2): icmp_seq=3 ttl=64 time=0.079 ms
64 bytes from tomcat-net-01.mynet (192.168.0.2): icmp_seq=4 ttl=64 time=0.090 ms
我们自定义的网络,docker都已经帮我们维护好了对应的关系,推荐我们平时这样使用网络!
好处:
redis - 不同的集群使用不同的网络,保证集群是健康和安全的
mysql - 不同的集群使用不同的网络,保证集群是健康安全的
3、网络连通
docker run -d -P --name tomcat01 tomcat
docker run -d -P --name tomcat02 tomcat
root@vm:~# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
63c67f8e08a3 tomcat "catalina.sh run" 4 seconds ago Up 1 second 0.0.0.0:49157->8080/tcp, :::49157->8080/tcp tomcat02
8fc21124d029 tomcat "catalina.sh run" 14 seconds ago Up 12 seconds 0.0.0.0:49156->8080/tcp, :::49156->8080/tcp tomcat01
14937f255671 tomcat "catalina.sh run" 11 minutes ago Up 11 minutes 0.0.0.0:49155->8080/tcp, :::49155->8080/tcp tomcat-net-02
3cb700e0f0de tomcat "catalina.sh run" 11 minutes ago Up 11 minutes 0.0.0.0:49154->8080/tcp, :::49154->8080/tcp tomcat-net-01
root@vm:~# docker exec -it tomcat01 ping tomcat-net-01
ping: tomcat-net-01: Name or service not known
# 测试打通 tomcat - mynet
# 连通之后就是将 tomcat01 放到了 mynet 网络下
docker network connect mynet tomcat01
# 一个容器两个ip地址
# 阿里云服务:公网ip 私网ip
root@vm:~# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "d4b3bb9b3b238815fa088f46237f596525ca0e46ad069a837c65516d7cac39e3",
"Created": "2021-05-20T20:38:00.13534475+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"14937f255671ddee30d446a305ad5cf1ea3339af95acecf062ad0687a0766d92": {
"Name": "tomcat-net-02",
"EndpointID": "b80b5094dd4f81e92eefb95627b372f4d9b35f7b4ea050055743ed8722989a51",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
},
"3cb700e0f0de0125b213c1a2e7efbd1c6f11be9ab8608d328888db3af3a61bbf": {
"Name": "tomcat-net-01",
"EndpointID": "67ad264600870a5b2008f0a4ac117a0be90a70c43fa22ffa9ededdd83890d7a1",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
},
"8fc21124d029cdb9cda98243c78772dc7845970c12702bf6eb102d54b879c184": {
"Name": "tomcat01",
"EndpointID": "9a3c5e198bb8fc8bc88a8226add9e762372c7bf7cf89c03be790c552a9d4c490",
"MacAddress": "02:42:c0:a8:00:04",
"IPv4Address": "192.168.0.4/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
root@vm:~# docker exec -it tomcat01 ping tomcat-net-01
PING tomcat-net-01 (192.168.0.2) 56(84) bytes of data.
64 bytes from tomcat-net-01.mynet (192.168.0.2): icmp_seq=1 ttl=64 time=0.110 ms
64 bytes from tomcat-net-01.mynet (192.168.0.2): icmp_seq=2 ttl=64 time=0.101 ms
64 bytes from tomcat-net-01.mynet (192.168.0.2): icmp_seq=3 ttl=64 time=0.101 ms
4、实战:部署Redis集群
# 创建网卡
docker network create redis --subnet 172.38.0.0/16
docker network ls
docker inspect redis
通过脚本创建六个redis配置
# Generate config for six redis cluster nodes under /mydata/redis/node-{1..6}.
# Each node announces a static IP (172.38.0.11 .. 172.38.0.16) on the custom
# "redis" docker network, with cluster mode enabled.
for port in {1..6}; do
  mkdir -p "/mydata/redis/node-${port}/conf"
  # Overwrite (>) instead of append (>>): re-running the script stays
  # idempotent rather than duplicating every config line.
  cat << EOF > "/mydata/redis/node-${port}/conf/redis.conf"
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done
#终端
root@vm:~# for port in $(seq 1 6);\
> do \
> mkdir -p /mydata/redis/node-${port}/conf
> touch /mydata/redis/node-${port}/conf/redis.conf
> cat << EOF >> /mydata/redis/node-${port}/conf/redis.conf
> port 6379
> bind 0.0.0.0
> cluster-enabled yes
> cluster-config-file nodes.conf
> cluster-node-timeout 5000
> cluster-announce-ip 172.38.0.1${port}
> cluster-announce-port 6379
> cluster-announce-bus-port 16379
> appendonly yes
> EOF
> done
root@vm:~# cd /mydata/
root@vm:/mydata# ls
redis
root@vm:/mydata# cd redis/
root@vm:/mydata/redis# ls
node-1 node-2 node-3 node-4 node-5 node-6
root@vm:/mydata/redis# cd node-1
root@vm:/mydata/redis/node-1# ls
conf
root@vm:/mydata/redis/node-1# cd conf/
root@vm:/mydata/redis/node-1/conf# ls
redis.conf
root@vm:/mydata/redis/node-1/conf# cat redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.11
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
# Launch six redis containers (redis-1 .. redis-6) on the custom "redis"
# network, each with a fixed IP and its own config/data volumes.
# NOTE: the original snippet was syntactically invalid shell — the `for` loop
# was missing its `do`/`done` keywords, so it could never have executed.
for port in {1..6}; do
  docker run -p "637${port}:6379" -p "1667${port}:16379" --name "redis-${port}" \
    -v "/mydata/redis/node-${port}/data:/data" \
    -v "/mydata/redis/node-${port}/conf/redis.conf:/etc/redis/redis.conf" \
    -d --net redis --ip "172.38.0.1${port}" redis:5.0.9-alpine3.11 \
    redis-server /etc/redis/redis.conf
done
通过脚本运行六个redis
# Manual (unrolled) equivalent of the loop above: one docker run per node,
# host ports 6371-6376 -> 6379 and 16371-16376 -> 16379, fixed IPs .11-.16.
docker run -p 6371:6379 -p 16371:16379 --name redis-1 -v /mydata/redis/node-1/data:/data -v /mydata/redis/node-1/conf/redis.conf:/etc/redis/redis.conf -d --net redis --ip 172.38.0.11 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
docker run -p 6372:6379 -p 16372:16379 --name redis-2 -v /mydata/redis/node-2/data:/data -v /mydata/redis/node-2/conf/redis.conf:/etc/redis/redis.conf -d --net redis --ip 172.38.0.12 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
docker run -p 6373:6379 -p 16373:16379 --name redis-3 -v /mydata/redis/node-3/data:/data -v /mydata/redis/node-3/conf/redis.conf:/etc/redis/redis.conf -d --net redis --ip 172.38.0.13 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
docker run -p 6374:6379 -p 16374:16379 --name redis-4 -v /mydata/redis/node-4/data:/data -v /mydata/redis/node-4/conf/redis.conf:/etc/redis/redis.conf -d --net redis --ip 172.38.0.14 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
docker run -p 6375:6379 -p 16375:16379 --name redis-5 -v /mydata/redis/node-5/data:/data -v /mydata/redis/node-5/conf/redis.conf:/etc/redis/redis.conf -d --net redis --ip 172.38.0.15 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
docker run -p 6376:6379 -p 16376:16379 --name redis-6 -v /mydata/redis/node-6/data:/data -v /mydata/redis/node-6/conf/redis.conf:/etc/redis/redis.conf -d --net redis --ip 172.38.0.16 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
root@vm:/mydata/redis/node-1/conf# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
92179124254a redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 5 seconds ago Up 3 seconds 0.0.0.0:6376->6379/tcp, :::6376->6379/tcp, 0.0.0.0:16376->16379/tcp, :::16376->16379/tcp redis-6
d340f8bf4741 redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 12 seconds ago Up 10 seconds 0.0.0.0:6375->6379/tcp, :::6375->6379/tcp, 0.0.0.0:16375->16379/tcp, :::16375->16379/tcp redis-5
0f6f9477ebbd redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 19 seconds ago Up 17 seconds 0.0.0.0:6374->6379/tcp, :::6374->6379/tcp, 0.0.0.0:16374->16379/tcp, :::16374->16379/tcp redis-4
55e2a585ba58 redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 26 seconds ago Up 25 seconds 0.0.0.0:6373->6379/tcp, :::6373->6379/tcp, 0.0.0.0:16373->16379/tcp, :::16373->16379/tcp redis-3
196d3c3bf2db redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 41 seconds ago Up 40 seconds 0.0.0.0:6372->6379/tcp, :::6372->6379/tcp, 0.0.0.0:16372->16379/tcp, :::16372->16379/tcp redis-2
3b59c9e38742 redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 3 minutes ago Up 3 minutes 0.0.0.0:6371->6379/tcp, :::6371->6379/tcp, 0.0.0.0:16371->16379/tcp, :::16371->16379/tcp redis-1
docker exec -it redis-1 /bin/sh #redis默认没有bash
##创建集群
redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 172.38.0.13:6379 172.38.0.14:6379 172.38.0.15:6379 172.38.0.16:6379 --cluster-replicas 1
/data # redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 172.38.0.13
:6379 172.38.0.14:6379 172.38.0.15:6379 172.38.0.16:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 172.38.0.15:6379 to 172.38.0.11:6379
Adding replica 172.38.0.16:6379 to 172.38.0.12:6379
Adding replica 172.38.0.14:6379 to 172.38.0.13:6379
M: 8ca26c786e2812c13480df26738472670c5f2f70 172.38.0.11:6379
slots:[0-5460] (5461 slots) master
M: 32aff881f3a330dabb0222aebaef880dfd5359bc 172.38.0.12:6379
slots:[5461-10922] (5462 slots) master
M: 3e1e7ce0308360980d5c37ef8589af91cf756718 172.38.0.13:6379
slots:[10923-16383] (5461 slots) master
S: a21f2460d455461a00e368d8635fc034e7433880 172.38.0.14:6379
replicates 3e1e7ce0308360980d5c37ef8589af91cf756718
S: 2a04e9ec6c898e43b5713bbcc238eccc01105a94 172.38.0.15:6379
replicates 8ca26c786e2812c13480df26738472670c5f2f70
S: a4db594c9c82c15d887cfffed76c6f236d641d69 172.38.0.16:6379
replicates 32aff881f3a330dabb0222aebaef880dfd5359bc
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
....
>>> Performing Cluster Check (using node 172.38.0.11:6379)
M: 8ca26c786e2812c13480df26738472670c5f2f70 172.38.0.11:6379
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: a4db594c9c82c15d887cfffed76c6f236d641d69 172.38.0.16:6379
slots: (0 slots) slave
replicates 32aff881f3a330dabb0222aebaef880dfd5359bc
M: 3e1e7ce0308360980d5c37ef8589af91cf756718 172.38.0.13:6379
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: 32aff881f3a330dabb0222aebaef880dfd5359bc 172.38.0.12:6379
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: a21f2460d455461a00e368d8635fc034e7433880 172.38.0.14:6379
slots: (0 slots) slave
replicates 3e1e7ce0308360980d5c37ef8589af91cf756718
S: 2a04e9ec6c898e43b5713bbcc238eccc01105a94 172.38.0.15:6379
slots: (0 slots) slave
replicates 8ca26c786e2812c13480df26738472670c5f2f70
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
redis-cli -c # 集群
/data # redis-cli -c
127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:233
cluster_stats_messages_pong_sent:225
cluster_stats_messages_sent:458
cluster_stats_messages_ping_received:220
cluster_stats_messages_pong_received:233
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:458
127.0.0.1:6379> cluster nodes
a4db594c9c82c15d887cfffed76c6f236d641d69 172.38.0.16:6379@16379 slave 32aff881f3a330dabb0222aebaef880dfd5359bc 0 1621518339345 6 connected
3e1e7ce0308360980d5c37ef8589af91cf756718 172.38.0.13:6379@16379 master - 0 1621518337542 3 connected 10923-16383
32aff881f3a330dabb0222aebaef880dfd5359bc 172.38.0.12:6379@16379 master - 0 1621518338342 2 connected 5461-10922
a21f2460d455461a00e368d8635fc034e7433880 172.38.0.14:6379@16379 slave 3e1e7ce0308360980d5c37ef8589af91cf756718 0 1621518337542 4 connected
2a04e9ec6c898e43b5713bbcc238eccc01105a94 172.38.0.15:6379@16379 slave 8ca26c786e2812c13480df26738472670c5f2f70 0 1621518338000 5 connected
8ca26c786e2812c13480df26738472670c5f2f70 172.38.0.11:6379@16379 myself,master - 0 1621518339000 1 connected 0-5460
127.0.0.1:6379> set a b
-> Redirected to slot [15495] located at 172.38.0.13:6379
OK
#测试 停掉13的容器
root@vm:~# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
92179124254a redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 13 minutes ago Up 13 minutes 0.0.0.0:6376->6379/tcp, :::6376->6379/tcp, 0.0.0.0:16376->16379/tcp, :::16376->16379/tcp redis-6
d340f8bf4741 redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 13 minutes ago Up 13 minutes 0.0.0.0:6375->6379/tcp, :::6375->6379/tcp, 0.0.0.0:16375->16379/tcp, :::16375->16379/tcp redis-5
0f6f9477ebbd redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 13 minutes ago Up 13 minutes 0.0.0.0:6374->6379/tcp, :::6374->6379/tcp, 0.0.0.0:16374->16379/tcp, :::16374->16379/tcp redis-4
55e2a585ba58 redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 13 minutes ago Up 13 minutes 0.0.0.0:6373->6379/tcp, :::6373->6379/tcp, 0.0.0.0:16373->16379/tcp, :::16373->16379/tcp redis-3
196d3c3bf2db redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 13 minutes ago Up 13 minutes 0.0.0.0:6372->6379/tcp, :::6372->6379/tcp, 0.0.0.0:16372->16379/tcp, :::16372->16379/tcp redis-2
3b59c9e38742 redis:5.0.9-alpine3.11 "docker-entrypoint.s…" 16 minutes ago Up 16 minutes 0.0.0.0:6371->6379/tcp, :::6371->6379/tcp, 0.0.0.0:16371->16379/tcp, :::16371->16379/tcp redis-1
63c67f8e08a3 tomcat "catalina.sh run" 54 minutes ago Up 54 minutes 0.0.0.0:49157->8080/tcp, :::49157->8080/tcp tomcat02
8fc21124d029 tomcat "catalina.sh run" 55 minutes ago Up 55 minutes 0.0.0.0:49156->8080/tcp, :::49156->8080/tcp tomcat01
14937f255671 tomcat "catalina.sh run" About an hour ago Up About an hour 0.0.0.0:49155->8080/tcp, :::49155->8080/tcp tomcat-net-02
3cb700e0f0de tomcat "catalina.sh run" About an hour ago Up About an hour 0.0.0.0:49154->8080/tcp, :::49154->8080/tcp tomcat-net-01
root@vm:~# docker stop 55e2a585ba58
55e2a585ba58
#13的容器挂掉,从14的容器取值
172.38.0.13:6379> get a
Could not connect to Redis at 172.38.0.13:6379: Host is unreachable
(6.11s)
not connected>
/data # redis-cli -c
127.0.0.1:6379> get a
-> Redirected to slot [15495] located at 172.38.0.14:6379
"b"
#查看节点
172.38.0.14:6379> cluster nodes
a21f2460d455461a00e368d8635fc034e7433880 172.38.0.14:6379@16379 myself,master - 0 1621518633000 7 connected 10923-16383
3e1e7ce0308360980d5c37ef8589af91cf756718 172.38.0.13:6379@16379 master,fail - 1621518518896 1621518517793 3 connected
a4db594c9c82c15d887cfffed76c6f236d641d69 172.38.0.16:6379@16379 slave 32aff881f3a330dabb0222aebaef880dfd5359bc 0 1621518633087 6 connected
2a04e9ec6c898e43b5713bbcc238eccc01105a94 172.38.0.15:6379@16379 slave 8ca26c786e2812c13480df26738472670c5f2f70 0 1621518634088 5 connected
32aff881f3a330dabb0222aebaef880dfd5359bc 172.38.0.12:6379@16379 master - 0 1621518632587 2 connected 5461-10922
8ca26c786e2812c13480df26738472670c5f2f70 172.38.0.11:6379@16379 master - 0 1621518632587 1 connected 0-5460
docker搭建redis集群完成!