- A three-manager swarm tolerates a maximum loss of one manager.
- A five-manager swarm tolerates a maximum simultaneous loss of two manager nodes.
- An N-manager cluster tolerates the loss of at most (N-1)/2 managers.
- Docker recommends a maximum of seven manager nodes for a swarm.
Important Note: Adding more managers does NOT mean increased scalability or higher performance. In general, the opposite is true.
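This follows from the Raft quorum requirement: the swarm stays manageable only while floor(N/2)+1 managers are reachable, so at most N minus that quorum, i.e. (N-1)/2 managers, may fail:
- 3 managers: quorum 2, tolerates 1 failure
- 5 managers: quorum 3, tolerates 2 failures
- 7 managers: quorum 4, tolerates 3 failures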
[root@node4 ~]# docker swarm init --advertise-addr 192.168.8.201
Swarm initialized: current node (avex4e0pezsywuzb4aqjm5zf1) is now a manager.
To add a worker to this swarm, run the following command:
docker swarm join \
--token SWMTKN-1-4o56a1xdy98sbpy18rtbxezsb2olre9l1im95q2tbepkvc75u2-7mk2byc266iibj36ev64ljtla \
192.168.8.201:2377
To add a manager to this swarm, run the following command:
docker swarm join \
--token SWMTKN-1-4o56a1xdy98sbpy18rtbxezsb2olre9l1im95q2tbepkvc75u2-dte6sh24rjwqcoq34w1uzplaa \
192.168.8.201:2377
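If you need these join commands again later, any manager can re-print them, and a leaked token can be rotated. A minimal sketch (run on a manager):
docker swarm join-token worker            # print the worker join command again
docker swarm join-token manager           # print the manager join command again
docker swarm join-token --rotate worker   # invalidate the old worker token and issue a new one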
[root@node4 ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
3cpke3p5dugg1xkvrp2y5719q node6.example.com Ready Active Reachable
4zqj46vjvr2htekleqha5f45s node2.example.com Ready Active
82v5xgvmvwsecqb4hz6jrr3ci node1.example.com Ready Active
8x4acgqf2h3uq70ulwcs5fs1f node5.example.com Ready Active Reachable
8y9fut6xh4k85bpp47owi1eol node3.example.com Ready Active
avex4e0pezsywuzb4aqjm5zf1 * node4.example.com Ready Active Leader
Tip: docker node ls can only be run on a manager node.
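On a worker you can still confirm swarm membership and role from the Swarm section of docker info (it shows whether the swarm is active and whether this node is a manager):
docker info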
[root@node6 ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
5dafc94f57c3 bridge bridge local
6f60b175cf00 docker_gwbridge bridge local
c039662c7673 host host local
1mhcme8jygd4 ingress overlay swarm
7dbc1f6966e3 none null local
[root@node6 ~]# docker network inspect ingress
[
{
"Name": "ingress",
"Id": "1mhcme8jygd43nf82yhbdda4x",
"Scope": "swarm",
"Driver": "overlay",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "10.255.0.0/16",
"Gateway": "10.255.0.1"
}
]
},
"Internal": false,
"Containers": {
"ingress-sbox": {
"Name": "ingress-endpoint",
"EndpointID": "8258a07deb8387f9d7b0f86512c0f7df42166e9502c1433a8f4293bd2fcd517b",
"MacAddress": "02:42:0a:ff:00:05",
"IPv4Address": "10.255.0.5/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.driver.overlay.vxlanid_list": "256"
},
"Labels": {}
}
]
Of course, you can also create a custom overlay network:
https://docs.docker.com/engine/userguide/networking/get-started-overlay/
docker network create --driver overlay --subnet 10.0.9.0/24 swarm-network
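A quick sanity check for the custom network (swarm-network is just the name used in the command above): it should be listed with swarm scope on the managers, and it typically only appears on a worker once a task attached to it is scheduled there.
docker network ls --filter driver=overlay   # expect ingress plus swarm-network, scope swarm
docker network inspect swarm-network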
IV. Managing the swarm
[root@node4 ~]# docker service create --replicas=1 --name redis --network ingress --endpoint-mode vip --publish 6379:6379 192.168.8.254:5000/redis
6r7th5xvkuvetsy6wjgbfs00h
[root@node4 ~]# docker service scale redis=3
redis scaled to 3
[root@node4 ~]# docker service ps redis
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR
31pjo1yrxygkq10i8ep639eat redis.1 192.168.8.254:5000/redis node4.example.com Running Running 34 seconds ago
alfiv9kyo4mxkm6h5j1ggvn93 redis.2 192.168.8.254:5000/redis node6.example.com Running Preparing 5 seconds ago
94yf3q1sqwviazu69nw2fo5j6 redis.3 192.168.8.254:5000/redis node3.example.com Running Running 4 seconds ago
[root@node4 ~]# docker service rm redis
redis
[root@node4 ~]# docker service create --replicas 6 --name redis --update-parallelism 2 --update-delay 10s --update-failure-action continue --network ingress --endpoint-mode vip --publish 6379:6379 192.168.8.254:5000/redis
7w0f6dcjdwtbonxe180rdfbzp
[root@node4 ~]# docker service ls
ID NAME REPLICAS IMAGE COMMAND
7w0f6dcjdwtb redis 6/6 192.168.8.254:5000/redis
[root@node4 ~]# docker service ps redis
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR
0cqgbrpk7cpcwsute6swnnxb6 redis.1 192.168.8.254:5000/redis node6.example.com Running Running 14 seconds ago
b89fh3gslxb4031erpisqgk52 redis.2 192.168.8.254:5000/redis node4.example.com Running Running 15 seconds ago
dmsxpmbrujyhpf468fiw04789 redis.3 192.168.8.254:5000/redis node5.example.com Running Running 15 seconds ago
2rnh7gloduoux8fcuubthshyz redis.4 192.168.8.254:5000/redis node3.example.com Running Running 15 seconds ago
18z4ogihy5xaaki3li13vfrbi redis.5 192.168.8.254:5000/redis node1.example.com Running Running 15 seconds ago
7d56gdx74gzdwcsvs7s6qksy9 redis.6 192.168.8.254:5000/redis node2.example.com Running Running 15 seconds ago
[root@node4 ~]# docker service inspect --pretty redis
ID: 7w0f6dcjdwtbonxe180rdfbzp
Name: redis
Mode: Replicated
Replicas: 6
Placement:
UpdateConfig:
Parallelism: 2
Delay: 10s
On failure: pause
ContainerSpec:
Image: 192.168.8.254:5000/redis
Resources:
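With --update-parallelism 2 and --update-delay 10s in place, a later change such as a new image is rolled out two tasks at a time with a 10-second pause between batches. A minimal sketch (the :3.2 tag is only an illustrative assumption):
docker service update --image 192.168.8.254:5000/redis:3.2 redis
docker service ps redis        # old tasks show Shutdown while replacements start in batches of two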
[root@node4 ~]# docker service ps nginx
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR
bfx5sv59nzsfutfjlcaz98gtt nginx.1 192.168.8.254:5000/nginx node4.example.com Running Running about a minute ago
c4i1sepzfd0v6p9rgncxyud6t nginx.2 192.168.8.254:5000/nginx node3.example.com Running Running 2 minutes ago
1zm2fpa4ufh0q2jc8iywor4iw nginx.3 192.168.8.254:5000/nginx node6.example.com Running Running about a minute ago
f2u8ywqoccekr8qz5pvle2kt2 nginx.4 192.168.8.254:5000/nginx node1.example.com Running Running 2 minutes ago
f2w57lnt1495grq366b34eypd nginx.5 192.168.8.254:5000/nginx node4.example.com Running Running about a minute ago
7nu5a435xch7jux3lqlyoz095 nginx.6 192.168.8.254:5000/nginx node2.example.com Running Running 2 minutes ago
2v10rpt7bzttjc6q6wpjf32d8 nginx.7 192.168.8.254:5000/nginx node5.example.com Running Running about a minute ago
d5i7uxtv2hen5nu06hagrjflt nginx.8 192.168.8.254:5000/nginx node3.example.com Running Running 2 minutes ago
cch34fphg8coce9gn0umetkud nginx.9 192.168.8.254:5000/nginx node2.example.com Running Running 2 minutes ago
9afmuacg5d9w53vokwup4wb0i nginx.10 192.168.8.254:5000/nginx node5.example.com Running Running about a minute ago
3mwnc4sb61t94bqcbw55xfuqi nginx.11 192.168.8.254:5000/nginx node4.example.com Running Running 59 seconds ago
amx18cuwwhsei4j19jsjsuutq nginx.12 192.168.8.254:5000/nginx node3.example.com Running Running about a minute ago
3tjgqroqhw8ty6o5yr1zyh8e8 nginx.13 192.168.8.254:5000/nginx node6.example.com Running Running 59 seconds ago
7y2jtgvyg9shs1lvx6ku8se46 nginx.14 192.168.8.254:5000/nginx node1.example.com Running Running 59 seconds ago
i. Drain the node for maintenance
[root@node4 ~]# docker node update --availability drain node1.example.com
node1.example.com
[root@node4 ~]# docker service ps nginx
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR
bfx5sv59nzsfutfjlcaz98gtt nginx.1 192.168.8.254:5000/nginx node4.example.com Running Running 2 minutes ago
c4i1sepzfd0v6p9rgncxyud6t nginx.2 192.168.8.254:5000/nginx node3.example.com Running Running 3 minutes ago
1zm2fpa4ufh0q2jc8iywor4iw nginx.3 192.168.8.254:5000/nginx node6.example.com Running Running 2 minutes ago
02qmxvoa9tjeyvtvetantm9ki nginx.4 192.168.8.254:5000/nginx node5.example.com Running Running 10 seconds ago
f2u8ywqoccekr8qz5pvle2kt2 \_ nginx.4 192.168.8.254:5000/nginx node1.example.com Shutdown Shutdown 11 seconds ago
f2w57lnt1495grq366b34eypd nginx.5 192.168.8.254:5000/nginx node4.example.com Running Running 2 minutes ago
7nu5a435xch7jux3lqlyoz095 nginx.6 192.168.8.254:5000/nginx node2.example.com Running Running 3 minutes ago
2v10rpt7bzttjc6q6wpjf32d8 nginx.7 192.168.8.254:5000/nginx node5.example.com Running Running 2 minutes ago
d5i7uxtv2hen5nu06hagrjflt nginx.8 192.168.8.254:5000/nginx node3.example.com Running Running 3 minutes ago
cch34fphg8coce9gn0umetkud nginx.9 192.168.8.254:5000/nginx node2.example.com Running Running 3 minutes ago
9afmuacg5d9w53vokwup4wb0i nginx.10 192.168.8.254:5000/nginx node5.example.com Running Running 2 minutes ago
3mwnc4sb61t94bqcbw55xfuqi nginx.11 192.168.8.254:5000/nginx node4.example.com Running Running about a minute ago
amx18cuwwhsei4j19jsjsuutq nginx.12 192.168.8.254:5000/nginx node3.example.com Running Running about a minute ago
3tjgqroqhw8ty6o5yr1zyh8e8 nginx.13 192.168.8.254:5000/nginx node6.example.com Running Running about a minute ago
34xw07c0bsptc81gv5trt5uv7 nginx.14 192.168.8.254:5000/nginx node2.example.com Running Running 10 seconds ago
7y2jtgvyg9shs1lvx6ku8se46 \_ nginx.14 192.168.8.254:5000/nginx node1.example.com Shutdown Shutdown 11 seconds ago
5q5upnii1tgypmml2ebjr6hcu nginx.15 192.168.8.254:5000/nginx node5.example.com Running Running 10 seconds ago
6tpetexucqv3li2rtou6vh6dr \_ nginx.15 192.168.8.254:5000/nginx node1.example.com Shutdown Shutdown 11 seconds ago
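Draining does not stop the node itself; it only tells the scheduler to shut down that node's tasks and reschedule them on the remaining Active nodes. To review a single node's tasks (including the shut-down ones), you can run, on a manager:
docker node ps node1.example.com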
[root@node4 ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
3cpke3p5dugg1xkvrp2y5719q node6.example.com Ready Active Reachable
4zqj46vjvr2htekleqha5f45s node2.example.com Ready Active
82v5xgvmvwsecqb4hz6jrr3ci node1.example.com Ready Drain
8x4acgqf2h3uq70ulwcs5fs1f node5.example.com Ready Active Reachable
8y9fut6xh4k85bpp47owi1eol node3.example.com Ready Active
avex4e0pezsywuzb4aqjm5zf1 * node4.example.com Ready Active Leader
ii. Set the node back to active once maintenance is done
[root@node4 ~]# docker node update --availability active node1.example.com
node1.example.com
[root@node4 ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
3cpke3p5dugg1xkvrp2y5719q node6.example.com Ready Active Reachable
4zqj46vjvr2htekleqha5f45s node2.example.com Ready Active
82v5xgvmvwsecqb4hz6jrr3ci node1.example.com Ready Active
8x4acgqf2h3uq70ulwcs5fs1f node5.example.com Ready Active Reachable
8y9fut6xh4k85bpp47owi1eol node3.example.com Ready Active
avex4e0pezsywuzb4aqjm5zf1 * node4.example.com Ready Active Leader
[root@node4 ~]# docker node promote node{2,3}.example.com
Node node2.example.com promoted to a manager in the swarm.
Node node3.example.com promoted to a manager in the swarm.
This is more convenient than running docker node update --role manager on each corresponding node.
[root@node4 ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
3cpke3p5dugg1xkvrp2y5719q node6.example.com Ready Active Reachable
4zqj46vjvr2htekleqha5f45s node2.example.com Ready Active Reachable
82v5xgvmvwsecqb4hz6jrr3ci node1.example.com Ready Active
8x4acgqf2h3uq70ulwcs5fs1f node5.example.com Ready Active Reachable
8y9fut6xh4k85bpp47owi1eol node3.example.com Ready Active Reachable
avex4e0pezsywuzb4aqjm5zf1 * node4.example.com Ready Active Leader
[root@node4 ~]# docker node demote node{2,3}.example.com
Manager node2.example.com demoted in the swarm.
Manager node3.example.com demoted in the swarm.
This is more convenient than running docker node update --role worker on each corresponding node.
7. Removing a node from the swarm
i. Leave the swarm
[root@node6 ~]# docker swarm leave --force
Node left the swarm.
A manager node must leave with the --force flag; a worker node can simply leave.
[root@node4 ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
2jw9hn8cmm701ewnsdxrhpfgo node1.example.com Ready Active
3cpke3p5dugg1xkvrp2y5719q node6.example.com Down Active Unreachable
4zqj46vjvr2htekleqha5f45s node2.example.com Ready Active
8x4acgqf2h3uq70ulwcs5fs1f node5.example.com Ready Active Reachable
8y9fut6xh4k85bpp47owi1eol node3.example.com Ready Active
avex4e0pezsywuzb4aqjm5zf1 * node4.example.com Ready Active Leader
Note: once a node has left the swarm its status changes to Down. If it was a worker it can then be removed directly with docker node rm; if it was a manager it must first be demoted to a worker before it can be removed.
[root@node4 ~]# docker node rm node6.example.com
Error response from daemon: rpc error: code = 9 desc = node 3cpke3p5dugg1xkvrp2y5719q is a cluster manager and is a member of the raft cluster. It must be demoted to worker before removal
ii. Demote it to a worker
[root@node4 ~]# docker node demote node6.example.com
Manager node6.example.com demoted in the swarm.
iii. Remove the node
[root@node4 ~]# docker node rm node6.example.com
node6.example.com
A node whose status is not Down cannot be removed without --force, for example:
$ docker node rm node9
Error response from daemon: rpc error: code = 9 desc = node node9 is not down and can't be removed
$ docker node rm --force node9
Node node9 removed from swarm
i. Create a volume
[root@node4 ~]# docker volume create --name data
data
[root@node4 ~]# docker volume ls
DRIVER VOLUME NAME
local b555e4b4cdc8a96d657f3f714b180324f8a69fa9152f305ff6b5c5e64c58044d
local data
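To see where the named volume lives on this node (with the default local driver the mountpoint is normally under /var/lib/docker/volumes/), one option is:
docker volume inspect data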
ii. Attach the volume to a service
[root@node4 ~]# docker service create --replicas=3 --name nginx --network ingress --endpoint-mode vip --publish 80:80 --publish 443:443 --mount type=volume,src=data,dst=/opt 192.168.8.254:5000/nginx
978a4hagibhj7jp969th5apvs
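Note that a named volume using the local driver is not shared between hosts: each node that runs a task of this service creates its own independent data volume. You can confirm this per node, for example on any node listed by docker service ps nginx below:
docker volume ls | grep data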
[root@node4 ~]# docker service inspect redis
[
{
"ID": "978a4hagibhj7jp969th5apvs",
"Version": {
"Index": 407
},
"CreatedAt": "2016-09-23T14:44:25.861546182Z",
"UpdatedAt": "2016-09-23T14:44:25.867551569Z",
"Spec": {
"Name": "redis",
"TaskTemplate": {
"ContainerSpec": {
"Image": "192.168.8.254:5000/nginx",
"Mounts": [
{
"Type": "volume",
"Source": "data",
"Target": "/opt",
"VolumeOptions": {
"DriverConfig": {
"Name": "fake",
"Options": {
"size": "100m",
"uid": "1000"
}
}
... ...
[root@node4 ~]# docker service ps nginx
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR
4sp8inl597sz4ath6nk1ru1e3 nginx.1 192.168.8.254:5000/nginx node3.example.com Running Running 7 minutes ago
8xx1ehn9cawxiudb3su5orl1y nginx.2 192.168.8.254:5000/nginx node4.example.com Running Running 6 minutes ago
3lbdc4hvf8vq2l12hcaf23wly nginx.3 192.168.8.254:5000/nginx node2.example.com Running Running 6 minutes ago
[root@node4 ~]# curl node2.example.com -I
HTTP/1.1 200 OK
Server: nginx/1.11.3
Date: Fri, 23 Sep 2016 15:08:37 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 26 Jul 2016 14:54:48 GMT
Connection: keep-alive
ETag: "579779b8-264"
Accept-Ranges: bytes
[root@node4 ~]# curl node4.example.com -I
HTTP/1.1 200 OK
Server: nginx/1.11.3
Date: Fri, 23 Sep 2016 15:08:52 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 26 Jul 2016 14:54:48 GMT
Connection: keep-alive
ETag: "579779b8-264"
Accept-Ranges: bytes
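Both curl checks above happen to hit nodes that run an nginx task (node2 and node4), but because port 80 is published on the ingress routing mesh, a swarm node without a local task should answer just as well, e.g.:
curl node5.example.com -I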
[root@node4 ~]# docker service inspect nginx
[
{
"ID": "a0jv3kz45qdvkq6qp70w6vu69",
"Version": {
"Index": 743
},
"CreatedAt": "2016-09-23T15:00:59.368270075Z",
"UpdatedAt": "2016-09-23T15:01:25.350289424Z",
"Spec": {
"Name": "nginx",
"TaskTemplate": {
"ContainerSpec": {
"Image": "192.168.8.254:5000/nginx",
"Mounts": [
{
"Type": "volume",
"Source": "data",
"Target": "/opt"
}
]
},
"Resources": {
"Limits": {},
"Reservations": {}
},
"RestartPolicy": {
"Condition": "any",
"MaxAttempts": 0
},
"Placement": {}
},
"Mode": {
"Replicated": {
"Replicas": 3
}
},
"UpdateConfig": {
"Parallelism": 1,
"FailureAction": "pause"
},
"Networks": [
{
"Target": "1mhcme8jygd43nf82yhbdda4x"
}
],
"EndpointSpec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 443,
"PublishedPort": 443
},
{
"Protocol": "tcp",
"TargetPort": 80,
"PublishedPort": 80
}
]
}
},
"Endpoint": {
"Spec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 443,
"PublishedPort": 443
},
{
"Protocol": "tcp",
"TargetPort": 80,
"PublishedPort": 80
}
]
},
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 443,
"PublishedPort": 443
},
{
"Protocol": "tcp",
"TargetPort": 80,
"PublishedPort": 80
}
],
"VirtualIPs": [
{
"NetworkID": "1mhcme8jygd43nf82yhbdda4x",
"Addr": "10.255.0.2/16"
}
]
},
"UpdateStatus": {
"StartedAt": "0001-01-01T00:00:00Z",
"CompletedAt": "0001-01-01T00:00:00Z"
}
}
]
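Instead of reading the full JSON, individual fields can be pulled out with a Go template; for example, to print just the virtual IPs assigned to the service:
docker service inspect -f '{{json .Endpoint.VirtualIPs}}' nginx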