安装NFS
系统环境为CentOS,一共有7台机子组成了Docker Swarm集群,每台机子都要安装NFS
IP | Docker角色 | NFS角色 |
---|---|---|
111.111.3.41 | Manager | Server |
111.111.3.42 | Manager | Client |
111.111.3.43 | Manager | Client |
111.111.3.44 | Worker | Client |
111.111.3.45 | Manager | Client |
111.111.3.46 | Manager | Client |
111.111.3.47 | Worker | Client |
安装依赖
# Install the NFS utilities package (provides both server and client tools);
# run this on every node in the cluster.
yum -y install nfs-utils
配置文件
使用111.111.3.41为NFS的服务端
先在/root目录下创建share文件夹,并赋予chmod 755权限
配置共享目录
# Edit the NFS exports file to define the shared directory.
nano /etc/exports
# Export /root/share to every host in the 111.111.3.0/24 subnet.
# rw: read-write access; async: reply before data hits disk (faster, less safe);
# insecure: accept client connections from ports above 1024;
# no_root_squash: remote root keeps root privileges on the share;
# anonuid/anongid=1000: uid/gid used for anonymous access
# (NOTE(review): these only take effect when squashing applies — with
# no_root_squash and no all_squash they are mostly inert; confirm intent).
/root/share 111.111.3.0/24(rw,async,insecure,anonuid=1000,anongid=1000,no_root_squash)
生效配置
# Re-export everything in /etc/exports (-r) and print what was exported (-v).
exportfs -rv
启动NFS服务
# rpcbind must be enabled/started first: the NFS RPC services (mountd, statd,
# nlockmgr) register themselves with it, as shown by `rpcinfo -p` below.
systemctl enable rpcbind
systemctl start rpcbind
# Then enable and start the NFS server itself.
systemctl enable nfs-server
systemctl start nfs-server
查看NFS服务状态
[root@KD111111003041 ~]# rpcinfo -p
program vers proto port service
100000 4 tcp 111 portmapper
100000 3 tcp 111 portmapper
100000 2 tcp 111 portmapper
100000 4 udp 111 portmapper
100000 3 udp 111 portmapper
100000 2 udp 111 portmapper
100024 1 udp 45889 status
100024 1 tcp 42518 status
100005 1 udp 20048 mountd
100005 1 tcp 20048 mountd
100005 2 udp 20048 mountd
100005 2 tcp 20048 mountd
100005 3 udp 20048 mountd
100005 3 tcp 20048 mountd
100003 3 tcp 2049 nfs
100003 4 tcp 2049 nfs
100227 3 tcp 2049 nfs_acl
100003 3 udp 2049 nfs
100003 4 udp 2049 nfs
100227 3 udp 2049 nfs_acl
100021 1 udp 37813 nlockmgr
100021 3 udp 37813 nlockmgr
100021 4 udp 37813 nlockmgr
100021 1 tcp 40301 nlockmgr
100021 3 tcp 40301 nlockmgr
100021 4 tcp 40301 nlockmgr
[root@KD111111003041 ~]# cat /var/lib/nfs/etab
/root/share 111.111.3.0/24(rw,async,wdelay,hide,nocrossmnt,insecure,no_root_squash,no_all_squash,no_subtree_check,secure_locks,acl,no_pnfs,anonuid=1000,anongid=1000,sec=sys,rw,insecure,no_root_squash,no_all_squash)
Docker Swarm中使用NFS
方式1:Docker Service 发布
创建Docker volume
每个docker节点都要手动创建相同名称的Docker volume
# Create a local-driver volume backed by the NFS share.
# type=nfs selects an NFS mount; o=addr=...,rw passes the mount options
# (server address and read-write); device=:/root/share is the exported path
# on the server. The actual NFS mount happens when a container first uses
# the volume, so the command succeeds even if the server is unreachable.
docker volume create --driver local \
--opt type=nfs \
--opt o=addr=111.111.3.41,rw \
--opt device=:/root/share \
foo33
查看volume
[root@KD111111003041 ~]# docker volume ls
DRIVER VOLUME NAME
local app_foo_new
local foo33
local portainer_data
查看foo33的详细
[root@KD111111003041 ~]# docker volume inspect foo33
[
{
"CreatedAt": "2020-09-17T09:55:08+08:00",
"Driver": "local",
"Labels": {},
"Mountpoint": "/var/lib/docker/volumes/foo33/_data",
"Name": "foo33",
"Options": {
"device": ":/root/share",
"o": "addr=111.111.3.41,rw",
"type": "nfs"
},
"Scope": "local"
}
]
发布服务
在manager节点下创建服务
# Create a Swarm service with 3 replicas of nginx, publishing host port 84
# to container port 80, and mounting the pre-created NFS-backed volume
# "foo33" at /app/share inside each container. Because the volume was
# created manually with the same name on every node, each replica mounts
# the same NFS share regardless of where it is scheduled.
docker service create \
--name test-nginx-nfs \
--publish 84:80 \
--mount type=volume,source=foo33,destination=/app/share \
--replicas 3 \
nginx
方式2:使用 docker stack 方式发布
编写docker-compose.yml文档
# docker-compose.yml for `docker stack deploy`.
# NOTE: the original document lost all YAML indentation; the nesting below
# restores a valid compose file with the same keys and values.
version: '3.8'

services:
  nginx-test5:
    image: nginx:latest
    deploy:
      mode: replicated
      replicas: 3
      restart_policy:
        condition: on-failure
    ports:
      # Publish host port 88 to container port 80.
      - "88:80"
    networks:
      my-overlay-network:
        aliases:
          - nginx-test5
    volumes:
      # Mount the NFS-backed named volume at /app/share in the container.
      - "foo_new:/app/share"

volumes:
  # With `docker stack deploy` this volume is created automatically on each
  # node that runs a replica — no manual `docker volume create` needed.
  foo_new:
    driver: local
    driver_opts:
      type: "nfs"
      o: "addr=111.111.3.41,rw"
      device: ":/root/share"

networks:
  my-overlay-network:
    driver: overlay
使用docker stack编排方式,无需手动创建Docker volume
发布服务
将编写好的docker-compose.yml上传到manager节点的机子上的任意目录下,然后在控制台转到对应的目录下输入命令
[root@KD111111003041 ~]# docker stack deploy -c docker-compose.yml app
Creating network app_my-overlay-network
Creating service app_nginx-test5
[root@KD111111003041 ~]# docker stack ls
NAME SERVICES ORCHESTRATOR
app 1 Swarm
[root@KD111111003041 ~]# docker stack services app
ID NAME MODE REPLICAS IMAGE PORTS
u02yjjdzt307 app_nginx-test5 replicated 3/3 nginx:latest *:88->80/tcp
[root@KD111111003041 ~]# docker service ps app_nginx-test5
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
m246y53tmslk app_nginx-test5.1 nginx:latest KD111111003042.ppp-bb.dion.ne.jp Running Running 3 minutes ago
oxsyphx6qnvh app_nginx-test5.2 nginx:latest KD111111003047.ppp-bb.dion.ne.jp Running Running 3 minutes ago
nj4l0193yxrd app_nginx-test5.3 nginx:latest KD111111003044.ppp-bb.dion.ne.jp Running Running 3 minutes ago
[root@KD111111003041 ~]#