Docker swarm集群

主机名 -------------------IP地址/子网掩码 ------------- 准备
master(管理节点)192.168.200.10/24 |docker、同步时间,关闭防火墙
node1(工作节点) 192.168.200.20/24 |docker、同步时间,关闭防火墙
node2(工作节点) 192.168.200.30/24 |docker、同步时间,关闭防火墙

如果一个swarm节点宕机,则该节点会从swarm集群中移出,在这个节点上运行的容器会被调度到其他节点上,以满足指定数量的副本保持运行。
如果node2节点宕机,又重新启动服务器后,node2节点原有的服务不会自动调度回node2节点上,只能等到其他节点出现故障,或手动停止容器后,再根据内部算法重新转移task实例到其他节点上。

#################所有节点拉取镜像#######################
[root@master ~]# docker pull swarm
[root@node1 ~]# docker pull swarm
[root@node2 ~]# docker pull swarm
#################所有节点开启端口#######################
[root@master ~]# cat /etc/sysconfig/docker            //编辑配置文件
...
OPTIONS='--selinux-enabled --log-driver=journald --signature-verification=false -H tcp://0.0.0.0:2345 -H unix:///var/run/docker.sock'        //添加两个-H参数(注意unix套接字地址为unix:///var/run/docker.sock,共三个斜杠)
...

[root@master ~]#systemctl daemon-reload
[root@master ~]#systemctl restart docker        //重启服务

[root@master ~]# netstat -ntpl                //查看端口
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      1567/sshd           
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      3750/master         
tcp6       0      0 :::5000                 :::*                    LISTEN      8441/docker-proxy-c
tcp6       0      0 :::2345                 :::*                    LISTEN      1563/dockerd-curren
tcp6       0      0 :::8080                 :::*                    LISTEN      8547/docker-proxy-c
tcp6       0      0 :::21                   :::*                    LISTEN      1623/vsftpd         
tcp6       0      0 :::22                   :::*                    LISTEN      1567/sshd           
tcp6       0      0 ::1:25                  :::*                    LISTEN      3750/master         
tcp6       0      0 :::2368                 :::*                    LISTEN      8503/docker-proxy-c


###############创建集群########################
[root@master ~]# docker swarm init --advertise-addr 192.168.200.10            //在master上初始化集群,并获取唯一的token,作为集群的唯一标识
Swarm initialized: current node (3n9ea7rrccjpnkftnvgdu2lvm) is now a manager.

To add a worker to this swarm, run the following command:

    docker swarm join \
    --token SWMTKN-1-2degwouv8eomhgi0ihnrd6at2nylppexotmgz1lqhfzg2bkx8s-1n8d4n1iocpum8p3xwmzrswv8 \
    192.168.200.10:2377                                                                                                //在node节点输入此命令,加入集群

To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.

[root@node1 ~ ]# docker swarm join \
>     --token SWMTKN-1-2degwouv8eomhgi0ihnrd6at2nylppexotmgz1lqhfzg2bkx8s-1n8d4n1iocpum8p3xwmzrswv8 \
>     192.168.200.10:2377
This node joined a swarm as a worker.

[root@node2 ~]# docker swarm join \
>     --token SWMTKN-1-2degwouv8eomhgi0ihnrd6at2nylppexotmgz1lqhfzg2bkx8s-1n8d4n1iocpum8p3xwmzrswv8 \
>     192.168.200.10:2377
This node joined a swarm as a worker.

[root@master ~]# docker node ls                                                //在master上查看各节点信息
ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
3n9ea7rrccjpnkftnvgdu2lvm *  master    Ready   Active        Leader
73bumrlhdsgnedlj04qdcn3s4    node1     Ready   Active        
yj3qz28zlsy811rovq9xyasw8    node2     Ready   Active  

###############部署nginx服务########################
//在三个节点都拉取nginx镜像    docker pull nginx
[root@master ~]#  docker network create -d overlay nginx_net            //在master上创建nginx_net网络,用于使不同主机上的容器互通
65zuplzifr0tsxdooi80qy00u
[root@master ~]# docker network ls            //查看
NETWORK ID          NAME                DRIVER              SCOPE
593ab00a3b22        bridge              bridge              local
8e77d9b52d65        docker-br0          bridge              local
2c9a80eca950        ghost_default       bridge              local
3099b165e120        host                host                local
v9bdpxg3ttmd        ingress             overlay             swarm
65zuplzifr0t        nginx_net           overlay             swarm
daee69d4ba1b        none                null                local


[root@master ~]# docker service create --replicas 1 --network nginx_net --name my-test -p 9999:80 nginx        //创建副本数为1的nginx容器
bm45po07ehqag0lnzs13x7857
[root@master ~]# docker service ls                        //查看服务
ID            NAME     MODE        REPLICAS  IMAGE
bm45po07ehqa  my-test  replicated  0/1       nginx:latest

[root@master ~]# docker service inspect --pretty my-test        //查看my-test服务的信息
ID:             bm45po07ehqag0lnzs13x7857
Name:           my-test
Service Mode:   Replicated
Replicas:      1
Placement:
UpdateConfig:
Parallelism:   1
On failure:    pause
Max failure ratio: 0
ContainerSpec:
Image:         nginx:latest@sha256:926b086e1234b6ae9a11589c4cece66b267890d24d1da388c96dd8795b2ffcfb
Resources:
Networks: nginx_net
Endpoint Mode:  vip
Ports:
PublishedPort 9999
  Protocol = tcp
  TargetPort = 80

[root@master ~]# docker service ps my-test            //查询容器运行在哪个节点
ID            NAME       IMAGE         NODE    DESIRED STATE  CURRENT STATE          ERROR  PORTS
bqgp2z2o1s8j  my-test.5  nginx:latest  master  Running        Running 2 minutes ago    

[root@master ~]# docker service scale my-test=5        //将my-test容器扩展到5个
my-test scaled to 5
[root@master ~]# docker service ps my-test            //查看5个容器都在哪里工作
ID            NAME       IMAGE         NODE    DESIRED STATE  CURRENT STATE               ERROR  PORTS
v8oknut563rd  my-test.1  nginx:latest  node2   Running        Running 8 seconds ago              
b1ecmrnjllm3  my-test.2  nginx:latest  node1   Running        Running about a minute ago         
raw64lteih8i  my-test.3  nginx:latest  node1   Running        Running about a minute ago         
nex3g1uvj8ql  my-test.4  nginx:latest  master  Running        Running about a minute ago         
bqgp2z2o1s8j  my-test.5  nginx:latest  master  Running        Running 4 minutes ago      



###############使用数据卷########################
//在三台节点上都执行以下命令,都创建名为volume-test的数据卷
[root@master ~]# docker volume create --name volume-test        //创建volume-test数据卷
volume-test
[root@master ~]# docker volume inspect volume-test            //查看数据卷存放位置
[
    {
        "Driver": "local",
        "Labels": {},
        "Mountpoint": "/var/lib/docker/volumes/volume-test/_data",
        "Name": "volume-test",
        "Options": {},
        "Scope": "local"
    }
]


//在三台节点上的volume-test中都写入以下文件
#以下为master示例
[root@master ~]# cd /var/lib/docker/volumes/volume-test/_data/
[root@master _data]# echo "This is nginx-test in master" > index.html


//在master节点创建一个副本数为3的容器swarm-nginx,挂载volume-test到容器的/usr/share/nginx/html目录中
[root@master _data]# docker service create --replicas 3 --mount type=volume,src=volume-test,dst=/usr/share/nginx/html --name swarm-nginx -p 8081:80 nginx
8pyb8sw2owr77haxfdwvnp9o7
[root@master _data]# docker service ps swarm-nginx
ID            NAME           IMAGE         NODE    DESIRED STATE  CURRENT STATE          ERROR  PORTS
38ttdhd9xhb9  swarm-nginx.1  nginx:latest  node1   Running        Running 1 second ago          
w7qco66lzfjx  swarm-nginx.2  nginx:latest  node2   Running        Running 6 seconds ago         
62cjdegnbwrg  swarm-nginx.3  nginx:latest  master  Running        Running 5 seconds ago     

[root@master _data]# for i in {1..10};do curl 192.168.200.10:8081;done                //for循环访问10次192.168.200.10:8081
This is nginx-test in master
This is nginx-test in node2
This is nginx-test in node1
This is nginx-test in master
This is nginx-test in node2
This is nginx-test in node1
This is nginx-test in master
This is nginx-test in node2
This is nginx-test in node1
This is nginx-test in master
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值