Docker (6) The Three Musketeers of Containers: Docker Machine, Docker Compose, and Docker Swarm


I. Docker Machine

  • docker-machine: quickly provisions a Docker environment on many platforms, and installs or upgrades Docker on nodes. Written in Go.

1. Deploying to a target host that already has Docker installed

How it works: a shell script is copied to the remote host and executed there.

1) server11 acts as the management node; install docker-machine on it
## curl -L https://github.com/docker/machine/releases/download/v0.16.1/docker-machine-`uname -s`-`uname -m` > /tmp/docker-machine
[root@zhenji file_recv]# scp docker-machine-* root@192.168.100.241:
[root@server11 ~]# ls
convoy                              docker-machine-prompt.bash           nginx.tar
convoy.tar.gz                       docker-machine-wrapper.bash          registry.tar
docker-machine-Linux-x86_64-0.16.1  lxcfs-2.0.5-3.el7.centos.x86_64.rpm
[root@server11 ~]# mv docker-machine-Linux-x86_64-0.16.1 /usr/local/bin/docker-machine
[root@server11 ~]# chmod +x /usr/local/bin/docker-machine
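A quick sanity check that the binary is usable (a suggested step; the exact version string will vary):
[root@server11 ~]# docker-machine version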
2) Docker is already installed on server12
[root@server12 ~]# rpm -q docker-ce
docker-ce-20.10.2-3.el7.x86_64
3) Set up passwordless SSH
[root@server11 ~]# ssh-keygen 
[root@server11 ~]# ssh-copy-id 192.168.100.242
4) Create the machine
[root@server11 ~]# docker-machine create --driver generic --generic-ip-address=192.168.100.242 server12
[root@server11 ~]# docker-machine env server12
export DOCKER_TLS_VERIFY="1"
export DOCKER_HOST="tcp://192.168.100.242:2376"
export DOCKER_CERT_PATH="/root/.docker/machine/machines/server12"
export DOCKER_MACHINE_NAME="server12"
# Run this command to configure your shell: 
# eval $(docker-machine env server12)

[root@server12 ~]# netstat -antlp|grep 2376
tcp6       0      0 :::2376                 :::*                    LISTEN      7098/dockerd    
5) Start Docker
[root@server12 ~]# systemctl start docker
[root@server12 ~]# cd /etc/systemd/system/docker.service.d/
[root@server12 docker.service.d]# ls
10-machine.conf
[root@server12 docker.service.d]# cat 10-machine.conf 

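The screenshot of the file is not included; on a machine created with the generic driver the drop-in usually looks roughly like the following (paths and labels are illustrative, not the exact file from this host). It is what makes dockerd listen on TCP 2376 with TLS, matching the netstat output below:
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=generic
Environment=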

[root@server11 ~]# docker-machine ls    # list machines
NAME       ACTIVE   DRIVER    STATE     URL                          SWARM   DOCKER     ERRORS
server12   -        generic   Running   tcp://192.168.100.242:2376           v20.10.2 

# On server11, inspect server12 remotely; the client connects over remote TCP port 2376
[root@server11 ~]# docker-machine config  server12
[root@server11 ~]# docker `docker-machine config  server12` ps

# from here on, every docker command runs against server12
[root@server11 ~]# docker-machine env server12
export DOCKER_TLS_VERIFY="1"
export DOCKER_HOST="tcp://192.168.100.242:2376"
export DOCKER_CERT_PATH="/root/.docker/machine/machines/server12"
export DOCKER_MACHINE_NAME="server12"
# Run this command to configure your shell: 
# eval $(docker-machine env server12)
[root@server11 ~]# eval $(docker-machine env server12)
[root@server11 ~]# docker images    # every docker command now runs against server12


6) Managing machines
First install bash-completion so that completion and the shell prompt are friendlier
[root@server11 ~]# yum install bash-completion.noarch -y
Put the three official scripts in place:
[root@server11 ~]# cd /etc/bash_completion.d/
[root@server11 bash_completion.d]# cp /root/*.bash .
[root@server11 bash_completion.d]# ls
docker-machine.bash          rct                          rhsm-debug
docker-machine-prompt.bash   redefine_filedir             rhsm-icon
docker-machine-wrapper.bash  rhn-migrate-classic-to-rhsm  subscription-manager
iprutils                     rhsmcertd

[root@server11 bash_completion.d]# cd
[root@server11 ~]# vim .bashrc    # shell prompt
Add at the end:
PS1='[\u@\h \W$(__docker_machine_ps1)]\$'
# log out and reconnect to server11 for this to take effect
[root@server11 ~]# exit
[root@zhenji Desktop]# ssh root@192.168.100.241
[root@server11 ~]#docker-machine env server12
# eval $(docker-machine env server12)
[root@server11 ~]#eval $(docker-machine env server12)
[root@server11 ~ [server12]]#docker ps    # the prompt now shows [server12]
CONTAINER ID   IMAGE     COMMAND   CREATED   STATUS    PORTS     NAMES
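To point the docker client back at the local daemon, the exported variables can be cleared again (docker-machine env -u prints the matching unset commands):
[root@server11 ~ [server12]]#eval $(docker-machine env -u)    # DOCKER_HOST etc. are unset; [server12] disappears from the prompt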


2. Deploying to a target host without Docker installed

  • Create a new host, server13, which has no Docker installed
1) Build a local yum repository for Docker and stage the packages
[root@server13 yum.repos.d]# vim docker-ce.repo
[root@server13 ~]# cat /etc/yum.repos.d/docker-ce.repo 
[docker]
name=docker-ce
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable/
gpgcheck=0
[base]
name=CentOS-7 - Base - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/os/$basearch/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#released updates 
[updates]
name=CentOS-7 - Updates - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/updates/$basearch/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#additional packages that may be useful
[extras]
name=CentOS-7 - Extras - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/extras/$basearch/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-7 - Plus - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/centosplus/$basearch/
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#contrib - packages by Centos Users
[contrib]
name=CentOS-7 - Contrib - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/contrib/$basearch/
gpgcheck=1
enabled=0

[root@server13 yum.repos.d]# yum install docker-ce docker-ce-cli
Answer 'd' so the packages are only downloaded locally
[root@server13 ~]# mkdir docker
[root@server13 ~]# cd /var/cache/yum/x86_64/7Server/extras/packages/
[root@server13 packages]# cp * /root/docker/
[root@server13 packages]# cd /var/cache/yum/x86_64/7Server/docker/packages/
[root@server13 packages]# ls
[root@server13 packages]# cp * /root/docker/
[root@server13 packages]# cd /root/docker/
[root@server13 docker]# ls
[root@server13 docker]# cd
[root@server13 docker]#  yum install -y createrepo
[root@server13 docker]# createrepo .    # generate the repodata
[root@server13 docker]# ls
repodata
[root@server13 ~]# scp -r docker/* root@192.168.100.141:/var/www/html/docker-ce

[root@zhenji yum.repos.d]# cd /var/www/html/
[root@zhenji html]# ls
rhel7.6  westos  zabbix  zabbix.tar.gz
[root@zhenji html]# mkdir docker-ce
[root@zhenji html]# ls
docker-ce  rhel7.6  westos  zabbix  zabbix.tar.gz
[root@zhenji html]# vim /etc/yum.repos.d/docker-ce.repo 
[root@zhenji html]# cat /etc/yum.repos.d/docker-ce.repo
[docker]
name=docker-ce
baseurl=http://192.168.100.141/docker-ce
gpgcheck=0
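Before pointing get-docker.sh at this repo it may be worth confirming it is really being served over HTTP (a quick check, assuming httpd already serves /var/www/html):
[root@zhenji html]# curl -s http://192.168.100.141/docker-ce/repodata/repomd.xml | head -n 3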
2) Prepare the get-docker.sh install script
[root@server11 ~]#wget https://get.docker.com
[root@server11 ~]#mv index.html get-docker.sh
[root@server11 ~]#vim get-docker.sh 
Make two changes:
Around line 412, point the repo at the local mirror:
                centos|fedora|rhel)
                        yum_repo="http://192.168.100.141/docker-ce.repo"

Around line 476, comment out the pinned CLI version so plain docker-ce is installed:
                                # install the correct cli version first
                                #if [ -n "$cli_pkg_version" ]; then
                                #       $sh_c "$pkg_manager install -y -q docker-ce-cli-$cli_pkg_version"
                                #fi
                                $sh_c "$pkg_manager install -y -q docker-ce"


3) Set up passwordless SSH

[root@server11 ~]# ssh-keygen
[root@server11 ~]# ssh-copy-id 192.168.100.243

4) Install by running the script through docker-machine

[root@server11 ~]#docker-machine create --driver generic --engine-install-url "http://192.168.100.241/get-docker.sh" --generic-ip-address 192.168.100.243 server13
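Once the create finishes, the new machine should appear alongside server12 and can be driven the same way as before (a verification step; output omitted):
[root@server11 ~]# docker-machine ls
[root@server11 ~]# docker `docker-machine config server13` ps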

II. Docker Compose in Practice

  • Docker Compose is an orchestration tool implemented in Python. It is used to define and run complex multi-container applications on Docker and lets users deploy distributed applications across a cluster.
  • Common Docker Compose commands

    • build: build or rebuild services.
    • kill: force-stop service containers.
    • logs: view service output.
    • port: print the publicly bound port.
    • ps: list containers.
    • pull: pull the images the services need.
    • rm: remove stopped service containers.
    • up: build and start containers.
  • docker-compose.yml properties

    • image: an image name or image ID; if the image does not exist locally, Compose will try to pull it.
    • build: the path to the directory containing the Dockerfile; Compose builds the image from it and then uses it.
    • command: override the default command executed when the container starts.
    • links: link to containers in other services.
    • ports: port mappings.
    • expose: ports to expose.
    • volumes: volume mount paths.
    • More properties: https://docs.docker.com/compose/compose-file/
docker-compose up
[root@server11 ~]#mkdir compose
[root@server11 ~]#cd compose/
[root@server11 compose]#vim docker-compose.yml 
[root@server11 compose]#cat docker-compose.yml 
version: "3.9"
services:
  web1:
    image: nginx
    networks:
      - mynet
    volumes:
      - ./web1:/usr/share/nginx/html
  
  web2:
    image: nginx
    networks:
      - mynet
    volumes:
      - ./web2:/usr/share/nginx/html

  haproxy:
    image: haproxy
    networks:
      - mynet
    ports:
      - "80:80"
    volumes:
      - ./haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg

[root@server11 compose]#mkdir web1
[root@server11 compose]#mkdir web2
[root@server11 compose]#echo web1 > web1/index.html
[root@server11 compose]#echo web2 > web2/index.html
[root@server11 compose]#mkdir haproxy
[root@server11 compose]#cd haproxy/
[root@server11 haproxy]#vim haproxy.cfg
global
        maxconn         65535
        stats socket    /var/run/haproxy.stat mode 600 level admin
        log             127.0.0.1 local0
        uid             200
        gid             200
        #chroot          /var/empty
        daemon

defaults
        mode            http
        log             global
        option          httplog
        option          dontlognull
        monitor-uri     /monitoruri
        maxconn         8000
        timeout client  30s
        retries         2
        option redispatch
        timeout connect 5s
        timeout server  5s
        stats uri       /status
# The public 'www' address in the DMZ
frontend public
        bind            *:80 name clear
        #bind            192.168.1.10:443 ssl crt /etc/haproxy/haproxy.pem

        #use_backend     static if { hdr_beg(host) -i img }
        #use_backend     static if { path_beg /img /css   }
        default_backend dynamic

# The dynamic backend serving web1 and web2
backend dynamic
        balance         roundrobin
        server          app1 web1:80 check inter 1000
        server          app2 web2:80 check inter 1000
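Before starting the stack it can be worth validating the config with the same haproxy image Compose will use (a suggested check, assuming the haproxy image is already available locally):
[root@server11 haproxy]#docker run --rm -v $(pwd)/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro haproxy haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg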
        
[root@server11 haproxy]#docker-compose up -d
      Name                     Command               State    Ports
-------------------------------------------------------------------
compose_haproxy_1   docker-entrypoint.sh hapro ...   Exit 0        
compose_web1_1      /docker-entrypoint.sh ngin ...   Exit 0        
compose_web2_1      /docker-entrypoint.sh ngin ...   Exit 0        
[root@server11 haproxy]#docker-compose start
Starting web1    ... done
Starting web2    ... done
Starting haproxy ... done
[root@server11 haproxy]#docker-compose ps
      Name                     Command               State         Ports       
-------------------------------------------------------------------------------
compose_haproxy_1   docker-entrypoint.sh hapro ...   Up      0.0.0.0:80->80/tcp
compose_web1_1      /docker-entrypoint.sh ngin ...   Up      80/tcp            
compose_web2_1      /docker-entrypoint.sh ngin ...   Up      80/tcp 

[root@server11 haproxy]#docker-compose up

Browse to http://192.168.100.241/status
Load balancing:

[root@zhenji docker-ce]# curl 192.168.100.241
web2
[root@zhenji docker-ce]# curl 192.168.100.241
web1

Built-in health checks:

[root@server11 haproxy]#docker-compose stop web1
[root@zhenji docker-ce]# curl 192.168.100.241
web2
[root@zhenji docker-ce]# curl 192.168.100.241
web2
[root@server11 haproxy]#docker-compose logs web1    # view the service logs
[root@server11 haproxy]#docker-compose rm    # remove the stopped containers
[root@server11 haproxy]#docker-compose up -d    # bring everything back up

III. Docker Swarm

  • Swarm turns a system of multiple Docker hosts into a single virtual Docker host, so containers can form networks that span hosts
  • Docker Swarm is an orchestration tool that gives IT operations teams clustering and scheduling capabilities
  • Swarm concepts
    • Nodes are either manager nodes or worker nodes.
    • A task is the smallest scheduling unit in Swarm; currently it is a single container.
    • A service is a collection of tasks; the service defines the properties of its tasks.

server11, server12, and server13 all have Docker installed.

1. Creating a swarm cluster

1) Initialize
[root@server11 ~]# docker swarm --help
[root@server11 ~]# docker swarm init
Swarm initialized: current node (9gk2aq06rfub568jiirp98oyk) is now a manager.

To add a worker to this swarm, run the following command:

    docker swarm join --token SWMTKN-1-3vsbpghdpd9tq2qey8q8kfzf8mayes9dkutmnd8p9o4ojyxcx2-csfbm8gwilfhc8jl72doxwh6p 192.168.100.241:2377
2) Run the join command on the other Docker nodes
[root@server12 ~]# docker swarm join --token SWMTKN-1-3vsbpghdpd9tq2qey8q8kfzf8mayes9dkutmnd8p9o4ojyxcx2-csfbm8gwilfhc8jl72doxwh6p 192.168.100.241:2377
[root@server13 ~]# docker swarm join --token SWMTKN-1-3vsbpghdpd9tq2qey8q8kfzf8mayes9dkutmnd8p9o4ojyxcx2-csfbm8gwilfhc8jl72doxwh6p 192.168.100.241:2377
[root@server11 ~]# docker node ls
ID                            HOSTNAME   STATUS    AVAILABILITY   MANAGER STATUS   ENGINE VERSION
9gk2aq06rfub568jiirp98oyk *   server11   Ready     Active         Leader           20.10.2
kn68xxkqz82cm0yg3dogyci1u     server12   Ready     Active                          20.10.2
kt4o1yq9aralmuvvxile682ko     server13   Ready     Active                          20.10.2
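If the join token is lost later, it can be printed again on a manager instead of re-initializing the swarm:
[root@server11 ~]# docker swarm join-token worker     # reprint the worker join command
[root@server11 ~]# docker swarm join-token manager    # token for joining as an extra manager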


3) Deploy swarm monitoring (load the dockersamples/visualizer image on each node in advance)
Elastic scaling of replicas:
[root@server11 docker]# scp nginx.tar root@192.168.100.243:
[root@server13 ~]# ls
docker-anzhaung  nginx.tar
[root@server13 ~]# docker load -i nginx.tar 

[root@server11 docker]# docker service create --name my_cluster --replicas 2 -p 80:80  nginx
#port 80 is published on server11, server12 and server13; every node is an entry point and load-balances via the routing mesh
# --name names the service my_cluster; --replicas sets the number of replicas (2 here); --network would select the network the service uses
[root@server11 docker]# netstat -antlp|grep :80
tcp6       0      0 :::80                   :::*                    LISTEN      3390/dockerd  
[root@server11 docker]# docker service scale my_cluster=4    # scale out to 4 replicas
   

[root@server12 ~]# echo server12 > index.html
[root@server12 ~]# docker ps
CONTAINER ID   IMAGE          COMMAND                  CREATED         STATUS         PORTS     NAMES
12211165d4a1   nginx:latest   "/docker-entrypoint.…"   4 minutes ago   Up 4 minutes   80/tcp    my_cluster.2.yuxu22o95iv3mr1wdnsak0l6b
[root@server12 ~]# docker cp index.html 12211165d4a1:/usr/share/nginx/html

[root@server13 ~]#  echo server13 > index.html
[root@server13 ~]# docker cp index.html b569010bcd05:/usr/share/nginx/html
[root@server11 docker]# docker service ps my_cluster 
ID             NAME           IMAGE          NODE       DESIRED STATE   CURRENT STATE            ERROR     PORTS
y70sana59cq1   my_cluster.1   nginx:latest   server11   Running         Running 25 minutes ago             
yuxu22o95iv3   my_cluster.2   nginx:latest   server12   Running         Running 24 minutes ago             
u01zvq2ywc0f   my_cluster.3   nginx:latest   server13   Running         Running 7 minutes ago              
cqreyj0ub96k   my_cluster.4   nginx:latest   server13   Running         Running 7 minutes ago              

Load balancing:

[root@zhenji Desktop]# curl 192.168.100.241
server13
[root@zhenji Desktop]# curl 192.168.100.241
server13
[root@zhenji Desktop]# curl 192.168.100.241
server12

[root@server11 docker]# docker service rm my_cluster 


[root@server11 ~]# docker pull ikubernetes/myapp:v1
[root@server11 ~]# docker tag ikubernetes/myapp:v1 myapp:v1
[root@server11 ~]# docker rmi ikubernetes/myapp:v1

[root@server12 ~]# docker pull ikubernetes/myapp:v1
[root@server12 ~]# docker tag ikubernetes/myapp:v1 myapp:v1
[root@server12 ~]# docker rmi ikubernetes/myapp:v1

[root@server13 ~]# docker pull ikubernetes/myapp:v1
[root@server13 ~]# docker tag ikubernetes/myapp:v1 myapp:v1
[root@server13 ~]# docker rmi ikubernetes/myapp:v1

[root@server11 docker]# docker service create --name my_cluster --replicas 2 -p 80:80 myapp:v1 
[root@server11 docker]# docker service ps my_cluster 
ID             NAME           IMAGE      NODE       DESIRED STATE   CURRENT STATE            ERROR     PORTS
i4gh2kfo1gbx   my_cluster.1   myapp:v1   server12   Running         Running 36 seconds ago             
qnxqytwuqxwh   my_cluster.2   myapp:v1   server11   Running         Running 45 seconds ago         


[root@zhenji images]# curl 192.168.100.241
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@zhenji images]# curl 192.168.100.241/hostname.html
61174f4c0f4d
[root@zhenji images]# curl 192.168.100.241/hostname.html
cfed57eabb08
[root@zhenji images]# curl 192.168.100.241/hostname.html
61174f4c0f4d
[root@zhenji images]# curl 192.168.100.241/hostname.html
cfed57eabb08


[root@server11 docker]# docker service scale my_cluster=6
#scale out to 6 replicas; traffic is still load-balanced across all of them


Failover: if server13 goes down, its tasks are rescheduled and spread across server11 and server12.
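One way to watch this happen is to keep checking the task list from a manager while server13's daemon is stopped; the tasks that were on server13 should go to Shutdown and replacement replicas should start on server11/server12 (expected behaviour, output not captured here):
[root@server11 docker]# docker service ps my_cluster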

[root@server11 docker]# docker pull dockersamples/visualizer
[root@server11 docker]# docker service create   --name=viz   --publish=8080:8080/tcp   --constraint=node.role==manager   --mount=type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock   dockersamples/visualizer
[root@server13 ~]# systemctl stop docker    # simulate a failure; start it again afterwards with systemctl start docker
4) Transfer the leader role to server12
%%% bring up one more node, server14
[root@server11 ~]# cd harbor/harbor/
[root@server11 harbor]# docker-compose down

[root@server11 ~]# docker node promote server12    # promote server12 to manager
Node server12 promoted to a manager in the swarm.
[root@server11 ~]# docker node demote server11    # demote server11
Manager server11 demoted in the swarm.

[root@server12 ~]# docker node ls


[root@server11 ~]# docker swarm leave    # leave the swarm
Then remove server11 from the node list on a manager:
[root@server12 ~]# docker node rm server11
[root@server14 ~]# docker swarm join --token SWMTKN-1-3vsbpghdpd9tq2qey8q8kfzf8mayes9dkutmnd8p9o4ojyxcx2-csfbm8gwilfhc8jl72doxwh6p 192.168.100.242:2377
[root@server11 harbor]# ./install.sh --with-chartmuseum

% Browse to 192.168.100.241 for the Harbor page and log in as admin / Harbor12345

5) Point the nodes at the private registry so pulls are fast
[root@server12 ~]# vim /etc/hosts
[root@server12 ~]# cd /etc/docker/
[root@server12 docker]# ls
ca.pem  certs.d  daemon.json  key.json  plugins  server-key.pem  server.pem
[root@server12 docker]# vim daemon.json 
[root@server12 docker]# cat daemon.json 
{
  "registry-mirrors": ["https://reg.westos.org"]
}

[root@server12 docker]# systemctl reload docker.service 
[root@server12 docker]# scp daemon.json root@192.168.100.243:/etc/docker/
[root@server12 docker]# scp daemon.json root@192.168.100.244:/etc/docker/
[root@server13 ~]# systemctl reload docker.service 
[root@server14 ~]# systemctl reload docker.service 

[root@server12 docker]# scp -r certs.d/ root@192.168.100.243:/etc/docker/
[root@server12 docker]# scp -r certs.d/ root@192.168.100.244:/etc/docker/
[root@server13 ~]# vim /etc/hosts
192.168.100.241 server11 reg.westos.org
[root@server11 harbor]# docker tag myapp:v1 reg.westos.org/library/myapp:v1
[root@server11 harbor]# docker push reg.westos.org/library/myapp:v1
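Before pulling by the short name on the other nodes, it can help to confirm each daemon picked up the mirror after the reload (a quick check; exact output differs per Docker version):
[root@server13 ~]# docker info | grep -A1 "Registry Mirrors"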

[root@server13 ~]# docker pull myapp:v1 
[root@server13 ~]# docker images
REPOSITORY   TAG       IMAGE ID       CREATED       SIZE
myapp        v1        d4a5e0eaa84f   2 years ago   15.5MB

[root@server12 docker]# docker service rm my_cluster
[root@server12 docker]# docker rmi myapp:v1

[root@server12 docker]# docker service create --name my_web --replicas 3 -p 80:80 myapp:v1
6) Rolling updates

Updating every replica at once is risky: if something goes wrong, the damage is widespread. --update-parallelism sets how many tasks are updated per batch, and --update-delay sets the pause between batches, until every task has been updated.

#scale out to 10 replicas
[root@server12 docker]# docker service scale my_web=10
[root@server11 docker]# docker pull ikubernetes/myapp:v2
[root@server11 docker]# docker tag ikubernetes/myapp:v2 reg.westos.org/library/myapp:v2
[root@server11 docker]# docker push reg.westos.org/library/myapp:v2
[root@server12 docker]# docker service update --image myapp:v2 --update-parallelism 2 --update-delay 5s my_web    # update 2 tasks per batch, pausing 5s between batches

[root@server11 docker]# curl http://192.168.100.242/hostname.html
c91945ef3008
[root@server11 docker]# curl http://192.168.100.242/hostname.html
2c149c2c4aa2
[root@server11 docker]# curl http://192.168.100.242/hostname.html
7cd9ce9e540a
[root@server11 docker]# curl http://192.168.100.242/hostname.html
19a610c4a0aa
[root@server11 docker]# curl http://192.168.100.242/hostname.html
ed5c5611b0ad
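If the new image misbehaves mid-rollout, the service can be reverted to the previous image with Swarm's built-in rollback (same service as above):
[root@server12 docker]# docker service update --rollback my_web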


[root@server12 compose]# vim docker-compose.yml
[root@server11 docker]# docker tag dockersamples/visualizer:latest reg.westos.org/library/visualizer:latest
[root@server11 docker]# docker push reg.westos.org/library/visualizer:latest
[root@server12 compose]# docker service rm viz
viz
[root@server12 compose]# docker service rm my_web 
[root@server12 compose]# docker stack deploy -c docker-compose.yml my_cluster
#simply change replicas: 6 in docker-compose.yml and deploy the stack again to scale out to 6
[root@server12 compose]# docker stack deploy -c docker-compose.yml my_cluster
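The stack file edited on server12 is not shown above; a minimal sketch of what it might contain, reusing the images from this walkthrough (service names and the replica count are assumptions):
version: "3.9"
services:
  my_web:
    image: reg.westos.org/library/myapp:v1
    ports:
      - "80:80"
    deploy:
      replicas: 6
  viz:
    image: reg.westos.org/library/visualizer:latest
    ports:
      - "8080:8080"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    deploy:
      placement:
        constraints:
          - node.role == manager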
7) Portainer visualization
[root@server11 ~]# mkdir portainer
[root@server11 ~]# cd portainer/
[root@server11 portainer]# pwd
/root/portainer
[root@server11 portainer]# ls
portainer-agent-stack.yml  portainer-agent.tar  portainer.tar
[root@server11 portainer]# docker load -i portainer-agent.tar

[root@server11 portainer]# docker load -i portainer.tar
[root@server11 portainer]# docker tag portainer/agent:latest reg.westos.org/library/agent:latest
[root@server11 portainer]# docker push reg.westos.org/library/agent:latest


[root@server12 compose]# docker service ls

[root@server12 ~]# mv portainer-agent-stack.yml compose/
[root@server12 compose]# vim portainer-agent-stack.yml
[root@server12 compose]# docker stack rm my_cluster
[root@server12 compose]# docker stack deploy -c portainer-agent-stack.yml portainer

[root@server12 compose]# docker stack ps portainer

Browse to 192.168.100.242:9000 to open the Portainer UI.