Docker

Installing Docker

Docker download address

https://docs.docker.com/engine/install/

# 1. Remove any existing Docker packages
yum remove docker \
               docker-client \
               docker-client-latest \
               docker-common \
               docker-latest \
               docker-latest-logrotate \
               docker-logrotate \
               docker-engine
# 2. Install the required utilities
yum install -y yum-utils
# 3. Set up the package repository (the Aliyun mirror, for users in China)
yum-config-manager \
    --add-repo \
    http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# (or the official repository)
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Refresh the yum package index
yum makecache fast
# 4. Install the Docker packages
yum install docker-ce docker-ce-cli containerd.io
# 5. Start Docker
systemctl start docker
# 6. Verify the installation
docker version
# 7. Run the hello-world container
docker run hello-world
# 8. List images
docker images
# 9. Enable start on boot
systemctl enable docker
# 10. Configure the Aliyun registry mirror
vi /etc/docker/daemon.json
# with the following content:
{
 "registry-mirrors":["https://6kx4zyno.mirror.aliyuncs.com"]
}
# Restart docker
systemctl daemon-reload
systemctl restart docker

Uninstalling Docker

# 1. Remove the packages
yum remove docker-ce docker-ce-cli containerd.io
# 2. Delete leftover resources
rm -rf /var/lib/docker
rm -rf /var/lib/containerd

Common Docker commands

Help commands

# Show docker version information
docker version
# Show docker system information, including the number of images and containers
docker info
# Help
docker --help

Reference documentation: https://docs.docker.com/reference/

Image commands

docker images: list all images on the local host

# Options
-a	# list all images
-q	# show only image IDs

docker search: search for images

[root@localhost ~]# docker search mysql
NAME                              DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
mysql                             MySQL is a widely used, open-source relation…   11110     [OK]       
mariadb                           MariaDB Server is a high performing open sou…   4211      [OK]       
mysql/mysql-server                Optimized MySQL Server Docker images. Create…   825                  [OK]
mysql/mysql-cluster               Experimental MySQL Cluster Docker images. Cr…   88                   
centos/mysql-57-centos7           MySQL 5.7 SQL database server                   88                   
centurylink/mysql                 Image containing mysql. Optimized to be link…   59                   [OK]
databack/mysql-backup             Back up mysql databases to... anywhere!         44                   
deitch/mysql-backup               REPLACED! Please use http://hub.docker.com/r…   41                   [OK]
prom/mysqld-exporter                                                              39                   [OK]
tutum/mysql                       Base docker image to run a MySQL database se…   35                   
linuxserver/mysql                 A Mysql container, brought to you by LinuxSe…   30                   
schickling/mysql-backup-s3        Backup MySQL to S3 (supports periodic backup…   29                   [OK]
mysql/mysql-router                MySQL Router provides transparent routing be…   21                   
centos/mysql-56-centos7           MySQL 5.6 SQL database server                   20                   
arey/mysql-client                 Run a MySQL client from a docker container      17                   [OK]
fradelg/mysql-cron-backup         MySQL/MariaDB database backup using cron tas…   15                   [OK]
yloeffler/mysql-backup            This image runs mysqldump to backup data usi…   7                    [OK]
genschsa/mysql-employees          MySQL Employee Sample Database                  7                    [OK]
openshift/mysql-55-centos7        DEPRECATED: A Centos7 based MySQL v5.5 image…   6                    
devilbox/mysql                    Retagged MySQL, MariaDB and PerconaDB offici…   3                    
ansibleplaybookbundle/mysql-apb   An APB which deploys RHSCL MySQL                2                    [OK]
jelastic/mysql                    An image of the MySQL database server mainta…   1                    
widdpim/mysql-client              Dockerized MySQL Client (5.7) including Curl…   1                    [OK]
vitess/mysqlctld                  vitess/mysqlctld                                1                    [OK]
centos/mysql-80-centos7           MySQL 8.0 SQL database server                   1   

# Options
--filter=STARS=3000	# only return results with at least 3000 stars

[root@localhost ~]# docker search mysql --filter=STARS=3000
NAME      DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
mysql     MySQL is a widely used, open-source relation…   11110     [OK]       
mariadb   MariaDB Server is a high performing open sou…   4211      [OK]

docker pull: download an image

docker pull image-name[:tag]

[root@localhost ~]# docker pull mysql
Using default tag: latest	# if no tag is given, the default is latest
latest: Pulling from library/mysql
b4d181a07f80: Pull complete 	# layered download; the core of docker images is the union file system
a462b60610f5: Pull complete 
578fafb77ab8: Pull complete 
524046006037: Pull complete 
d0cbe54c8855: Pull complete 
aa18e05cc46d: Pull complete 
32ca814c833f: Pull complete 
9ecc8abdb7f5: Pull complete 
ad042b682e0f: Pull complete 
71d327c6bb78: Pull complete 
165d1d10a3fa: Pull complete 
2f40c47d0626: Pull complete 
Digest: sha256:52b8406e4c32b8cf0557f1b74517e14c5393aff5cf0384eff62d9e81f4985d4b	# digest, a signature that guards against tampering
Status: Downloaded newer image for mysql:latest
docker.io/library/mysql:latest	# the real (fully qualified) address

# These two are equivalent
docker pull mysql
docker pull docker.io/library/mysql:latest

docker rmi: remove images

docker rmi -f image-id	# remove the specified image
docker rmi -f image-id1 image-id2 image-id3	# remove multiple images
docker rmi -f $(docker images -aq)	# remove all images

Retag an image (this adds an extra tag)

docker tag image-id new-image-name[:TAG]
docker system df	# show the disk space used by images/containers/volumes

Images whose repository name and tag are both <none> are commonly called dangling images (in practice, just delete them).

# list all dangling images
docker image ls -f dangling=true
# remove all dangling images
docker image prune

Package an image and ship it to another server

docker save -o flannel.tar flannel/flannel
scp flannel.tar root@node2:/opt/k8s/
# on the other machine, simply load it
docker load -i flannel.tar

Container commands

Create and start a container

docker run [options] image
# Option descriptions
--name="Name"	# container name, used to tell containers apart
-d				# run in the background (detached)
-it				# run interactively, so you can enter the container and look around
-p				# specify a port mapping, e.g. -p 8080:8080
-P				# publish exposed ports to random host ports
# Start and enter a container
docker run -it centos /bin/bash
# Exit the container
exit
# Leave the container without stopping it
Ctrl+P+Q
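For instance, a minimal sketch (the name web01 and the ports here are illustrative):

# run nginx detached, with a name and an explicit port mapping
docker run -d --name web01 -p 8081:80 nginx
# run it again, letting Docker pick a random host port
docker run -d -P nginx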

List running containers

# List containers
docker ps
# Option descriptions
-a	# list all containers, including ones that have exited
-n=?	# show the most recently created containers, e.g. -n=2 for the last 2

Remove containers

docker rm container-id	# remove the specified container; running containers cannot be removed unless forced with -f
docker rm -f $(docker ps -aq)	# remove all containers
docker ps -a -q | xargs docker rm	# remove all containers

Start and stop containers

docker start container-id	# start a container
docker restart container-id	# restart a container
docker stop container-id	# stop a container
docker kill container-id	# kill a container

Other common commands

Start a container in the background

docker run -d centos
# After running this, docker ps shows that the centos container has stopped
# A container running in the background must have a foreground process; if docker finds no running application, it stops the container automatically

View logs

docker logs -f -t --tail number container-id
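For example (the container ID and tail count are illustrative):

# follow the log with timestamps, starting from the last 10 lines
docker logs -f -t --tail 10 5d9ee849922a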

View process information inside a container

docker top container-id

View container metadata

docker inspect container-id

Enter a running container

# Option 1: opens a new terminal inside the container where you can work as usual (most common)
docker exec -it container-id /bin/bash
# Option 2: attaches to the terminal the container is already running; does not start a new one
docker attach container-id

Copy files from a container to the host

docker cp container-id:path-in-container destination-path-on-host
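A minimal example (the container ID and paths are illustrative):

# copy /home/test.java from the container to /home on the host
docker cp 5d9ee849922a:/home/test.java /home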

Exercises

Installing nginx with Docker

# Search for the image with docker search, or on the Docker Hub website
# Download the image with docker pull
[root@localhost ~]# docker pull nginx
Using default tag: latest
latest: Pulling from library/nginx
b4d181a07f80: Pull complete 
66b1c490df3f: Pull complete 
d0f91ae9b44c: Pull complete 
baf987068537: Pull complete 
6bbc76cbebeb: Pull complete 
32b766478bc2: Pull complete 
Digest:sha256:353c20f74d9b6aee359f30e8e4f69c3d7eaea2f610681c4a95849a2fd7c497f9
Status: Downloaded newer image for nginx:latest
docker.io/library/nginx:latest
[root@localhost ~]# docker images
REPOSITORY    TAG       IMAGE ID       CREATED         SIZE
nginx         latest    4cdc5dd7eaad   4 days ago      133MB
hello-world   latest    bf756fb1ae65   18 months ago   13.3kB
# Run and test
[root@localhost ~]# docker run -d --name nginx01 -p 3344:80 nginx
0667de173587f9c44081ff3114b6fa7ecfa96e4267e77107fedeea8f937fdb88
[root@localhost ~]# docker ps
CONTAINER ID   IMAGE     COMMAND                  CREATED          STATUS          PORTS                                   NAMES
0667de173587   nginx     "/docker-entrypoint.…"   12 seconds ago   Up 10 seconds   0.0.0.0:3344->80/tcp, :::3344->80/tcp   nginx01
[root@localhost ~]# curl localhost:3344
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
# Enter the container
[root@localhost ~]# docker exec -it nginx01 /bin/bash
root@0667de173587:/# whereis nginx
nginx: /usr/sbin/nginx /usr/lib/nginx /etc/nginx /usr/share/nginx

Installing Tomcat with Docker

# Download tomcat
[root@localhost ~]# docker pull tomcat
Using default tag: latest
latest: Pulling from library/tomcat
0bc3020d05f1: Pull complete 
a110e5871660: Pull complete 
83d3c0fa203a: Pull complete 
a8fd09c11b02: Pull complete 
96ebf1506065: Pull complete 
b8bf70f9cc4d: Pull complete 
3f6da67b9e68: Pull complete 
257407776119: Pull complete 
7bd0a187fb92: Pull complete 
307fc4df04c9: Pull complete 
Digest: sha256:a5abf192aceed45620dbb2e09f8abdec2b96108b86365a988c85e753c28cd36b
Status: Downloaded newer image for tomcat:latest
docker.io/library/tomcat:latest
# Start tomcat
docker run -d -p 3355:8080 --name tomcat01 tomcat
# Test access
# Enter the container
docker exec -it tomcat01 /bin/bash
# Problems found: many linux commands are missing, and webapps is empty. The image is deliberately minimal: everything non-essential is stripped out to keep just a minimal working environment
# Copying the contents of webapps.dist into the webapps directory makes the tomcat page accessible, as shown below
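A minimal fix, run inside the container (this assumes the official image's default working directory, /usr/local/tomcat):

cp -r webapps.dist/* webapps
# http://host-ip:3355 should then show the Tomcat welcome page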

Installing ES + Kibana with Docker

# Pull the image
# Start ES
docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch
# ES is very memory-hungry by default and will slow the machine down; limit it with another -e parameter
docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx512m" elasticsearch
# Check resource usage
docker stats
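A quick way to confirm ES is up, assuming the port mapping above:

curl localhost:9200	# should return a JSON blob with the node name and version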

Docker images explained

How Docker images are loaded

A docker image is made up of file systems stacked layer by layer; this layered file system is called a union file system, i.e. UnionFS.

Committing an image

docker commit	# commit a container as a new image
# The command works much like git commit
docker commit -m="commit message" -a="author" container-id target-image-name[:TAG]

Hands-on test

# Start a tomcat container; its webapps directory is empty
# cp the files from webapps.dist into webapps
# Exit the container, then commit it as an image
[root@localhost ~]# docker commit -m="add webapps" -a="cui" 41100057194c tomcat02:1.0
sha256:14799f30458fc8cd38e80948ccce5aeeb352020451006846c415ddc42f3dd1af
[root@localhost ~]# docker images
REPOSITORY    TAG       IMAGE ID       CREATED         SIZE
tomcat02      1.0       14799f30458f   5 seconds ago   672MB
nginx         latest    4cdc5dd7eaad   6 days ago      133MB
tomcat        latest    36ef696ea43d   10 days ago     667MB
hello-world   latest    bf756fb1ae65   18 months ago   13.3kB

Container data volumes

What is a data volume

Data lives inside the container, but it needs to be persisted; MySQL data, for example, should be stored on the local machine.

Volume technology is essentially directory mounting: a directory in the container is mounted onto the Linux host.

Containers can also share data with each other.

Using data volumes

Option 1: mount directly on the command line with -v

docker run -it -v host-directory:container-directory centos /bin/bash
# Use docker inspect container-id to check whether the mount succeeded: look for a "Mounts" entry
# Passing data from the container to the host works
# Testing the host-to-container direction:
# 1. stop the container 2. modify the file on the host 3. start the container 4. the data inside the container is still in sync
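A quick way to print just the mount information, using docker inspect's standard Go-template option:

docker inspect -f '{{ json .Mounts }}' container-id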

Hands-on: installing MySQL

# Get the image
docker pull mysql:5.7
# When running the image, set up the mounts
docker run -d -p 3310:3306 -v /home/mysql/conf:/etc/mysql/conf.d -v /home/mysql/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 --name mysql01 mysql:5.7
# Once it is up, connect a database client to the mysql running in docker to verify
# The official example for configuring the MySQL password:
docker run --name some-mysql -e MYSQL_ROOT_PASSWORD=my-secret-pw -d mysql:tag
# -e sets an environment variable
# With the mounts in place, the data synced to the local directories survives even if the container is deleted; this is container data persistence

# If the host cannot connect to the mysql inside the container after it starts, try the following
vim /usr/lib/sysctl.d/00-system.conf
# append this line at the end
net.ipv4.ip_forward=1
# restart the network
systemctl restart network
# restart mysql
docker restart mysql-container-id
# Auto-start settings
# start the container
docker start d5c440e6d44f
# always restart it automatically
docker update --restart=always d5c440e6d44f
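To verify the persistence from the host (requires a mysql client on the host; port and password as configured above):

mysql -h127.0.0.1 -P3310 -uroot -p123456
# create a database here, then docker rm -f mysql01 and check that /home/mysql/data still holds its files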

Named and anonymous mounts

# Anonymous mount
docker run -d -P --name nginx01 -v /etc/nginx nginx
# List all volumes
[root@localhost ~]# docker volume ls
DRIVER    VOLUME NAME
local     029af465f9e677e81e58c88b98ff04945f0e8ec7babdf721ebaf2fb7e8c7ab44
local     242cb1de1eabac458ab3d383991e0633628962dbac008c371f28a1c090937e72
local     633972c07b4a1b1530a10969fe71309577f461d0e15afac0adfecf616fbf474a
local     6287782b23ae213d043b88905cfdab6fa016de7f9f7f3185f8d040b66b46a10b
local     b63ed7909f8ce8895679a201b125ecf192fab0f1fd8876c14514b53e7b771103
local     bd00b10e463151080a430129de74256ab0b8371798ba276cc1e22c33b214f245
local     cdcab66331b6934b6d3d4539586ad039eee4788246e4183b941d9bab5b37c8ed
# Volumes listed like the above are anonymous volumes: with -v we gave only the path inside the container, no name or path outside it
# Named mount: -v volume-name:path-in-container
docker run -d -P --name nginx01 -v jumingguazai:/etc/nginx nginx
[root@localhost ~]# docker run -d -P -v jumingguazai:/etc/nginx nginx
a5b9194cf670cbd5253f41374180faa73c4652fff2a52581a2f6bea2d5f07fe3
[root@localhost ~]# docker volume ls
DRIVER    VOLUME NAME
local     029af465f9e677e81e58c88b98ff04945f0e8ec7babdf721ebaf2fb7e8c7ab44
local     242cb1de1eabac458ab3d383991e0633628962dbac008c371f28a1c090937e72
local     633972c07b4a1b1530a10969fe71309577f461d0e15afac0adfecf616fbf474a
local     6287782b23ae213d043b88905cfdab6fa016de7f9f7f3185f8d040b66b46a10b
local     b63ed7909f8ce8895679a201b125ecf192fab0f1fd8876c14514b53e7b771103
local     bd00b10e463151080a430129de74256ab0b8371798ba276cc1e22c33b214f245
local     cdcab66331b6934b6d3d4539586ad039eee4788246e4183b941d9bab5b37c8ed
local     jumingguazai
# Find where the volume actually lives on disk
[root@localhost data]# docker inspect jumingguazai
[
    {
        "CreatedAt": "2021-07-15T21:53:49+08:00",
        "Driver": "local",
        "Labels": null,
        "Mountpoint": "/var/lib/docker/volumes/jumingguazai/_data",
        "Name": "jumingguazai",
        "Options": null,
        "Scope": "local"
    }
]

How to tell the mount types apart

-v path-in-container	# anonymous mount
-v volume-name:path-in-container	# named mount
-v /host-path:path-in-container	# mount a specific host path

Extras

# Append :ro or :rw to the container path to set read/write permissions
# :ro	read only: the contents can only be changed from the host; the container cannot write to it
# :rw	read write, the default
docker run -d -P --name nginx01 -v jumingguazai:/etc/nginx:ro nginx
docker run -d -P --name nginx01 -v jumingguazai:/etc/nginx:rw nginx
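A quick check of the :ro behavior (the file name is illustrative; the write is expected to fail):

docker exec -it nginx01 touch /etc/nginx/test.conf
# fails with something like: Read-only file system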

First look at DockerFile

A Dockerfile is the build file used to construct docker images.

# Create a dockerfile; naming it Dockerfile is the convention
# Instructions in the file are written in UPPERCASE
# File contents:
FROM centos
VOLUME ["volume01","volume02"]
CMD echo "--------end-------"
CMD /bin/bash
# Build the image
docker build -f file-path -t image-name:TAG .
Note: if starting the container fails, change the line to VOLUME ["/volume01","/volume02"] (absolute paths).
[root@localhost docker-test-volume]# docker build -f dockerfile1 -t cui-centos:1.0 .
Sending build context to Docker daemon  2.048kB
Step 1/4 : FROM centos 
---> 300e315adb2f
Step 2/4 : VOLUME ["volume01","volume02"] 
---> Running in eab737bea8bf
Removing intermediate container eab737bea8bf 
---> c3011eb41f54
Step 3/4 : CMD echo "--------end-------" 
---> Running in e46cd661979b
Removing intermediate container e46cd661979b 
---> bc1a8405f6a3
Step 4/4 : CMD /bin/bash 
---> Running in a5f0ec59b297
Removing intermediate container a5f0ec59b297 
---> 2b83c5df194d
Successfully built 2b83c5df194d
Successfully tagged cui-centos:1.0
[root@localhost docker-test-volume]# docker images
REPOSITORY    TAG       IMAGE ID       CREATED          SIZE
cui-centos    1.0       2b83c5df194d   19 seconds ago   209MB
tomcat02      1.0       14799f30458f   2 days ago       672MB
nginx         latest    4cdc5dd7eaad   9 days ago       133MB
tomcat        latest    36ef696ea43d   13 days ago      667MB
mysql         5.7       09361feeb475   3 weeks ago      447MB
centos        latest    300e315adb2f   7 months ago     209MB
hello-world   latest    bf756fb1ae65   18 months ago    13.3kB
# After starting a container from this image, you can see where its volumes are mounted
# Check the mount locations with docker inspect container-id
"Mounts": [
            {
                "Type": "volume",
                "Name": "1db7aef31408e45a1b947d67f532376d55cce92f73b1ead66066ae201b6dc107",                
                "Source": "/var/lib/docker/volumes/1db7aef31408e45a1b947d67f532376d55cce92f73b1ead66066ae201b6dc107/_data",                
                "Destination": "/volume01",                
                "Driver": "local",                
                "Mode": "",                
                "RW": true,                
                "Propagation": ""            
            },            
            {                
                "Type": "volume",                
                "Name": "952e077379b1f39e316bf870faf8e61e2bb4fd762af3290b10a0fc86f9a7118b",                
                "Source": "/var/lib/docker/volumes/952e077379b1f39e316bf870faf8e61e2bb4fd762af3290b10a0fc86f9a7118b/_data",                
                "Destination": "/volume02",                
                "Driver": "local",                
                "Mode": "",                
                "RW": true,                
                "Propagation": ""            
             }        
          ],

Data volume containers

# Sync data between containers; vigilant_cohen is the name of the first container, and this command starts the second
docker run -it --name my-centos02 --volumes-from vigilant_cohen cui-centos:1.0
# Data created in either container is synced to the other
# Conclusion: configuration can be passed between containers this way, and a data volume container's data lives on until no container is using it any more!
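A quick sketch of the sharing in action (names follow the commands above):

# inside the first container (vigilant_cohen):
touch /volume01/shared.txt
# inside my-centos02, the file appears under the same path:
ls /volume01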

DockerFile

DockerFile introduction

Build steps:

1. Write the dockerfile

2. docker build it into an image

3. docker run the image

4. docker push the image to a registry (DockerHub, Aliyun image registry)

The exact version information for an image can be found on the official site.

The dockerfile for the official centos image

The DockerFile build process

Basics

1. Every reserved keyword (instruction) is written in uppercase

2. Instructions execute in order, from top to bottom

3. # marks a comment

4. Each instruction creates a new image layer and commits it

DockerFile instructions

(table of DockerFile instructions: FROM, MAINTAINER, RUN, ADD, WORKDIR, VOLUME, EXPOSE, CMD, ENTRYPOINT, ONBUILD, COPY, ENV)

Hands-on test

Most images on Docker Hub are built up from the FROM scratch base image, adding the software and configuration they need on top.

# 1. Write our own centos image
[root@localhost dockerfile]# cat dockerfile-centos 
FROM centos
MAINTAINER cui<123@qq.com>
ENV MYPATH /usr/local
WORKDIR $MYPATH
RUN yum -y install vim
RUN yum -y install net-tools
EXPOSE 80
CMD echo $MYPATH
CMD echo "------end-------"
CMD /bin/bash
# 2. Build an image from this file
docker build -f dockerfile-centos -t mycentos:0.1 .
# 3. Run and test
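For example, a quick run (the trailing CMD /bin/bash means -it drops you into a shell, in /usr/local because of the WORKDIR):

docker run -it mycentos:0.1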

Comparison with the unmodified centos image

[root@localhost ~]# docker run -it 300e315adb2f /bin/bash
[root@bf507c55b6fa /]# pwd
/
[root@bf507c55b6fa /]# ifconfig
bash: ifconfig: command not found
[root@bf507c55b6fa /]# vim test
bash: vim: command not found
[root@localhost dockerfile]# docker run -it af4ae1507976 /bin/bash
[root@f832845fbc5c local]# pwd
/usr/local
[root@f832845fbc5c local]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.17.0.2  netmask 255.255.0.0  broadcast 172.17.255.255
        ether 02:42:ac:11:00:02  txqueuelen 0  (Ethernet)
        RX packets 8  bytes 648 (648.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
[root@f832845fbc5c local]# vim test
[root@f832845fbc5c local]#

docker history image-id: see how an image was built

[root@localhost ~]# docker ps
CONTAINER ID   IMAGE          COMMAND       CREATED         STATUS         PORTS     NAMES
bf507c55b6fa   300e315adb2f   "/bin/bash"   3 minutes ago   Up 3 minutes             magical_hertz
f832845fbc5c   af4ae1507976   "/bin/bash"   6 minutes ago   Up 6 minutes   80/tcp    sweet_shtern
[root@localhost ~]# docker history af4ae1507976
IMAGE          CREATED          CREATED BY                                      SIZE      COMMENT
af4ae1507976   13 minutes ago   /bin/sh -c #(nop)  CMD ["/bin/sh" "-c" "/bin…   0B        
1e572cb090f8   13 minutes ago   /bin/sh -c #(nop)  CMD ["/bin/sh" "-c" "echo…   0B        
5f8715d53735   13 minutes ago   /bin/sh -c #(nop)  CMD ["/bin/sh" "-c" "echo…   0B        
8a5927e8cc47   13 minutes ago   /bin/sh -c #(nop)  EXPOSE 80                    0B        
97feb82f1b79   13 minutes ago   /bin/sh -c yum -y install net-tools             26.6MB    
f81a9da1e5e3   13 minutes ago   /bin/sh -c yum -y install vim                   63.4MB    
c896b683aa60   14 minutes ago   /bin/sh -c #(nop) WORKDIR /usr/local            0B        
bdcb295054b5   14 minutes ago   /bin/sh -c #(nop)  ENV MYPATH=/usr/local        0B        
9f1bee1b0ff4   14 minutes ago   /bin/sh -c #(nop)  MAINTAINER cui<123@qq.com>   0B        
300e315adb2f   7 months ago     /bin/sh -c #(nop)  CMD ["/bin/bash"]            0B        
<missing>      7 months ago     /bin/sh -c #(nop)  LABEL org.label-schema.sc…   0B        
<missing>      7 months ago     /bin/sh -c #(nop) ADD file:bd7a2aed6ede423b7…   209MB 

CMD vs ENTRYPOINT

CMD		# the command to run when the container starts; only the last CMD takes effect, and it can be overridden at run time
# For example, a dockerfile with the following content:
FROM centos
CMD ["ls","-a"]
# Build the image:
docker build -f dockerfile -t test:1.0 .
# This starts normally:
docker run -it test:1.0 /bin/bash
# This fails to start: the trailing argument replaces the CMD from the dockerfile rather than being appended to it, and -l on its own is not a command
docker run -it test:1.0 -l
ENTRYPOINT	# the command to run when the container starts; trailing arguments are appended to it
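A minimal sketch of the ENTRYPOINT side (the file name entrypoint-dockerfile and tag entrypoint-test:1.0 are illustrative):

# entrypoint-dockerfile
FROM centos
ENTRYPOINT ["ls","-a"]
# build and run
docker build -f entrypoint-dockerfile -t entrypoint-test:1.0 .
docker run entrypoint-test:1.0       # runs ls -a
docker run entrypoint-test:1.0 -l    # -l is appended, so this runs ls -a -l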

Hands-on: a Tomcat image

1. Prepare the files: the tomcat tarball and the jdk tarball

2. Write the dockerfile. Using the official name, Dockerfile, means the -f option is not needed when building

[root@localhost tomcat-docker]# cat Dockerfile 
FROM centos
MAINTAINER cui<123@qq.com>
COPY readme.txt /usr/local/readme.txt
ADD jdk-8u291-linux-x64.tar.gz /usr/local/
ADD apache-tomcat-8.5.60.tar.gz /usr/local/
RUN yum -y install vim
ENV MYPATH /usr/local
WORKDIR $MYPATH
ENV JAVA_HOME /usr/local/jdk1.8.0_291
ENV CLASSPATH $JAVA_HOME/lib/dt.jar;$JAVA_HOME/lib/tools.jar
ENV CATALINA_HOME /usr/local/apache-tomcat-8.5.60
ENV CATALINA_BASE /usr/local/apache-tomcat-8.5.60
ENV PATH $PATH:$JAVA_HOME/bin;$CATALINA_HOME/lib;$CATALINA_HOME/bin
EXPOSE 8080
CMD /usr/local/apache-tomcat-8.5.60/bin/startup.sh && tail -f /usr/local/apache-tomcat-8.5.60/logs/catalina.out

3. Build the image

[root@localhost tomcat-docker]# docker build -t diytomcat .
Sending build context to Docker daemon  3.072kB
Step 1/15 : FROM centos 
---> 300e315adb2f
Step 2/15 : MAINTAINER cui<123@qq.com> 
---> Using cache 
---> 9f1bee1b0ff4
Step 3/15 : COPY readme.txt /usr/local/readme.txt 
---> 07f6de4e2881
Step 4/15 : ADD /opt/soft/jdk-8u291-linux-x64.tar.gz /usr/local/
ADD failed: file not found in build context or excluded by .dockerignore: stat opt/soft/jdk-8u291-linux-x64.tar.gz: file does not exist
[root@localhost tomcat-docker]# vim Dockerfile 
[root@localhost tomcat-docker]# vim Dockerfile 
[root@localhost tomcat-docker]# docker build -t diytomcat .
Sending build context to Docker daemon  155.4MB
Step 1/15 : FROM centos 
---> 300e315adb2f
Step 2/15 : MAINTAINER cui<123@qq.com> 
---> Using cache 
---> 9f1bee1b0ff4
Step 3/15 : COPY readme.txt /usr/local/readme.txt 
---> Using cache 
---> 07f6de4e2881
Step 4/15 : ADD jdk-8u291-linux-x64.tar.gz /usr/local/ 
---> ab43e6ce33c2
Step 5/15 : ADD apache-tomcat-8.5.60.tar.gz /usr/local/ 
---> 358028007269
Step 6/15 : RUN yum -y install vim 
---> Running in 4ae486713438
CentOS Linux 8 - AppStream                      1.3 MB/s | 8.1 MB     00:06    
CentOS Linux 8 - BaseOS                         695 kB/s | 3.6 MB     00:05    
CentOS Linux 8 - Extras                         9.7 kB/s | 9.8 kB     00:01    
Dependencies resolved.
================================================================================ 
Package             Arch        Version                   Repository      Size
================================================================================
Installing: 
vim-enhanced        x86_64      2:8.0.1763-15.el8         appstream      1.4 M
Installing dependencies: 
gpm-libs            x86_64      1.20.7-17.el8             appstream       39 k 
vim-common          x86_64      2:8.0.1763-15.el8         appstream      6.3 M 
vim-filesystem      noarch      2:8.0.1763-15.el8         appstream       48 k 
which               x86_64      2.21-12.el8               baseos          49 k
Transaction Summary
================================================================================
Install  5 Packages
Total download size: 7.8 M
Installed size: 30 M
Downloading Packages:
(1/5): gpm-libs-1.20.7-17.el8.x86_64.rpm        185 kB/s |  39 kB     00:00    
(2/5): vim-filesystem-8.0.1763-15.el8.noarch.rp 453 kB/s |  48 kB     00:00    
(3/5): which-2.21-12.el8.x86_64.rpm              82 kB/s |  49 kB     00:00    
(4/5): vim-enhanced-8.0.1763-15.el8.x86_64.rpm  1.1 MB/s | 1.4 MB     00:01    
(5/5): vim-common-8.0.1763-15.el8.x86_64.rpm    2.2 MB/s | 6.3 MB     00:02    
--------------------------------------------------------------------------------
Total                                           1.6 MB/s | 7.8 MB     00:04     
warning: /var/cache/dnf/appstream-02e86d1c976ab532/packages/gpm-libs-1.20.7-17.el8.x86_64.rpm: Header V3 RSA/SHA256 Signature, key ID 8483c65d: NOKEY
CentOS Linux 8 - AppStream                      1.6 MB/s | 1.6 kB     00:00    
Importing GPG key 0x8483C65D: 
 Userid     : "CentOS (CentOS Official Signing Key) <security@centos.org>"
 Fingerprint: 99DB 70FA E1D7 CE22 7FB6 4882 05B5 55B3 8483 C65D 
 From       : /etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
Key imported successfully
Running transaction check
Transaction check succeeded.
Running transaction test
Transaction test succeeded.
Running transaction
  Preparing        :                                                        1/1   
  Installing       : which-2.21-12.el8.x86_64                               1/5   
  Installing       : vim-filesystem-2:8.0.1763-15.el8.noarch                2/5   
  Installing       : vim-common-2:8.0.1763-15.el8.x86_64                    3/5   
  Installing       : gpm-libs-1.20.7-17.el8.x86_64                          4/5   
  Running scriptlet: gpm-libs-1.20.7-17.el8.x86_64                          4/5   
  Installing       : vim-enhanced-2:8.0.1763-15.el8.x86_64                  5/5   
  Running scriptlet: vim-enhanced-2:8.0.1763-15.el8.x86_64                  5/5   
  Running scriptlet: vim-common-2:8.0.1763-15.el8.x86_64                    5/5   
  Verifying        : gpm-libs-1.20.7-17.el8.x86_64                          1/5   
  Verifying        : vim-common-2:8.0.1763-15.el8.x86_64                    2/5   
  Verifying        : vim-enhanced-2:8.0.1763-15.el8.x86_64                  3/5   
  Verifying        : vim-filesystem-2:8.0.1763-15.el8.noarch                4/5   
  Verifying        : which-2.21-12.el8.x86_64                               5/5 
Installed:
  gpm-libs-1.20.7-17.el8.x86_64         vim-common-2:8.0.1763-15.el8.x86_64      
  vim-enhanced-2:8.0.1763-15.el8.x86_64 vim-filesystem-2:8.0.1763-15.el8.noarch  
  which-2.21-12.el8.x86_64             
Complete!
Removing intermediate container 4ae486713438 
---> 38130833a2d1
Step 7/15 : ENV MYPATH /usr/local 
---> Running in 28cd9260154b
Removing intermediate container 28cd9260154b 
---> 3923f1303ae0
Step 8/15 : WORKDIR $MYPATH 
---> Running in 5ab21486fb5f
Removing intermediate container 5ab21486fb5f 
---> fbf08fd36af8
Step 9/15 : ENV JAVA_HOME /usr/local/jdk1.8.0_291 
---> Running in ef213e7e6e8e
Removing intermediate container ef213e7e6e8e 
---> b8795505859b
Step 10/15 : ENV CLASSPATH $JAVA_HOME/lib/dt.jar;$JAVA_HOME/lib/tools.jar 
---> Running in 5aa5f5b6af69
Removing intermediate container 5aa5f5b6af69 
---> ea7ae79c4fa8
Step 11/15 : ENV CATALINA_HOME /usr/local/apache-tomcat-8.5.60 
---> Running in 46022b01f999
Removing intermediate container 46022b01f999 
---> 16181cce70b7
Step 12/15 : ENV CATALINA_BASE /usr/local/apache-tomcat-8.5.60 
---> Running in 603f740b68ce
Removing intermediate container 603f740b68ce 
---> ecb9745d1629
Step 13/15 : ENV PATH $PATH:$JAVA_HOME/bin;$CATALINA_HOME/lib;$CATALINA_HOME/bin 
---> Running in 03ed87fc7ae2
Removing intermediate container 03ed87fc7ae2 
---> 8b08a12f8f89
Step 14/15 : EXPOSE 8080 
---> Running in 70c7fdee9cfd
Removing intermediate container 70c7fdee9cfd 
---> 66b1f6004bda
Step 15/15 : CMD /usr/local/apache-tomcat-8.5.60/bin/startup.sh && tail -f /usr/local/apache-tomcat-8.5.60/logs/catalina.out 
---> Running in 311057f305f6
Removing intermediate container 311057f305f6 
---> d05f98d7f016
Successfully built d05f98d7f016
Successfully tagged diytomcat:latest

4. Start the image

docker run -d -p 9090:8080 --name tomcattest -v /data/tomcat/test:/usr/local/apache-tomcat-8.5.60/webapps/test -v /data/tomcat/logs:/usr/local/apache-tomcat-8.5.60/logs diytomcat

5. Test access (see the curl check after the file listings below)

6. Deploy a project (since step 4 mounted the volumes, we can write the project locally and it is published directly)

Put web.xml under /data/tomcat/test/WEB-INF/ and index.jsp under /data/tomcat/test/, and the page becomes accessible.

web.xml

<?xml version="1.0" encoding="UTF-8"?>
<web-app xmlns="http://xmlns.jcp.org/xml/ns/javaee"  
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"  
  xsi:schemaLocation="http://xmlns.jcp.org/xml/ns/javaee                      
  http://xmlns.jcp.org/xml/ns/javaee/web-app_3_1.xsd"  
  version="3.1"  
  metadata-complete="true">
</web-app>

index.jsp

<%@ page language="java" contentType="text/html; charset=UTF-8"    
	pageEncoding="UTF-8"%>
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>菜鸟教程(runoob.com)</title>
</head>
<body>
Hello World!<br/>
<%out.println("Your IP address " + request.getRemoteAddr());
%>
</body>
</html>
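With the files in place, a quick check from the host (port 9090 as mapped in step 4; the output depends on your index.jsp):

curl localhost:9090/test/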

Docker networking

Understanding docker0

Clear out the environment first

Test

ip addr shows three networks (lo, the host NIC, and docker0). How does docker handle container network access?

# Download and start tomcat
[root@localhost ~]# docker run -d -P --name tomcat01 tomcat
# Check the container's internal network address with ip addr
[root@localhost ~]# docker ps
CONTAINER ID   IMAGE     COMMAND             CREATED              STATUS              PORTS                                         NAMES
5d9ee849922a   tomcat    "catalina.sh run"   About a minute ago   Up About a minute   0.0.0.0:49153->8080/tcp, :::49153->8080/tcp   tomcat01
[root@localhost ~]# docker exec -it 5d9ee849922a ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00    
    inet 127.0.0.1/8 scope host lo       
    	valid_lft forever preferred_lft forever
20: eth0@if21: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
     link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0    
     inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0       
     	valid_lft forever preferred_lft forever
# ping the container from the linux host
[root@localhost ~]# ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.089 ms
64 bytes from 172.17.0.2: icmp_seq=2 ttl=64 time=0.160 ms
64 bytes from 172.17.0.2: icmp_seq=3 ttl=64 time=0.151 ms
64 bytes from 172.17.0.2: icmp_seq=4 ttl=64 time=0.126 ms
64 bytes from 172.17.0.2: icmp_seq=5 ttl=64 time=0.138 ms

How it works

1. Every time a container starts, docker assigns it an IP address. As soon as docker is installed there is a docker0 bridge; the technology behind it is veth-pair!

Test ip addr again

# We find that container NICs come in pairs (inside the container it is interface 20, here on the host it is 21)
# A veth-pair is a pair of virtual interfaces that always exist in pairs: one end plugs into the protocol stack, and the two ends are connected to each other
# Thanks to this property, a veth-pair acts as a bridge connecting network devices

2. Start a second tomcat

[root@localhost ~]# docker run -d -P --name tomcat02 tomcat
f484b8edac84ccb68e34a7e9a7e45b3d2606c75a9dd4f1fd9f342c2a9ddec93b
[root@localhost ~]# docker ps
CONTAINER ID   IMAGE     COMMAND             CREATED             STATUS             PORTS                                         NAMES
f484b8edac84   tomcat    "catalina.sh run"   5 seconds ago       Up 3 seconds       0.0.0.0:49154->8080/tcp, :::49154->8080/tcp   tomcat02
5d9ee849922a   tomcat    "catalina.sh run"   About an hour ago   Up About an hour   0.0.0.0:49153->8080/tcp, :::49153->8080/tcp   tomcat01

3. The two tomcats can ping each other

[root@localhost ~]# docker ps
CONTAINER ID   IMAGE     COMMAND             CREATED             STATUS             PORTS                                         NAMES
f484b8edac84   tomcat    "catalina.sh run"   3 minutes ago       Up 3 minutes       0.0.0.0:49154->8080/tcp, :::49154->8080/tcp   tomcat02
5d9ee849922a   tomcat    "catalina.sh run"   About an hour ago   Up About an hour   0.0.0.0:49153->8080/tcp, :::49153->8080/tcp   tomcat01
[root@localhost ~]# docker exec -it f484b8edac84 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00    
    inet 127.0.0.1/8 scope host lo       
    	valid_lft forever preferred_lft forever
24: eth0@if25: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
     link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff link-netnsid 0    
     inet 172.17.0.3/16 brd 172.17.255.255 scope global eth0       
     	valid_lft forever preferred_lft forever
[root@localhost ~]# docker exec -it 5d9ee849922a /bin/bash
root@5d9ee849922a:/usr/local/tomcat# pwd
/usr/local/tomcat
root@5d9ee849922a:/usr/local/tomcat# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00    
    inet 127.0.0.1/8 scope host lo       
    	valid_lft forever preferred_lft forever
20: eth0@if21: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
     link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0    
     inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0       
     	valid_lft forever preferred_lft forever
root@5d9ee849922a:/usr/local/tomcat# ping 172.17.0.3
PING 172.17.0.3 (172.17.0.3) 56(84) bytes of data.
64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.130 ms
64 bytes from 172.17.0.3: icmp_seq=2 ttl=64 time=0.253 ms
64 bytes from 172.17.0.3: icmp_seq=3 ttl=64 time=0.146 ms
64 bytes from 172.17.0.3: icmp_seq=4 ttl=64 time=0.144 ms
^C
--- 172.17.0.3 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 12ms
rtt min/avg/max/mdev = 0.130/0.168/0.253/0.050 ms

As soon as a container is deleted, its veth-pair disappears.

--link (no longer recommended)

Consider a scenario: we have written a microservice with database=ip: in its configuration. The project keeps running but the database IP changes; how do we handle that? By accessing containers by name.

[root@localhost ~]# docker exec -it tomcat01 ping tomcat02
ping: tomcat02: Name or service not known
# The fix: the --link option. tomcat03 can then ping tomcat02, but tomcat02 cannot ping tomcat03
[root@localhost ~]# docker run -d -P --name tomcat03 --link tomcat02 tomcat
f27fa6ab0cccd64cb57f2ec3aaa8c1b4592d011594607da65ee770b1dcd7cca4
[root@localhost ~]# docker exec -it tomcat02 ping tomcat03
ping: tomcat03: Name or service not known
[root@localhost ~]# docker exec -it tomcat03 ping tomcat02
PING tomcat02 (172.17.0.3) 56(84) bytes of data.
64 bytes from tomcat02 (172.17.0.3): icmp_seq=1 ttl=64 time=0.136 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=2 ttl=64 time=0.167 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=3 ttl=64 time=0.147 ms
# tomcat03 has tomcat02 written into its configuration (its hosts file)
[root@localhost ~]# docker exec -it f27fa6ab0ccc cat /etc/hosts
127.0.0.1	localhost
::1	localhost ip6-localhost ip6-loopback
fe00::0	ip6-localnet
ff00::0	ip6-mcastprefix
ff02::1	ip6-allnodes
ff02::2	ip6-allrouters
172.17.0.3	tomcat02 f484b8edac84
172.17.0.4	f27fa6ab0ccc

Custom networks

View all docker networks

Network modes

bridge: bridged networking via docker0 (the default; networks you create yourself also use bridge mode)

none: no networking configured

host: share the host's network

container: join another container's network (rarely used, very limited)

Test

# These two start commands are equivalent; bridge is the default network
docker run -d -P --name tomcat01 tomcat
docker run -d -P --name tomcat01 --net bridge tomcat
# Create our own network (recommended: containers on it can reach each other by container name)
[root@localhost ~]# docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 mynet
795a3cc39a6bc6b1d24e9c0907f1c492c7586eb27c2f94bbc4c19e51e05c74bd
# Check the networks
[root@localhost ~]# docker network ls
NETWORK ID     NAME      DRIVER    SCOPE
f4d3fbf753d6   bridge    bridge    local
8f0a492b8fc8   host      host      local
795a3cc39a6b   mynet     bridge    local
8a44f9f44172   none      null      local
[root@localhost ~]# docker network inspect mynet
[
    {
        "Name": "mynet",
        "Id": "795a3cc39a6bc6b1d24e9c0907f1c492c7586eb27c2f94bbc4c19e51e05c74bd",
        "Created": "2021-07-25T18:50:27.887523881+08:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": {},
            "Config": [
                {
                    "Subnet": "192.168.0.0/16",
                    "Gateway": "192.168.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "63a6db8a544d78507437aaf917ec00144a0c9f0c9acc4109ab36c57121db7f29": {
                "Name": "tomcat01",
                "EndpointID": "8cded50230edd00a56fd274c9d8ee84f98602bf8a7110309604614aa37ce7ccc",
                "MacAddress": "02:42:c0:a8:00:02",
                "IPv4Address": "192.168.0.2/16",
                "IPv6Address": ""
            },
            "96c471b131dc1fc51b6ace86d864baf8fd6e9917e4b850af5bafa109d5770c07": {
                "Name": "tomcat02",
                "EndpointID": "62c94f6609eb568f89296978a370e1c3f8da526848235d0d27bbce871f968664",
                "MacAddress": "02:42:c0:a8:00:03",
                "IPv4Address": "192.168.0.3/16",
                "IPv6Address": ""
            }
        },
        "Options": {},
        "Labels": {}
    }
]
# Create and start containers on our own network
[root@localhost ~]# docker run -d -P --name tomcat01 --net mynet tomcat
63a6db8a544d78507437aaf917ec00144a0c9f0c9acc4109ab36c57121db7f29
[root@localhost ~]# docker run -d -P --name tomcat02 --net mynet tomcat
96c471b131dc1fc51b6ace86d864baf8fd6e9917e4b850af5bafa109d5770c07
[root@localhost ~]# docker exec -it tomcat01 ping tomcat02
PING tomcat02 (192.168.0.3) 56(84) bytes of data.
64 bytes from tomcat02.mynet (192.168.0.3): icmp_seq=1 ttl=64 time=0.141 ms
64 bytes from tomcat02.mynet (192.168.0.3): icmp_seq=2 ttl=64 time=0.193 ms
64 bytes from tomcat02.mynet (192.168.0.3): icmp_seq=3 ttl=64 time=0.180 ms
[root@localhost ~]# docker exec -it tomcat02 ping tomcat01
PING tomcat01 (192.168.0.2) 56(84) bytes of data.
64 bytes from tomcat01.mynet (192.168.0.2): icmp_seq=1 ttl=64 time=0.049 ms
64 bytes from tomcat01.mynet (192.168.0.2): icmp_seq=2 ttl=64 time=0.148 ms
64 bytes from tomcat01.mynet (192.168.0.2): icmp_seq=3 ttl=64 time=0.148 ms

Connecting networks

docker network

docker network connect

# Connect tomcat-bridge01, which sits on the default bridge network, to the mynet network
# One container, two IP addresses
[root@localhost ~]# docker network connect mynet tomcat-bridge01
[root@localhost ~]# docker network inspect mynet
[
    {
        "Name": "mynet",
        "Id": "795a3cc39a6bc6b1d24e9c0907f1c492c7586eb27c2f94bbc4c19e51e05c74bd",
        "Created": "2021-07-25T18:50:27.887523881+08:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": {},
            "Config": [
                {
                    "Subnet": "192.168.0.0/16",
                    "Gateway": "192.168.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "63a6db8a544d78507437aaf917ec00144a0c9f0c9acc4109ab36c57121db7f29": {
                "Name": "tomcat01",
                "EndpointID": "8cded50230edd00a56fd274c9d8ee84f98602bf8a7110309604614aa37ce7ccc",
                "MacAddress": "02:42:c0:a8:00:02",
                "IPv4Address": "192.168.0.2/16",
                "IPv6Address": ""
            },
            "96c471b131dc1fc51b6ace86d864baf8fd6e9917e4b850af5bafa109d5770c07": {
                "Name": "tomcat02",
                "EndpointID": "62c94f6609eb568f89296978a370e1c3f8da526848235d0d27bbce871f968664",
                "MacAddress": "02:42:c0:a8:00:03",
                "IPv4Address": "192.168.0.3/16",
                "IPv6Address": ""
            },
            "e4a134b608a7a1a4102c6b0b010f9496e3d036061fcc7a49bdcc28701950ee44": {
                "Name": "tomcat-bridge01",
                "EndpointID": "c8fd345a6df238974ba7ce4fbfb75e26c621e10e1dbd309e618cb7423322d9a4",
                "MacAddress": "02:42:c0:a8:00:04",
                "IPv4Address": "192.168.0.4/16",
                "IPv6Address": ""
            }
        },
        "Options": {},
        "Labels": {}
    }
]
# Now tomcat01 and tomcat-bridge01 can ping each other
[root@localhost ~]# docker exec -it tomcat01 ping tomcat-bridge01
PING tomcat-bridge01 (192.168.0.4) 56(84) bytes of data.
64 bytes from tomcat-bridge01.mynet (192.168.0.4): icmp_seq=1 ttl=64 time=0.114 ms
64 bytes from tomcat-bridge01.mynet (192.168.0.4): icmp_seq=2 ttl=64 time=0.152 ms
64 bytes from tomcat-bridge01.mynet (192.168.0.4): icmp_seq=3 ttl=64 time=0.145 ms
[root@localhost ~]# docker exec -it tomcat-bridge01 ping tomcat01
PING tomcat01 (192.168.0.2) 56(84) bytes of data.
64 bytes from tomcat01.mynet (192.168.0.2): icmp_seq=1 ttl=64 time=0.071 ms
64 bytes from tomcat01.mynet (192.168.0.2): icmp_seq=2 ttl=64 time=0.147 ms
64 bytes from tomcat01.mynet (192.168.0.2): icmp_seq=3 ttl=64 time=0.176 ms

Hands-on: deploying a redis cluster

# Create the network
docker network create redis --subnet 172.38.0.0/16
# Generate 6 redis configs with a script
for port in $(seq 1 6); \
do \
mkdir -p /mydata/redis/node-${port}/conf
touch /mydata/redis/node-${port}/conf/redis.conf
cat << EOF > /mydata/redis/node-${port}/conf/redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.1${port}
cluster-announce-bus-port 16379
appendonly yes
EOF
done

for port in $(seq 1 6); \
do \
docker run -p 637${port}:6379 -p 1637${port}:16379 --name redis-${port} \
-v /mydata/redis/node-${port}/data:/data \
-v /mydata/redis/node-${port}/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.1${port} redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf; \
done
# Create the cluster from inside redis-1 (any node works)
[root@localhost redis]# docker exec -it redis-1 /bin/sh
/data # ls
appendonly.aof  nodes.conf
/data # redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 172.38.0.13:6379 172.38.0.14:6379 172.38.0.15:6379 172.38.0.16:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 172.38.0.15:6379 to 172.38.0.11:6379
Adding replica 172.38.0.16:6379 to 172.38.0.12:6379
Adding replica 172.38.0.14:6379 to 172.38.0.13:6379
M: a9a8845ea553c4327a872312f30c7b8a0453e886 172.38.0.11:6379   slots:[0-5460] (5461 slots) master
M: 9b206430a2918e2f09fbf0b0cb3d2c57a413dc82 172.38.0.12:6379   slots:[5461-10922] (5462 slots) master
M: 63223dc500544ffa27edbd5702950710c1bede24 172.38.0.13:6379   slots:[10923-16383] (5461 slots) master
S: e38cac6b78b4556bcc702a9a38781698ad963a5b 172.38.0.14:6379   replicates 63223dc500544ffa27edbd5702950710c1bede24
S: 3194807b243bfc801a9c8df3e28f9b0b6c5c0acc 172.38.0.15:6379   replicates a9a8845ea553c4327a872312f30c7b8a0453e886
S: 1a65f75980bcfbb6eca9b28b9777bc5428127302 172.38.0.16:6379   replicates 9b206430a2918e2f09fbf0b0cb3d2c57a413dc82
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join...
>>> Performing Cluster Check (using node 172.38.0.11:6379)
M: a9a8845ea553c4327a872312f30c7b8a0453e886 172.38.0.11:6379   slots:[0-5460] (5461 slots) master   1 additional replica(s)
S: 3194807b243bfc801a9c8df3e28f9b0b6c5c0acc 172.38.0.15:6379   slots: (0 slots) slave   replicates a9a8845ea553c4327a872312f30c7b8a0453e886
M: 63223dc500544ffa27edbd5702950710c1bede24 172.38.0.13:6379   slots:[10923-16383] (5461 slots) master   1 additional replica(s)
M: 9b206430a2918e2f09fbf0b0cb3d2c57a413dc82 172.38.0.12:6379   slots:[5461-10922] (5462 slots) master   1 additional replica(s)
S: e38cac6b78b4556bcc702a9a38781698ad963a5b 172.38.0.14:6379   slots: (0 slots) slave   replicates 63223dc500544ffa27edbd5702950710c1bede24
S: 1a65f75980bcfbb6eca9b28b9777bc5428127302 172.38.0.16:6379   slots: (0 slots) slave   replicates 9b206430a2918e2f09fbf0b0cb3d2c57a413dc82
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
# Check the cluster info
/data # redis-cli -c
127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:320
cluster_stats_messages_pong_sent:311
cluster_stats_messages_sent:631
cluster_stats_messages_ping_received:306
cluster_stats_messages_pong_received:320
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:631
127.0.0.1:6379> cluster nodes
3194807b243bfc801a9c8df3e28f9b0b6c5c0acc 172.38.0.15:6379@16379 slave a9a8845ea553c4327a872312f30c7b8a0453e886 0 1627221893000 5 connected
a9a8845ea553c4327a872312f30c7b8a0453e886 172.38.0.11:6379@16379 myself,master - 0 1627221893000 1 connected 0-5460
63223dc500544ffa27edbd5702950710c1bede24 172.38.0.13:6379@16379 master - 0 1627221893459 3 connected 10923-16383
9b206430a2918e2f09fbf0b0cb3d2c57a413dc82 172.38.0.12:6379@16379 master - 0 1627221894579 2 connected 5461-10922
e38cac6b78b4556bcc702a9a38781698ad963a5b 172.38.0.14:6379@16379 slave 63223dc500544ffa27edbd5702950710c1bede24 0 1627221894476 4 connected
1a65f75980bcfbb6eca9b28b9777bc5428127302 172.38.0.16:6379@16379 slave 9b206430a2918e2f09fbf0b0cb3d2c57a413dc82 0 1627221894000 6 connected
# Test it
127.0.0.1:6379> set a b
-> Redirected to slot [15495] located at 172.38.0.13:6379
OK
# Simulate the master going down
[root@localhost redis]# docker stop 3b545069cfb0
3b545069cfb0
# Get key a again
/data # redis-cli -c	# reconnecting to the cluster
127.0.0.1:6379> get a
-> Redirected to slot [15495] located at 172.38.0.14:6379
"b"
# Check the cluster info again
172.38.0.14:6379> cluster nodes
3194807b243bfc801a9c8df3e28f9b0b6c5c0acc 172.38.0.15:6379@16379 slave a9a8845ea553c4327a872312f30c7b8a0453e886 0 1627222490000 5 connected
e38cac6b78b4556bcc702a9a38781698ad963a5b 172.38.0.14:6379@16379 myself,master - 0 1627222488000 7 connected 10923-16383
63223dc500544ffa27edbd5702950710c1bede24 172.38.0.13:6379@16379 master,fail - 1627222272776 1627222271000 3 connected
9b206430a2918e2f09fbf0b0cb3d2c57a413dc82 172.38.0.12:6379@16379 master - 0 1627222489000 2 connected 5461-10922
a9a8845ea553c4327a872312f30c7b8a0453e886 172.38.0.11:6379@16379 master - 0 1627222489561 1 connected 0-5460
1a65f75980bcfbb6eca9b28b9777bc5428127302 172.38.0.16:6379@16379 slave 9b206430a2918e2f09fbf0b0cb3d2c57a413dc82 0 1627222490076 6 connected
# View the network info
docker network ls

—20231211

Building a 3-master, 3-slave cluster

# docker run: create and run a docker container instance
# --name redis-node-6: container name
# --net host: use the host's IP and ports directly
# --privileged=true: get host root privileges inside the container
# -v /data/redis/share/redis-node-6:/data: volume mount, host-path:container-path
# redis:6.0.8: redis image and version
# --cluster-enabled yes: enable redis cluster mode
# --appendonly yes: enable persistence
# --port 6386: redis port

docker run -d --name redis-node-1 --net host --privileged=true -v /data/redis/share/redis-node-1:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6381
docker run -d --name redis-node-2 --net host --privileged=true -v /data/redis/share/redis-node-2:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6382
docker run -d --name redis-node-3 --net host --privileged=true -v /data/redis/share/redis-node-3:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6383
docker run -d --name redis-node-4 --net host --privileged=true -v /data/redis/share/redis-node-4:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6384
docker run -d --name redis-node-5 --net host --privileged=true -v /data/redis/share/redis-node-5:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6385
docker run -d --name redis-node-6 --net host --privileged=true -v /data/redis/share/redis-node-6:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6386

Note: the following commands can only be run after entering a docker container (docker exec -it b4ee28f6edf3 /bin/bash), and be sure to use your own real IP address.

# --cluster-replicas 1: create one slave for each master
root@master:/data# redis-cli --cluster create 192.168.1.12:6381 192.168.1.12:6382 192.168.1.12:6383 192.168.1.12:6384 192.168.1.12:6385 192.168.1.12:6386 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.1.12:6385 to 192.168.1.12:6381
Adding replica 192.168.1.12:6386 to 192.168.1.12:6382
Adding replica 192.168.1.12:6384 to 192.168.1.12:6383
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: 0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381
   slots:[0-5460] (5461 slots) master
M: 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382
   slots:[5461-10922] (5462 slots) master
M: 03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383
   slots:[10923-16383] (5461 slots) master
S: dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384
   replicates 22687ff05bc4540ffecd0807f92f44b6db0cb4fd
S: e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385
   replicates 03b7fd7c2af6e476b04038104623ce13ad8b33f0
S: 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386
   replicates 0e88f98d43ff2386f9ec11798a82e3d116775680
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.
>>> Performing Cluster Check (using node 192.168.1.12:6381)
M: 0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384
   slots: (0 slots) slave
   replicates 22687ff05bc4540ffecd0807f92f44b6db0cb4fd
S: e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385
   slots: (0 slots) slave
   replicates 03b7fd7c2af6e476b04038104623ce13ad8b33f0
S: 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386
   slots: (0 slots) slave
   replicates 0e88f98d43ff2386f9ec11798a82e3d116775680
M: 03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
M: 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

Connect to 6381 as the entry point and check node status with cluster info and cluster nodes

[root@master ~]# docker exec -it redis-node-1 /bin/bash
root@master:/data# redis-cli -p 6381
127.0.0.1:6381> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:335
cluster_stats_messages_pong_sent:350
cluster_stats_messages_sent:685
cluster_stats_messages_ping_received:345
cluster_stats_messages_pong_received:335
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:685
127.0.0.1:6381> cluster nodes
0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381@16381 myself,master - 0 1702305890000 1 connected 0-5460
dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384@16384 slave 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 0 1702305890839 2 connected
e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385@16385 slave 03b7fd7c2af6e476b04038104623ce13ad8b33f0 0 1702305891848 3 connected
43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386@16386 slave 0e88f98d43ff2386f9ec11798a82e3d116775680 0 1702305889000 1 connected
03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383@16383 master - 0 1702305890000 3 connected 10923-16383
22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382@16382 master - 0 1702305889000 2 connected 5461-10922

Redis cluster reads/writes error out unless the -c (cluster) flag is added:

127.0.0.1:6381> set k1 v1
(error) MOVED 12706 192.168.1.12:6383

root@master:/data# redis-cli -p 6381 -c
127.0.0.1:6381> set k1 v1
-> Redirected to slot [12706] located at 192.168.1.12:6383
OK

View cluster info

root@master:/data# redis-cli --cluster check 192.168.1.12:6381
192.168.1.12:6381 (0e88f98d...) -> 0 keys | 5461 slots | 1 slaves.
192.168.1.12:6383 (03b7fd7c...) -> 0 keys | 5461 slots | 1 slaves.
192.168.1.12:6382 (22687ff0...) -> 0 keys | 5462 slots | 1 slaves.
[OK] 0 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.12:6381)
M: 0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384
   slots: (0 slots) slave
   replicates 22687ff05bc4540ffecd0807f92f44b6db0cb4fd
S: e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385
   slots: (0 slots) slave
   replicates 03b7fd7c2af6e476b04038104623ce13ad8b33f0
S: 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386
   slots: (0 slots) slave
   replicates 0e88f98d43ff2386f9ec11798a82e3d116775680
M: 03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
M: 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

Master/slave failover

[root@master ~]# docker stop redis-node-1
redis-node-1
[root@master ~]# docker exec -it redis-node-2 /bin/bash
root@master:/data# redis-cli -p 6382
127.0.0.1:6382> cluster nodes
03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383@16383 master - 0 1702307006121 3 connected 10923-16383
43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386@16386 master - 0 1702307005112 7 connected 0-5460
22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382@16382 myself,master - 0 1702307004000 2 connected 5461-10922
0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381@16381 master,fail - 1702306927072 1702306923030 1 disconnected
dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384@16384 slave 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 0 1702307004000 2 connected
e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385@16385 slave 03b7fd7c2af6e476b04038104623ce13ad8b33f0 0 1702307004000 3 connected

When master 1 goes down, slave 1 becomes the master; after master 1 is restarted, it remains a slave.

Scaling the cluster up

docker run -d --name redis-node-7 --net host --privileged=true -v /data/redis/share/redis-node-7:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6387
docker run -d --name redis-node-8 --net host --privileged=true -v /data/redis/share/redis-node-8:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6388
docker ps

Add the new 6387 node to the cluster as a master

redis-cli --cluster add-node 192.168.1.12:6387 192.168.1.12:6381 
# 6387 is the new node that will join as a master
# 6381 is an existing node in the cluster acting as the introducer: 6387, so to speak, calls on 6381 to find the organization and join the cluster
[root@master ~]# docker exec -it redis-node-1 /bin/bash
root@master:/data# redis-cli --cluster add-node 192.168.1.12:6387 192.168.1.12:6381 
>>> Adding node 192.168.1.12:6387 to cluster 192.168.1.12:6381
>>> Performing Cluster Check (using node 192.168.1.12:6381)
S: 0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381
   slots: (0 slots) slave
   replicates 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b
M: 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384
   slots: (0 slots) slave
   replicates 22687ff05bc4540ffecd0807f92f44b6db0cb4fd
M: 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385
   slots: (0 slots) slave
   replicates 03b7fd7c2af6e476b04038104623ce13ad8b33f0
M: 03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.1.12:6387 to make it join the cluster.
[OK] New node added correctly.

Reassign the hash slots

redis-cli --cluster reshard 192.168.1.12:6381
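# reshard is interactive; for this run the answers were roughly (values illustrative):
#   How many slots do you want to move (from 1 to 16384)? 4096
#   What is the receiving node ID? <6387's node id>
#   Source node #1: all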
# Check the cluster again
root@master:/data# redis-cli --cluster check 192.168.1.12:6381
192.168.1.12:6386 (43ec76eb...) -> 0 keys | 4096 slots | 1 slaves.
192.168.1.12:6387 (38c8c4f2...) -> 0 keys | 4096 slots | 0 slaves.
192.168.1.12:6382 (22687ff0...) -> 0 keys | 4096 slots | 1 slaves.
192.168.1.12:6383 (03b7fd7c...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.12:6381)
S: 0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381
   slots: (0 slots) slave
   replicates 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b
M: 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: 38c8c4f2a0aeef1450363ae2abcc18547b0c133b 192.168.1.12:6387
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384
   slots: (0 slots) slave
   replicates 22687ff05bc4540ffecd0807f92f44b6db0cb4fd
M: 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385
   slots: (0 slots) slave
   replicates 03b7fd7c2af6e476b04038104623ce13ad8b33f0
M: 03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

Assign a slave node to 6387

redis-cli --cluster add-node 192.168.1.12:6388 192.168.1.12:6387 --cluster-slave --cluster-master-id 38c8c4f2a0aeef1450363ae2abcc18547b0c133b 
# This is 6387's node ID; substitute the ID from your own cluster

root@master:/data# redis-cli --cluster add-node 192.168.1.12:6388 192.168.1.12:6387 --cluster-slave --cluster-master-id 38c8c4f2a0aeef1450363ae2abcc18547b0c133b 
>>> Adding node 192.168.1.12:6388 to cluster 192.168.1.12:6387
>>> Performing Cluster Check (using node 192.168.1.12:6387)
M: 38c8c4f2a0aeef1450363ae2abcc18547b0c133b 192.168.1.12:6387
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: 0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381
   slots: (0 slots) slave
   replicates 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b
S: dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384
   slots: (0 slots) slave
   replicates 22687ff05bc4540ffecd0807f92f44b6db0cb4fd
M: 03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385
   slots: (0 slots) slave
   replicates 03b7fd7c2af6e476b04038104623ce13ad8b33f0
M: 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.1.12:6388 to make it join the cluster.
Waiting for the cluster to join

>>> Configure node as replica of 192.168.1.12:6387.
[OK] New node added correctly.

Scaling in (removing the master and slave)

1. First remove the slave node 6388 (a sketch for looking up its node ID follows)
2. Reassign the slots freed from 6387
3. Then remove 6387
4. The cluster is back to 3 masters and 3 slaves
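
The del-node call below needs 6388's node ID. A minimal way to look it up (a sketch; CLUSTER MYID returns the ID of the node you are connected to):

redis-cli -p 6388 cluster myid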

redis-cli --cluster del-node 192.168.1.12:6388 a03ad5c3edb3ca53a4f8242dcfb169e3f2cef9e2

root@master:/data# redis-cli --cluster del-node 192.168.1.12:6388 a03ad5c3edb3ca53a4f8242dcfb169e3f2cef9e2
>>> Removing node a03ad5c3edb3ca53a4f8242dcfb169e3f2cef9e2 from cluster 192.168.1.12:6388
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.


# Check the cluster status
root@master:/data# redis-cli --cluster check 192.168.1.12:6381
192.168.1.12:6386 (43ec76eb...) -> 0 keys | 4096 slots | 1 slaves.
192.168.1.12:6387 (38c8c4f2...) -> 0 keys | 4096 slots | 0 slaves.
192.168.1.12:6382 (22687ff0...) -> 0 keys | 4096 slots | 1 slaves.
192.168.1.12:6383 (03b7fd7c...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.12:6381)
S: 0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381
   slots: (0 slots) slave
   replicates 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b
M: 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: 38c8c4f2a0aeef1450363ae2abcc18547b0c133b 192.168.1.12:6387
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384
   slots: (0 slots) slave
   replicates 22687ff05bc4540ffecd0807f92f44b6db0cb4fd
M: 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385
   slots: (0 slots) slave
   replicates 03b7fd7c2af6e476b04038104623ce13ad8b33f0
M: 03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

# Reassign the slots
root@master:/data# redis-cli --cluster reshard 192.168.1.12:6381
>>> Performing Cluster Check (using node 192.168.1.12:6381)
S: 0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381
   slots: (0 slots) slave
   replicates 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b
M: 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: 38c8c4f2a0aeef1450363ae2abcc18547b0c133b 192.168.1.12:6387
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384
   slots: (0 slots) slave
   replicates 22687ff05bc4540ffecd0807f92f44b6db0cb4fd
M: 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385
   slots: (0 slots) slave
   replicates 03b7fd7c2af6e476b04038104623ce13ad8b33f0
M: 03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096
What is the receiving node ID? 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b                                 
Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1: 38c8c4f2a0aeef1450363ae2abcc18547b0c133b
Source node #2: done

root@master:/data# redis-cli --cluster check 192.168.1.12:6381
192.168.1.12:6386 (43ec76eb...) -> 0 keys | 8192 slots | 1 slaves.
192.168.1.12:6387 (38c8c4f2...) -> 0 keys | 0 slots | 0 slaves.
192.168.1.12:6382 (22687ff0...) -> 0 keys | 4096 slots | 1 slaves.
192.168.1.12:6383 (03b7fd7c...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.12:6381)
S: 0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381
   slots: (0 slots) slave
   replicates 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b
M: 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386
   slots:[0-6826],[10923-12287] (8192 slots) master
   1 additional replica(s)
M: 38c8c4f2a0aeef1450363ae2abcc18547b0c133b 192.168.1.12:6387
   slots: (0 slots) master
S: dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384
   slots: (0 slots) slave
   replicates 22687ff05bc4540ffecd0807f92f44b6db0cb4fd
M: 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385
   slots: (0 slots) slave
   replicates 03b7fd7c2af6e476b04038104623ce13ad8b33f0
M: 03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

# Remove 6387
root@master:/data# redis-cli --cluster del-node 192.168.1.12:6387 38c8c4f2a0aeef1450363ae2abcc18547b0c133b
>>> Removing node 38c8c4f2a0aeef1450363ae2abcc18547b0c133b from cluster 192.168.1.12:6387
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.

# Check the cluster status again
root@master:/data# redis-cli --cluster check 192.168.1.12:6381
192.168.1.12:6386 (43ec76eb...) -> 0 keys | 8192 slots | 1 slaves.
192.168.1.12:6382 (22687ff0...) -> 0 keys | 4096 slots | 1 slaves.
192.168.1.12:6383 (03b7fd7c...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.1.12:6381)
S: 0e88f98d43ff2386f9ec11798a82e3d116775680 192.168.1.12:6381
   slots: (0 slots) slave
   replicates 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b
M: 43ec76eb8c72bcb4fb0f790a73de83eb065cd10b 192.168.1.12:6386
   slots:[0-6826],[10923-12287] (8192 slots) master
   1 additional replica(s)
S: dabcd53e8e42724bc2237fdbfd2f3444ef06ed9e 192.168.1.12:6384
   slots: (0 slots) slave
   replicates 22687ff05bc4540ffecd0807f92f44b6db0cb4fd
M: 22687ff05bc4540ffecd0807f92f44b6db0cb4fd 192.168.1.12:6382
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: e2fbb1428c0d47f88812c49013141440278bf4a0 192.168.1.12:6385
   slots: (0 slots) slave
   replicates 03b7fd7c2af6e476b04038104623ce13ad8b33f0
M: 03b7fd7c2af6e476b04038104623ce13ad8b33f0 192.168.1.12:6383
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

Network modes

bridge: assigns and configures an IP for each container and attaches it to the docker0 virtual bridge; this is the default mode.
host: the container does not get its own virtual NIC or IP; it uses the host's IP address and ports directly.
none: the container gets its own network namespace but no network configuration at all (no veth pair, bridge attachment, or IP).
container: the new container does not create its own NIC or configure its own IP; it shares the IP and port range of a specified existing container. A sketch of selecting each mode follows this list.
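
Selecting a mode at docker run time (a sketch; the container names and the nginx image are placeholders, not part of the original setup):

docker run -d --name web1 --network bridge nginx          # default bridge mode
docker run -d --name web2 --network host nginx            # share the host's network stack
docker run -d --name web3 --network none nginx            # no networking configured
docker run -d --name web4 --network container:web1 nginx  # share web1's network namespace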

By default, the Docker service creates a docker0 bridge (with an internal docker0 interface). This bridge network is named docker0, and at the kernel level it connects to the other physical or virtual NICs, putting all containers and the local host on the same physical network. Docker assigns a default IP address and subnet mask to the docker0 interface so that the host and containers can communicate with each other through the bridge.

# Inspect the bridge network and grep for the name field
[root@master docker]# docker network inspect bridge|grep name
            "com.docker.network.bridge.name": "docker0",
[root@master docker]# ifconfig|grep docker
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
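
One practical consequence: containers on a user-defined bridge can resolve each other by container name, which the default docker0 bridge does not do. A sketch (network and container names are placeholders):

docker network create mynet
docker run -d --name redis-a --network mynet redis:6.0.8
docker run -it --rm --network mynet redis:6.0.8 redis-cli -h redis-a ping   # expects PONG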

docker-compose

# Note: Docker itself must already be installed
# Install Docker Compose as a standalone binary
curl -L https://github.com/docker/compose/releases/download/1.21.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
# Make docker-compose executable
chmod +x /usr/local/bin/docker-compose
# Check the docker-compose version
docker-compose --version
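
With compose installed, a minimal docker-compose.yml to exercise the commands in the next section (a sketch; the service name and image are assumptions, not from the original setup):

cat > docker-compose.yml <<'EOF'
version: "3"
services:
  redis:
    image: redis:6.0.8
    ports:
      - "6379:6379"
EOF
docker-compose up -d   # start in the background
docker-compose ps      # the service id here is "redis"
docker-compose down    # stop and remove the containers and network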

Common compose commands

docker-compose -h                 # show help
docker-compose up                 # start all docker-compose services
docker-compose up -d              # start all docker-compose services in the background
docker-compose down               # stop and remove containers and networks (add -v for volumes, --rmi for images)
# enter a container: docker-compose exec <service id from docker-compose.yml> /bin/bash
docker-compose exec <service-id> /bin/bash
docker-compose ps                 # list all running containers managed by this compose file
docker-compose top                # show processes of the compose-managed containers
docker-compose logs <service-id>  # view a service's log output
docker-compose config             # validate the configuration
docker-compose config -q          # validate quietly; output only on errors
docker-compose restart            # restart services
docker-compose start              # start services
docker-compose stop               # stop services