文章目录
1.实现并总结容器跨主机的通信过程
docker-1主机操作
# 设置docker-1宿主机的docker网络地址
root@docker-1:~# vim /lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --bip=10.10.0.1/24
root@docker-1:~# systemctl daemon-reload
root@docker-1:~# systemctl restart docker
# 启动centos容器验证通信
root@docker-1:/opt# docker run -it --rm centos:7.6.1810 bash
[root@2b6fa5e51234 /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255
ether 02:42:0a:0a:00:02 txqueuelen 0 (Ethernet)
RX packets 6226 bytes 30004850 (28.6 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 5029 bytes 274916 (268.4 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
# 在docker-1宿主机添加通往10.20.0.0/24的路由
root@docker-1:~# route add -net 10.20.0.0/24 gw 172.18.10.12
root@docker-1:~# iptables -A FORWARD -s 172.18.0.0/16 -j ACCEPT # 允许响应报文的转发
# 此时容器即可与docker-2宿主机上的容器通信
[root@2b6fa5e51234 /]# ping 10.20.0.2
PING 10.20.0.2 (10.20.0.2) 56(84) bytes of data.
64 bytes from 10.20.0.2: icmp_seq=1 ttl=62 time=0.304 ms
64 bytes from 10.20.0.2: icmp_seq=2 ttl=62 time=0.377 ms
docker-2主机操作
# 设置docker-2宿主机的docker网络地址
root@docker-2:~# vim /lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --bip=10.20.0.1/24
root@docker-2:~# systemctl daemon-reload
root@docker-2:~# systemctl restart docker
root@docker-2:/opt# docker run -it --rm centos:7.6.1810 bash
[root@e5c70c3fdf86 /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.20.0.2 netmask 255.255.255.0 broadcast 10.20.0.255
ether 02:42:0a:14:00:02 txqueuelen 0 (Ethernet)
RX packets 8442 bytes 30123267 (28.7 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 6923 bytes 377229 (368.3 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
root@docker-2:~# route add -net 10.10.0.0/24 gw 172.18.10.11
root@docker-2:~# iptables -A FORWARD -s 172.18.0.0/16 -j ACCEPT
# 此时容器即可与docker-1宿主机上的容器通信
[root@e5c70c3fdf86 /]# ping 10.10.0.2
PING 10.10.0.2 (10.10.0.2) 56(84) bytes of data.
64 bytes from 10.10.0.2: icmp_seq=1 ttl=62 time=0.394 ms
64 bytes from 10.10.0.2: icmp_seq=2 ttl=62 time=0.246 ms
64 bytes from 10.10.0.2: icmp_seq=3 ttl=62 time=0.289 ms
2.总结Dockerfile的常见指令
FROM nginx:v1 # 指定当前引用的父镜像,在整个dockerfile除注释外第一行
LABEL author="jack@gmail.com" # "key":"value"形式,设置镜像的属性标签 author="jack@gmail.com"
ADD /home/1.tar.gz /etc/ # 将宿主机本地的文件、目录、压缩包等资源添加到镜像中,会自动解压tar.gz格式的压缩包,但是不会自动解压zip包。
COPY /home/1.txt /etc/1.txt # 和ADD类似,也是添加宿主机资源到镜像中去,但是没有解压缩功能。
RUN yum install -y unzip # 执行shell命令,但是一定要以非交互式的方式执行
ENV app_dir=/apps/tomcat # 定义环境变量,例如MY_NAME="john Doe"
EXPOSE 80 # 声明要把容器的某些端口映射到宿主机,只是声明,真的映射要在启动容器时-p指定
WORKDIR /apps # 定义当前的工作目录,定义后后面的操作都在该目录下
CMD ["nginx","stop"] # 执行命令,有三种方式定义容器启动时所默认执行的命令或脚本,CMD ["executable","param1","param2"] 推荐的方式,要使用双引号,命令后面跟参数;CMD ["param1","param2"] 配合ENTRYPOINT作为参数使用;CMD command param1 param2 基于shell命令。
ENTRYPOINT ["top","-b","-c"] # 也可以用于定义容器再启动时默认执行的命令或者脚本,如果是和CMD混合使用的时候,会将CMD的命令作为参数传递给ENTRYPOINT后面的脚本,可以在脚本中对参数做判断并相应的容器初始化操作。
# 案例一
ENTRYPOINT ["top","-b"]
CMD ["-c"]
# 等于如下一行
ENTRYPOINT ["top","-b","-c"]
# 案例二
ENTRYPOINT ["docker-entrypoint.sh"] # 定义一个入口点脚本,并传递mysqld参数
CMD ["mysqld"]
# 等同于下一行
ENTRYPOINT ["docker-entrypoint.sh","mysqld"]
# 使用总结ENTRYPOINT(脚本)+CMD(当做参数传递给ENTRYPOINT)
3.基于Dockerfile构建Nginx镜像并验证可以启动为容器
# 以centos:7.6.1810镜像为父镜像生成nginx镜像
root@docker-1:/opt/dockerfile/nginx# cat Dockerfile
# 以centos:7.6.1810镜像为父镜像生成nginx镜像
FROM centos:7.6.1810
MAINTAINER "zxw20230715@qq.com"
ADD sources.list /etc/apt/sources.list
RUN yum install -y vim wget unzip pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop gcc gcc-c++
ADD nginx-1.14.2.tar.gz /usr/local/src/
RUN cd /usr/local/src/nginx-1.14.2 && ./configure --prefix=/usr/local/nginx && make && make install && ln -sv /usr/local/nginx/sbin/nginx /usr/bin/
RUN groupadd -g 2000 nginx && useradd -g nginx -s /usr/sbin/nologin -u 2000 nginx && chown -R nginx.nginx /usr/local/nginx
ADD nginx.conf /usr/local/nginx/conf/
RUN echo "MyApp1" > /usr/local/nginx/html/index.html
EXPOSE 80 443
CMD ["nginx","-g","daemon off;"]
# 构建镜像脚本
root@docker-1:/opt/dockerfile/nginx# cat build-command.sh
#!/bin/bash
# 构建 centos-nginx 镜像。
# 用法: ./build-command.sh [TAG]
#   TAG - 可选的镜像 tag;不传参时默认为 v1(与原有无参调用方式兼容)。
# 原脚本的缺陷: TAG=$1 赋值后从未使用,tag 被硬编码为 v1。
TAG=${1:-v1}
docker build -t "centos-nginx:${TAG}" .
# 目录准备相关文件
root@docker-1:/opt/dockerfile/nginx# tree ./
./
├── build-command.sh
├── Dockerfile
├── nginx-1.14.2.tar.gz
├── nginx.conf
└── sources.list
0 directories, 5 files
# 执行构建脚本
root@docker-1:/opt/dockerfile/nginx# ./build-command.sh
# 使用构建好的镜像启动容器
root@docker-1:/opt/dockerfile/nginx# docker run -d --name nginx222 -p 80:80 centos-nginx:v1
# 验证是否可以正常访问
root@docker-1:/opt/dockerfile/nginx# curl http://172.18.10.11
MyApp1
# 返回MyApp1验证成功
4.部署单机harbor并实现镜像的上传与下载
# 下载harbor离线安装包并tar -xf harbor-offline-installer-v2.3.2.tgz解压
root@docker-1:/opt/harbor-install# ls
harbor harbor-offline-installer-v2.3.2.tgz
root@docker-1:/opt/harbor-install# cd harbor/
root@docker-1:/opt/harbor-install/harbor# cp harbor.yml.tmpl harbor.yml
root@docker-1:/opt/harbor-install/harbor# vim harbor.yml #修改配置文件如下图
# 添加地址解析
root@docker-1:/opt/harbor-install/harbor# vim /etc/hosts
172.18.10.11 docker-1 harbor.magedu.com
root@docker-1:/opt/harbor-install/harbor# ./install.sh --with-trivy # 开启镜像扫描
# 安装完成后,页面访问172.18.10.11登录,账号admin/12345678
# 新建一个公开的public项目,用于上传镜像
# 配置仓库信任
root@docker-1:/opt/harbor-install/harbor# vim /etc/docker/daemon.json
{
"registry-mirrors": ["https://ekbjrpq6.mirror.aliyuncs.com"],
"insecure-registries": ["172.18.10.11","harbor.magedu.com"] # 仓库信任
}
root@docker-1:/opt/harbor-install/harbor# systemctl daemon-reload
root@docker-1:/opt/harbor-install/harbor# systemctl restart docker
root@docker-1:/opt/harbor-install/harbor# docker-compose stop
root@docker-1:/opt/harbor-install/harbor# docker-compose start
# 登录镜像仓库
root@docker-1:/opt/harbor-install/harbor# docker login harbor.magedu.com
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
root@docker-1:/opt/harbor-install/harbor# docker tag centos-nginx:v1 harbor.magedu.com/public/centos-nginx:v1 # tag修改镜像名称
root@docker-1:/opt/harbor-install/harbor# docker push harbor.magedu.com/public/centos-nginx:v1 # 上传镜像
The push refers to repository [harbor.magedu.com/public/centos-nginx]
5020ae8866af: Pushed
f409f1fdbb7d: Pushed
bafc855492ae: Pushed
0a4547b31ec1: Pushed
03b7199d1e51: Pushed
4de23cbb87a3: Pushed
16d88e7c272c: Pushed
89169d87dbe2: Pushed
v1: digest: sha256:a63929bd5c21f2907628d16dfbee4a1149274ed8b3ce67a82d1f3cdcbd71ee8d size: 1998
# 前端页面验证
5.基于systemd实现容器的CPU及内存的使用限制
docker早期使用cgroupfs进行容器的资源限制管理,然后再调用内核的cgroup进行资源限制,而kubernetes后来使用systemd直接调用cgroup对进程实现资源限制,等于绕过了docker的cgroupfs的管理,对资源限制更严格、性能更好,因此在kubernetes环境推荐使用systemd进行资源限制。
[root@docker-master ~]# vim /etc/docker/daemon.json
"exec-opts":["native.cgroupdriver=systemd"] # 添加参数
[root@docker-master ~]# systemctl daemon-reload
[root@docker-master ~]# systemctl restart docker
[root@docker-master ~]# docker pull lorel/docker-stress-ng # 下载测试镜像
1、常用内存限制参数
# -m or --memory # 限制容器可以使用的最大内存量,如果设置此选项,最小内存值为4m(4兆字节)。
[root@docker-master ~]# docker run -it --rm -m 256m --name c1 lorel/docker-stress-ng:latest --vm 2 --vm-bytes 256M
[root@docker-master ~]# docker stats # 查看资源使用情况,最多使用256m
# --memory-swap # 容器可以使用的交换分区大小,必须要在设置了物理内存限制的前提下才能设置交换分区的限制
[root@docker-master ~]# docker run -it --rm -m 256m --memory-swap 512m --name c1 lorel/docker-stress-ng:latest --vm 2 --vm-bytes 256M
[root@docker-master ~]# docker run -it --rm -m 256m --memory-swap 512m --name c1 alpine:3.16.2 sh
# --memory-swappiness # 设置容器使用交换分区的倾向性,值越高表示越倾向于使用swap分区,范围为0-100,0为能不用就不用,100为能用就用。
# --memory-reservation # 设置内存软限制,应低于-m设置的硬限制
[root@docker-master ~]# docker run -it --rm -m 256m --memory-reservation 128m --name c1 lorel/docker-stress-ng --vm 2 --vm-bytes 256M
# --oom-kill-disable # 默认情况下,发生OOM时,kernel会杀死容器内进程,但是可以使用--oom-kill-disable参数,可以禁止oom发生在指定的容器上,即仅在已设置-m/--memory选项的容器上禁用OOM,如果-m参数未配置,产生OOM时,主机为了释放内存还会杀死系统进程。
[root@docker-master ~]# docker run -it --rm -m 256m --oom-kill-disable --name c1 lorel/docker-stress-ng --vm 2 --vm-bytes 256M
2、常用CPU限制参数
# --cpus 限制cpu核数
[root@docker-master ~]# docker run -it --rm --cpus 2 --name c1 lorel/docker-stress-ng --cpu 4 --vm 4
# 将容器运行到指定的CPU上
[root@docker-master ~]# docker run -it --rm --cpus 2 --cpuset-cpus 1,3 --name c1 lorel/docker-stress-ng --cpu 4 --vm 4
# 基于--cpu-shares值(共享值)对CPU进行限制,分别启动两个容器,magedu-c1的--cpu-shares值为1000,magedu-c2的--cpu-shares为500,观察最终效果,--cpu-shares值为1000的magedu-c1的CPU利用率基本是--cpu-shares为500的magedu-c2的两倍
# 启动两个容器,一个设置为1000,一个设置为3000,此时cpu使用的比例的1:3
[root@docker-master ~]# docker run -it --rm --name c1 --cpu-shares 1000 lorel/docker-stress-ng --cpu 4 --vm 4
[root@docker-master ~]# docker run -it --rm --name c2 --cpu-shares 3000 lorel/docker-stress-ng --cpu 4 --vm 4
扩展:
1.总结镜像的分层构建流程
docker镜像基于union fs将多个目录合并挂载到一个目录供容器使用,docker只有rootfs没有内核,运行容器时使用的是宿主机的bootfs(rootfs 文件系统;boot fs 主要包含bootloader和Kernel),一个镜像有一层或者多层,每层称为一个layer,镜像可以基于其他的镜像进行重新构建,一个镜像也可以被多个容器引用,镜像是只读的,在容器内直接修改不会修改镜像,而是在镜像上的可写层进行操作,在宿主机上的镜像存储,使用相同layer的镜像只会存储一份,大大节省了空间和镜像的上传下载速度。
2.总结基于lxcfs对容器的内存及CPU的资源限制
容器内部是从/proc/cpuinfo中获取到CPU的核数,但是容器里面的/proc文件系统是物理机的,内存也是显示的宿主机的/proc/meminfo的信息,因此不准确,而lxcfs则是通过文件挂载的方式,把宿主机cgroup中关于系统的相关信息读取出来,通过docker的volume挂载给容器内部的proc系统,然后让docker内的应用读取proc中信息的时候以为就是读取的宿主机的真实的proc。
root@ubuntu:~# apt install lxcfs # 对容器实现更加准确的限制,内存、网络、磁盘IO、读写速率、磁盘空间大小等
root@docker-server1:~# docker run -it -m 256m --storage-opt size=10G centos:7.6.1810 /bin/bash
docker: Error response from daemon: --storage-opt is supported only for overlay over xfs with 'pquota' mount option.
root@ubuntu:~# cat /etc/fstab # 磁盘挂载选项
/dev/sdb /var/lib/docker xfs defaults,prjquota 0 0
# 限制内存为指定大小
docker run -it -m 256m \
-v /var/lib/lxcfs/proc/cpuinfo:/proc/cpuinfo:rw \
-v /var/lib/lxcfs/proc/diskstats:/proc/diskstats:rw \
-v /var/lib/lxcfs/proc/meminfo:/proc/meminfo:rw \
-v /var/lib/lxcfs/proc/stat:/proc/stat:rw \
-v /var/lib/lxcfs/proc/swaps:/proc/swaps:rw \
-v /var/lib/lxcfs/proc/uptime:/proc/uptime:rw \
centos:7.9.2009 /bin/bash
# 限制只能使用1核CPU并绑定至id为2的cpu核心
docker run -it --cpus 1 --cpuset-cpus "2" \
-v /var/lib/lxcfs/proc/cpuinfo:/proc/cpuinfo:rw \
-v /var/lib/lxcfs/proc/diskstats:/proc/diskstats:rw \
-v /var/lib/lxcfs/proc/meminfo:/proc/meminfo:rw \
-v /var/lib/lxcfs/proc/stat:/proc/stat:rw \
-v /var/lib/lxcfs/proc/swaps:/proc/swaps:rw \
-v /var/lib/lxcfs/proc/uptime:/proc/uptime:rw \
centos:7.9.2009 /bin/bash
# 限制磁盘IO
docker run -it -m 256m --cpus 1 --cpuset-cpus "2" \
--device-read-iops /dev/sdb:10 --device-write-iops /dev/sdb:10 \
-v /var/lib/lxcfs/proc/cpuinfo:/proc/cpuinfo:rw \
-v /var/lib/lxcfs/proc/diskstats:/proc/diskstats:rw \
-v /var/lib/lxcfs/proc/meminfo:/proc/meminfo:rw \
-v /var/lib/lxcfs/proc/stat:/proc/stat:rw \
-v /var/lib/lxcfs/proc/swaps:/proc/swaps:rw \
-v /var/lib/lxcfs/proc/uptime:/proc/uptime:rw \
centos:7.9.2009 /bin/bash
# 限制磁盘读写速率
docker run -it -m 256m --cpus 1 --device-read-bps /dev/vda:10MB --device-write-bps /dev/vda:10MB \
-v /var/lib/lxcfs/proc/cpuinfo:/proc/cpuinfo:rw \
-v /var/lib/lxcfs/proc/diskstats:/proc/diskstats:rw \
-v /var/lib/lxcfs/proc/meminfo:/proc/meminfo:rw \
-v /var/lib/lxcfs/proc/stat:/proc/stat:rw \
-v /var/lib/lxcfs/proc/swaps:/proc/swaps:rw \
-v /var/lib/lxcfs/proc/uptime:/proc/uptime:rw \
ubuntu:22.04 /bin/bash
# 只限制磁盘空间大小只加--storage-opt size=10G即可
docker run -it --storage-opt size=10G centos:7.6.1810 /bin/sh
# 同时限制内存和磁盘空间
docker run -it -m 256m --storage-opt size=10G \
-v /var/lib/lxcfs/proc/cpuinfo:/proc/cpuinfo:rw \
-v /var/lib/lxcfs/proc/diskstats:/proc/diskstats:rw \
-v /var/lib/lxcfs/proc/meminfo:/proc/meminfo:rw \
-v /var/lib/lxcfs/proc/stat:/proc/stat:rw \
-v /var/lib/lxcfs/proc/swaps:/proc/swaps:rw \
-v /var/lib/lxcfs/proc/uptime:/proc/uptime:rw \
centos:7.9.2009 /bin/bash # 同时限制内存和磁盘要挂载lxcfs