自定义网络模式下,docker提供了三种自定义网络驱动,分别为:
bridge:与原生网络的bridge不同之处在于自定义的bridge网络提供了DNS解析功能,针对单机
overlay、macvlan用于创建跨主机网络,经常应用于集群当中,针对于多机
自定义网络构建
[root@server1 ~]# docker network create -d bridge my_net1 ##创建网络my_net1,-d:指定网络类型,默认为bridge
0f283adad81f9fc1c9eed99dc13332402643a9eaf02bcca98344602fcb0e06bb
[root@server1 ~]# docker network ls ##查看网络是否创建成功
NETWORK ID NAME DRIVER SCOPE
a3b785379510 bridge bridge local
66a8ce5625c5 host host local
0f283adad81f my_net1 bridge local
ebc2c1a28d75 none null local
[root@server1 ~]# docker network inspect my_net1 ##查看自定义网络的信息
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "172.18.0.0/16",
"Gateway": "172.18.0.1" ##网关地址为:172.18.0.1,由此可知分配的子网网段也是单调递增形式(172.17→172.18)
}
]
[root@server1 ~]# docker run -it --name vm1 --network my_net1 ubuntu ##以自定义网络创建容器,
root@e29f0f44d8c5:/# ip addr ##查看ip,分配ip为172.18.0.2
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
5: eth0@if6: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.2/16 brd 172.18.255.255 scope global eth0
valid_lft forever preferred_lft forever
自定义子网网段
(1)自定义网络网段
[root@server1 ~]# docker network create -d bridge --subnet 172.22.0.0/24 --gateway 172.22.0.1 my_net2
64ecaf807e6484fad020f9e6cb4a58f8b726383c4cbd0319bdffd8fbd6975a04
[root@server1 ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
a3b785379510 bridge bridge local
66a8ce5625c5 host host local
0f283adad81f my_net1 bridge local
64ecaf807e64 my_net2 bridge local
ebc2c1a28d75 none null local
[root@server1 ~]# docker run -it --name vm2 --network my_net2 ubuntu ##以my_net2创建容器
root@f591a5cfe41c:/# ip addr ##查看ip,分配地址为:172.22.0.2,容器内地址呈单调递增形式
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:16:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.22.0.2/24 brd 172.22.0.255 scope global eth0
valid_lft forever preferred_lft forever
(2)自定义容器的ip
[root@server1 ~]# docker run -it --name vm3 --network my_net2 --ip 172.22.0.10 ubuntu ##自定义分配容器ip为172.22.0.10
root@69dc1aff58db:/# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
10: eth0@if11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:16:00:0a brd ff:ff:ff:ff:ff:ff
inet 172.22.0.10/24 brd 172.22.0.255 scope global eth0
valid_lft forever preferred_lft forever
root@69dc1aff58db:/# ping 172.22.0.2 ##与同一网络my_net2创建的容器可以实现通信
PING 172.22.0.2 (172.22.0.2) 56(84) bytes of data.
64 bytes from 172.22.0.2: icmp_seq=1 ttl=64 time=0.099 ms
64 bytes from 172.22.0.2: icmp_seq=2 ttl=64 time=0.051 ms
^C
--- 172.22.0.2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.051/0.075/0.099/0.024 ms
root@69dc1aff58db:/# ping 172.18.0.2 ##与不同网络my_net1创建的容器无法进行通信
PING 172.18.0.2 (172.18.0.2) 56(84) bytes of data.
^C
--- 172.18.0.2 ping statistics ---
4 packets transmitted, 0 received, 100% packet loss, time 2999ms
原因是docker通过iptables的DOCKER-ISOLATION规则将不同网桥双向隔离,因此桥接到不同网桥(网段)的容器无法实现通信
[root@server1 ~]# brctl show
bridge name bridge id STP enabled interfaces
br-0f283adad81f 8000.024250685ac8 no vethc776773 ##my_net1桥接1个虚拟网卡(veth)
br-64ecaf807e64 8000.02424cef9619 no veth0d9cbce ##my_net2桥接两个虚拟网卡(veth)
veth39d5730
docker0 8000.0242c447dc14 no ##docker0无桥接
[root@server1 ~]# iptables -S ##查看防火墙机制,数据被双向隔离,则无法实现通信
-A DOCKER-ISOLATION-STAGE-2 -o br-64ecaf807e64 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o br-0f283adad81f -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
不同网桥的容器实现通信功能
给容器vm1再添加一块网卡并桥接到my_net2上,使vm1同时拥有两块网卡(分属my_net1和my_net2)
[root@server1 ~]# docker network connect my_net2 vm1 ##给my_net1创建的容器vm1添加一块桥接到my_net2的虚拟网卡
[root@server1 ~]# docker start vm1 ##打开容器
vm1
[root@server1 ~]# docker container attach vm1
root@e29f0f44d8c5:/#
root@e29f0f44d8c5:/# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
12: eth0@if13: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.2/16 brd 172.18.255.255 scope global eth0
valid_lft forever preferred_lft forever
14: eth1@if15: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default ##新添加的虚拟网卡(桥接my_net2)分配到的地址:172.22.0.3/24
link/ether 02:42:ac:16:00:03 brd ff:ff:ff:ff:ff:ff
inet 172.22.0.3/24 brd 172.22.0.255 scope global eth1
valid_lft forever preferred_lft forever
root@e29f0f44d8c5:/# ping vm2 ##与容器vm2实现通信
PING vm2 (172.22.0.2) 56(84) bytes of data.
64 bytes from vm2.my_net2 (172.22.0.2): icmp_seq=1 ttl=64 time=0.072 ms
64 bytes from vm2.my_net2 (172.22.0.2): icmp_seq=2 ttl=64 time=0.049 ms
^C
--- vm2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.049/0.060/0.072/0.013 ms
root@e29f0f44d8c5:/# ping vm3 ##与容器vm3实现通信
PING vm3 (172.22.0.10) 56(84) bytes of data.
64 bytes from vm3.my_net2 (172.22.0.10): icmp_seq=1 ttl=64 time=0.061 ms
64 bytes from vm3.my_net2 (172.22.0.10): icmp_seq=2 ttl=64 time=0.074 ms
64 bytes from vm3.my_net2 (172.22.0.10): icmp_seq=3 ttl=64 time=0.046 ms
^C
--- vm3 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.046/0.060/0.074/0.013 ms