查看docker网络类型
docker network ls
# output
NETWORK ID NAME DRIVER SCOPE
e486a3825265 bridge bridge local
9e40fc4ee6bd docker_default bridge local
d2224c568d45 host host local
02d31d76556f none null local
查看docker网络详情
docker network inspect 网络名称
docker network inspect bridge
容器通信原理
启动两个centos容器,分别命名c1,c2
docker run -dit --name=c1 centos:7
docker run -dit --name=c2 centos:7
查看下c1的容器信息
docker inspect c1
# 主要output
"Networks": {
"bridge": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "e486a38252658603b7c4aad36215728c3e1f9d916d518f46b9980a3925832740",
"EndpointID": "608457db1ffb72d53b186a9d07f4cb4685246b9942dc5655572b002af277232c",
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:11:00:02",
"DriverOpts": null
}
}
查看下c2的容器信息
docker inspect c2
# 主要output
"Networks": {
"bridge": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "e486a38252658603b7c4aad36215728c3e1f9d916d518f46b9980a3925832740",
"EndpointID": "a98d0692e6c4229c1bae4161841d0f98885f68d17a2abe5c60c32d9975b91d72",
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.3",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:11:00:03",
"DriverOpts": null
}
}
查看下主机的ip信息,结合查看c1,c2的信息
c1,c2 的Gateway都是 docker0(172.17.0.1)
且看到veth 10530,10532 是和docker0连接的
ip a
# output
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:16:3e:20:ab:0b brd ff:ff:ff:ff:ff:ff
inet 172.24.58.134/20 brd 172.24.63.255 scope global dynamic noprefixroute eth0
valid_lft 309209957sec preferred_lft 309209957sec
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:5f:d3:10:2a brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
4: br-9e40fc4ee6bd: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:9f:55:35:c7 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.1/16 brd 172.18.255.255 scope global br-9e40fc4ee6bd
valid_lft forever preferred_lft forever
10530: veth6de333f@if10529: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether 26:5c:f5:b3:7b:c5 brd ff:ff:ff:ff:ff:ff link-netnsid 0
10532: veth395059a@if10531: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether aa:88:ff:07:7a:b9 brd ff:ff:ff:ff:ff:ff link-netnsid 3
查看下docker bridge网络信息
也能看到c1,c2和docker0连接
docker network inspect bridge
#output
[
{
"Name": "bridge",
"Id": "e486a38252658603b7c4aad36215728c3e1f9d916d518f46b9980a3925832740",
"Created": "2022-05-13T18:55:08.822310251+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"4a118b25554d7b37b39eedaef2c12e5e68fe439a95198a64f18f555a5bf19cf4": {
"Name": "c2",
"EndpointID": "a98d0692e6c4229c1bae4161841d0f98885f68d17a2abe5c60c32d9975b91d72",
"MacAddress": "02:42:ac:11:00:03",
"IPv4Address": "172.17.0.3/16",
"IPv6Address": ""
},
"c3ae0e6c3d063b3a1085b34af00176a4f01582d4c136fade58702a296e5f4c15": {
"Name": "c1",
"EndpointID": "608457db1ffb72d53b186a9d07f4cb4685246b9942dc5655572b002af277232c",
"MacAddress": "02:42:ac:11:00:02",
"IPv4Address": "172.17.0.2/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
},
"Labels": {}
}
]
查看容器c1内网络信息
可以看到veth 10529和10530是一对
# 如果没有输出信息,说明容器内部没有安装iproute
# 进入容器内部 通过 yum install -y iproute 安装
docker exec -it c1 ip a
# output
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
10529: eth0@if10530: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
查看容器c2内网络信息
可以看到 10531和10532是一对
docker exec -it c2 ip a
# output
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
10531: eth0@if10532: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.3/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
在容器c1中ping c2
在容器c2中ping c1
# c1中 ping c2
[root@NameNode ~]# docker exec -it c1 ping 172.17.0.3
PING 172.17.0.3 (172.17.0.3) 56(84) bytes of data.
64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.082 ms
# c2中 ping c1
[root@NameNode ~]# docker exec -it c2 ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.077 ms
容器之间能够通信,是因为每个容器都通过veth和docker0连接
docker0是通过NAT和eth0 相通信的
所以容器也能访问网络
docker run --link 参数
这个参数能让容器名称代替其ip进行通信
# 启动c1,c2容器
docker run -dit --name=c1 centos:7
# 使用 --link 容器名
docker run -dit --name=c2 --link c1 centos:7
# c1 ping c2容器名
[root@NameNode ~]# docker exec -it c1 ping c2
ping: c2: Name or service not known
# c2 ping c1容器名
# 这就是 --link的作用
[root@NameNode ~]# docker exec -it c2 ping c1
PING c1 (172.17.0.2) 56(84) bytes of data.
64 bytes from c1 (172.17.0.2): icmp_seq=1 ttl=64 time=0.053 ms
创建网络
除了默认的几个docker网络,我们还可以创建自己的docker网络
# 创建 my-bridge网络
docker network create -d bridge my-bridge
# 看一下有哪些网络
docker network ls
# output
NETWORK ID NAME DRIVER SCOPE
e486a3825265 bridge bridge local
9e40fc4ee6bd docker_default bridge local
d2224c568d45 host host local
36d2950f85af my-bridge bridge local
02d31d76556f none null local
让容器连接我们新创建的网络
# 让容器 c1 连接 my-bridge
docker network connect my-bridge c1
# 让容器 c2 连接 my-bridge
docker network connect my-bridge c2
# 查看下 my-bridge的连接情况
docker network inspect my-bridge
#output
"Containers": {
"45c39251af7990720c13b6b024afd893c69eb9f4deb880b8d37c4c9e4b128301": {
"Name": "c2",
"EndpointID": "ad8c5142c1f237ac5eeb3b59b99ce6870d12ce3bb20558289641d1f4c2f71cf9",
"MacAddress": "02:42:ac:13:00:03",
"IPv4Address": "172.19.0.3/16",
"IPv6Address": ""
},
"741bb4988c429e4dbaa5ccca3b9287ba733053f7866b86dad43da8a616e3c9f6": {
"Name": "c1",
"EndpointID": "8b8fa9dc3704de8a7c045d75989de622ace9d02243ff0e2a1c46bcd3f75a1b12",
"MacAddress": "02:42:ac:13:00:02",
"IPv4Address": "172.19.0.2/16",
"IPv6Address": ""
}
},
容器c1,c2成功连接到my-bridge
而且你会发现一个神奇的东西,我们之前只给c2容器指定了link c1,使其能够通过ping c1 和c1通信
自从连接了my-bridge后,c1也能通过ping c2 和 c2通信了
说明自己创建的bridge网络自带容器名的DNS解析(相当于自动link),容器间可以直接用容器名互相通信
[root@NameNode ~]# docker exec -it c1 ping c2
PING c2 (172.19.0.3) 56(84) bytes of data.
64 bytes from c2.my-bridge (172.19.0.3): icmp_seq=1 ttl=64 time=0.058 ms
启动容器时指定网络
通过 --network 指定网络
bridge 网络是常用的
none 网络只能自己玩自己的
host 网络和主机网络一致,但是容易出现端口冲突
# 通过 --network 网络名 指定容器网络
docker run -dit --name=c3 --network none centos:7