Approach 1: modify the configuration of the underlying network devices, bringing container IP addresses under their management, updating router gateways, and so on. This approach is usually combined with SDN.
Approach 2: leave the underlying network devices untouched and reuse the existing underlay network, solving cross-host container communication in one of two ways:
Overlay tunneling. Encapsulate the container's packets inside layer-3 or layer-4 packets of the host network, carry them to the target host over the existing network via IP or TCP/UDP, then decapsulate on the target host and forward to the container. Overlay tunnels include VXLAN, ipip, and the like; mainstream container networks built on overlay technology include Flannel, Weave, etc.
Host routing. Add the container networks to the host routing table, making the host the container gateway, so that routing rules forward packets to the right host and containers get layer-3 connectivity. Container networks that achieve cross-host communication through routing include Flannel host-gw, Calico, etc.
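As a minimal sketch of the host-routing idea (all addresses here are hypothetical), making host B's container subnet reachable from host A takes nothing more than one route:

# on host A: containers in 10.42.2.0/24 live on host B (192.168.1.11)
ip route add 10.42.2.0/24 via 192.168.1.11

The rest of this article digs into how real implementations of both approaches wire things up, using two helper scripts. The first, docker_netns.sh, lists Docker's network namespaces, or enters one (by namespace file name or container name) and runs a command there: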
#!/bin/bash
# docker_netns.sh: list Docker network namespaces, or run a command in one.
NAMESPACE=$1
if [[ -z $NAMESPACE ]]; then
    # No argument: list all Docker network namespaces
    ls -1 /var/run/docker/netns/
    exit 0
fi
NAMESPACE_FILE=/var/run/docker/netns/${NAMESPACE}
if [[ ! -f $NAMESPACE_FILE ]]; then
    # Not a namespace file name; try resolving it as a container name
    NAMESPACE_FILE=$(docker inspect -f "{{.NetworkSettings.SandboxKey}}" "$NAMESPACE" 2>/dev/null)
fi
if [[ ! -f $NAMESPACE_FILE ]]; then
    echo "Cannot open network namespace '$NAMESPACE': No such file or directory"
    exit 1
fi
shift
if [[ $# -lt 1 ]]; then
    echo "No command specified"
    exit 1
fi
# Execute the command inside the target network namespace
nsenter --net=${NAMESPACE_FILE} "$@"
# ./docker_netns.sh # list namespaces
4-a4a048ac67
abe31dbbc394
default
# ./docker_netns.sh busybox ip addr # Enter busybox namespace
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
354: eth0@if355: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
link/ether 02:42:c0:a8:64:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 192.168.100.2/24 brd 192.168.100.255 scope global eth0
valid_lft forever preferred_lft forever
356: eth1@if357: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 1
inet 172.18.0.2/16 brd 172.18.255.255 scope global eth1
valid_lft forever preferred_lft forever
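The second helper, find_links.sh, scans every Docker network namespace for an interface with a given index, which makes it easy to locate the other end of a veth pair: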
#!/bin/bash
# find_links.sh: find which namespace holds the interface with a given ifindex.
DOCKER_NETNS_SCRIPT=./docker_netns.sh
IFINDEX=$1
if [[ -z $IFINDEX ]]; then
    # No index given: dump the links of every namespace
    for namespace in $($DOCKER_NETNS_SCRIPT); do
        printf "\e[1;31m%s: \e[0m\n" $namespace
        $DOCKER_NETNS_SCRIPT $namespace ip -c -o link
        printf "\n"
    done
else
    # Search every namespace for a link whose index matches
    for namespace in $($DOCKER_NETNS_SCRIPT); do
        if $DOCKER_NETNS_SCRIPT $namespace ip -c -o link | grep -Pq "^$IFINDEX: "; then
            printf "\e[1;31m%s: \e[0m\n" $namespace
            $DOCKER_NETNS_SCRIPT $namespace ip -c -o link | grep -P "^$IFINDEX: "
            printf "\n"
        fi
    done
fi
# ./find_links.sh 354
abe31dbbc394:
354: eth0@if355: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP mode DEFAULT group default
link/ether 02:42:c0:a8:64:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
Deep dive into docker overlay networks part 1[1]
Deep dive into docker overlay networks part 2[2]
Deep dive into docker overlay networks part 3[3]
Node | Host IP
---|---
node-1 | 192.168.1.68
node-2 | 192.168.1.254
First create an overlay network:
docker network create -d overlay --subnet 10.20.0.0/16 overlay
docker run -d --name busybox --net overlay busybox sleep 36000
Node | Host IP | Container IP
---|---|---
node-1 | 192.168.1.68 | 10.20.0.3/16
node-2 | 192.168.1.254 | 10.20.0.2/16
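A quick cross-host connectivity check (assuming the container on node-1 is named busybox-node-1, as in the outputs below; ping output omitted):

docker exec busybox-node-1 ping -c 1 10.20.0.2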
# docker exec busybox-node-1 ip r
default via 172.18.0.1 dev eth1
10.20.0.0/16 dev eth0 scope link src 10.20.0.3
172.18.0.0/16 dev eth1 scope link src 172.18.0.2
# docker exec busybox-node-1 ip link show eth1
77: eth1@if78: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff
# ./find_links.sh 78
default:
78: vethf2de5d4@if77: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker_gwbridge state UP mode DEFAULT group default
link/ether 2e:6a:94:6a:09:c5 brd ff:ff:ff:ff:ff:ff link-netnsid 1
# brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.02427406ba1a no
docker_gwbridge 8000.0242bb868ca3 no vethf2de5d4
# iptables-save -t nat | grep -- '-A POSTROUTING'
-A POSTROUTING -s 172.18.0.0/16 ! -o docker_gwbridge -j MASQUERADE
# docker exec busybox-node-1 ip link show eth0
75: eth0@if76: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
link/ether 02:42:0a:14:00:03 brd ff:ff:ff:ff:ff:ff
# ./find_links.sh 76
1-19c5d1a7ef:
76: veth0@if75: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master br0 state UP mode DEFAULT group default
link/ether 6a:ce:89:a2:89:4a brd ff:ff:ff:ff:ff:ff link-netnsid 1
# ./docker_netns.sh 1-19c5d1a7ef ip link show veth0
76: veth0@if75: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master br0 state UP mode DEFAULT group default
link/ether 6a:ce:89:a2:89:4a brd ff:ff:ff:ff:ff:ff link-netnsid 1
# ./docker_netns.sh 1-19c5d1a7ef brctl show
bridge name bridge id STP enabled interfaces
br0 8000.6ace89a2894a no veth0
vxlan0
# ./docker_netns.sh 1-19c5d1a7ef ip -c -d link show vxlan0
74: vxlan0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master br0 state UNKNOWN mode DEFAULT group default
link/ether 96:9d:64:39:76:4e brd ff:ff:ff:ff:ff:ff link-netnsid 0 promiscuity 1
vxlan id 256 srcport 0 0 dstport 4789 proxy l2miss l3miss ttl inherit ageing 300 udpcsum noudp6zerocsumtx noudp6zerocsumrx
...
# ./docker_netns.sh 2-19c5d1a7ef ip -d -o link show vxlan0 | grep proxy
# ./docker_netns.sh 3-19c5d1a7ef ip neigh
10.20.0.3 dev vxlan0 lladdr 02:42:0a:14:00:03 PERMANENT
10.20.0.4 dev vxlan0 lladdr 02:42:0a:14:00:04 PERMANENT
Container MAC | VXLAN ID | Remote VTEP
---|---|---
02:42:0a:14:00:03 | 256 | 192.168.1.254
02:42:0a:14:00:04 | 256 | 192.168.1.245
... | ... | ...
# ./docker_netns.sh 3-19c5d1a7ef bridge fdb
...
02:42:0a:14:00:04 dev vxlan0 dst 192.168.1.245 link-netnsid 0 self permanent
02:42:0a:14:00:03 dev vxlan0 dst 192.168.1.254 link-netnsid 0 self permanent
...
weave launch --ipalloc-range 172.111.222.0/24 192.168.1.68 192.168.1.254 192.168.1.245
# node-1
docker run -d --name busybox-node-1 --net weave busybox sleep 3600
# node-2
docker run -d --name busybox-node-2 --net weave busybox sleep 3600
# node-3
docker run -d --name busybox-node-3 --net weave busybox sleep 3600
# docker exec -t -i busybox-node-$NODE ip r
default via 172.18.0.1 dev eth0
172.18.0.0/16 dev eth0 scope link src 172.18.0.2
172.111.222.0/24 dev ethwe0 scope link src 172.111.222.128
224.0.0.0/4 dev ethwe0 scope link
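Note the extra 224.0.0.0/4 route on ethwe0: unlike the other networks discussed here, Weave supports multicast [4].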
# ./find_links.sh 14
default:
14: vethwl816281577@if13: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1376 qdisc noqueue master weave state UP mode DEFAULT group default
link/ether de:12:50:59:f0:d9 brd ff:ff:ff:ff:ff:ff link-netnsid 0
# brctl show weave
bridge name bridge id STP enabled interfaces
weave 8000.d2939d07704b no vethwe-bridge
vethwl816281577
# ip link show vethwe-bridge
9: vethwe-bridge@vethwe-datapath: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1376 qdisc noqueue master weave state UP mode DEFAULT group default
link/ether 0e:ee:97:bd:f6:25 brd ff:ff:ff:ff:ff:ff
# ip -d link show vethwe-datapath
8: vethwe-datapath@vethwe-bridge: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1376 qdisc noqueue master datapath state UP mode DEFAULT group default
link/ether f6:74:e9:0b:30:6d brd ff:ff:ff:ff:ff:ff promiscuity 1
veth
openvswitch_slave addrgenmode eui64 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
# ovs-vsctl show
96548648-a6df-4182-98da-541229ef7b63
ovs_version: "2.9.2"
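ovs-vsctl shows an essentially empty configuration because Weave's fast datapath programs the kernel Open vSwitch datapath directly over netlink rather than going through ovs-vswitchd; ovs-dpctl, which queries the kernel module itself, does see it: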
# ovs-dpctl show
system@datapath:
lookups: hit:109 missed:1508 lost:3
flows: 1
masks: hit:1377 total:1 hit/pkt:0.85
port 0: datapath (internal)
port 1: vethwe-datapath
port 2: vxlan-6784 (vxlan:packet_type=ptap)
# ip -d link show vxlan-6784
10: vxlan-6784: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 65535 qdisc noqueue
master datapath state UNKNOWN mode DEFAULT group default qlen 1000
link/ether d2:21:db:c1:9b:28 brd ff:ff:ff:ff:ff:ff promiscuity 1
vxlan id 0 srcport 0 0 dstport 6784 nolearning ttl inherit ageing 300 udpcsum noudp6zerocsumtx udp6zerocsumrx external
openvswitch_slave addrgenmode eui64 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
Node | Host IP | Assigned subnet
---|---|---
node-1 | 192.168.1.68 | 40.15.43.0/24
node-2 | 192.168.1.254 | 40.15.26.0/24
node-3 | 192.168.1.245 | 40.15.56.0/24
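Each node's lease also lands in a local env file written by flanneld (the path below is the common default, and the values are inferred from the tables in this article, so treat this as illustrative):

# cat /run/flannel/subnet.env    # on node-1
FLANNEL_NETWORK=40.15.0.0/16
FLANNEL_SUBNET=40.15.43.1/24
FLANNEL_MTU=8951

The Docker bridge (--bip) is configured from FLANNEL_SUBNET, which is why the containers below get addresses from their node's own /24.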
Create a BusyBox container on each of the three nodes wired into the Flannel network:
docker run -d --name busybox busybox:latest sleep 36000
Node | Host IP | Container IP
---|---|---
node-1 | 192.168.1.68 | 40.15.43.2/24
node-2 | 192.168.1.254 | 40.15.26.2/24
node-3 | 192.168.1.245 | 40.15.56.2/24
# ./docker_netns.sh busybox ip -d -c link
416: eth0@if417: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 8951 qdisc noqueue state UP mode DEFAULT group default
link/ether 02:42:28:0f:2b:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0 promiscuity 0
veth addrgenmode eui64 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
# ./find_links.sh 417
default:
417: veth1cfe340@if416: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 8951 qdisc noqueue master docker0 state UP mode DEFAULT group default
link/ether 26:bd:de:86:21:78 brd ff:ff:ff:ff:ff:ff link-netnsid 0
# brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.0242d6f8613e no veth1cfe340
vethd1fae9d
docker_gwbridge 8000.024257f32054 no
# ip r
default via 192.168.1.1 dev eth0 proto dhcp src 192.168.1.68 metric 100
40.15.26.0/24 via 40.15.26.0 dev flannel.1 onlink
40.15.43.0/24 dev docker0 proto kernel scope link src 40.15.43.1
40.15.56.0/24 via 40.15.56.0 dev flannel.1 onlink
...
413: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 8951 qdisc noqueue state UNKNOWN mode DEFAULT group default
link/ether 0e:08:23:57:14:9a brd ff:ff:ff:ff:ff:ff promiscuity 0
vxlan id 1 local 192.168.1.68 dev eth0 srcport 0 0 dstport 8472 nolearning ttl inherit ageing 300
udpcsum noudp6zerocsumtx noudp6zerocsumrx addrgenmode eui64 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
# bridge fdb | grep flannel.1
4e:55:ee:0a:90:38 dev flannel.1 dst 192.168.1.245 self permanent
da:17:1b:07:d3:70 dev flannel.1 dst 192.168.1.254 self permanent
# for subnet in $(etcdctl ls /coreos.com/network/subnets); do etcdctl get $subnet;done
{"PublicIP":"192.168.1.68","BackendType":"vxlan","BackendData":{"VtepMAC":"0e:08:23:57:14:9a"}}
{"PublicIP":"192.168.1.254","BackendType":"vxlan","BackendData":{"VtepMAC":"da:17:1b:07:d3:70"}}
{"PublicIP":"192.168.1.245","BackendType":"vxlan","BackendData":{"VtepMAC":"4e:55:ee:0a:90:38"}}
Node | Host IP | Container IP
---|---|---
node-1 | 192.168.1.68 | 40.15.43.2/24
node-2 | 192.168.1.254 | 40.15.26.2/24
node-3 | 192.168.1.245 | 40.15.56.2/24
# ip r
default via 192.168.1.1 dev eth0 proto dhcp src 192.168.1.68 metric 100
40.15.26.0/24 via 192.168.1.254 dev eth0
40.15.43.0/24 dev docker0 proto kernel scope link src 40.15.43.1
40.15.56.0/24 via 192.168.1.245 dev eth0
...
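Each of those flannel host-gw routes is nothing more than a next hop via the peer host; the manual equivalent for node-2's subnet would be:

ip route add 40.15.26.0/24 via 192.168.1.254

Since the peer host's IP must be directly reachable as a next hop, host-gw requires all nodes to sit on the same layer-2 network.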
node-1: 192.168.1.68/24
node-2: 192.168.1.254/24
node-3: 192.168.1.245/24
node-4: 192.168.0.33/24
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "VisualEditor0",
      "Effect": "Allow",
      "Action": [
        "ec2:DescribeInstances",
        "ec2:CreateRoute",
        "ec2:DeleteRoute",
        "ec2:ModifyInstanceAttribute",
        "ec2:DescribeRouteTables",
        "ec2:ReplaceRoute"
      ],
      "Resource": "*"
    }
  ]
}
iptables -I FORWARD --dest 40.15.0.0/16 -j ACCEPT
iptables -I FORWARD --src 40.15.0.0/16 -j ACCEPT
# etcdctl get /coreos.com/network/config | jq .
{
  "Network": "40.15.0.0/16",
  "Backend": {
    "Type": "aws-vpc"
  }
}
Node | Host IP | Assigned subnet
---|---|---
node-1 | 192.168.1.68 | 40.15.16.0/24
node-2 | 192.168.1.254 | 40.15.64.0/24
node-3 | 192.168.1.245 | 40.15.13.0/24
node-4 | 192.168.0.33 | 40.15.83.0/24
Node | Host IP | Container IP
---|---|---
node-1 | 192.168.1.68 | 40.15.16.2/24
node-2 | 192.168.1.254 | 40.15.64.2/24
node-3 | 192.168.1.245 | 40.15.13.2/24
node-4 | 192.168.0.33 | 40.15.83.2/24
# etcdctl get /coreos.com/network/config | jq .
{
  "Network": "40.15.0.0/16",
  "Backend": {
    "Type": "aws-vpc",
    "RouteTableID": [
      "rtb-0686cdc9012674692",
      "rtb-054dfd5f3e47102ae"
    ]
  }
}
The biggest advantage of using flannel AWS-VPC backend is that the AWS knows about that IP. That makes it possible to set up ELB to route directly to that container.
# calicoctl get ipPool -o yaml
- apiVersion: v1
  kind: ipPool
  metadata:
    cidr: 197.19.0.0/16
  spec:
    ipip:
      enabled: true
      mode: cross-subnet
    nat-outgoing: true
- apiVersion: v1
  kind: ipPool
  metadata:
    cidr: fd80:24e2:f998:72d6::/64
  spec: {}
for host in $(etcdctl --endpoints $ENDPOINTS ls /calico/ipam/v2/host/); do
    etcdctl --endpoints $ENDPOINTS ls $host/ipv4/block | awk -F '/' '{sub(/-/,"/",$NF)}{print $6,$NF}'
done | sort
int32bit-docker-1 197.19.38.128/26
int32bit-docker-2 197.19.186.192/26
int32bit-docker-3 197.19.26.0/26
Node | Host IP | Container IP
---|---|---
node-1 | 192.168.1.68 | 197.19.38.136
node-2 | 192.168.1.254 | 197.19.186.197
node-3 | 192.168.1.245 | 197.19.26.5
# ./docker_netns.sh busybox ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: tunl0@NONE: <NOARP> mtu 1480 qdisc noop state DOWN group default qlen 1000
link/ipip 0.0.0.0 brd 0.0.0.0
14: cali0@if15: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 197.19.38.136/32 brd 197.19.38.136 scope global cali0
valid_lft forever preferred_lft forever
# ./docker_netns.sh busybox ip r
default via 169.254.1.1 dev cali0
169.254.1.1 dev cali0 scope link
All containers have the same MAC address, ee:ee:ee:ee:ee:ee.
The gateway address is 169.254.1.1, yet no matter how hard I searched through every namespace, I never found that IP anywhere.
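The answer is proxy ARP: Calico enables it on the host side of every veth pair, so the host answers ARP requests for 169.254.1.1 with the cali interface's own MAC, and the gateway IP never has to exist anywhere. One way to verify, using an interface name from the host routes below:

# cat /proc/sys/net/ipv4/conf/calia2656637189/proxy_arp   # prints 1 when enabled
1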
# ip r | grep 197.19
197.19.26.0/26 via 192.168.1.245 dev eth0 proto bird
blackhole 197.19.38.128/26 proto bird
197.19.38.139 dev calia2656637189 scope link
197.19.38.140 dev calie889861df72 scope link
197.19.186.192/26 via 192.168.1.254 dev eth0 proto bird
# docker network ls | grep calico
ad7ca8babf01 calico-net-1 calico global
5eaf3984f69d calico-net-2 calico global
docker run -d --name busybox-3 --net calico-net-2 busybox sleep 36000
# docker exec busybox-3 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: tunl0@NONE: <NOARP> mtu 1480 qdisc noop qlen 1000
link/ipip 0.0.0.0 brd 0.0.0.0
24: cali0@if25: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff
inet 197.19.38.141/32 brd 197.19.38.141 scope global cali0
valid_lft forever preferred_lft forever
# ip r | grep 197.19
197.19.26.0/26 via 192.168.1.245 dev eth0 proto bird
blackhole 197.19.38.128/26 proto bird
197.19.38.139 dev calia2656637189 scope link
197.19.38.140 dev calie889861df72 scope link
197.19.38.141 dev calib12b038e611 scope link
197.19.186.192/26 via 192.168.1.254 dev eth0 proto bird
# iptables-save -t filter | grep -- '-A FORWARD'
-A FORWARD -m comment --comment "cali:wUHhoiAYhphO9Mso" -j cali-FORWARD
...
# iptables-save -t filter | grep -- '-A cali-FORWARD'
-A cali-FORWARD -i cali+ -m comment --comment "cali:X3vB2lGcBrfkYquC" -j cali-from-wl-dispatch
-A cali-FORWARD -o cali+ -m comment --comment "cali:UtJ9FnhBnFbyQMvU" -j cali-to-wl-dispatch
-A cali-FORWARD -i cali+ -m comment --comment "cali:Tt19HcSdA5YIGSsw" -j ACCEPT
-A cali-FORWARD -o cali+ -m comment --comment "cali:9LzfFCvnpC5_MYXm" -j ACCEPT
...
# iptables-save -t filter | grep -- '-A cali-to-wl-dispatch'
-A cali-to-wl-dispatch -o calia2656637189 -m comment --comment "cali:TFwr8sfMnFH3BUla" -g cali-tw-calia2656637189
-A cali-to-wl-dispatch -o calib12b038e611 -m comment --comment "cali:ZbRb0ozg-GGeUfRA" -g cali-tw-calib12b038e611
-A cali-to-wl-dispatch -o calie889861df72 -m comment --comment "cali:5OoGv50NzX0sKdMg" -g cali-tw-calie889861df72
-A cali-to-wl-dispatch -m comment --comment "cali:RvicCiwAy9cIEAKA" -m comment --comment "Unknown interface" -j DROP
# iptables-save -t filter | grep -- '-A cali-tw-calia2656637189'
-A cali-tw-calia2656637189 -m comment --comment "cali:259EHpBvnovN8_q6" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A cali-tw-calia2656637189 -m comment --comment "cali:YLokMEiVkZggfg9R" -m conntrack --ctstate INVALID -j DROP
-A cali-tw-calia2656637189 -m comment --comment "cali:pp8a6fGxqaALtRK5" -j MARK --set-xmark 0x0/0x1000000
-A cali-tw-calia2656637189 -m comment --comment "cali:bgw2sCtlIfZjhXLA" -j cali-pri-calico-net-1
-A cali-tw-calia2656637189 -m comment --comment "cali:1Z2NvhoS27pP03Ll" -m comment --comment "Return if profile accepted" -m mark --mark 0x1000000/0x1000000 -j RETURN
-A cali-tw-calia2656637189 -m comment --comment "cali:mPb8hORsTXeVt7yC" -m comment --comment "Drop if no profiles matched" -j DROP
0x1000000: the verdict bit; 1 means accept, the default 0 means deny.
0x2000000: whether the packet has already been through policy rule evaluation; 1 means it has.
0x4000000: the packet's origin; 1 means it came from a host endpoint.
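A simple way to watch these rules at work is to follow the packet counters on a profile chain while generating traffic between containers (chain name taken from the output above):

# watch -n1 'iptables -t filter -nvL cali-pri-calico-net-1'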
# iptables-save -t filter | grep -- '-A cali-pri-calico-net-1'
-A cali-pri-calico-net-1 -m comment --comment "cali:Gvse2HBGxQ9omCdo" -m set --match-set cali4-s:VFoIKKR-LOG_UuTlYqcKubo src -j MARK --set-xmark 0x1000000/0x1000000
-A cali-pri-calico-net-1 -m comment --comment "cali:0vZpvvDd_5bT7g_k" -m mark --mark 0x1000000/0x1000000 -j RETURN
# ipset list cali4-s:VFoIKKR-LOG_UuTlYqcKubo
Name: cali4-s:VFoIKKR-LOG_UuTlYqcKubo
Type: hash:ip
Revision: 4
Header: family inet hashsize 1024 maxelem 1048576
Size in memory: 280
References: 1
Number of entries: 4
Members:
197.19.38.143
197.19.26.7
197.19.186.199
197.19.38.144
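To see the profile in action, start one more container on node-4 attached to calico-net-1 and ping one of the ipset members: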
docker run -d --name busybox-node-4 --net calico-net-1 busybox sleep 36000
docker exec busybox-node-4 ping -c 1 -w 1 197.19.38.144
PING 197.19.38.144 (197.19.38.144): 56 data bytes
64 bytes from 197.19.38.144: seq=0 ttl=62 time=0.539 ms
--- 197.19.38.144 ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 0.539/0.539/0.539 ms
# ip r | grep 197.19
197.19.26.0/26 via 192.168.1.245 dev eth0 proto bird
blackhole 197.19.38.128/26 proto bird
197.19.38.142 dev cali459cc263d36 scope link
197.19.38.143 dev cali6d0015b0c71 scope link
197.19.38.144 dev calic8e5fab61b1 scope link
197.19.65.128/26 via 192.168.0.33 dev tunl0 proto bird onlink
197.19.186.192/26 via 192.168.1.254 dev eth0 proto bird
# ip -d link show tunl0
5: tunl0@NONE: <NOARP,UP,LOWER_UP> mtu 1440 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/ipip 0.0.0.0 brd 0.0.0.0 promiscuity 0
ipip any remote any local any ttl inherit nopmtudisc addrgenmode eui64 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
# ip -d tunnel show
tunl0: any/ip remote any local any ttl inherit nopmtudisc
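Note that only node-4's block (next hop 192.168.0.33, a different subnet) is routed over tunl0 with onlink; blocks hosted on the 192.168.1.0/24 nodes still go straight out eth0. That is exactly the ipip mode: cross-subnet behaviour from the ipPool shown earlier: IPIP encapsulation kicks in only when the next hop crosses a subnet boundary.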
    vm        Container     whatever
     |            |            |
   tapX         tapY         tapZ
     |            |            |
     |            |            |
   qbrX         qbrY         qbrZ
     |            |            |
---------------------------------------------
|                br-int(OVS)                |
---------------------------------------------
                     |
---------------------------------------------
|                br-tun(OVS)                |
---------------------------------------------
[1] https://blog.d2si.io/2017/04/25/deep-dive-into-docker-overlay-networks-part-1/
[2] https://blog.d2si.io/2017/05/09/deep-dive-into-docker-overlay-networks-part-2/
[3] https://blog.d2si.io/2017/08/20/deep-dive-3-into-docker-overlay-networks-part-3/
[4] https://www.weave.works/use-cases/multicast-networking/