1. Linux network interfaces
1.1. View network interfaces
- ip link show
- ip a
- ls /sys/class/net
1.2. Configure a network interface
- Configure the IP address by editing the interface configuration file (a sample file is shown after this list)
vi /etc/sysconfig/network-scripts/ifcfg-eth0
- Configure the IP address with the ip command
# Add an IP address
ip addr add 10.0.2.16/24 dev eth0
# Delete an IP address
ip addr delete 10.0.2.16/24 dev eth0
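For the configuration-file method above, a minimal static-IP ifcfg-eth0 might look like the sketch below; the address, prefix, gateway, and DNS values are placeholder assumptions and must be adapted to your environment.
# /etc/sysconfig/network-scripts/ifcfg-eth0 (example values)
TYPE=Ethernet
BOOTPROTO=static
DEVICE=eth0
NAME=eth0
ONBOOT=yes
IPADDR=10.0.2.16
PREFIX=24
GATEWAY=10.0.2.1
DNS1=8.8.8.8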
1.3. Restart the network service
systemctl restart network
1.4. Bring an interface up / down
ifup/ifdown eth0
or
ip link set eth0 up/down
2. Network Namespaces
On Linux, network isolation is implemented with network namespaces: each network namespace has its own independent network stack, and different namespaces are isolated from one another.
2.1. List network namespaces
ip netns list
2.2. Create ns1
ip netns add ns1
2.3. Delete ns1 (shown for reference only; the following steps assume ns1 still exists)
ip netns delete ns1
2.4. View the interfaces inside ns1
[root@localhost ~]# ip netns exec ns1 ip a
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2.5. Bring up the lo interface inside ns1
[root@localhost ~]# ip netns exec ns1 ifup lo
[root@localhost ~]# ip netns exec ns1 ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
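Since ifup relies on the distribution's network scripts, the loopback interface can also be brought up directly with the ip command, independent of those scripts:
ip netns exec ns1 ip link set lo up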
2.6. Create ns2
ip netns add ns2
2.7. Connect ns1 and ns2 with a virtual Ethernet (veth) pair
A veth pair behaves like a virtual network cable: whatever is sent into one end comes out of the other end.
ip link add veth-ns1 type veth peer name veth-ns2
2.8. View the links on the host
[root@localhost ~]# ip link
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 52:54:00:8a:fe:e6 brd ff:ff:ff:ff:ff:ff
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 08:00:27:e7:97:fa brd ff:ff:ff:ff:ff:ff
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
link/ether 02:42:98:73:c2:9c brd ff:ff:ff:ff:ff:ff
7: veth-ns2@veth-ns1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 3a:76:e5:f3:7d:16 brd ff:ff:ff:ff:ff:ff
8: veth-ns1@veth-ns2: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 1e:4b:3f:29:08:28 brd ff:ff:ff:ff:ff:ff
2.9. Move veth-ns1 into ns1 and veth-ns2 into ns2
ip link set veth-ns1 netns ns1
ip link set veth-ns2 netns ns2
2.10. View the links on the host again (the veth pair no longer appears on the host)
[root@localhost ~]# ip link
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 52:54:00:8a:fe:e6 brd ff:ff:ff:ff:ff:ff
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 08:00:27:e7:97:fa brd ff:ff:ff:ff:ff:ff
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
link/ether 02:42:98:73:c2:9c brd ff:ff:ff:ff:ff:ff
2.11. View the links inside ns1
[root@localhost ~]# ip netns exec ns1 ip link
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
8: veth-ns1@if7: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 1e:4b:3f:29:08:28 brd ff:ff:ff:ff:ff:ff link-netnsid 1
2.12. View the links inside ns2
[root@localhost ~]# ip netns exec ns2 ip link
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
7: veth-ns2@if8: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 3a:76:e5:f3:7d:16 brd ff:ff:ff:ff:ff:ff link-netnsid
2.13. Assign IP addresses to veth-ns1 and veth-ns2
ip netns exec ns1 ip addr add 192.168.0.10/24 dev veth-ns1
ip netns exec ns2 ip addr add 192.168.0.11/24 dev veth-ns2
2.14. Bring up veth-ns1 and veth-ns2
ip netns exec ns1 ip link set veth-ns1 up
ip netns exec ns2 ip link set veth-ns2 up
2.15. Ping each namespace from the other
[root@localhost ~]# ip netns exec ns2 ping 192.168.0.10
PING 192.168.0.10 (192.168.0.10) 56(84) bytes of data.
64 bytes from 192.168.0.10: icmp_seq=1 ttl=64 time=0.082 ms
64 bytes from 192.168.0.10: icmp_seq=2 ttl=64 time=0.032 ms
[root@localhost ~]# ip netns exec ns1 ping 192.168.0.11
PING 192.168.0.11 (192.168.0.11) 56(84) bytes of data.
64 bytes from 192.168.0.11: icmp_seq=1 ttl=64 time=0.059 ms
64 bytes from 192.168.0.11: icmp_seq=2 ttl=64 time=0.074 ms
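When the experiment is finished, deleting the two namespaces is enough to clean up; removing a namespace destroys the veth end inside it, which also removes its peer:
ip netns delete ns1
ip netns delete ns2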
3. Docker network configuration
3.1. Create a network
docker network create tomcat-net
# Specify the subnet
docker network create --subnet=192.168.10.0/24 tomcat-net
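When creating the network, the driver and gateway can also be specified explicitly; the gateway address below is only an example value:
docker network create --driver bridge --subnet 192.168.10.0/24 --gateway 192.168.10.1 tomcat-net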
3.2. List networks
[root@localhost ~]# docker network ls
NETWORK ID     NAME         DRIVER    SCOPE
79fd442d27d8   bridge       bridge    local
27ea3f49c777   host         host      local
6316d31e9bc1   none         null      local
79576a960ecb   tomcat-net   bridge    local
3.3. Inspect tomcat-net
[root@localhost ~]# docker network inspect tomcat-net
[
    {
        "Name": "tomcat-net",
        "Id": "df1985fffde2e33194b1d44b7e7acef93cd2235ac217e38e7bba4fa2627f9f17",
        "Created": "2021-03-06T15:38:40.717770312Z",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": {},
            "Config": [
                {
                    "Subnet": "192.168.10.0/24"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {},
        "Options": {},
        "Labels": {}
    }
]
3.4. Create a Tomcat container attached to the tomcat-net network
[root@localhost ~]# docker run -d --name my-tomcat --network tomcat-net tomcat:8.5.34
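The address Docker assigned can also be read from the host with docker inspect and a Go template, without entering the container; this is just a convenience check, not part of the original steps:
docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' my-tomcat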
3.5. View the container's network
[root@localhost ~]# docker exec -it da59661767fa ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
11: eth0@if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:c0:a8:0a:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 192.168.10.2/24 brd 192.168.10.255 scope global eth0
valid_lft forever preferred_lft forever
3.6. Create container my-tomcat1 without specifying a network (it joins the default bridge network)
[root@localhost ~]# docker run -d --name my-tomcat1 tomcat:8.5.34
3.7. Ping my-tomcat's IP from my-tomcat1: unreachable (no replies), because the two containers are on different bridge networks
[root@localhost ~]# docker exec -t my-tomcat1 ping 192.168.10.2
PING 192.168.10.2 (192.168.10.2) 56(84) bytes of data.
3.8. Connect my-tomcat1 to the tomcat-net network
[root@localhost ~]# docker network connect tomcat-net my-tomcat1
3.9. Ping my-tomcat's IP from my-tomcat1 again: now it succeeds
[root@localhost ~]# docker exec -t my-tomcat1 ping 192.168.10.2
PING 192.168.10.2 (192.168.10.2) 56(84) bytes of data.
64 bytes from 192.168.10.2: icmp_seq=1 ttl=64 time=0.133 ms
64 bytes from 192.168.10.2: icmp_seq=2 ttl=64 time=0.138 ms
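On a user-defined network such as tomcat-net, Docker's embedded DNS also resolves container names, so containers can reach each other by name instead of a hard-coded IP; a quick check (assuming both containers are still connected to tomcat-net):
docker exec -t my-tomcat1 ping -c 2 my-tomcat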