环境介绍
用于测试的环境:
一台物理服务器,两个网口做了team0,基础环境如下:
[root@localhost network-scripts]# cat ifcfg-team0
DEVICE=team0
TEAM_CONFIG="{\"runner\":{\"name\":\"lacp\"}}"
DEVICETYPE=Team
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
NAME=team0
UUID=9d0f4a8c-048e-4121-98d3-8923e0cc212c
ONBOOT=yes
IPADDR=192.168.181.78
PREFIX=24
GATEWAY=192.168.181.254
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
[root@localhost network-scripts]# cat ifcfg-team0-port1
NAME=team0-port1
UUID=f5c19c57-ec4d-4c14-88b3-7946c0316c01
DEVICE=enp1s0f0
ONBOOT=yes
TEAM_MASTER=team0
DEVICETYPE=TeamPort
[root@localhost network-scripts]# cat ifcfg-enp1s0f0
# Generated by dracut initrd
NAME="enp1s0f0"
DEVICE="enp1s0f0"
ONBOOT=no
NETBOOT=yes
UUID="62757359-400c-4db0-80e2-ce733e3073ce"
IPV6INIT=yes
BOOTPROTO=dhcp
TYPE=Ethernet
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
PEERDNS=yes
PEERROUTES=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
[root@localhost network-scripts]# cat ifcfg-team0-port2
NAME=team0-port2
UUID=d5970872-5363-48cb-b8ed-88018908a122
DEVICE=enp1s0f1
ONBOOT=yes
TEAM_MASTER=team0
DEVICETYPE=TeamPort
[root@localhost network-scripts]# cat ifcfg-enp1s0f1
TYPE=Ethernet
BOOTPROTO=dhcp
DEFROUTE=yes
PEERDNS=yes
PEERROUTES=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
IPV6_FAILURE_FATAL=no
NAME=enp1s0f1
UUID=f00430ad-b27c-4f3c-9137-1ec78400289b
DEVICE=enp1s0f1
ONBOOT=no
步骤
看当前网络状态
查看虚拟网桥列表:
[root@localhost ~]# brctl show
bridge name bridge id STP enabled interfaces
这是一台全新的服务器,没有任何网桥。
创建虚拟网桥
创建虚拟网桥ttbr0,将原来team0上的地址192.168.181.78挪到ttbr0上,将网关也设置到新的虚拟网桥上:
[root@localhost network-scripts]# nmcli connection add con-name ttbr0 type bridge autoconnect true ifname ttbr0 ip4 192.168.181.78/24 gw4 192.168.181.254
Connection 'ttbr0' (c9625cdc-67e3-459c-b318-3b6070c03d3d) successfully added.
查看:
[root@localhost network-scripts]# brctl show
bridge name bridge id STP enabled interfaces
ttbr0 8000.000000000000 yes
将原有网卡接到新的虚拟网桥上
原来的地址在team0上,新的虚拟网桥是ttbr0:
[root@localhost network-scripts]# nmcli connection modify team0 connection.slave-type bridge connection.master ttbr0
查看:
[root@localhost network-scripts]# brctl show
bridge name bridge id STP enabled interfaces
ttbr0 8000.000000000000 yes
这时候还没生效,重启下网卡:
systemctl restart network.service
再看,team0已经被加到ttbr0中了:
[root@localhost ~]# brctl show
bridge name bridge id STP enabled interfaces
ttbr0 8000.688f84f02c92 yes team0
为kvm虚拟机添加网卡并接入物理服务器
启动kvm虚拟机后,通过console登录进去,可以看到当前除了loopback口,是没有网卡存在的:
virsh # console cobbler
[root@cobbler ~]# ifconfig -a
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:74 errors:0 dropped:0 overruns:0 frame:0
TX packets:74 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:7140 (6.9 KiB) TX bytes:7140 (6.9 KiB)
在虚拟机的宿主机上,为虚拟机cobbler添加一个虚拟网卡,并桥接到物理服务器的虚拟交换机ttbr0上:
virsh attach-interface cobbler --type bridge --source ttbr0
[root@localhost ~]# virsh attach-interface cobbler --type bridge --source ttbr0
Interface attached successfully
完成后,再登录到虚拟机cobbler中看,会多出来一个网卡:
[root@cobbler ~]# ifconfig -a
eth4 Link encap:Ethernet HWaddr 52:54:00:7E:AF:0C
inet6 addr: fe80::5054:ff:fe7e:af0c/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:4 errors:0 dropped:0 overruns:0 frame:0
TX packets:3 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:202 (202.0 b) TX bytes:258 (258.0 b)
Interrupt:11
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:74 errors:0 dropped:0 overruns:0 frame:0
TX packets:74 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:7140 (6.9 KiB) TX bytes:7140 (6.9 KiB)
调试
测试kvm和宿主机的通路:
宿主机:192.168.181.78
kvm:192.168.181.100
[root@cobbler network-scripts]# ping 192.168.181.78
PING 192.168.181.78 (192.168.181.78) 56(84) bytes of data.
64 bytes from 192.168.181.78: icmp_seq=1 ttl=64 time=0.716 ms
64 bytes from 192.168.181.78: icmp_seq=2 ttl=64 time=0.184 ms
测试kvm和网关的通路:
[root@cobbler network-scripts]# ping 192.168.181.254
PING 192.168.181.254 (192.168.181.254) 56(84) bytes of data.
From 192.168.181.100 icmp_seq=2 Destination Host Unreachable
From 192.168.181.100 icmp_seq=3 Destination Host Unreachable
不通,可能是宿主机的物理网卡未开启混杂模式导致。
打开宿主机混杂模式
在宿主机的物理网卡上开启混杂模式。注意:混杂模式要配置到自启动流程里,不然重启后网络还是不通。
[root@localhost init.d]# ifconfig enp1s0f0 promisc
[root@localhost init.d]# ifconfig enp1s0f1 promisc
[root@localhost init.d]#
混杂模式配置完成后,再次尝试kvm网络,已正常:
[root@cobbler network-scripts]# ping 192.168.181.254
PING 192.168.181.254 (192.168.181.254) 56(84) bytes of data.
64 bytes from 192.168.181.254: icmp_seq=1 ttl=254 time=0.724 ms
64 bytes from 192.168.181.254: icmp_seq=2 ttl=254 time=0.745 ms