Play with OVN (by quqi99)

Author: Zhang Hua  Published: 2019-11-22
Copyright: this article may be reposted freely, but please keep a hyperlink to the original source together with the author information and this copyright notice.

OVN replaces Neutron's ML2/OVS reference implementation (the OVS, L3 and DHCP agents) rather than Neutron itself, so we need to have a look at OVN.

Refer - https://zhhuabj.blog.csdn.net/article/details/42773417

Basic ovn L2 test

#the controller node runs ovn-central (the northbound and southbound DBs); compute nodes only run a local ovn-controller
sudo apt install -y openvswitch-switch ovn-common ovn-controller-vtep ovn-docker ovn-host ovn-central

#In controller, enable remote access, tell OVN southbound db to accept TCP connections from ovn-controllers
sudo ovn-sbctl set-connection ptcp:6642
#In controller, the OVN northbound db does not strictly need to accept TCP connections from ovn-controllers, but enable it anyway
sudo ovn-nbctl set-connection ptcp:6641
$ netstat -lntp | grep  664
(Not all processes could be identified, non-owned process info
 will not be shown, you would have to be root to see it all.)
tcp        0      0 0.0.0.0:6642            0.0.0.0:*               LISTEN      -                   
tcp        0      0 127.0.0.1:6640          0.0.0.0:*               LISTEN      -                   
tcp        0      0 0.0.0.0:6641            0.0.0.0:*               LISTEN      -         
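
The configured listeners can also be cross-checked from the DBs themselves; a quick sketch (get-connection, which is also used later in this post, prints what set-connection configured):

sudo ovn-nbctl get-connection
sudo ovn-sbctl get-connection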

#In controller, create logical switch and ports:
sudo ovn-nbctl ls-add sw
sudo ovn-nbctl lsp-add sw sp1
sudo ovn-nbctl lsp-set-addresses sp1 "00:00:00:00:00:01 10.0.0.1"
sudo ovn-nbctl lsp-add sw sp2
sudo ovn-nbctl lsp-set-addresses sp2 "00:00:00:00:00:02 10.0.0.2"
sudo ovn-nbctl show

#On the compute node, make ovn-controller connect to the southbound db, then create a test port
#external_ids:ovn-remote="tcp:<remote-controller-ip>:6642" external_ids:ovn-encap-ip=<my-ip-used-for-endpoint>
sudo ovs-vsctl set open_vswitch . external_ids:ovn-remote="tcp:localhost:6642" external_ids:ovn-encap-ip=localhost \
external_ids:ovn-encap-type="geneve" external_ids:system-id="vm1"
sudo ip link add sp1_l type veth peer name sp1_r
sudo ovs-vsctl add-port br-int sp1_l  #sudo ovs-vsctl add-br br-int -- set Bridge br-int fail-mode=secure
sudo ovs-vsctl set interface sp1_l external_ids:iface-id=sp1
sudo ip link set sp1_l up
sudo ip netns add sp1
sudo ip link set sp1_r netns sp1
sudo ip netns exec sp1 ip link set sp1_r up
sudo ip netns exec sp1 ip addr add 10.0.0.1/24 dev sp1_r
sudo ip netns exec sp1 ip link set dev sp1_r address 00:00:00:00:00:01

#or, to test across two machines, run the following commands on the second machine instead
sudo ovs-vsctl set open_vswitch . external_ids:ovn-remote="tcp:localhost:6642" external_ids:ovn-encap-ip=localhost \
external_ids:ovn-encap-type="geneve" external_ids:system-id="vm2"
sudo ip link add sp2_l type veth peer name sp2_r
sudo ovs-vsctl add-port br-int sp2_l
sudo ovs-vsctl set interface sp2_l external_ids:iface-id=sp2
sudo ip link set sp2_l up
sudo ip netns add sp2
sudo ip link set sp2_r netns sp2
sudo ip netns exec sp2 ip link set sp2_r up
sudo ip netns exec sp2 ip addr add 10.0.0.2/24 dev sp2_r
sudo ip netns exec sp2 ip link set dev sp2_r address 00:00:00:00:00:02

#test
$ sudo ip netns exec sp1 ping 10.0.0.2 -c 1
PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
64 bytes from 10.0.0.2: icmp_seq=1 ttl=64 time=0.318 ms

hua@node1:~$ sudo ovn-nbctl show
switch a28291a4-bea9-445f-af0f-cae65c04e9f7 (sw)
    port sp2
        addresses: ["00:00:00:00:00:02 10.0.0.2"]
    port sp1
        addresses: ["00:00:00:00:00:01 10.0.0.1"]
hua@node1:~$ sudo ovn-sbctl show
Chassis vm2
    hostname: node1
    Encap geneve
        ip: localhost
        options: {csum="true"}
    Port_Binding sp2
    Port_Binding sp1

# reset
sudo ovn-nbctl lsp-del sp1
sudo ovn-nbctl lsp-del sp2
sudo ovn-nbctl ls-del sw
sudo ip netns del sp1
sudo ip netns del sp2
sudo ovs-vsctl del-port br-int sp1_l
sudo ovs-vsctl del-port br-int sp2_l

Basic ovs flow test

sudo docker run -itd --name i1 --net=none ubuntu:20.04 /bin/bash
sudo docker run -itd --name i2 --net=none ubuntu:20.04 /bin/bash
sudo ovs-vsctl add-br br-int
sudo ovs-docker add-port br-int eth0 i1 --ipaddress=192.168.1.2/24
sudo ovs-docker add-port br-int eth0 i2 --ipaddress=192.168.1.3/24
sudo docker exec -it i1 bash
sudo docker exec -it i1 ping 192.168.1.3
sudo ovs-vsctl list interface d15c80f8359d4_l
#when testing across two machines, connect the two bridges with a vxlan tunnel:
#sudo ovs-vsctl add-port br-int vxlan1 -- set interface vxlan1 type=vxlan options:remote_ip=192.168.99.122 options:key=flow
#sudo ovs-vsctl add-port br-int vxlan1 -- set interface vxlan1 type=vxlan options:remote_ip=192.168.99.123 options:key=flow

#test 1 - delete flow
$ sudo ovs-ofctl dump-flows br-int
 cookie=0x0, duration=563.338s, table=0, n_packets=6, n_bytes=364, priority=0 actions=NORMAL
sudo ovs-ofctl del-flows br-int
sudo docker exec -it i1 ping 192.168.1.3  #the ping fails now that the default NORMAL flow is gone

#test 2 - add flow
$ sudo ovs-vsctl list interface d15c80f8359d4_l |grep ofport
ofport              : 3
$ sudo ovs-vsctl list interface 85023af15d0c4_l |grep ofport
ofport              : 4
sudo ovs-ofctl add-flow br-int "priority=1,in_port=3,actions=output:4"
sudo ovs-ofctl add-flow br-int "priority=2,in_port=4,actions=output:3"
sudo ovs-ofctl dump-flows br-int
sudo docker exec -it i1 ping 192.168.1.3
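
If a ping still fails, ofproto/trace shows which flows a synthetic packet would hit; a quick sketch using the ofport numbers from above:

sudo ovs-appctl ofproto/trace br-int in_port=3,icmp
sudo ovs-appctl ofproto/trace br-int in_port=4,icmp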

ovn L3 test

#create two vSwitches
sudo ovn-nbctl ls-add sw0
sudo ovn-nbctl lsp-add sw0 sw0-port1
sudo ovn-nbctl lsp-set-addresses sw0-port1 "50:54:00:00:00:01 192.168.0.2"

sudo ovn-nbctl ls-add sw1
sudo ovn-nbctl lsp-add sw1 sw1-port1
sudo ovn-nbctl lsp-set-addresses sw1-port1 "50:54:00:00:00:03 11.0.0.2"

#create a vRouter, and connect two vSwitches to it
sudo ovn-nbctl lr-add lr0

sudo ovn-nbctl lrp-add lr0 lrp0 00:00:00:00:ff:01 192.168.0.1/24
sudo ovn-nbctl lsp-add sw0 lrp0-attachment
sudo ovn-nbctl lsp-set-type lrp0-attachment router
sudo ovn-nbctl lsp-set-addresses lrp0-attachment 00:00:00:00:ff:01
sudo ovn-nbctl lsp-set-options lrp0-attachment router-port=lrp0

sudo ovn-nbctl lrp-add lr0 lrp1 00:00:00:00:ff:02 11.0.0.1/24
sudo ovn-nbctl lsp-add sw1 lrp1-attachment
sudo ovn-nbctl lsp-set-type lrp1-attachment router
sudo ovn-nbctl lsp-set-addresses lrp1-attachment 00:00:00:00:ff:02
sudo ovn-nbctl lsp-set-options lrp1-attachment router-port=lrp1

hua@node1:~$ sudo ovn-nbctl show
switch 25c77c44-9be4-434b-8b38-e325d4c2b044 (sw0)
    port lrp0-attachment
        type: router
        addresses: ["00:00:00:00:ff:01"]
        router-port: lrp0
    port sw0-port1
        addresses: ["50:54:00:00:00:01 192.168.0.2"]
switch 0b33e823-577b-4c76-a661-c3bd358624cc (sw1)
    port lrp1-attachment
        type: router
        addresses: ["00:00:00:00:ff:02"]
        router-port: lrp1
    port sw1-port1
        addresses: ["50:54:00:00:00:03 11.0.0.2"]
router cd8095b3-287f-4e0c-af3d-a10a089f977c (lr0)
    port lrp1
        mac: "00:00:00:00:ff:02"
        networks: ["11.0.0.1/24"]
    port lrp0
        mac: "00:00:00:00:ff:01"
        networks: ["192.168.0.1/24"]
hua@node1:~$ sudo ovn-trace --minimal sw0 'inport == "sw0-port1" && eth.src == 50:54:00:00:00:01 && ip4.src == 192.168.0.2 && eth.dst == 00:00:00:00:ff:01 && ip4.dst == 11.0.0.2 && ip.ttl == 64'
# ip,reg14=0x1,vlan_tci=0x0000,dl_src=50:54:00:00:00:01,dl_dst=00:00:00:00:ff:01,nw_src=192.168.0.2,nw_dst=11.0.0.2,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=64
ip.ttl--;
eth.src = 00:00:00:00:ff:02;
eth.dst = 50:54:00:00:00:03;
output("sw1-port1");

# reset
sudo ovn-nbctl ls-del sw0
sudo ovn-nbctl ls-del sw1
sudo ovn-nbctl lr-del lr0

For more details, see OVN Routing and ovn-trace - http://dani.foroselectronica.es/ovn-routing-and-ovn-trace-550/

ovn dhcp

#create dhcp port
sudo ovn-nbctl lsp-add sw0 sw0-dhcpport
sudo ovn-nbctl lsp-set-addresses sw0-dhcpport "02:ac:10:ff:01:30 192.168.0.3"
#sudo ovn-nbctl lsp-set-port-security sw0-dhcpport "02:ac:10:ff:01:30 192.168.0.3"
options="$(sudo ovn-nbctl create DHCP_Options cidr=192.168.0.0/24 \
options="\"server_id\"=\"192.168.0.10\" \"server_mac\"=\"02:ac:10:ff:01:29\" \
\"lease_time\"=\"3600\" \"router\"=\"192.168.0.10\"")" 
echo "DHCP options is: " $options
sudo ovn-nbctl lsp-set-dhcpv4-options sw0-dhcpport $options
sudo ovn-nbctl list dhcp_options
sudo ovn-nbctl lsp-get-dhcpv4-options sw0-dhcpport

#create test vm
sudo ip netns add nsi3
sudo ovs-vsctl add-port br-int i3 -- set interface i3 type=internal
sudo ip link set i3 address 02:ac:10:ff:01:30
sudo ip link set i3 netns nsi3
sudo ovs-vsctl set Interface i3 external_ids:iface-id=sw0-dhcpport  #must match the logical port name created above

#get ip address via dhcp
sudo ip netns exec nsi3 dhclient i3
sudo ip netns exec nsi3 ip addr show i3
sudo ip netns exec nsi3 ip route show
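
If dhclient gets no lease, it is worth confirming that northd actually generated the DHCP flows; a quick sketch (the same check is used for the octavia issue further below):

sudo ovn-sbctl dump-flows | grep put_dhcp_opts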

OVN vRouter

sudo ovn-nbctl ls-add sw0
sudo ovn-nbctl lsp-add sw0 outs-wan
sudo ovn-nbctl lsp-set-addresses outs-wan unknown
sudo ovn-nbctl lsp-set-type outs-wan localnet 
sudo ovn-nbctl lsp-set-options outs-wan network_name=wanNet

sudo ovs-vsctl add-br br-eth
sudo ovs-vsctl set Open_vSwitch . external-ids:ovn-bridge-mappings=wanNet:br-eth
sudo ovs-vsctl add-port br-eth eth0
sudo ip link set br-eth up
sudo ip addr add 192.168.66.111/23 dev br-eth

#associate the VM's port with the logical port
sudo ovs-vsctl set Interface i3 external_ids:iface-id=i3

OVN SNAT/DNAT

#fixed-ip: 10.0.1.103  FIP: 192.168.99.122 
ovn-nbctl -- --id=@nat create nat type="snat" logical_ip=10.0.1.0/24 external_ip=192.168.99.122 -- add logical_router gateway_route nat @nat
ovn-nbctl -- --id=@nat create nat type="dnat_and_snat" logical_ip=10.0.1.103 external_ip=192.168.99.122 -- add logical_router gateway_route nat @nat
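
ovn-nbctl also has shorthand commands for the same thing that avoid hand-building the nat row; a sketch, assuming the logical router really is named gateway_route as above:

ovn-nbctl lr-nat-add gateway_route snat 192.168.99.122 10.0.1.0/24
ovn-nbctl lr-nat-add gateway_route dnat_and_snat 192.168.99.122 10.0.1.103
ovn-nbctl lr-nat-list gateway_route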

Reference: How to verify the SNAT and DNAT mapping in OVN-DVR - https://access.redhat.com/solutions/4073951

Data Model & CLI

#From NB leader:
ovn-nbctl show
ovn-nbctl list NB_Global
ovn-nbctl list Logical_Switch_Port
ovn-nbctl list Forwarding_Group
ovn-nbctl list Address_Set
ovn-nbctl list Port_Group
#openstack loadbalancer list
ovn-nbctl list Load_Balancer
ovn-nbctl list Load_Balancer_Health_Check
ovn-nbctl list ACL
ovn-nbctl list Logical_Router
ovn-nbctl list QOS
ovn-nbctl list Logical_Router_Port
ovn-nbctl list Logical_Router_Static_Route
ovn-nbctl list Logical_Router_Policy
ovn-nbctl list NAT
ovn-nbctl list Gateway_Chassis

#From SB leader:
ovn-sbctl show
ovn-sbctl lflow-list

juju run --application octavia-ovn-chassis 'ovn-appctl -t ovn-controller exit --restart'
juju run --application octavia-ovn-chassis 'systemctl restart ovn-controller.service'

ovn-sbctl lflow-list > lflow.txt

A question: how to ping other machines from the OVN gateway

As shown below, in the old non-OVN environment we could ping the service VM IP (192.168.21.39) from the qrouter-xxx namespace on the l3-agent node via the GW (192.168.21.1). With OVN there is no such namespace anymore, so how do we do the same thing?
router ca47f4c3-3379-475a-ab80-fac4062e0a3e (neutron-ab4310a3-09b9-4638-8f7c-3873bc9b4c47) (aka provider-router)
    port lrp-c6ffc24d-9213-4e31-80e2-f68107f9aad1
        mac: "fa:16:3e:0e:f8:f5"
        networks: ["192.168.21.1/24"]
    ...
switch a14b3c9a-b5f8-43f2-9ac8-18e58d9f1887 (neutron-0f409568-561e-4e33-89ea-2814faa44add) (aka private)
    port 3f382ff5-7601-4bec-a687-e964adadefc2
        addresses: ["fa:16:3e:f1:fa:a5 192.168.21.237"]
    port 38d6c6f7-8a64-406d-94bb-c550eb50427d
        type: localport
        addresses: ["fa:16:3e:8d:0b:5b 192.168.21.2"]
    port c6ffc24d-9213-4e31-80e2-f68107f9aad1
        type: router
        router-port: lrp-c6ffc24d-9213-4e31-80e2-f68107f9aad1
    port 18ce4590-453a-4152-97b9-eafb3523047d (aka octavia-lb-6058a336-3922-468f-9f70-8e34bd14e195)
        type: virtual
        addresses: ["fa:16:3e:ce:84:fd 192.168.21.39"]

So, let's create a pingvm:
sudo ovn-nbctl lsp-add a14b3c9a-b5f8-43f2-9ac8-18e58d9f1887 pingvm
sudo ovn-nbctl lsp-set-addresses pingvm "40:44:00:00:00:01 192.168.21.122"

Continue on the compute node:
sudo ovs-vsctl add-port br-int pingvm -- set Interface pingvm type=internal -- set Interface pingvm external_ids:iface-id=pingvm
sudo ip netns add pingvm
sudo ip link set pingvm netns pingvm
sudo ip netns exec pingvm ip link set pingvm address 40:44:00:00:00:01
sudo ip netns exec pingvm ip addr add 192.168.21.122/24 dev pingvm
sudo ip netns exec pingvm ip link set pingvm up
sudo ip netns exec pingvm ip route add default via 192.168.21.1

Test. The pings above may not succeed because Octavia's management IP should really be used, but that doesn't matter; the idea is the same.
sudo ip netns exec pingvm ping 192.168.21.39
sudo ip netns exec pingvm ssh 192.168.21.39

debug CLIs

ovn-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound
journalctl --no-pager --unit ovn-controller
ovn-sbctl -v show

1, debug the heartbeat with ovn-nbctl
ovn-nbctl list NB_Global |grep nb_cfg
ovn-sbctl list Chassis |grep nb_cfg
juju run -u ovn-central/leader 'ovn-nbctl -p /etc/ovn/key_host -C /etc/ovn/ovn-central.crt -c /etc/ovn/cert_host --db ssl:<ovn-central/0 ip>:6641,ssl:<ovn-central/1 ip>:6641,ssl:<ovn-central/2 ip>:6641 list NB_Global'
juju run -u ovn-central/leader 'ovn-sbctl -p /etc/ovn/key_host -C /etc/ovn/ovn-central.crt -c /etc/ovn/cert_host --db ssl:<ovn-central/0 ip>:16642,ssl:<ovn-central/1 ip>:16642,ssl:<ovn-central/2 ip>:16642 list Chassis'

2, the flow for agent status is:
Neutron server agent check updates [2] --> OVN NB NB_global nb_cfg --> ovn-northd --> OVN SB SB_global --> ovn-controller (on each compute node) --> OVN SB Chassis nb_cfg --> ovn-northd --> OVN NB NB_global hv_cfg
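
In other words, when the heartbeat completes, nb_cfg and hv_cfg converge; a quick sketch to compare them (run against the NB/SB leaders):

ovn-nbctl get NB_Global . nb_cfg
ovn-nbctl get NB_Global . hv_cfg
ovn-sbctl --columns=name,nb_cfg list Chassis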

3, ovsdb is configured to use RAFT protocol for leadership election - https://bugs.launchpad.net/charm-ovn-central/+bug/1899371

ovn-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/change-election-timer OVN_Southbound 9000
ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound |grep 'Election timer'
#juju run -u ovn-central/leader -- "ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound |grep 'Election timer'"
juju config ovn-central ovsdb-server-election-timer

# https://blog.csdn.net/zhengmx100/article/details/75426710
ovn-sbctl --help
#ovn-sbctl set-connection ptcp:6642:0.0.0.0 -- set connection . inactivity_probe=60000
ovn-sbctl list Connection   #run in sb-db leader
ovn-nbctl list Connection   #run in nb-db leader
# https://www.mail-archive.com/ovs-discuss@openvswitch.org/msg07499.html
 +--------------+  driver configuration
 |  ovn-driver  |
 +--------------+
     ^    |
     |    v
 +--------------+  inactivity_probe in table "Connection"
 |  ovn-nb-db   |
 +--------------+
     ^    |
     |    v
 +--------------+  options:northd_probe_interval in table "NB_Global"
 |  ovn-northd  |  in nbdb.
 +--------------+
     ^    |
     |    v
 +--------------+  inactivity_probe in table "Connection"
 |  ovn-sb-db   |
 +--------------+
     ^    |
     |    v
 +--------------------------------+  in table "Open_vSwitch" in ovsdb-server:
 |        ovn-controller          |  ovn-remote-probe-interval for TCP
 +--------------------------------+  probe to ovsdb-server,
     ^    |            ^    |        ovn-openflow-probe-interval for UNIX
     |    v TCP        |    v UNIX   probe to ovs-vswitchd
 +--------------+  +--------------+
 | ovsdb-server |  | ovs-vswitchd |
 +--------------+  +--------------+

# ovn-openflow-probe-interval: ovn-controller sends an echo msg over the openflow connection
# every 5 (ovn-openflow-probe-interval) seconds; if ovn-controller takes >5 seconds to
# process logical flows, this results in 100% cpu usage
# https://bugzilla.redhat.com/show_bug.cgi?id=1744962
#https://numans.blog/2018/01/05/debugging-ovn-external-connectivity-part-1/
#ovs-vsctl get open . external_ids
{hostname=juju-8a10cb-ovn-6.cloud.sts, ovn-encap-ip="10.5.1.204", ovn-encap-type=geneve, ovn-remote="ssl:10.5.3.192:6642,ssl:10.5.0.152:6642,ssl:10.5.0.249:6642", rundir="/var/run/openvswitch", system-id=juju-8a10cb-ovn-6.cloud.sts}
ovs-vsctl set open . external_ids:ovn-openflow-probe-interval=30
ovs-vsctl get open . external_ids:ovn-openflow-probe-interval

So the problem seems to be that the 5-second ovn-openflow-probe-interval is too small, see:
https://bugs.launchpad.net/ubuntu/+source/ovn/+bug/1899369

20220422: after applying the settings below, '100% CPU usage' dropped to '53% CPU usage', and errors like 'Unreasonably long 1433ms poll interval' disappeared as well. The remaining BFD flapping (BFD state change: up->down) logs were caused by a missing provnet (bridge not found for localnet port 'provnet-aa3a4fec-a788-42e6-a773-bf3a0cdb52c2' with network name 'sriovfabric1'), which seems related to sriovfabric1.

ovs-vsctl set open . external_ids:ovn-openflow-probe-interval=0
ovs-vsctl set open . external_ids:ovn-monitor-all=true
ovs-vsctl set open . external_ids:ovn-remote-probe-interval=100000
ovn-nbctl set connection . inactivity_probe='180000'
ovn-sbctl set connection . inactivity_probe='180000'

juju supports setting ovsdb-server-inactivity-probe; for the other settings see https://bugs.launchpad.net/charm-ovn-chassis/+bug/1961933
juju config ovn-central ovsdb-server-inactivity-probe=180
If you see errors like the following on port 6644:

18T09:15:47.313Z|92550|reconnect|ERR|ssl:100.94.0.158:6644: no response to inactivity probe after 5 seconds, disconnecting

TCP/6641 => OVN northbound listener
TCP/6642 => OVN southbound listener
TCP/6643 => OVN northbound raft port
TCP/6644 => OVN southbound raft port
TCP/16642 => OVN southbound listener admin port

According to https://github.com/ovn-org/ovn/blob/3fb397b63663297acbcbf794e1233951222ae5af/tests/ovn-sbctl.at#L168-L175, the following commands can set 6642 and 6644 at the same time:

ovn-sbctl --inactivity-probe=30000 set-connection pssl:6642 pssl:6644 pssl:16642
ovn-sbctl --inactivity-probe=30000 set-connection ptcp:6644:127.0.0.1 punix:/var/run/ovn/ovnsb_db.sock
ovn-sbctl list connection

Below is the command format for also setting the role:

$ sudo ovn-sbctl get-connection
read-write role="" pssl:16642
read-write role="ovn-controller" pssl:6642
$ sudo ovn-sbctl set-connection read-write role="" pssl:16642 read-write role="ovn-controller" pssl:6642 read-write role="" pssl:6644
$ sudo ovn-sbctl get-connection
read-write role="" pssl:16642
read-write role="" pssl:6644
read-write role="ovn-controller" pssl:6642

There is also an lp bug (https://bugs.launchpad.net/charm-ovn-chassis/+bug/1961933), but it only bumps up ovn-remote-probe-interval rather than the inactivity-probe of the SB RAFT port, and it targets ovn-chassis, not ovn-central.

20201215 update - one way OVS can die

At the OS level (mmap, sbrk, malloc), RLIMIT_MEMLOCK caps how much memory mlock may pin, yet ovs locks everything, even MCL_FUTURE, via mlockall (https://github.com/openvswitch/ovs/blob/v2.13.1/vswitchd/ovs-vswitchd.c#L93-L103). A workaround is to set RLIMIT_MEMLOCK to unlimited inside the lxd container, see https://bugs.launchpad.net/charm-ovn-chassis/+bug/1906280

20201224 update - one way ovn-controller can die

https://bugs.launchpad.net/ubuntu/+source/ovn/+bug/1899369

20201224 update - another way ovn-controller can die

https://bugs.launchpad.net/charm-layer-ovn/+bug/1896630

20201224 update - the network breaks after MQ dies

https://bugs.launchpad.net/neutron/+bug/1869808
https://bugs.launchpad.net/neutron/+bug/1871850

20201227 update - another way OVS can die

Hit the following problem:

2020-12-25T01:59:31.486Z|00022|ovs_rcu(urcu5)|WARN|blocked 8000 ms waiting for main to quiesce
2020-12-25T01:59:37.164Z|00918|timeval|WARN|Unreasonably long 14005ms poll interval (2ms user, 7ms system)
2020-12-25T01:59:37.165Z|00919|timeval|WARN|context switches: 1 voluntary, 0 involuntary

Related bugs:
https://bugs.launchpad.net/ubuntu/+source/openvswitch/+bug/1839592
https://bugs.launchpad.net/ubuntu/+source/glibc/+bug/1864864
To reproduce and fix the problem:

sudo apt install build-essential -y
wget https://launchpadlibrarian.net/467541631/bug23844.wr.c
gcc bug23844.wr.c -lpthread -o bug23844wr
for i in {1..20}; do ./bug23844wr; done

# reproduce the problem
sudo apt install libc6=2.27-3ubuntu1.2 libc6-dev=2.27-3ubuntu1.2 libc6-dbg=2.27-3ubuntu1.2 libc-bin=2.27-3ubuntu1.2 libc-dev-bin=2.27-3ubuntu1.2 locales=2.27-3ubuntu1.2 multiarch-support=2.27-3ubuntu1.2 openvswitch-common=2.9.2-0ubuntu0.18.04.3 openvswitch-switch=2.9.2-0ubuntu0.18.04.3 -y --allow-downgrades
sudo systemctl daemon-reexec
root@juju-ef33d1-ovn-6:~# for i in {1..20}; do ./bug23844wr; done
trylock_wr

# fix the problem
sudo apt install libc6=2.27-3ubuntu1.4 libc6-dev=2.27-3ubuntu1.4 libc6-dbg=2.27-3ubuntu1.4 libc-bin=2.27-3ubuntu1.4 libc-dev-bin=2.27-3ubuntu1.4 locales=2.27-3ubuntu1.4 multiarch-support=2.27-3ubuntu1.4 openvswitch-common=2.9.5-0ubuntu0.18.04.1 openvswitch-switch=2.9.5-0ubuntu0.18.04.1 -y
sudo systemctl daemon-reexec
root@juju-ef33d1-ovn-6:~# for i in {1..20}; do ./bug23844wr; done
...

20210507 update - does not work, just for reference

cd /bak/work/charms/
git clone https://github.com/openstack-charmers/openstack-on-lxd.git
cd openstack-on-lxd
lxc profile create juju-default 2>/dev/null || echo "juju-default profile already exists"
cat ./lxd-profile.yaml |lxc profile edit juju-default
#lxc profile device set juju-default root pool=default
lxc launch ubuntu:focal central -p juju-default
lxc launch ubuntu:focal compute1 -p juju-default
lxc launch ubuntu:focal compute2 -p juju-default
$ lxc list
+----------+---------+----------------------+------+-----------+-----------+
|   NAME   |  STATE  |         IPV4         | IPV6 |   TYPE    | SNAPSHOTS |
+----------+---------+----------------------+------+-----------+-----------+
| central  | RUNNING | 10.239.40.216 (eth0) |      | CONTAINER | 0         |
+----------+---------+----------------------+------+-----------+-----------+
| compute1 | RUNNING | 10.239.40.9 (eth0)   |      | CONTAINER | 0         |
+----------+---------+----------------------+------+-----------+-----------+
| compute2 | RUNNING | 10.239.40.35 (eth0)  |      | CONTAINER | 0         |
+----------+---------+----------------------+------+-----------+-----------+

#on controller
lxc exec `lxc list |grep central |awk -F '|' '{print $2}'` bash
apt install ovn-host ovn-central net-tools -y
ovn-nbctl set-connection ptcp:6641
ovn-sbctl set-connection ptcp:6642
netstat -lntp |grep 664

#on the two compute nodes
lxc exec `lxc list |grep compute1 |awk -F '|' '{print $2}'` bash
lxc exec `lxc list |grep compute2 |awk -F '|' '{print $2}'` bash
apt install openvswitch-switch ovn-host net-tools -y
ovs-vsctl add-br br-int
ovs-vsctl set bridge br-int protocols=OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13,OpenFlow14,OpenFlow15
#associate the VM's port with the logical port
#ovs-vsctl set Interface i1 external_ids:iface-id=i1
ovs-vsctl set open_vswitch .  \
  external_ids:ovn-remote=tcp:10.239.40.216:6642 \
  external_ids:ovn-encap-ip=$(ip addr show eth0| awk '$1 == "inet" {print $2}' | cut -f1 -d/) \
  external_ids:ovn-encap-type=geneve \
  external_ids:system-id=$(hostname)
ovs-vsctl show

#on controller
#create private subnet and dhcp options for it
ovn-nbctl ls-add private
ovn-nbctl set logical_switch private other_config:subnet="10.0.2.0/24" other_config:exclude_ips="10.0.2.2..10.0.2.10"
ovn-nbctl dhcp-options-create 10.0.2.0/24
DHCP_UUID=$(ovn-nbctl --bare --columns=_uuid find dhcp_options cidr="10.0.2.0/24")
ovn-nbctl dhcp-options-set-options ${DHCP_UUID} \
  lease_time=3600 router=10.0.2.1 server_id=10.0.2.1 server_mac=c0:ff:ee:00:00:01
ovn-nbctl list dhcp_options

#create vm port
ovn-nbctl lsp-add private port_i1
ovn-nbctl lsp-set-addresses port_i1 "00:0c:29:e7:ff:ac dynamic"
ovn-nbctl lsp-set-dhcpv4-options port_i1 $DHCP_UUID
ovn-trace --summary private '
  inport=="port_i1" &&
  eth.src==00:0c:29:e7:ff:ac &&
  ip4.src==0.0.0.0 &&
  ip.ttl==1 &&
  ip4.dst==255.255.255.255 &&
  udp.src==68 &&
  udp.dst==67'

root@central:~# ovn-nbctl list logical_switch_port |grep addresses
addresses           : ["00:0c:29:e7:ff:ac dynamic"]
dynamic_addresses   : "00:0c:29:e7:ff:ac 10.0.2.11"
options             : {lease_time="3600", router="10.0.2.1", server_id="10.0.2.1", server_mac="c0:ff:ee:00:00:01"}
root@central:~# ovn-trace --summary private '
>   inport=="port_i1" &&
>   eth.src==00:0c:29:e7:ff:ac &&
>   ip4.src==0.0.0.0 &&
>   ip.ttl==1 &&
>   ip4.dst==255.255.255.255 &&
>   udp.src==68 &&
>   udp.dst==67'
# udp,reg14=0x1,vlan_tci=0x0000,dl_src=00:0c:29:e7:ff:ac,dl_dst=00:00:00:00:00:00,nw_src=0.0.0.0,nw_dst=255.255.255.255,nw_tos=0,nw_ecn=0,nw_ttl=1,tp_src=68,tp_dst=67
ingress(dp="private", inport="port_i1") {
    next;
    reg0[3] = put_dhcp_opts(offerip = 10.0.2.11, lease_time = 3600, netmask = 255.255.255.0, router = 10.0.2.1, server_id = 10.0.2.1);
    /* We assume that this packet is DHCPDISCOVER or DHCPREQUEST. */;
    next;
    eth.dst = eth.src;
    eth.src = c0:ff:ee:00:00:01;
    ip4.src = 10.0.2.1;
    udp.src = 67;
    udp.dst = 68;
    outport = inport;
    flags.loopback = 1;
    output;
    egress(dp="private", inport="port_i1", outport="port_i1") {
        next;
        output;
        /* output to "port_i1", type "" */;
    };
};
root@central:~# ovn-nbctl list logical_switch_port |grep addresses
addresses           : ["00:0c:29:e7:ff:ac dynamic"]
dynamic_addresses   : "00:0c:29:e7:ff:ac 10.0.2.11"
root@central:~# ovn-sbctl dump-flows |grep 10.0.2.11
  table=14(ls_in_dhcp_options ), priority=100  , match=(inport == "port_i1" && eth.src == 00:0c:29:e7:ff:ac && ip4.src == 0.0.0.0 && ip4.dst == 255.255.255.255 && udp.src == 68 && udp.dst == 67), action=(reg0[3] = put_dhcp_opts(offerip = 10.0.2.11, lease_time = 3600, netmask = 255.255.255.0, router = 10.0.2.1, server_id = 10.0.2.1); next;)
  table=14(ls_in_dhcp_options ), priority=100  , match=(inport == "port_i1" && eth.src == 00:0c:29:e7:ff:ac && ip4.src == 10.0.2.11 && ip4.dst == {10.0.2.1, 255.255.255.255} && udp.src == 68 && udp.dst == 67), action=(reg0[3] = put_dhcp_opts(offerip = 10.0.2.11, lease_time = 3600, netmask = 255.255.255.0, router = 10.0.2.1, server_id = 10.0.2.1); next;)

# change to external port 
ovn-nbctl lsp-set-type port_i1 external
root@central:~# ovn-sbctl dump-flows |grep 10.0.2.11
<empty>

# create vRouter
ovn-nbctl lr-add provider-router
ovn-nbctl lrp-add provider-router lsp-private a0:10:00:00:00:01 10.0.2.1/24
ovn-nbctl lsp-add private router_port_private
ovn-nbctl set Logical_Switch_Port router_port_private type=router \
    options:router-port=lsp-private addresses=router
root@central:~# ovn-sbctl dump-flows |grep 10.0.2.11
  table=12(lr_in_arp_resolve  ), priority=100  , match=(outport == "lsp-private" && reg0 == 10.0.2.11), action=(eth.dst = 00:0c:29:e7:ff:ac; next;)

# create public switch, and map it to provider-router and br-data
ovn-nbctl ls-add public
ovn-nbctl lrp-add provider-router lsp-public a0:10:00:00:00:02 10.239.40.122/24
ovn-nbctl lsp-add public router_port_public
ovn-nbctl set Logical_Switch_Port router_port_public type=router \
    options:router-port=lsp-public addresses=router

ovn-nbctl lsp-add public provnet-xxx
ovn-nbctl lsp-set-addresses provnet-xxx unknown
ovn-nbctl lsp-set-type provnet-xxx localnet 
ovn-nbctl lsp-set-options provnet-xxx network_name=physnet1

# create dhcp port and snat port
ovn-nbctl lsp-add private dhcp_port
ovn-nbctl lsp-set-addresses dhcp_port "00:00:00:00:00:04 10.0.2.2"
ovn-nbctl lsp-add public snat_port
ovn-nbctl lsp-set-addresses snat_port "00:00:00:00:00:03 10.239.40.123"

# on two compute nodes
ovs-vsctl add-br br-data
ovs-vsctl add-port br-data eth1
ifconfig eth1 up
ovs-vsctl set open . external-ids:ovn-bridge-mappings=physnet1:br-data

# create HA_CHASSIS_GROUP, and compute1 is the chassis with the highest priority
ovn-nbctl ha-chassis-group-add hagrp1
ovn-sbctl list chassis
ovn-nbctl ha-chassis-group-add-chassis hagrp1 compute1.lxd 30
ovn-nbctl ha-chassis-group-add-chassis hagrp1 compute2.lxd 29
ovn-nbctl list ha_chassis_group
ovn-nbctl -f csv list ha_chassis |egrep -v '^_uuid' |sort -t ',' -k 4
ovn-sbctl list Port_Binding

# on two compute nodes
ovs-vsctl set Open_vSwitch . external-ids:ovn-cms-options=\"enable-chassis-as-gw\"

# associate port_i1 to ha_chassis_group
#ovn-nbctl lsp-set-type port_i1 external
hagrp1_uuid=`ovn-nbctl --bare --columns _uuid find ha_chassis_group name="hagrp1"`
ovn-nbctl set Logical_Switch_Port port_i1 ha-chassis-group=$hagrp1_uuid


ip netns add nsi1
ovs-vsctl add-port br-int i2 -- set interface i2 type=internal
ip link set i2 address 02:ac:10:ff:01:30
ip link set i2 netns nsi1
#associate the VM's port with the logical port
ovs-vsctl set Interface i2 external_ids:iface-id=i2
ip netns exec nsi1 dhclient i2
ip netns exec nsi1 ifconfig i2 10.5.0.122/24

20220414 - ovn dhcp issue

The following commands easily reproduce a problem where the o-hm0 port gets no IP:

./generate-bundle.sh --name octavia --series focal --release xena --octavia-ipv4 --ovn --use-stable-charms --run
./tools/vault-unseal-and-authorise.sh
./tools/upload_octavia_amphora_image.sh --release xena
./tools/create_ipv4_octavia.sh
./tools/configure_octavia.sh
./configure
./tools/instance_launch.sh 1 focal
./tools/float_all.sh
./tools/sec_groups.sh
ssh -i ~/testkey.priv ubuntu@10.5.150.108 -- sudo apt install apache2 -y
./tools/create_octavia_lb.sh --member-vm <uuid from prev step>

These articles were used as references:

  • OVN in practice - a Chinese translation of "An Introduction to OVN Routing": https://www.cnblogs.com/YaoDD/p/7475728.html
  • https://blog.oddbit.com/post/2019-12-19-ovn-and-dhcp/
  • https://lk668.github.io/2020/09/21/2020-09-21-ovn-dhcp-for-external-host/

It turned out that 'ovn-nbctl lsp-get-dhcpv4-options 5fab7b8c-7dff-4e31-93c7-349ae8c6a406' returned empty output.

# ovn-nbctl show 9eecbda9-ef5e-4d51-95a8-ce238f7a4673
switch 9eecbda9-ef5e-4d51-95a8-ce238f7a4673 (neutron-ef7df67d-7e27-4422-9c2a-9aed26511a28) (aka lb-mgmt-net)
    port e478b92e-a38b-4942-a076-0057fbefe923
        type: router
        router-port: lrp-e478b92e-a38b-4942-a076-0057fbefe923
    port 85437db6-51cf-4b88-81c7-b8898aadd081
        type: localport
        addresses: ["fa:16:3e:7a:09:00 10.100.0.2"]
    port 5fab7b8c-7dff-4e31-93c7-349ae8c6a406 (aka octavia-health-manager-octavia-0-listen-port)
        addresses: ["fa:16:3e:06:b4:e6 10.100.0.219"]

# ovn-nbctl list dhcp_options  |grep 10.100.0.0 -B1 -A3
_uuid               : ac4abaaf-9598-4026-ac70-1ba493288448
cidr                : "10.100.0.0/24"
external_ids        : {"neutron:revision_number"="1", subnet_id="5c871695-2f0f-4aca-8af5-2809a92e282f"}
options             : {classless_static_route="{169.254.169.254/32,10.100.0.2, 0.0.0.0/0,10.100.0.1}", dns_server="{10.5.0.15}", domain_name="\"octavia.stsstack.qa.1ss.\"", lease_time="43200", mtu="1492", router="10.100.0.1", server_id="10.100.0.1", server_mac="fa:16:3e:4c:11:6f"}

ovn-nbctl lsp-get-dhcpv4-options 5fab7b8c-7dff-4e31-93c7-349ae8c6a406

After running the following command, o-hm0 got its IP:

ovn-nbctl lsp-set-dhcpv4-options 5fab7b8c-7dff-4e31-93c7-349ae8c6a406 ac4abaaf-9598-4026-ac70-1ba493288448

The cause is that the o-hm0 port uses device-owner neutron:LOADBALANCERV2 (https://opendev.org/openstack/neutron/src/branch/master/neutron/common/ovn/utils.py#L125-L127),

openstack port create --network lb-mgmt-net --device-owner neutron:LOADBALANCERV2 --security-group fe575293-a434-48fd-9b06-74770cab1a6f testoctavia5

which leads to the following flows being missing:

$ sudo ovn-sbctl lflow-list
table=17(ls_in_dhcp_options ), priority=100 , match=(inport == "74afdfcd-7291-476f-bfae-877264cc0b61" && eth.src == fa:16:3e:db:75:61 && ip4.src == 0.0.0.0 && ip4.dst == 255.255.255.255 && udp.src == 68 && udp.dst == 67), action=(reg0[3] = put_dhcp_opts(offerip = 10.100.0.233, classless_static_route = {169.254.169.254/32,10.100.0.2, 0.0.0.0/0,10.100.0.1}, dns_server = {10.5.0.2}, domain_name = "octaviaovn.stsstack.qa.1ss.", lease_time = 43200, mtu = 1492, netmask = 255.255.255.0, router = 10.100.0.1, server_id = 10.100.0.1); next;)
table=17(ls_in_dhcp_options ), priority=100 , match=(inport == "74afdfcd-7291-476f-bfae-877264cc0b61" && eth.src == fa:16:3e:db:75:61 && ip4.src == 10.100.0.233 && ip4.dst == {10.100.0.1, 255.255.255.255} && udp.src == 68 && udp.dst == 67), action=(reg0[3] = put_dhcp_opts(offerip = 10.100.0.233, classless_static_route = {169.254.169.254/32,10.100.0.2, 0.0.0.0/0,10.100.0.1}, dns_server = {10.5.0.2}, domain_name = "octaviaovn.stsstack.qa.1ss.", lease_time = 43200, mtu = 1492, netmask = 255.255.255.0, router = 10.100.0.1, server_id = 10.100.0.1); next;)

For details, see: https://bugs.launchpad.net/charm-octavia/+bug/1946325

20220427 - ovn performance issue

This is a multi-faceted problem.
1, first fix 'no response to inactivity probe after 60.4 seconds, disconnecting'

ovs-vsctl set open . external_ids:ovn-openflow-probe-interval=0
ovs-vsctl set open . external_ids:ovn-monitor-all=true
ovs-vsctl set open . external_ids:ovn-remote-probe-interval=100000
ovn-nbctl set connection . inactivity_probe='180000'
ovn-sbctl set connection . inactivity_probe='180000'
ovn-nbctl get connection . inactivity_probe

2, then fix some broken routes: Address family doesn't match between 'ip_prefix' 0.0.0.0/0 and 'nexthop' 2620:2d:4000:2000::1 in static route

#https://man7.org/linux/man-pages/man8/ovn-nbctl.8.html
openstack subnet list --ip-version 6
sudo ovn-nbctl list logical_router
sudo ovn-nbctl list Logical_Router_Static_Route
sudo ovn-nbctl list Logical_Router_Policy
sudo ovn-nbctl list nat
ovn-nbctl lr-route-list neutron-21e7ddd5-2b82-4829-b295-f17dbb067ef8
ovn-nbctl lr-route-del neutron-21e7ddd5-2b82-4829-b295-f17dbb067ef8 0.0.0.0/0
#sudo ovn-nbctl --may-exist lr-route-add neutron-7c2bc1bc-3188-423b-b0a7-a9e1d8045c61 "0.0.0.0/0" 

3, there were SSL certificate expiry errors: the services had not been restarted after the SSL certificates were renewed (10.130.11.131:6642: send error: Broken pipe). The restart order is: first NB (ovn-northd, ovn-ovsdb-server-nb), then SB (ovn-ovsdb-server-sb); also restart the non-leader units first, then the leader units.

#The root cause of this is that the ovn-northd service has not been restarted after writing the certificates to disk
#Failed to reload ovn-northd.service: Job type reload is not applicable for unit ovn-northd.service.
#lib/charm/openstack/ovn_central.py
#https://bugs.launchpad.net/charm-ovn-central/+bug/1895303
#first restart ovn-northd and ovn-ovsdb-server-nb on the two follower nodes, then on the leader node
juju ssh ovn-central/0 sudo systemctl restart ovn-northd
juju ssh ovn-central/0 sudo systemctl restart ovn-ovsdb-server-nb
juju ssh ovn-central/0 sudo systemctl restart ovn-ovsdb-server-sb
openssl s_client -CAfile ovn-central.crt -connect 10.130.13.120:16642
$ for u in {0..2}; do
   echo "### Logs: ovn-central/${u} ###";
   juju ssh "ovn-central/${u}" '
     for log in $(sudo ls /var/log/ovn/*.log); do
       echo "--> Log: ${log}";
       sudo tail -n100 "${log}";
       echo -en "\n";
     done;'
   echo -en "\n\n";
done;

4, after the step above, the following logs appeared on ovn-central/1 and ovn-central/2; it looks like the two of them could not reach ovn-central/0

2022-04-27T06:07:37.821Z|00092|ovn_northd|INFO|ovn-northd lock lost. This ovn-northd instance is now on standby.
2022-04-27T06:07:37.822Z|00093|ovn_northd|INFO|ovn-northd lock acquired. This ovn-northd instance is now active.

Meanwhile, both of them showed this many writes (it was updating status):

2022-04-27T01:04:35.987Z|434275|timeval|WARN|Unreasonably long 2957ms poll interval (2601ms user, 319ms system)
2022-04-27T01:04:35.988Z|434276|timeval|WARN|faults: 391067 minor, 0 major
2022-04-27T01:04:35.990Z|434277|timeval|WARN|disk: 0 reads, 151488 writes

We tried to enable northd debug logging, but northd has no unix ctl so it cannot be done via ovs-appctl; the service definition itself has to be changed.

#enable debug log for ovnnb_db
ovs-appctl -t /var/run/ovn/ovnnb_db.ctl vlog/set DBG
#enable debug log for ovnsb_db
ovs-appctl --unixctl=/var/run/ovn/ovnsb_db.ctl vlog/set DBG 
#enable debug log for northd: northd doesn't have a unix ctl

Afterwards, the 'ovn-northd instance is now on standby' and 'ovn-northd instance is now active' logs were still visible, but checking the timestamps showed the two events happened at the same moment, so the NB DB problem was already resolved. This also implies the cause of the neutron agent flapping is in the SB DB, not the NB DB.

5, also handled a problem where telegraf dumped 300k flows every 10 seconds, plus a time-offset problem; both can cause performance issues.

/etc/telegraf/telegraf.d/ovs_dump_flows.conf occasionally invokes ovs_dump_flows, which can make the network suddenly drop (it dumps 300k flows every 10 sec).
juju config telegraf disabled_plugins="ovs_dump_flows:ovs_dpctl"
/var/lib/juju/agents/unit-telegraf-228/.venv/bin/python /var/lib/juju/agents/unit-telegraf-228/charm/files/telegraf_exec_metrics.py --metric ovs_dump_flows

The other is a time-offset problem; it appears when this relation is missing (juju add-relation nrpe:nrpe-external-master ntp:nrpe-external-master), and can be confirmed with the following command:
juju run -a ovn-chassis "/opt/ntpmon-ntp-charm/check_ntpmon.py --check offset reach proc vars"

Reduce the number of openstack-metadata-workers to relieve CPU pressure, and set enable-distributed-fip and enable-dvr to true.

6, is it a configuration problem? But reportedly this configuration has been running for years. The following errors kept appearing:

$ grep -r 'force recompute next time' ps5-rb1-n1_ovn-controller-log_00325247_2022-04-27.log
2022-04-27T03:21:28.306Z|477397|main|INFO|OVNSB commit failed, force recompute next time.
2022-04-27T06:01:09.861Z|481574|main|INFO|OVNSB commit failed, force recompute next time.
2022-04-27T06:07:35.153Z|481705|main|INFO|OVNSB commit failed, force recompute next time.
2022-04-27T06:08:00.113Z|481708|main|INFO|OVNSB commit failed, force recompute next time.

$ grep -r 'bridge not found for localnet port' ps5-rb1-n1_ovn-controller-log_00325247_2022-04-27.log |tail -n1
2022-04-27T09:02:56.182Z|486315|patch|ERR|bridge not found for localnet port 'provnet-aa3a4fec-a788-42e6-a773-bf3a0cdb52c2' with network name 'sriovfabric1'

The current configuration is:

$juju config ovn-chassis-sriov ovn-bridge-mappings
physnet1:br-data sriovfabric1:br-data sriovfabric2:br-data
$juju config ovn-chassis ovn-bridge-mappings
physnet1:br-data

$ juju config ovn-chassis-sriov bridge-interface-mappings
br-data:bond0
$ juju config ovn-chassis bridge-interface-mappings
br-data:bond0

Is something like this needed?

ip l add name veth-bond0 type veth peer name veth-ex
ip l set dev veth-bond0 up
ip l set dev veth-ex up
ip l set veth-bond0 master bond0
juju config ovn-chassis ovn-bridge-mappings="physnet1:br-data sriovfabric1:br-ex sriovfabric2:br-ex"
juju config ovn-chassis bridge-interface-mappings="br-data:bond0 br-ex:veth-ex"

No, the above is not needed; multiple physnets can share the same ovs bridge, so it could simply be:

juju config ovn-chassis ovn-bridge-mappings="physnet1:br-data sriovfabric1:br-data sriovfabric2:br-data"

# avoid the ovn-controller restart that juju config would cause
echo '== PRE =='
for i in $(juju status | awk '/ ovn-chassis\// {print $1}' | tr -d '*'); do
  cat <<'EOF' | juju ssh "${i}" bash -e
echo
hostname
sudo ovs-vsctl get open . external-ids
EOF
done

echo '== POST =='
for i in $(juju status | awk '/ ovn-chassis\// {print $1}' | tr -d '*'); do
  cat <<'EOF' | juju ssh "${i}" bash -e
sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings="physnet1:br-data,sriovfabric1:br-data,sriovfabric2:br-data"
sleep 10
sudo ovs-vsctl get open . external-ids
EOF
done

But the customer worried that the juju config above would restart ovn-controller on every node, so they ran the following command on just one chassis:

ovs-vsctl set open . external-ids:ovn-bridge-mappings="physnet1:br-data,sriovfabric1:br-data,sriovfabric2:br-data"

With that, 'bridge not found for localnet port' disappeared, but a large number of the following logs appeared:

2022-04-28T11:01:39.429Z|1380217|binding|INFO|Changing chassis for lport 64a1a58d-a7da-4ef2-a633-61a3e35068cf from n6.maas to n4.maas.
2022-04-28T11:01:39.429Z|1380218|binding|INFO|64a1a58d-a7da-4ef2-a633-61a3e35068cf: Claiming fa:16:3e:2f:ee:58 10.133.57.131
2022-04-28T11:14:35.443Z|1380545|binding|INFO|Releasing lport 64a1a58d-a7da-4ef2-a633-61a3e35068cf from this chassis.

Also, 'ovn-nbctl find Logical_Switch_Port type=external' showed dhcp_options was empty for every external port, while 'ovn-nbctl list dhcp_options' showed that the dhcp_options associated with the problematic subnet was not empty (that could be: https://bugs.launchpad.net/charm-octavia/+bug/1946325)

Then, after changing this setting (external-ids:ovn-bridge-mappings) on all nodes, the 'Changing chassis for lport' errors disappeared, and only the following remained in the ovn-controller log:

2022-04-29T11:20:51.434Z|1640975|poll_loop|INFO|wakeup due to 0-ms timeout at controller/ovn-controller.c:2115 (100% CPU usage)
2022-04-29T11:21:23.993Z|1640976|timeval|WARN|Unreasonably long 32558ms poll interval (32465ms user, 89ms system)

The ovs log just shows BFD flapping:

2022-04-29T12:54:15.566Z|01596|bfd(handler131)|INFO|ovn-ps5-ra-18: BFD state change: down->up "Control Detection Time Expired"->"Control Detection Time Expired".

BFD flapping is caused by high cpu usage, but what causes the high cpu usage?

  • neutron-ovn-db-sync-util: the db between ovn and neutron being out of sync?
  • a large number of stale flows?

BFD runs over the Geneve tunnel, and the Geneve tunnel goes br-int -> br-tun, so it is unrelated to the earlier idea of adding a peer device between br-int and br-data.

  • It is unrelated to the external ports of the sr-iov ports (br-int -> br-data), because the sr-iov ports don't use dhcp and there are only a few of them.
  • What about the tenant ports (br-int -> geneve tunnel)? They may well be related.

7, the pain continues
First raised bfd-mult and set use_parallel_build, but neither helped.

#did not work, so reverted to 5 again
sudo ovn-nbctl set NB_Global . options:"bfd-mult"=5
sudo ovs-appctl bfd/show

#on northd, use_parallel_build: If set to true, ovn-northd will attempt to compute logical flows in parallel.
sudo ovn-nbctl set NB_Global . options:"use_parallel_build"=true

The logs are:

$ juju run -a ovn-central -- sudo tail -n 20 /var/log/ovn/ovn-northd.log
    2022-04-29T16:06:42.225Z|79058|poll_loop|INFO|Dropped 13 log messages in last 4 seconds (most recently, 0 seconds ago) due to excessive rate
    2022-04-29T16:06:42.225Z|79059|poll_loop|INFO|wakeup due to [POLLIN] on fd 3 (10.130.11.131:49824<->10.130.11.131:16642) at lib/stream-ssl.c:832 (97% CPU usage)
#these are connections from northd, 
# https://www.openvswitch.org/support/ovscon2020/slides/OVSCON_OVN_Controller_IP.pdf
$ juju run -a ovn-central -- sudo tail -n 20 /var/log/ovn/ovsdb-server-nb.log
    2022-04-29T16:14:36.253Z|46012|stream_ssl|WARN|SSL_accept: system error (Success)
    2022-04-29T16:14:36.253Z|46013|jsonrpc|WARN|ssl:127.0.0.1:54038: receive error: Protocol error
    2022-04-29T16:14:36.254Z|46014|reconnect|WARN|ssl:127.0.0.1:54038: connection dropped (Protocol error)

The path is NB <- northd -> SB <- ovn-controller -> br-int; if the SB keeps reconnecting to northd, it may end up producing no flows.
The SB is used to generate flows; each ovn-controller run does the following, so it should be the DHCP requests that regenerate flows (while these events do not: port binding changes, OVS interface and port changes, datapath binding changes):

○ Translates logical flows to OpenFlow rules - lflow_run()
○ Generates OF flows which connect physical-to-logical and logical-to-physical (physical flows)
○ lflow_run() is called even for pinctrl (packet-ins) events.  # - that's a dhcp req/offer i.e.
This slide deck (https://www.openvswitch.org/support/ovscon2020/slides/OVSCON_OVN_Controller_IP.pdf) mentions incremental processing (force_recompute happens on almost any event that wakes the ovn-controller, so not just dhcp; upstream also state that with the introduction of I-P in 20.06 they found some regressions that were fixed in 20.09), so the likely fix is to upgrade ovn from 20.03 to 20.09.
There is also an article analysing the ovn-controller code: https://www.jianshu.com/p/952714cd3c33

20230829 update: BFD flapping can also be caused by the db being out of sync; note that neutron-ovn-db-sync-util defaults to log rather than repair. These are also needed:
https://bugs.launchpad.net/ubuntu/+source/openvswitch/+bug/2025323
https://bugs.launchpad.net/ubuntu/+source/ovn/+bug/2003056

20220513 - ovn upgrade from 20.03 to 22.03

When upgrading from 20.03 to 22.03, note that ovn-central has a source attribute while ovn-chassis does not. 'juju config ovn-central source=distro' means ovn-central will not use the UCA, only UA. Without it, ovn-central and ovn-chassis would end up on different ovn versions, and 'openstack network agent list' would return 0 agents.

#https://docs.openstack.org/charm-guide/latest/project/procedures/ovn-upgrade-2203.html
#upgrade juju client to the latest version
sudo snap refresh juju
juju upgrade-controller
juju ssh -m controller 0
juju upgrade-model

#neutron-api-plugin-ovn charm must support the ovn-source
juju config neutron-api-plugin-ovn ovn-source
juju refresh neutron-api-plugin-ovn

#to minimise timeout issues during the upgrade
juju config ovn-central ovsdb-server-election-timer=30
juju status ovn-central

#upgrade openvswitch-common and ovn-common to the latest version on both ovn-chassis and ovn-central
juju run -a ovn-chassis 'apt update && apt -y install \
   --only-upgrade openvswitch-common ovn-common'
juju run -a ovn-central 'apt update && apt -y install \
   --only-upgrade openvswitch-common ovn-common'
   
#to prevent an ovn data plane outage during the upgrade to 22.03, set ovn-controller to fail-safe mode
juju run -a ovn-central 'systemctl stop ovn-northd'
juju run -u ovn-central/<leader> 'ovn-sbctl set sb-global . options:northd_internal_version="safe"'

#perform the upgrade, 'source=distro' will let ovn-central not use UCA configured in ovn-source
juju refresh ovn-central --channel 22.03/stable \
   --config <(printf "ovn-central:\n source: \"distro\"")
juju config ovn-central ovn-source=cloud:focal-ovn-22.03
juju status ovn-central
#I hit this error on one unit: (leader-settings-changed) Unit is ready (leader: ovnsb_db)
#that's because that unit was still using ovn 20.03, so upgrade ovn to 22.03 by hand and then restart the unit

#verify DB version after migration
juju run -a ovn-central 'ovsdb-tool schema-version /usr/share/ovn/ovn-nb.ovsschema'
juju run -a ovn-central 'ovsdb-client get-schema-version unix:/var/run/ovn/ovnnb_db.sock OVN_Northbound'
juju run -a ovn-central 'ovsdb-tool schema-version /usr/share/ovn/ovn-sb.ovsschema'

#verify cluster status
juju run -a ovn-central 'ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound' | egrep "Server ID|Role|Leader"
juju run -a ovn-central 'ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound' | egrep "Server ID|Role|Leader"

#upgrade ovn-chassis
juju refresh ovn-chassis --channel 22.03/stable
juju config ovn-chassis ovn-source=cloud:focal-ovn-22.03
juju status ovn-chassis
#juju run -a ovn-chassis 'apt update && apt -y install --only-upgrade openvswitch-common ovn-common'
juju run -a ovn-chassis 'systemctl restart neutron-ovn-metadata-agent'

#upgrade neutron-api-plugin-ovn to 22.03
juju config neutron-api-plugin-ovn ovn-source="cloud:focal-ovn-22.03"
juju status neutron-api
#agent size will be 0 if ovn versions between ovn-central and ovn-chassis are different
juju run -a ovn-central -- sudo dpkg -l |grep ovn
juju run -a ovn-chassis -- sudo dpkg -l |grep ovn
openstack network agent list

After the upgrade, the ovn-controller performance problem was solved (the earlier error "OVNSB commit failed, force recompute next time." disappeared, https://bugzilla.redhat.com/show_bug.cgi?id=2050154). Logs like (logical_flow_output, recompute ((null)) took 1454ms) remain, which should be normal since the performance problem really is fixed.
Note: upgrading this way from 20.03 to 22.03 actually causes temporary DNS downtime, because the --restart feature used in step 2 above ("ovn-appctl -t ovn-controller exit --restart") is not supported in 20.03. A smoother upgrade would probably first set:

  • Set ovn-match-northd-version (sudo ovs-vsctl set open-vswitch . external_ids:ovn-match-northd-version="true") so the upgrade order of ovn-controller and ovn-northd no longer matters: when ovn-northd is upgraded first, ovn-controller detects the DB change but does not update flows, so the data plane is untouched; once the ovn-controller version matches ovn-northd's, it processes the DB change. This feature does not exist in 20.03 either, though.
  • With internal-dns in use, should (juju config neutron-api enable-ml2-dns=False) be set before the upgrade? When enable-ml2-dns=True, ovn-controller generates flows from the dns_server in dhcp_option_v4, and those flows proxy dns requests to the upstream dns server.

Or do a partial upgrade:

juju status | grep idle | grep  "leader:.*ovnnb_db"
juju ssh <LEADER> "sudo tar -czvf /tmp/ovnsb_db.tar.gz -C /var/lib/ovn/ ovnsb_db.db"

# Upgrading to the latest available in focal-updates
juju run -a neutron-api "sudo apt update; sudo apt install --only-upgrade neutron-common neutron-plugin-ml2 neutron-server python3-neutron python3-openvswitch"
juju run -a ovn-chassis,octavia-ovn-chassis "sudo apt update; sudo apt install --only-upgrade neutron-common neutron-ovn-metadata-agent python3-neutron python3-openvswitch openvswitch-common openvswitch-switch"

# ovn-central
juju refresh ovn-central --switch ovn-central --channel 22.03/stable
# wait till upgrade completes
juju config ovn-central ovn-source=cloud:focal-ovn-22.03

#Check that NB schema matches target
ovsdb-tool schema-version /usr/share/ovn/ovn-nb.ovsschema
sudo ovsdb-client get-schema-version unix:/var/run/ovn/ovnnb_db.sock OVN_Northbound
#Check that SB schema matches target
ovsdb-tool schema-version /usr/share/ovn/ovn-sb.ovsschema
sudo ovsdb-client get-schema-version unix:/var/run/ovn/ovnsb_db.sock OVN_Southbound
#Check cluster status
ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound
ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound

# ovn-chassis
# TODO: Clarify whether upgrading from charmstore to charmhub 22.03 is okay?
juju refresh --switch ch:ovn-chassis --channel 22.03/stable ovn-chassis
juju refresh --switch ch:ovn-chassis --channel 22.03/stable octavia-ovn-chassis
juju config ovn-chassis ovn-source=cloud:focal-ovn-22.03
juju config octavia-ovn-chassis ovn-source=cloud:focal-ovn-22.03

#had to restart on central units
sudo systemctl restart ovn-ovsdb-server-nb.service ovn-ovsdb-server-sb.service

#had to restart on neutron
sudo systemctl restart neutron-server.service

The order for restarting ovn-central

start with NB services first, northd after and SB last.
Select any NB follower (the one which is not leader) restart it, wait until it joined, pick the other follower do the same. Once they joined it is safe to restart the NB leader.
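
A quick sketch to check each unit's raft role before restarting (followers first, leader last; assumes 3 units and the distro ctl path used elsewhere in this post):

for u in 0 1 2; do
  echo "ovn-central/${u}:"
  juju run --unit ovn-central/${u} "sudo ovn-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound" | grep -E 'Role|Leader'
done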

ovn-central/0*                           active    idle   14       10.5.0.164      6641/tcp,6642/tcp  Unit is ready (leader: ovnnb_db, ovnsb_db)
ovn-central/1                            active    idle   15       10.5.0.35       6641/tcp,6642/tcp  Unit is ready (northd: active)
ovn-central/2                            active    idle   16       10.5.1.176      6641/tcp,6642/tcp  Unit is ready

#Non ovnnb_db leader
juju run --unit ovn-central/1 "sudo systemctl restart ovn-nb-ovsdb.service"
#Verify if that service restarted properly with
juju run --unit ovn-central/1 "sudo systemctl status ovn-nb-ovsdb.service"
#verify it with
juju run --unit ovn-central/0 "sudo ovn-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound"

#Non ovnnb_db leader 2
juju run --unit ovn-central/2 "sudo systemctl restart ovn-nb-ovsdb.service"
#Verify if that service restarted properly with
juju run --unit ovn-central/2 "sudo systemctl status ovn-nb-ovsdb.service"
#verify it with again
juju run --unit ovn-central/0 "sudo ovn-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound"

#only after we verified that both are fine
#ovnnb_db leader
juju run --unit ovn-central/0 "sudo systemctl restart ovn-nb-ovsdb.service"
#Verify if that service restarted properly with
juju run --unit ovn-central/0 "sudo systemctl status ovn-nb-ovsdb.service"
#at this point ovnnb leader could have switched to a different host
ubuntu@alin-serdean-bastion:~$ juju status | grep ovn-central
ovn-central                         20.03.2  active      3  ovn-central             20.03/edge      35  no       Unit is ready (leader: ovnsb_db)
ovn-central/0*                           active    idle   14       10.5.0.164      6641/tcp,6642/tcp  Unit is ready (leader: ovnsb_db)
ovn-central/1                            active    idle   15       10.5.0.35       6641/tcp,6642/tcp  Unit is ready (leader: ovnnb_db northd: active)
ovn-central/2                            active    idle   16       10.5.1.176      6641/tcp,6642/tcp  Unit is ready

The above is for 'leader: ovnnb_db'; the same procedure should be adapted for 'northd: active' and 'leader: ovnsb_db'.
For 'northd: active' the service is ovn-northd.service, and for 'leader: ovnsb_db' the service is ovn-sb-ovsdb.service.

20230425 - Unable to allocate network

After upgrading ovn to 22.03 to solve the leadership transfer problem, occasional 'Unable to allocate network' errors may still occur:

  • neutron-api was not upgraded when ovn was upgraded to 22.03 - https://bugs.launchpad.net/charm-neutron-api-plugin-ovn/+bug/1992770
  • IPs are reused so aggressively that reuse may start before the in-memory NB_DB is updated - https://bugs.launchpad.net/charm-neutron-api-plugin-ovn/+bug/1992770
  • the OVN DB was not synced because of an ACL problem - https://bugs.launchpad.net/neutron/+bug/1951296
juju ssh neutron-api/leader
sudo cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.copy
sudo sed -i "s@auth_section = .*@#auth_section = keystone_authtoken@g" /etc/neutron/neutron.conf.copy
sudo neutron-ovn-db-sync-util --config-file /etc/neutron/neutron.conf.copy --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --ovn-neutron_sync_mode log
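
Once the log run looks clean, the same util can repair the inconsistencies; a sketch based on the command above (repair mode writes to the OVN DB, so it is safer with neutron-server stopped):

sudo systemctl stop neutron-server
sudo neutron-ovn-db-sync-util --config-file /etc/neutron/neutron.conf.copy --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --ovn-neutron_sync_mode repair
sudo systemctl start neutron-server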

20231024 - crash during DB sync

Upgraded to ovn 22.03 but problems remained; the db was probably out of sync, yet the db sync itself crashed.
An unsynced DB can cause high CPU on ovn-central, which then causes recomputes; conversely, during the DB sync, ovn-central timeouts can keep SetLSwitchPortCommand from executing and crash the sync (the frequent OVN-DB reconnections in turn hurt the DB-sync).
The 6000+ SetLSwitchPortCommand operations issued at once during the DB-sync also put pressure on ovn-central. Can ovsdb_connection_timeout help? (No, the 180s appears to be fixed) - neutron-ovn-db-sync-util … --ovn-ovsdb_connection_timeout 600

sudo neutron-ovn-db-sync-util --debug --ovn-ovsdb_connection_timeout 600 --config-file /etc/neutron/neutron.conf.copy --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --ovn-neutron_sync_mode log | tee SYNC_UTIL_LOG_1_08_12.txt

Another possible cause is this bug: during the DB-sync it does not check whether a router port really changed, and triggers updates for about 110 router ports to OVN-NB all at once - https://bugs.launchpad.net/neutron/+bug/2030773

# what does nb db sync do?
# https://github.com/openstack/neutron/blob/master/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py
* Schema upgrade
* Migration from stateless to stateful Floating IPs
* Migration from Address Sets to Port Groups
* Fix revision number inconsistencies
* Fix DHCP options on subnets
* Turn on/off IGMP snooping based on config setting
* and more...

# confirm
juju run -a nova-compute -- 'echo $(hostname) $(grep -c "OVNSB commit failed, force recompute next time" /var/log/ovn/ovn-controller.log)'
 
# for the db sync issue - SetLSwitchPortCommand timeout

# decrease neutron-api's threads
juju config neutron-api worker-multiplier=0.21

# election timer
juju config ovn-central ovsdb-server-election-timer=30
juju config ovn-central ovsdb-server-inactivity-probe=120

# CPU governor is expected to be set to "Performance" instead of "On-Demand"

# too many routers
https://bugs.launchpad.net/neutron/+bug/2030773

Also possible: https://bugs.launchpad.net/neutron/+bug/2045811
cleaned-up RBAC entries and addressed the BFD issue by stopping VMs and rebuilding amphorae.
Also, ovsdbapp does no batching, so the db sync script sends 10k+ port updates to ovn-central and neutron-server in one go (pinged the upstream maintainer).

# https://bugs.launchpad.net/neutron/+bug/1961112
RuntimeError: ACL (to-lport, 1002, outport == @pg_eb32f5aa_90c6_4e81_a71d_5844ab36222e && ip4 && ip4.src == 10.101.8.0/24 && icmp4) already exists
https://bugs.launchpad.net/neutron/+bug/1973347
https://bugs.launchpad.net/neutron/+bug/1961112

Allow ovn_db_sync to continue on duplicate normalised CIDR - https://github.com/openstack/neutron/commit/5a0a2b7847da067817640404f53e0807755e08d7

Handle no more IP addresses available during a network sync - https://github.com/openstack/neutron/commit/a86e300a0b3c9b00974041935861ffb9279a6960

When a PortBindingChassisEvent is received, the revision_number is incremented, port bindings are updated (with the only change being the revision_number), which triggers a new PortBindingChassisEvent.
https://bugs.launchpad.net/neutron/+bug/1973347
https://bugs.launchpad.net/neutron/+bug/1955578
https://review.opendev.org/c/openstack/neutron/+/896926

Capture with tcpdump to see what is actually connecting between neutron-api and ovn-central:

# 30 mins worth of traffic between ovn-central and each neutron-api unit should hopefully show us what is going on.
timeout 1800 tcpdump -enli <interface> -w ovn-central-`hostname`.pcap host <neutron-api-host-0> or host <neutron-api-host-1> or host <neutron-api-host-2>

On ovn-central, check whether the leader switched:

juju run -a neutron-api -- grep -c 'clustered database server is not cluster leader' /var/log/neutron/neutron-server.log

It doesn't look like a db data inconsistency problem either:

nbleader=`juju status ovn-central| sed -rn "s,(ovn-central/[0-9])\*? .+ ovnnb_db.+,\1,p"`
sbleader=`juju status ovn-central| sed -rn "s,(ovn-central/[0-9])\*? .+ ovnsb_db.+,\1,p"`
readarray -t ports<<<`juju ssh ${nbleader} -- sudo ovn-nbctl show| grep gateway -B 3| sed -rn 's/.+port (\S+).*/\1/p'`
for port in ${ports[@]}; do
echo $port
juju ssh ${nbleader} -- "sudo ovn-nbctl lrp-get-gateway-chassis $port| head -n 1; sudo ovn-nbctl --bare --columns external-ids find Logical_Router_Port name=$port"
juju ssh ${sbleader} -- sudo ovn-sbctl --bare --columns external-ids find Port_Binding logical_port=$port
done

declare -A nb_external_ids=()
readarray -t nb_lrps<<<`sed -rn "s/^name\s+:\s+(\S+).*/\1/p" ovn-nbctl_--no-leader-only_list_Logical_Router_Port`
for p in ${nb_lrps[@]}; do
nb_external_ids[$p]=`egrep "^name\s+:\s+$p" ovn-nbctl_--no-leader-only_list_Logical_Router_Port -B 7| sed -rn 's/^external_ids\s+:\s+(.+)/\1/p'`
done

echo "NB Database has ${#nb_external_ids[@]} records"

for p in ${nb_lrps[@]}; do
ids="`egrep "logical_port.+: $p" ovn-sbctl_--no-leader-only_list_Port_Binding -B3| sed -rn 's/^external_ids\s+:\s+(.+)/\1/p'`"
if [[ ${nb_external_ids[$p]} != $ids ]]; then
echo -e "\nPort $p does not match:"
echo "NB: ${nb_external_ids[$p]}"
echo "SB: $ids"
fi
done

OVS on the nova-compute nodes has lots of BFD problems, and neutron-api also logs this:

RuntimeError: OVSDB Error: The transaction failed because the IDL has been configured to require a database lock
# maybe
$ git show 1d7e99bc0f

The RuntimeError above isn't necessarily the cause either: maintenance may believe it holds the lock when it actually doesn't, which is normal.
The real problem may be a large number of revision bumps; we may need a way to stop them (https://github.com/openstack/neutron/blame/stable/yoga/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py), (git log --oneline --no-merges 16.4.2..yoga).
The BFD issue still comes down to nova-compute node performance; ussuri + ovn 22.03 may simply be a bad combination.

20231113 - handling an OVN L1 case

The customer upgraded from ovs to ovn 22.04. During the upgrade the ovn-central/0 leader errored out and exited; it later turned out that only the debian package on ovn-central/0 had not been upgraded, while the other two had.
Because ovn-central/0 was the un-upgraded leader, the db sync could not run, so they stopped ovn-central/0 and ovn-central/1 became leader (the ovn cluster then had only 2 nodes, ovn-central/1 and ovn-central/2). At that point a VM with a FIP could be started normally, but starting octavia failed.
The problems observed were:

  • During the db sync, RevisionConflict appears: neutron.common.ovn.exceptions.RevisionConflict: OVN revision number for lrp-c86cf21c-655d-48d1-ba85-c7f488a1751e (type: router_ports) is equal or higher than the given resource. Skipping update. Is it because the router port had already been updated by ovn-central/0 with a newer revision, so the new leader ovn-central/1 could no longer update it? (20231212 update - it's caused by https://bugs.launchpad.net/neutron/+bug/1973347 , https://review.opendev.org/q/topic:%22bug/1973347%22)
  • binding_port_id in the neutron mysql db points at neutron-gateway (which no longer exists after the ovs-to-ovn upgrade). Is this stale data introduced by the ovs-to-ovn upgrade? And can a db sync (from the mysql db to the ovn nb db) still repair such stale data? In theory yes: every row in neutron's ml2_port_bindings table has a corresponding logical_port in the OVN NBDB, so after the sync OVN itself will be fine, and it is theoretically normal for the neutron mysql db to still hold these rows.

At the time it was impossible to tell whether the stale data came from the ovs-to-ovn upgrade or from the db sync from mysql to ovn nb. With no better option, we asked them to try rebuilding the ovn db; at worst it is harmless (only the ovn db is rebuilt, the mysql db is untouched, and the data can be synced back in).

The customer then rebuilt the ovn sb db and the ovn nb db, and pings started to work (after setting up SGs). The intermittent pings to the external network turned out to be because some compute nodes had no br-ex (later worked around by using lrp-set-gateway-chassis to pin a healthy compute node as the GW, as sketched below).
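
For reference, the temporary GW pinning looks roughly like this (a sketch; the lrp name is taken from the earlier example and good-compute.maas is a hypothetical chassis name):

ovn-nbctl lrp-set-gateway-chassis lrp-c6ffc24d-9213-4e31-80e2-f68107f9aad1 good-compute.maas 30
ovn-nbctl lrp-get-gateway-chassis lrp-c6ffc24d-9213-4e31-80e2-f68107f9aad1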

But then the customer reported high cpu usage on neutron-api. It turned out some compute nodes had wrong routes: one of the customer's tools set routes differently from netplan. The customer uses the IP on br-ex as the geneve endpoint IP. Did the missing routes push the data traffic onto the default route over bondA, the path used to reach the SB_DB, causing SB_DB connections to time out and reconnect (the BFD flapping came from high cpu load, possibly caused by the bond misconfiguration on nova-compute; BFD goes over the geneve tunnel, hence the default route)? This should be unrelated to the neutron-api high cpu (sar wasn't used, and ps inside the lxd containers showed 0% cpu, so it can't be confirmed now); the high cpu on neutron-api was resolved by reducing the thread count and by restarting mysql to clear a DBDeadlock.

The high cpu on ovn-central may have been caused by RevisionConflict, possibly related to this bug: https://bugs.launchpad.net/neutron/+bug/1955578

20240222 - why can't an object in ovn be deleted

I wanted to delete a dns object, as below, but the deletion never took effect:

root@juju-53cf31-ovn-12:/home/ubuntu# ovn-sbctl list dns
_uuid               : 49e339bb-c485-4e43-8664-1041ef4babbb
datapaths           : [64ec4cac-202e-48d9-8db4-852078be46cc]
external_ids        : {dns_id="68b9954a-46b0-42a0-9401-50738499ba02"}
records             : {cirros2-081030="192.168.21.72", cirros2-081030.test.ucloud.internal="192.168.21.72", jammy-090702="192.168.21.4", jammy-090702.ucloud.external="192.168.21.4"}
root@juju-53cf31-ovn-12:/home/ubuntu# ovn-sbctl remove dns 49e339bb-c485-4e43-8664-1041ef4babbb records cirros2-081030="192.168.21.72"
root@juju-53cf31-ovn-12:/home/ubuntu# ovn-sbctl list dns
_uuid               : 49e339bb-c485-4e43-8664-1041ef4babbb
datapaths           : [64ec4cac-202e-48d9-8db4-852078be46cc]
external_ids        : {dns_id="68b9954a-46b0-42a0-9401-50738499ba02"}
records             : {cirros2-081030="192.168.21.72", cirros2-081030.test.ucloud.internal="192.168.21.72", jammy-090702="192.168.21.4", jammy-090702.ucloud.external="192.168.21.4"}

The reason is that ovn-nbctl must be used instead of ovn-sbctl for the deletion (the SB dns records are derived from the NB ones, so edits made directly in the SB get overwritten):

ubuntu@juju-53cf31-ovn-13:~$ sudo ovn-nbctl list dns
_uuid               : 68b9954a-46b0-42a0-9401-50738499ba02
external_ids        : {ls_name=neutron-64a6f5a3-9631-4d9d-a35e-167e4fad36da}
records             : {cirros2-081030="192.168.21.72", cirros2-081030.test.ucloud.internal="192.168.21.72", jammy-090702="192.168.21.4", jammy-090702.ucloud.external="192.168.21.4"}
root@juju-53cf31-ovn-13:~# ovn-nbctl destroy dns 68b9954a-46b0-42a0-9401-50738499ba02                                                                                                  
root@juju-53cf31-ovn-13:~# ovn-nbctl list dns                                                                                                                                         
root@juju-53cf31-ovn-13:~# 

20240529 - common commands

ovn-nbctl list Logical_Switch
ovn-nbctl list Logical_Switch_Port
ovn-nbctl list ACL
ovn-nbctl list Logical_Router
ovn-nbctl list Logical_Router_Port
ovn-nbctl list Logical_Router_Static_Route
ovn-nbctl list NAT
ovn-nbctl list DHCP_Options
ovn-nbctl list Gateway_Chassis

ovn-sbctl list Chassis
ovn-sbctl list Encap
ovn-sbctl lflow-list
ovn-sbctl --db tcp:<IP>:6642 --ovs lflow-list
ovn-sbctl list Multicast_Group
ovn-sbctl list Datapath_Binding
ovn-sbctl list Port_Binding
ovn-sbctl list MAC_Binding
ovn-sbctl list DHCP_Options
ovn-sbctl list Gateway_Chassis