linux 内核vxlan收发包流程

1.vxlan口收包处理流程

前面几章详细介绍了vxlan接口的使用,而且已经知道vxlan是 MAC IN UDP 的封装,因此,在解封装之前,一切按照原有流程走。在此复习一下内核收发包流程(驱动层的数据处理这次不再解析,直接从__netif_receive_skb_core开始)

#物理网卡处理中断,触发softirq
i40e_intr
  └─napi_schedule_irqoff

#i40e softirq(NAPI poll)处理收包,经GRO后送入协议栈
i40e_napi_poll
  └─i40e_clean_rx_irq
      ├─i40e_fetch_rx_buffer
      |   └─__napi_alloc_skb
      |       └─skb->dev = napi->dev;
      ├─i40e_process_skb_fields
      └─i40e_receive_skb
          └─napi_gro_receive
              └─napi_skb_finish
                  └─netif_receive_skb_internal
                      └─__netif_receive_skb

__netif_receive_skb
---__netif_receive_skb_one_core
------__netif_receive_skb_core

/*
 * Core RX dispatch for the network stack (excerpt; "......" marks code
 * elided from the upstream kernel source by the article author).
 *
 * Two stages matter for the VXLAN discussion:
 *   1. the per-device rx_handler hook (used by OVS/bridge/bonding);
 *   2. delivery to L3 protocol handlers registered in ptype_base
 *      (for ETH_P_IP this is ip_rcv).
 */
static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
				    struct packet_type **ppt_prev)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
    
    ......
	/* Per-device hook: OVS ports register netdev_frame_hook here;
	 * plain kernel VXLAN devices have no rx_handler. */
    	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			/* Handler took ownership of the skb (e.g. OVS). */
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			/* skb->dev was changed; reprocess from the top. */
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			/* fall through */
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}


   .......
   	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);    /* handler registered per L3 protocol; for IPv4 this calls back into ip_rcv */
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

   ........

}
/*
 * IPv4 entry point, registered in ptype_base for ETH_P_IP.
 * Performs basic header validation (ip_rcv_core) and then runs the
 * netfilter PRE_ROUTING hook; if the packet survives, ip_rcv_finish
 * is invoked as the hook's continuation.
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);

	/* Sanity-check the IP header; may drop/trim the skb. */
	skb = ip_rcv_core(skb, net);
	if (skb == NULL)
		return NET_RX_DROP;
	/* NF_INET_PRE_ROUTING, then ip_rcv_finish on acceptance. */
	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip_rcv_finish);
}
ip_rcv_finish
---ip_rcv_finish_core
------ip_route_input_noref
----------ip_route_input_rcu
-------------ip_route_input_slow  主要完成路由查找,针对vxlan报文目的IP地址为本机IP地址
----------------rt_dst_alloc   

rt_dst_alloc  函数非常重要,主要指定RTCF_LOCAL类型的报文接下来的处理函数ip_local_deliver

/*
 * Allocate and initialize an IPv4 routing cache entry (rtable).
 *
 * Key point for the VXLAN RX path: for routes flagged RTCF_LOCAL
 * (destination is a local address, as with the outer IP of a VXLAN
 * packet addressed to this host) the input handler is set to
 * ip_local_deliver, which dst_input() will later invoke.
 */
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
		       (will_cache ? 0 : DST_HOST) |
		       (nopolicy ? DST_NOPOLICY : 0) |
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_gateway = 0;
		rt->rt_uses_gateway = 0;
		INIT_LIST_HEAD(&rt->rt_uncached);

		/* Default output path; overridden for local delivery below. */
		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
/*
 * Continuation after the PRE_ROUTING netfilter hook: resolve the route
 * for the skb and hand it to the route's input function via dst_input().
 */
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	ret = ip_rcv_finish_core(net, sk, skb, dev);
	if (ret != NET_RX_DROP) /* route lookup succeeded, so proceed to dst_input() below */
		ret = dst_input(skb);  
	return ret;
}

dst_input函数即为前面注册的ip_local_deliver
/* Dispatch to the input handler stored on the skb's dst entry; for
 * RTCF_LOCAL routes this is ip_local_deliver (set in rt_dst_alloc). */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

ip_local_deliver
---ip_local_deliver_finish

/*
 * Deliver a locally-destined IPv4 packet to its L4 protocol handler.
 * Strips the IP header, then looks up inet_protos[] by the protocol
 * field; for UDP this calls udp_rcv, which is how VXLAN packets reach
 * the tunnel socket.
 */
static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* Advance past the IP header so L4 sees its own header first. */
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	{
		int protocol = ip_hdr(skb)->protocol;
		const struct net_protocol *ipprot;
		int raw;

	resubmit:
		/* Clone to matching raw sockets, if any. */
		raw = raw_local_deliver(skb, protocol);

		ipprot = rcu_dereference(inet_protos[protocol]); /* dispatch by protocol number to the registered handler */
		if (ipprot) {
			int ret;

			if (!ipprot->no_policy) {
				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					kfree_skb(skb);
					goto out;
				}
				nf_reset(skb);
			}
			/* e.g. udp_rcv / tcp_v4_rcv / icmp_rcv */
			ret = ipprot->handler(skb);
			if (ret < 0) {
				/* Negative return asks for re-dispatch as protocol -ret. */
				protocol = -ret;
				goto resubmit;
			}
			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
		} else {
			if (!raw) {
				/* No handler and no raw socket: ICMP protocol unreachable. */
				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
					icmp_send(skb, ICMP_DEST_UNREACH,
						  ICMP_PROT_UNREACH, 0);
				}
				kfree_skb(skb);
			} else {
				__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
				consume_skb(skb);
			}
		}
	}
 out:
	rcu_read_unlock();

	return 0;
}

inet_protos[protocol]数组中记录UDP/TCP/ICMP等各种协议类型对应的处理函数(handler)

	if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
		pr_crit("%s: Cannot add ICMP protocol\n", __func__);
	if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
		pr_crit("%s: Cannot add UDP protocol\n", __func__);
	if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
		pr_crit("%s: Cannot add TCP protocol\n", __func__);


/* net_protocol descriptor registered for IPPROTO_UDP: all UDP traffic
 * (including encapsulated VXLAN packets) enters via udp_rcv. */
static struct net_protocol udp_protocol = {
	.early_demux =	udp_v4_early_demux,
	.early_demux_handler =	udp_v4_early_demux,
	.handler =	udp_rcv,
	.err_handler =	udp_err,
	.no_policy =	1,
	.netns_ok =	1,
};

udp类型的报文对应的handler 为udp_rcv,所有的udp报文均由udp_rcv处理

udp_rcv
---__udp4_lib_rcv
------__udp4_lib_lookup_skb //根据端口号获取sk
---------udp_unicast_rcv_skb
------------udp_queue_rcv_skb


/*
 * Queue a UDP packet onto a matched socket. For encapsulation sockets
 * (up->encap_type set, e.g. the VXLAN tunnel socket) the skb is first
 * offered to the socket's encap_rcv hook — vxlan_rcv — instead of being
 * queued as ordinary UDP data.
 */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) { /* encapsulated (tunnel) traffic, e.g. VXLAN */
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			/* For VXLAN sockets this is vxlan_rcv(). */
			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * 	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
			goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	/* Ordinary UDP delivery to the socket receive queue. */
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

在创建vxlan接口时会注册encap_rcv 回调函数,并且设置encap_type 为1

/*
 * Create the kernel UDP socket a VXLAN device receives on, and mark it
 * as an encapsulation socket: encap_type = 1 and encap_rcv = vxlan_rcv,
 * which is how udp_queue_rcv_skb() later diverts VXLAN packets out of
 * the normal UDP path.
 */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
					      __be16 port, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	/* Per-socket hash of VNIs -> vxlan devices. */
	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	refcount_set(&vs->refcnt, 1);
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	/* Let NICs offload RX for this UDP tunnel port. */
	udp_tunnel_notify_add_rx_port(sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_rcv;
	tunnel_cfg.encap_destroy = NULL;
	tunnel_cfg.gro_receive = vxlan_gro_receive;
	tunnel_cfg.gro_complete = vxlan_gro_complete;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}



/*
 * Copy a udp_tunnel_sock_cfg onto the underlying UDP sock, wiring up
 * the encap_rcv/GRO callbacks that the UDP RX path will invoke for
 * this tunnel socket.
 */
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *cfg)
{
	struct sock *sk = sock->sk;

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;

	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
	inet_inc_convert_csum(sk);

	/* sk_user_data carries the vxlan_sock back to vxlan_rcv(). */
	rcu_assign_sk_user_data(sk, cfg->sk_user_data);

	udp_sk(sk)->encap_type = cfg->encap_type;
	udp_sk(sk)->encap_rcv = cfg->encap_rcv;
	udp_sk(sk)->encap_destroy = cfg->encap_destroy;
	udp_sk(sk)->gro_receive = cfg->gro_receive;
	udp_sk(sk)->gro_complete = cfg->gro_complete;

	udp_tunnel_encap_enable(sock);
}


/* Callback from net/ipv4/udp.c to receive packets */
/*
 * Decapsulate a VXLAN packet arriving on the tunnel UDP socket.
 * Validates the VXLAN header, finds the vxlan device for the VNI,
 * strips the outer headers, rewrites skb->dev to the vxlan device,
 * and re-injects the inner frame into the stack via gro_cells_receive.
 * Returns 0 in all cases (packet consumed or dropped here).
 */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 protocol = htons(ETH_P_TEB);
	bool raw_proto = false;
	void *oiph;
	__be32 vni = 0;

	/* Need UDP and VXLAN header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto drop;

	unparsed = *vxlan_hdr(skb);
	/* VNI flag always required to be set */
	if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxlan_hdr(skb)->vx_flags),
			   ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non vxlan pkt */
		goto drop;
	}
	/* Clear the bits we understand; anything left over later means
	 * a malformed/unsupported header (checked below). */
	unparsed.vx_flags &= ~VXLAN_HF_VNI;
	unparsed.vx_vni &= ~VXLAN_VNI_MASK;

	/* vxlan_sock attached in setup_udp_tunnel_sock(). */
	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);

	vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); /* look up the vxlan device for this VNI */
	if (!vxlan)
		goto drop;

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if (vs->flags & VXLAN_F_GPE) {
		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
			goto drop;
		raw_proto = true;
	}

	/* Strip outer IP/UDP/VXLAN headers from the skb. */
	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
			goto drop;

	if (vxlan_collect_metadata(vs)) {
		struct metadata_dst *tun_dst;

		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
					 key32_to_tunnel_id(vni), sizeof(*md));  /* collect tunnel info, including outer src/dst IPs and ports */

		if (!tun_dst)
			goto drop;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

		skb_dst_set(skb, (struct dst_entry *)tun_dst);
	} else {
		memset(md, 0, sizeof(*md));
	}

	if (vs->flags & VXLAN_F_REMCSUM_RX)
		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
			goto drop;
	if (vs->flags & VXLAN_F_GBP)
		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
	/* Note that GBP and GPE can never be active together. This is
	 * ensured in vxlan_dev_configure.
	 */

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
		 * in reserved fields are to be ignored. The approach here
		 * maintains compatibility with previous stack code, and also
		 * is more robust and provides a little more security in
		 * adding extensions to VXLAN.
		 */
		goto drop;
	}

	if (!raw_proto) {
		/* Ethernet payload: validate inner MAC, set skb->dev etc. */
		if (!vxlan_set_mac(vxlan, vs, skb, vni))
			goto drop;
	} else {
		skb_reset_mac_header(skb);
		skb->dev = vxlan->dev;  /* switch skb->dev to the vxlan device */
		skb->pkt_type = PACKET_HOST;
	}

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;
		goto drop;
	}

	rcu_read_lock();

	if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
		rcu_read_unlock();
		atomic_long_inc(&vxlan->dev->rx_dropped);
		goto drop;
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	/* Re-inject the inner frame; it will re-enter the stack via the
	 * per-cpu backlog (netif_rx path). */
	gro_cells_receive(&vxlan->gro_cells, skb);

	rcu_read_unlock();

	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}

总结:vxlan收包流程涉及的函数

#rx softirq处理,一直捅到udp层,把skb上的dev切换成vxlan device,再入backlog,触发softirq
process_backlog
  └─__netif_receive_skb
      └─__netif_receive_skb_core
         └─deliver_skb
            └─(pt_prev->func)ip_rcv
              └─ip_rcv_finish
                  └─ip_route_input_noref
                      └─ip_route_input_slow
                         └─ip_local_deliver
                             └─ip_local_deliver_finish
                                └─(ipprot->handler)udp_rcv
                                    └─__udp4_lib_rcv
                                       └─udp_queue_rcv_skb
                                           └─vxlan_rcv
                                              ├─skb->dev = vxlan->dev;//这儿skb的切换到vxlan device
                                              └─gro_cells_receive
                                                 └─netif_rx
                                                     └─netif_rx_internal
                                                         └─enqueue_to_backlog
                                                            └─____napi_schedule

报文解封vxlan后重新进入协议栈处理
process_backlog
  └─__netif_receive_skb
    └─__netif_receive_skb_core
       └─(rx_handler)netdev_frame_hook //ovs创建的vxlan口会有一个回调(报文通过回调送入ovs处理),普通内核创建的vxlan口没有注册回调,会根据默认的协议栈流程进行处理
           └─netdev_port_receive
               └─ovs_vport_receive
                   ├─ovs_flow_key_extract
                   └─ovs_dp_process_packet

2.vxlan发包处理流程

由vxlan注册函数可知,其发送接口为vxlan_xmit
/* net_device_ops for a vxlan device: ndo_start_xmit = vxlan_xmit is
 * the TX entry point that performs the VXLAN encapsulation. */
static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};
vxlan_xmit
  └─vxlan_xmit_one
      ├─vxlan_build_skb
      └─udp_tunnel_xmit_skb
          └─iptunnel_xmit
             └─ip_local_out_sk

补充说明下ovs上的vxlan发包流程

datapath执行output

do_execute_actions
  └─do_output
      └─ovs_vport_send
          ├─skb->dev = vport->dev;//把dev替换成内核net列表里的dev 
          └─dev_queue_xmit
              └─__dev_queue_xmit
                 ├─validate_xmit_skb
                 ├─dev_hard_start_xmit
                 |   └─xmit_one
                 |       └─netdev_start_xmit
                 |           └─ndo_start_xmit就是vxlan_xmit
                 └─dev_xmit_complete

后面的流程与上面内核中的vxlan接口发包流程一致

vxlan_xmit
  └─vxlan_xmit_one
      ├─vxlan_build_skb
      └─udp_tunnel_xmit_skb
          └─iptunnel_xmit
             └─ip_local_out_sk
               

由此可见ovs的vxlan处理也使用了内核的vxlan处理流程

3 vxlan接口创建

内核方式创建vxlan接口

ip link add vxlan1 type vxlan id 1 dstport 4789 remote 10.10.10.9 local 10.10.10.11 dev eth0 

1> vxlan1 即为创建的接口名称 ,type 为 vxlan 类型。
2> id 即为 VNI。
3> dstport 指定 UDP的目的端口,IANA 为vxlan分配的目的端口是 4789。
4> remote 和 local ,即远端和本地的IP地址,因为vxlan是MAC-IN-UDP,需要指定外层IP,此处即指。
5> dev 本地流量接口,即用于 vtep 通信的网卡设备,内核从该设备读取本地 IP 地址。注意这个参数和 local 参数作用类似(都用于确定外层源地址),在这里写出来是为了告诉大家有这两个参数存在。


ovs下创建vxlan接口

ovs-vsctl add-port br1 vx1 -- set interface vx1 type=vxlan options:remote_ip=172.16.0.39 options:key=100

创建完成后发现内核态也会生成一个vxlan接口,其vni值为0

ip -d link show vxlan_sys_4789
8: vxlan_sys_4789: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 65535 qdisc noqueue master ovs-system state UNKNOWN mode DEFAULT qlen 1000
    link/ether 0e:49:c4:05:a6:87 brd ff:ff:ff:ff:ff:ff promiscuity 1 
    vxlan id 0 srcport 0 0 dstport 4789 nolearning ageing 300 udpcsum udp6zerocsumrx 
    openvswitch_slave addrgenmode eui64 
[root@p57093v weiyanhua]# 

上面的例子中通过使用ovs-vsctl命令创建了一个vxlan口。那么ovs-vsctl添加一个vxlan port之后,ovs-vswitchd是怎么知道的?答案是ovs-vswitchd和ovsdb-server之间也有连接,ovs-vswitchd通过IDL感知ovsdb-server的变化。

ovs-vswitchd main线程有个while(1)处理ovsdb-server变化和ovs-appctl命令,ovs-vsctl创建一个ovs vxlan port,最终调用到了dpif_netlink_port_add。

main
 ├─unixctl_server_create
 ├─bridge_init
 └─while//特别注意这个while循环
    ├─bridge_run
    |  ├─ovsdb_idl_run
    |  └─bridge_reconfigure
    |     └─bridge_add_ports
    |        └─iface_create
    |           └─ofproto_port_add
    |              └─(ofproto_class->port_add)port_add
    |                 └─dpif_port_add
    |                     └─(dpif_class->port_add)dpif_netlink_port_add
    ├─unixctl_server_run
    |   └─run_connection
    |      └─process_command
    |         └─(command->cb)
    ├─netdev_run
    ├─memory_wait
    ├─bridge_wait
    ├─unixctl_server_wait
    ├─netdev_wait
    └─pool_block

dpif_netlink_port_add用netlink发送给内核

dpif_netlink_port_add
  └─dpif_netlink_rtnl_port_create_and_add
      └─dpif_netlink_rtnl_port_create
          ├─dpif_netlink_rtnl_create
          |   └─dpif_netlink_rtnl_create//注意这个RTM_NEWLINK和IFF_UP
          └─dpif_netlink_port_add__//通知内核ovs添加vxlan_vport

内核openvswitch模块处理ovs通过route netlink发来的添加vxlan口的消息:除了创建一个vxlan device,还要创建一个vport,vport里嵌套vxlan device。内核openvswitch模块添加vport的调用链如下

ovs_vport_cmd_new
  └─new_vport
      └─ovs_vport_add
           └─vxlan_create
               ├─vxlan_tnl_create
               |   ├─vxlan_dev_create
               |   └─dev_change_flags
               |       └─__dev_change_flags//UP这个口
               |           └─__dev_open
               |               ├─(ops->ndo_open)也就是vxlan_open
               |               └─netpoll_rx_enable
               └─ovs_netdev_link
                   ├─vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
                   ├─netdev_rx_handler_register(vport->dev, netdev_frame_hook,vport);
                   ├─dev_disable_lro
                   └─dev_set_promiscuity

上面分析了ovs创建vxlan接口的主要处理流程,接下来简单看下内核创建vxlan接口的流程

 

vxlan_init_module
  └─rtnl_link_register(&vxlan_link_ops)
rtnl_newlink
  ├─rtnl_create_link
  |   ├─dev=kzalloc()
  |   └─alloc_netdev_mqs
  |       └─vxlan_setup
  ├─(ops->newlink)vxlan_newlink
  └─rtnl_configure_link
      └─__dev_change_flags
          └─__dev_open
              └─(ops->ndo_open)vxlan_open
  • 0
    点赞
  • 11
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值