3403 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3404 gfp_t flags)
3405 {
3406 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3407 NULL, 0);
3408 }
3388 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3389 unsigned int change, u32 event,
3390 gfp_t flags, int *new_nsid, int new_ifindex)
3391 {
3392 struct sk_buff *skb;
3393
3394 if (dev->reg_state != NETREG_REGISTERED)
3395 return;
3396
3397 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3398 new_ifindex);
3399 if (skb)
3400 rtmsg_ifinfo_send(skb, dev, flags);
3401 }
3351 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3352 unsigned int change,
3353 u32 event, gfp_t flags, int *new_nsid,
3354 int new_ifindex)
3355 {
3356 struct net *net = dev_net(dev);
3357 struct sk_buff *skb;
3358 int err = -ENOBUFS;
3359 size_t if_info_size;
3360
3361 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
3362 if (skb == NULL)
3363 goto errout;
3364
3365 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3366 type, 0, 0, change, 0, 0, event,
3367 new_nsid, new_ifindex, -1, flags);
3368 if (err < 0) {
3369 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3370 WARN_ON(err == -EMSGSIZE);
3371 kfree_skb(skb);
3372 goto errout;
3373 }
3374 return skb;
3375 errout:
3376 if (err < 0)
3377 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3378 return NULL;
3379 }
501 /**
502 * nlmsg_new - Allocate a new netlink message
503 * @payload: size of the message payload
504 * @flags: the type of memory to allocate.
505 *
506 * Use NLMSG_DEFAULT_SIZE if the size of the payload isn't known
507 * and a good default is needed.
508 */
509 static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
510 {
511 return alloc_skb(nlmsg_total_size(payload), flags);
512 }
/**
 * nlmsg_total_size - length of netlink message including padding
 * @payload: length of message payload
 *
 * Header + payload, rounded up to NLMSG_ALIGNTO.
 */
static inline int nlmsg_total_size(int payload)
{
	return NLMSG_ALIGN(nlmsg_msg_size(payload));
}
983 static noinline size_t if_nlmsg_size(const struct net_device *dev,
984 u32 ext_filter_mask)
985 {
986 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
987 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
988 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
989 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
990 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
991 + nla_total_size(sizeof(struct rtnl_link_stats))
992 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
993 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
994 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
995 + nla_total_size(4) /* IFLA_TXQLEN */
996 + nla_total_size(4) /* IFLA_WEIGHT */
997 + nla_total_size(4) /* IFLA_MTU */
998 + nla_total_size(4) /* IFLA_LINK */
999 + nla_total_size(4) /* IFLA_MASTER */
1000 + nla_total_size(1) /* IFLA_CARRIER */
1001 + nla_total_size(4) /* IFLA_PROMISCUITY */
1002 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1003 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1004 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1005 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1006 + nla_total_size(1) /* IFLA_OPERSTATE */
1007 + nla_total_size(1) /* IFLA_LINKMODE */
1008 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1009 + nla_total_size(4) /* IFLA_LINK_NETNSID */
1010 + nla_total_size(4) /* IFLA_GROUP */
1011 + nla_total_size(ext_filter_mask
1012 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1013 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1014 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1015 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1016 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1017 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1018 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1019 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1020 + rtnl_xdp_size() /* IFLA_XDP */
1021 + nla_total_size(4) /* IFLA_EVENT */
1022 + nla_total_size(4) /* IFLA_NEW_NETNSID */
1023 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
1024 + nla_total_size(1) /* IFLA_PROTO_DOWN */
1025 + nla_total_size(4) /* IFLA_IF_NETNSID */
1026 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1027 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
1028 + nla_total_size(4) /* IFLA_MIN_MTU */
1029 + nla_total_size(4) /* IFLA_MAX_MTU */
1030 + 0;
1031 }
654
655 /**
656 * nla_attr_size - length of attribute not including padding
657 * @payload: length of payload
658 */
659 static inline int nla_attr_size(int payload)
660 {
661 return NLA_HDRLEN + payload; 上面那么多项每个都是以struct nlattr打头的吧
662 }
663
/**
 * nla_total_size - total length of attribute including padding
 * @payload: length of payload
 *
 * Header + payload, rounded up to NLA_ALIGNTO (4 bytes).
 */
static inline int nla_total_size(int payload)
{
	return NLA_ALIGN(nla_attr_size(payload));
}
include/uapi/linux/netlink.h:90:#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
include/uapi/linux/netlink.h:230:#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
include/uapi/linux/netlink.h:229:#define NLA_ALIGNTO 4
include/uapi/linux/netlink.h:231:#define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr)))
1589 static int rtnl_fill_ifinfo(struct sk_buff *skb,
1590 struct net_device *dev, struct net *src_net,
1591 int type, u32 pid, u32 seq, u32 change,
1592 unsigned int flags, u32 ext_filter_mask,
1593 u32 event, int *new_nsid, int new_ifindex,
1594 int tgt_netnsid, gfp_t gfp)
1595 {
1596 struct ifinfomsg *ifm;
1597 struct nlmsghdr *nlh;
1598
1599 ASSERT_RTNL();
1600 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1601 if (nlh == NULL)
1602 return -EMSGSIZE;
1603
1604 ifm = nlmsg_data(nlh); ifm作为struct nlmsghdr的数据部分
1605 ifm->ifi_family = AF_UNSPEC;
1606 ifm->__ifi_pad = 0;
1607 ifm->ifi_type = dev->type;
1608 ifm->ifi_index = dev->ifindex;
1609 ifm->ifi_flags = dev_get_flags(dev);
1610 ifm->ifi_change = change;
1611
1612 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_IF_NETNSID, tgt_netnsid))
1613 goto nla_put_failure;
1614
1615 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1616 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1617 nla_put_u8(skb, IFLA_OPERSTATE,
1618 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1619 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1620 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1621 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1622 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1623 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1624 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1625 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1626 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1627 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1628 #ifdef CONFIG_RPS
1629 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1630 #endif
1631 put_master_ifindex(skb, dev) ||
1632 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1633 (dev->qdisc &&
1634 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
1635 nla_put_ifalias(skb, dev) ||
1636 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1637 atomic_read(&dev->carrier_up_count) +
1638 atomic_read(&dev->carrier_down_count)) ||
1639 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) ||
1640 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1641 atomic_read(&dev->carrier_up_count)) ||
1642 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1643 atomic_read(&dev->carrier_down_count)))
1644 goto nla_put_failure;
1645
1646 if (event != IFLA_EVENT_NONE) {
1647 if (nla_put_u32(skb, IFLA_EVENT, event))
1648 goto nla_put_failure;
1649 }
1650
1651 if (rtnl_fill_link_ifmap(skb, dev))
1652 goto nla_put_failure;
1653
1654 if (dev->addr_len) {
1655 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1656 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1657 goto nla_put_failure;
1658 }
1659
1660 if (rtnl_phys_port_id_fill(skb, dev))
1661 goto nla_put_failure;
1662
1663 if (rtnl_phys_port_name_fill(skb, dev))
1664 goto nla_put_failure;
1665
1666 if (rtnl_phys_switch_id_fill(skb, dev))
1667 goto nla_put_failure;
1668
1669 if (rtnl_fill_stats(skb, dev))
1670 goto nla_put_failure;
1671
1672 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1673 goto nla_put_failure;
1674
1675 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1676 goto nla_put_failure;
1677
1678 if (rtnl_xdp_fill(skb, dev))
1679 goto nla_put_failure;
1680
1681 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1682 if (rtnl_link_fill(skb, dev) < 0)
1683 goto nla_put_failure;
1684 }
1685
1686 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1687 goto nla_put_failure;
1688
1689 if (new_nsid &&
1690 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1691 goto nla_put_failure;
1692 if (new_ifindex &&
1693 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1694 goto nla_put_failure;
1695
1696
1697 rcu_read_lock();
1698 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1699 goto nla_put_failure_rcu;
1700 rcu_read_unlock();
1701
1702 nlmsg_end(skb, nlh);
1703 return 0;
1704
1705 nla_put_failure_rcu:
1706 rcu_read_unlock();
1707 nla_put_failure:
1708 nlmsg_cancel(skb, nlh);
1709 return -EMSGSIZE;
1710 }
460 /**
461 * nlmsg_put - Add a new netlink message to an skb
462 * @skb: socket buffer to store message in
463 * @portid: netlink PORTID of requesting application
464 * @seq: sequence number of message
465 * @type: message type
466 * @payload: length of message payload
467 * @flags: message flags
468 *
469 * Returns NULL if the tailroom of the skb is insufficient to store
470 * the message header and payload.
471 */
472 static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
473 int type, int payload, int flags)
474 {
475 if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
476 return NULL;
477
478 return __nlmsg_put(skb, portid, seq, type, payload, flags);
479 }
2169 struct nlmsghdr *
2170 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2171 {
2172 struct nlmsghdr *nlh;
2173 int size = nlmsg_msg_size(len);
2174
2175 nlh = skb_put(skb, NLMSG_ALIGN(size)); put是放在skb的tail处的,此处放入的应该就是带payload的struct nlmsghdr
2176 nlh->nlmsg_type = type;
2177 nlh->nlmsg_len = size; 带payload的struct nlmsghdr 未对齐
2178 nlh->nlmsg_flags = flags;
2179 nlh->nlmsg_pid = portid;
2180 nlh->nlmsg_seq = seq;
2181 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2182 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2183 return nlh;
2184 }
282 /**
283 * nlmsg_msg_size - length of netlink message not including padding
284 * @payload: length of message payload
285 */
286 static inline int nlmsg_msg_size(int payload)
287 {
288 return NLMSG_HDRLEN + payload; NLMSG_HDRLEN就是对齐后struct nlmsghdr的size
289 }
include/uapi/linux/netlink.h:91:#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
44 struct nlmsghdr {
45 __u32 nlmsg_len; /* Length of message including header */
46 __u16 nlmsg_type; /* Message content */
47 __u16 nlmsg_flags; /* Additional flags */
48 __u32 nlmsg_seq; /* Sequence number */
49 __u32 nlmsg_pid; /* Sending process port ID */
50 };
3381 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
3382 {
3383 struct net *net = dev_net(dev);
3384
3385 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
3386 }
733 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
734 struct nlmsghdr *nlh, gfp_t flags)
735 {
736 struct sock *rtnl = net->rtnl;
737 int report = 0;
738
739 if (nlh)
740 report = nlmsg_report(nlh);
741
742 nlmsg_notify(rtnl, skb, pid, group, report, flags);
743 }
438 /**
439 * nlmsg_report - need to report back to application?
440 * @nlh: netlink message header
441 *
442 * Returns 1 if a report back to the application is requested.
443 */
444 static inline int nlmsg_report(const struct nlmsghdr *nlh)
445 {
446 return !!(nlh->nlmsg_flags & NLM_F_ECHO);
447 /**
448 NLM_F_ECHO是Netlink消息头中的一个标志位,如果设置了这个标志,
449 那么发送这条消息的应用程序希望内核在接收到消息后立即将其原封不动地回传给应用程序。
450 **/
451 }
2487 /**
2488 * nlmsg_notify - send a notification netlink message
2489 * @sk: netlink socket to use
2490 * @skb: notification message
2491 * @portid: destination netlink portid for reports or 0
2492 * @group: destination multicast group or 0
2493 * @report: 1 to report back, 0 to disable
2494 * @flags: allocation flags
2495 */
2496 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2497 unsigned int group, int report, gfp_t flags)
2498 {
2499 int err = 0;
2500
2501 if (group) {
2502 int exclude_portid = 0;
2503
2504 if (report) {
2505 refcount_inc(&skb->users);
2506 exclude_portid = portid;
2507 }
2508
2509 /* errors reported via destination sk->sk_err, but propagate
2510 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
2511 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2512 if (err == -ESRCH)
2513 err = 0;
2514 }
2515
2516 if (report) {
2517 int err2;
2518
2519 err2 = nlmsg_unicast(sk, skb, portid);
2520 if (!err)
2521 err = err2;
2522 }
2523
2524 return err;
2525 }
580 /**
581 * nlmsg_multicast - multicast a netlink message
582 * @sk: netlink socket to spread messages to
583 * @skb: netlink message as socket buffer
584 * @portid: own netlink portid to avoid sending to yourself
585 * @group: multicast group id
586 * @flags: allocation flags
587 */
588 static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
589 u32 portid, unsigned int group, gfp_t flags)
590 {
591 int err;
592
593 NETLINK_CB(skb).dst_group = group;
594
595 err = netlink_broadcast(sk, skb, portid, group, flags);
596 if (err > 0)
597 err = 0;
598
599 return err;
600 }
JJJ:rtmsg_ifinfo解析
最新推荐文章于 2024-05-16 20:42:16 发布