/* ---- struct sw_flow ---- */
/*
 * A single flow entry in the datapath flow table.
 *
 * The flow is linked into two hash tables at once: 'flow_table'
 * (keyed by the masked flow key) and 'ufid_table' (keyed by the unique
 * flow identifier).  Each anonymous sub-struct holds the hlist linkage
 * plus the cached hash for that table.  node[2] presumably allows
 * membership in two table instances at once (e.g. during a table
 * resize/rehash) — TODO confirm against the table implementation.
 *
 * Freed via RCU ('rcu'); readers traverse the tables under
 * rcu_read_lock without taking the writer's lock.
 */
struct sw_flow {
	struct rcu_head rcu;		/* Deferred-free head; flow memory is
					 * reclaimed after an RCU grace period. */
	struct {
		struct hlist_node node[2];	/* Linkage in (up to) two table
						 * instances. */
		u32 hash;			/* Cached hash for this table. */
	} flow_table, ufid_table;
	int stats_last_writer;		/* CPU id of the last writer on
					 * 'stats[0]'.
					 */
	struct sw_flow_key key;		/* Flow key used for table lookup. */
	struct sw_flow_id id;		/* UFID or unmasked-key identifier
					 * (see struct sw_flow_id). */
	struct cpumask cpu_used_mask;	/* NOTE(review): presumably the set of
					 * CPUs with an allocated 'stats' slot —
					 * confirm at the stats-update site. */
	struct sw_flow_mask *mask;	/* Wildcard mask applied during lookup;
					 * shared and refcounted (see
					 * struct sw_flow_mask). */
	struct sw_flow_actions __rcu *sf_acts;	/* Action list; RCU-swapped when
						 * the flow is updated. */
	struct flow_stats __rcu *stats[]; /* One for each CPU. First one
					   * is allocated at flow creation time,
					   * the rest are allocated on demand
					   * while holding the 'stats[0].lock'.
					   */
};
/* ---- struct sw_flow_id ---- */
#define MAX_UFID_LENGTH 16 /* 128 bits */

/*
 * Identifier for a flow: either a caller-supplied unique flow ID (UFID)
 * of up to MAX_UFID_LENGTH bytes, or a pointer to the flow's unmasked
 * key when no UFID was provided.  'ufid_len' discriminates the union:
 * nonzero presumably means 'ufid' holds that many bytes, zero means
 * 'unmasked_key' is in use — TODO confirm against the lookup code.
 */
struct sw_flow_id {
	u32 ufid_len;			/* Length of 'ufid' in bytes; union
					 * discriminator (see above). */
	union {
		u32 ufid[MAX_UFID_LENGTH / 4];	/* UFID payload, stored as
						 * 32-bit words. */
		struct sw_flow_key *unmasked_key;	/* Fallback: the flow's
							 * original unmasked key. */
	};
};
/* ---- struct sw_flow_mask ---- */
/*
 * A wildcard mask, shared by all flows that use identical wildcarding.
 * 'ref_count' counts the flows referencing this mask; the mask itself
 * is freed via RCU ('rcu') once unreferenced.  Only the bytes of 'key'
 * inside 'range' are meaningful — NOTE(review): exact range semantics
 * inferred from struct sw_flow_key_range; confirm against the
 * mask-application code.
 */
struct sw_flow_mask {
	int ref_count;			/* Number of flows sharing this mask. */
	struct rcu_head rcu;		/* Deferred-free head for RCU reclaim. */
	struct sw_flow_key_range range;	/* Byte span of 'key' the mask covers. */
	struct sw_flow_key key;		/* The mask bits themselves. */
};
/* ---- struct sw_flow_key_range ---- */
/*
 * Byte range within a struct sw_flow_key that a mask covers.
 * NOTE(review): presumably half-open, [start, end) — confirm at the
 * site where the range is consumed.
 */
struct sw_flow_key_range {
	unsigned short int start;	/* Offset of the first relevant byte. */
	unsigned short int end;		/* Offset just past the last relevant
					 * byte (presumed exclusive). */
};
/* ---- struct sw_flow_actions ---- */
/*
 * The action list attached to a flow.  Referenced via an __rcu pointer
 * from struct sw_flow so it can be replaced atomically while readers
 * still execute the old list; the old list is freed after a grace
 * period ('rcu').  'actions' is a flexible array of netlink attributes
 * occupying 'actions_len' bytes in total.
 */
struct sw_flow_actions {
	struct rcu_head rcu;		/* Deferred-free head for RCU reclaim. */
	size_t orig_len;		/* From flow_cmd_new netlink actions size */
	u32 actions_len;		/* Total size of 'actions' in bytes. */
	struct nlattr actions[];	/* Flexible array of action attributes. */
};
/* ---- struct nlattr ---- */
/*
 * Netlink attribute header (TLV encoding).  The attribute payload
 * follows this header in memory; 'nla_len' presumably includes the
 * header itself, per the netlink wire format — TODO confirm against
 * the parsing code.  Field widths are fixed: this is a wire format
 * and must not be changed.
 */
struct nlattr {
	uint16_t nla_len;	/* Total attribute length in bytes. */
	uint16_t nla_type;	/* Attribute type discriminator. */
};
/* ---- struct mask_array ---- */
/*
 * Resizable, RCU-managed array of pointers to the masks currently in
 * use by the flow table.  'count' entries are active out of 'max'
 * allocated slots; growing presumably allocates a new array and swaps
 * it in, with the old one freed via 'rcu' — TODO confirm at the
 * resize site.
 */
struct mask_array {
	struct rcu_head rcu;			/* Deferred-free head for the
						 * superseded array. */
	int count, max;				/* Used entries / capacity. */
	struct sw_flow_mask __rcu *masks[];	/* Flexible array of
						 * RCU-protected mask pointers. */
};
/* ---- struct mask_cache_entry ---- */
/*
 * One cache entry mapping a packet's skb hash to the index of the mask
 * that matched a similar packet previously, letting lookup try that
 * mask first instead of scanning the whole mask_array.
 * NOTE(review): the cache's storage (per-CPU or shared) is not visible
 * here — confirm at its definition site.
 */
struct mask_cache_entry {
	u32 skb_hash;	/* Hash of the packet (skb) this entry is keyed on. */
	u32 mask_index;	/* Index into the mask_array that matched before. */
};
/* ---- struct table_instance (definition continues past this chunk) ---- */