目录
1.连接的概念
网络安全服务中,存在连接的概念,类似应用层的会话,连接信息由交互双方的五元组组成(srcip、dstip、srcport、dstport、protocol),记录了报文交互双方的基础信息,从而确定一条双向报文流;
2.连接的作用
网络安全业务正是基于连接展开的,关注的是针对一个连接正反向报文的处理,比如nat,当首包到来时基于报文信息创建连接,并记录针对该连接安全业务处理的相关信息,如nat 原始ip,转换后ip等等,后续同一连接的报文直接查会话进行相关处理,根据连接记录查询针对报文进行的安全业务处理,缩短了后续报文安全业务处理的步骤,提高服务处理的性能,此外连接跟踪也是一种查询定位报文经过哪些安全业务模块处理的手段。
3.连接的实现
Linux 内核中连接的建立、完整性检查等是基于内核Netfilter框架中的HOOK机制实现的;即在报文处理过程中的固定位置通过函数回调机制插入不同的处理函数,同一HOOK点可以插入多个处理函数,不同处理函数依据优先级顺序处理。具体如下所示:
如图所示在Linux报文转发处理过程中,有5个HOOK点,分别为PRE_ROUTING、LOCAL_IN、FORWARD、LOCAL_OUT、POST_ROUTING,具体到连接相关处理的HOOK处理的相关定义如下:
static const struct nf_hook_ops ipv4_conntrack_ops[] = {
{
.hook = ipv4_conntrack_in,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = ipv4_conntrack_local,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = ipv4_confirm,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
{
.hook = ipv4_confirm,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
};
连接建立
通过连接的概念可知,连接是针对三层报文而言的。通过代码走读跟踪报文处理流程,以IPV4为例,在ip_rcv内部发现第一个HOOK点:
return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
net, NULL, skb, dev, NULL,
ip_rcv_finish);
static inline int NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *))
通过NF_HOOK 的定义可知,这里对应NF_INET_PRE_ROUTING 位置,具体到conntrack为:
{
.hook = ipv4_conntrack_in,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK,
},
即对应ipv4_conntrack_in 处理函数,同时这里需要注意的是priority(优先级)字段,这里值为:NF_IP_PRI_CONNTRACK (-200),如前所述同一个HOOK点可以挂载多个HOOK处理函数,这些函数按优先级顺序逐个处理,优先级数值越小,优先级越高;因此当需要挂载新的HOOK处理函数时,根据报文处理需要在连接建立前或后而设定优先级,目前已知不同模块HOOK处理函数对应的优先级如下:
enum nf_ip_hook_priorities {
NF_IP_PRI_FIRST = INT_MIN,
NF_IP_PRI_RAW_BEFORE_DEFRAG = -450,
NF_IP_PRI_CONNTRACK_DEFRAG = -400,
NF_IP_PRI_RAW = -300,
NF_IP_PRI_SELINUX_FIRST = -225,
NF_IP_PRI_CONNTRACK = -200,
NF_IP_PRI_MANGLE = -150,
NF_IP_PRI_NAT_DST = -100,
NF_IP_PRI_FILTER = 0,
NF_IP_PRI_SECURITY = 50,
NF_IP_PRI_NAT_SRC = 100,
NF_IP_PRI_SELINUX_LAST = 225,
NF_IP_PRI_CONNTRACK_HELPER = 300,
NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX,
NF_IP_PRI_LAST = INT_MAX,
};
由此可见,NAT处理、连接确认(CONNTRACK_CONFIRM)等依据优先级顺序,在连接建立处理之后执行。书归正传,回到ipv4_conntrack_in处理函数。
ipv4_conntrack_in函数
static unsigned int ipv4_conntrack_in(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
return nf_conntrack_in(skb, state);
}
ipv4_conntrack_in这里只是调用了函数nf_conntrack_in,主要处理逻辑在nf_conntrack_in函数中,继续代码跟踪;
unsigned int
nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
{
enum ip_conntrack_info ctinfo; /* 表示连接状态的枚举数值 */
struct nf_conn *ct, *tmpl;
u_int8_t protonum;
int dataoff, ret;
/* 如果报文(skb)属于已建立的连接或打上不需连接跟踪的标记,则直接跳转 */
tmpl = nf_ct_get(skb, &ctinfo);
if (tmpl || ctinfo == IP_CT_UNTRACKED) {
/* Previously seen (loopback or untracked)? Ignore. */
if ((tmpl && !nf_ct_is_template(tmpl)) ||
ctinfo == IP_CT_UNTRACKED)
return NF_ACCEPT;
skb->_nfct = 0;
}
/* rcu_read_lock()ed by nf_hook_thresh */
/* 获取L4(传输层)协议及L4数据相对一报文头部的偏移,实质获取IP头数据后面的部分起始位置,如果是ICMP协议,则为ICMP报文头部位置 */
dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
if (dataoff <= 0) {
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
}
/* ICMP报文需要特殊处理 */
if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
protonum, state);
if (ret <= 0) {
ret = -ret;
goto out;
}
/* ICMP[v6] protocol trackers may assign one conntrack. */
if (skb->_nfct)
goto out;
}
repeat:
/* 建立连接、连接主要逻辑 */
ret = resolve_normal_ct(tmpl, skb, dataoff,
protonum, state);
if (ret < 0) {
/* Too stressed to deal. */
NF_CT_STAT_INC_ATOMIC(state->net, drop);
ret = NF_DROP;
goto out;
}
/* 获取已建立的连接信息 */
ct = nf_ct_get(skb, &ctinfo);
if (!ct) {
/* Not valid part of a connection */
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
}
/* 依据连接信息,协议处理报文 */
ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
nf_ct_put(ct);
skb->_nfct = 0;
/* Special case: TCP tracker reports an attempt to reopen a
* closed/aborted connection. We have to go back and create a
* fresh conntrack.
*/
if (ret == -NF_REPEAT)
goto repeat;
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
if (ret == -NF_DROP)
NF_CT_STAT_INC_ATOMIC(state->net, drop);
ret = -ret;
goto out;
}
if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
!test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
if (tmpl)
nf_ct_put(tmpl);
return ret;
}
根据上述处理逻辑,我们重点关注连接建立处理过程,主要逻辑在resolve_normal_ct()内部中。
resolve_normal_ct函数
static int
resolve_normal_ct(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
u_int8_t protonum,
const struct nf_hook_state *state)
{
const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_zone tmp;
u32 hash, zone_id, rid;
struct nf_conn *ct;
/* 依据报文信息获取报文五元组信息,srcip,dstip,srcport, dstport, proto */
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, state->pf, protonum, state->net,
&tuple))
return 0;
/* look for tuple match, 查找报文五元组匹配区域,通常为默认zone */
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
/* 通常为默认区域id 0 */
zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
/* 根据报文五元组信息、区域id、网络命名空间net,查找当前报文五元组信息是否在已有连接五元组hash链表中,进而确定报文是否属于已已建立连接或不存在对应连接,若不存在,则后续建立新的连接,
这里查找匹配,先基于报文原始方向(正向),后基于回复方向(反向) */
hash = hash_conntrack_raw(&tuple, zone_id, state->net);
h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
if (!h) {
rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
if (zone_id != rid) {
u32 tmp = hash_conntrack_raw(&tuple, rid, state->net);
h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);
}
}
/* 依据报文正反向都未匹配现有链接五元组信息,说明报文对应连接不存在,则建立新的连接 */
if (!h) {
/* 连接建立并依据报文、五元组信息初始化 */
h = init_conntrack(state->net, tmpl, &tuple,
skb, dataoff, hash);
if (!h)
return 0;
if (IS_ERR(h))
return PTR_ERR(h);
}
/* 内部使用了container_of,依据五元组信息地址获取对应的连接地址 */
ct = nf_ct_tuplehash_to_ctrack(h);
/* It exists; we have (non-exclusive) reference. */
/* 依据报文反向,以及连接状态设置连接状态值 */
if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
ctinfo = IP_CT_ESTABLISHED_REPLY;
} else {
unsigned long status = READ_ONCE(ct->status);
/* Once we've had two way comms, always ESTABLISHED. */
/* 一旦看到回复方向的报文,说明连接已建立 */
if (likely(status & IPS_SEEN_REPLY))
ctinfo = IP_CT_ESTABLISHED;
else if (status & IPS_EXPECTED)
ctinfo = IP_CT_RELATED;
else
ctinfo = IP_CT_NEW;
}
/* 将连接、连接状态信息与SKB相关联 */
nf_ct_set(skb, ct, ctinfo);
return 0;
}
从上述逻辑来看,resolve_normal_ct函数主要做了如下几件事:
1)获取报文五元组信息
2)依据报文、报文五元组信息、区域 、网络命名空间等建立并初始化新的连接
3)设置连接状态
4)将连接、连接状态信息关联到报文(skb)
五元组信息是区分连接的唯一标识,因此根据报文五元组信息可以判断报文是否属于已有连接;建立连接处理集中于init_conntrack函数,继续跟踪。
init_conntrack函数
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
const struct nf_conntrack_tuple *tuple,
struct sk_buff *skb,
unsigned int dataoff, u32 hash)
{
struct nf_conn *ct;
struct nf_conn_help *help;
struct nf_conntrack_tuple repl_tuple;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct nf_conntrack_ecache *ecache;
#endif
struct nf_conntrack_expect *exp = NULL;
const struct nf_conntrack_zone *zone;
struct nf_conn_timeout *timeout_ext;
struct nf_conntrack_zone tmp;
struct nf_conntrack_net *cnet;
/* 依据正向五元组信息获取反向(回复)报文五元组信息,二者之间存在对应关系,sip、dip、sport、dport 成相反关系,协议不变 */
if (!nf_ct_invert_tuple(&repl_tuple, tuple))
return NULL;
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
/* 分配连接结构体变量内存并初始化 */
ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
hash);
if (IS_ERR(ct))
return (struct nf_conntrack_tuple_hash *)ct;
/* 与功能宏有关,默认返回true */
if (!nf_ct_add_synproxy(ct, tmpl)) {
nf_conntrack_free(ct);
return ERR_PTR(-ENOMEM);
}
timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
if (timeout_ext)
nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
GFP_ATOMIC);
/* 添加连接相关扩展信息 */
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_labels_ext_add(ct);
#ifdef CONFIG_NF_CONNTRACK_EVENTS
ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
if ((ecache || net->ct.sysctl_events) &&
!nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
ecache ? ecache->expmask : 0,
GFP_ATOMIC)) {
nf_conntrack_free(ct);
return ERR_PTR(-ENOMEM);
}
#endif
cnet = nf_ct_pernet(net);
if (cnet->expect_count) {
spin_lock_bh(&nf_conntrack_expect_lock);
exp = nf_ct_find_expectation(net, zone, tuple);
if (exp) {
/* Welcome, Mr. Bond. We've been expecting you... */
__set_bit(IPS_EXPECTED_BIT, &ct->status);
/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
ct->master = exp->master;
if (exp->helper) {
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help)
rcu_assign_pointer(help->helper, exp->helper);
}
#ifdef CONFIG_NF_CONNTRACK_MARK
ct->mark = READ_ONCE(exp->master->mark);
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
ct->secmark = exp->master->secmark;
#endif
NF_CT_STAT_INC(net, expect_new);
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
if (!exp && tmpl)
__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
/* Other CPU might have obtained a pointer to this object before it was
* released. Because refcount is 0, refcount_inc_not_zero() will fail.
*
* After refcount_set(1) it will succeed; ensure that zeroing of
* ct->status and the correct ct->net pointer are visible; else other
* core might observe CONFIRMED bit which means the entry is valid and
* in the hash table, but its not (anymore).
*/
smp_wmb();
/* Now it is going to be associated with an sk_buff, set refcount to 1. */
refcount_set(&ct->ct_general.use, 1);
if (exp) {
if (exp->expectfn)
exp->expectfn(ct, exp);
nf_ct_expect_put(exp);
}
/* 返回正向的五元组hash节点地址 */
return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}
至此,一个新的连接已经建立,连接处理还包含许多其他复杂的处理,比如连接完整性确认、连接状态变化、扩展信息用途、父子连接等问题,稍后讨论。
本着学习的态度,写下了这篇博客,可能存在误解或不清楚的地方,欢迎大家多提意见,共同学习讨论。