CCP interacts with the kernel the same way cubic and bbr do:
1. Register itself with the kernel's list of congestion control algorithms:
return tcp_register_congestion_control(&tcp_ccp_congestion_ops);
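The registration follows the same pattern cubic and bbr use. A sketch, with the callback set abbreviated (tcp_ccp_init appears below; the other handler names here are illustrative, not necessarily ccp-kernel's exact ones):

static struct tcp_congestion_ops tcp_ccp_congestion_ops __read_mostly = {
    .init         = tcp_ccp_init,          /* per-sock setup, shown below */
    .release      = tcp_ccp_release,       /* per-sock teardown */
    .ssthresh     = tcp_ccp_ssthresh,      /* required by the framework */
    .undo_cwnd    = tcp_ccp_undo_cwnd,     /* required by the framework */
    .cong_control = tcp_ccp_cong_control,  /* main congestion control hook */
    .name         = "ccp",
    .owner        = THIS_MODULE,
};

static int __init tcp_ccp_register(void)
{
    /* same guard cubic/bbr use: private state must fit in icsk_ca_priv */
    BUILD_BUG_ON(sizeof(struct ccp) > ICSK_CA_PRIV_SIZE);
    return tcp_register_congestion_control(&tcp_ccp_congestion_ops);
}
module_init(tcp_ccp_register);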
2. Use the sock's icsk_ca_priv to store and identify each sock's CCP state.
icsk_ca_priv is the private area each sock reserves for its congestion control module, sized as follows:
/* linux/include/net/inet_connection_sock.h */
struct inet_connection_sock {
    ...
    u64 icsk_ca_priv[104 / sizeof(u64)];
};
inet_csk_ca(sk) returns a void * pointing at icsk_ca_priv:
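Its definition sits right next to the struct in the same header:

static inline void *inet_csk_ca(const struct sock *sk)
{
    return (void *)inet_csk(sk)->icsk_ca_priv;
}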
CCP uses icsk_ca_priv to store its struct ccp (see also https://blog.csdn.net/qq_42804416/article/details/88671823):
struct ccp {
    ...
    struct ccp_connection *dp;
};

struct ccp_connection {
    ...
    void *state;
    struct ccp_primitives prims;
};
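With this layout, any congestion control callback can get from the sock to the CCP connection state in two hops. A minimal illustrative sketch (not ccp-kernel's actual handler; rtt_sample_us is one of the ccp_primitives fields in libccp):

static void tcp_ccp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
    struct ccp *cpl = inet_csk_ca(sk);      /* per-sock private area */
    struct ccp_connection *conn = cpl->dp;  /* libccp connection handle */

    /* conn->prims is where the datapath accumulates measurement
     * primitives for the user-space agent to consume */
    if (conn && sample->rtt_us > 0)
        conn->prims.rtt_sample_us = sample->rtt_us;
}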
After tcp_register_congestion_control(&tcp_ccp_congestion_ops) succeeds, every TCP sock that selects CCP is initialized through tcp_ccp_init(struct sock *sk):
struct ccp *cpl;
struct tcp_sock *tp = tcp_sk(sk);
struct ccp_datapath_info dp = {
    .init_cwnd = tp->snd_cwnd * tp->mss_cache,
    .mss = tp->mss_cache,
    .src_ip = tp->inet_conn.icsk_inet.inet_rcv_saddr,
    .src_port = tp->inet_conn.icsk_inet.inet_num,
    .dst_ip = tp->inet_conn.icsk_inet.inet_daddr,
    .dst_port = tp->inet_conn.icsk_inet.inet_dport,
    .congAlg = "reno",
};
cpl = inet_csk_ca(sk);
cpl->dp = ccp_connection_start((void *) sk, &dp);
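For reference, the flow description handed to ccp_connection_start() is libccp's struct ccp_datapath_info, roughly this shape (paraphrased from libccp; MAX_CONG_ALG_SIZE is libccp's bound on the algorithm-name string):

struct ccp_datapath_info {
    u32 init_cwnd;
    u32 mss;
    u32 src_ip;
    u32 src_port;
    u32 dst_ip;
    u32 dst_port;
    char congAlg[MAX_CONG_ALG_SIZE];
};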
After ccp_connection_start() returns, inet_csk_ca(sk)->dp points to the struct ccp_connection holding this TCP sock's flow attributes. Inside ccp_connection_start(), libccp fills in the connection it returns:
conn->impl = impl;     // impl is the tcp sock, passed in above as (void *) sk
conn->index = sid + 1; // the allocated sid; conn->index uniquely identifies this TCP connection
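The sid comes from scanning libccp's connection table for a free slot. A sketch of that allocation, based on libccp (CAS is libccp's compare-and-swap wrapper; index == 0 marks a free slot, which is why the stored index is sid + 1 and never 0):

u16 sid;
struct ccp_connection *conn;
for (sid = 0; sid < MAX_ACTIVE_FLOWS; sid++) {
    conn = &ccp_active_connections[sid];
    /* atomically claim slot sid by flipping its index from 0 to sid + 1 */
    if (CAS(&(conn->index), 0, sid + 1))
        break;
}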
Finally, every TCP sock's creation must be reported to the user-space CCP agent; ccp_connection_start() does this through send_conn_create():
send_conn_create(datapath, conn) {
    msg_size = write_create_msg(msg, REPORT_MSG_SIZE, conn->index, cr);
    // in the kernel datapath, send_msg delivers the message over netlink
    ok = datapath->send_msg(datapath, conn, msg, msg_size);
}
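A minimal sketch of what such a kernel-to-user netlink send looks like (the names ccp_nl_sk and ccp_agent_portid are assumptions standing in for ccp-kernel's actual identifiers):

#include <net/netlink.h>
#include <net/sock.h>

static struct sock *ccp_nl_sk;   /* kernel-side netlink socket, created at module load */
static u32 ccp_agent_portid;     /* port id of the user-space CCP agent */

static int ccp_nl_send_msg(char *msg, int msg_size)
{
    struct sk_buff *skb;
    struct nlmsghdr *nlh;

    skb = nlmsg_new(msg_size, GFP_ATOMIC);
    if (!skb)
        return -ENOMEM;

    /* portid 0 = message originates in the kernel */
    nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, msg_size, 0);
    if (!nlh) {
        kfree_skb(skb);
        return -EMSGSIZE;
    }
    memcpy(nlmsg_data(nlh), msg, msg_size);

    /* unicast the serialized create/report message to the agent */
    return netlink_unicast(ccp_nl_sk, skb, ccp_agent_portid, MSG_DONTWAIT);
}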