Alios中的buf_queue,相当于Freertos中的msg queue,是一种很常用的消息同步机制。一般用法是在一个handler loop里,阻塞地receive其他线程send过来的msg,根据msg的参数做统一的消息处理。我们先看下这次主要分析的接口:
/**
 * This function will create a dyn-queue
 * @param[out] queue pointer to the queue (the space is provided by the kernel)
 * @param[in] name pointer to the name of the queue
 * @param[in] size size of the buf in bytes
 * @param[in] max_msg max size of one msg
 * @return the operation status, RHINO_SUCCESS is OK, others is error
 */
kstat_t krhino_buf_queue_dyn_create(kbuf_queue_t **queue, const name_t *name,
size_t size, size_t max_msg);
/**
 * This function will delete a dyn-queue
 * @param[in] queue pointer to the queue
 * @return the operation status, RHINO_SUCCESS is OK, others is error
 */
kstat_t krhino_buf_queue_dyn_del(kbuf_queue_t *queue);
/**
 * This function will send a msg to the end of the queue
 * @param[in] queue pointer to the queue
 * @param[in] msg pointer to the msg to be sent
 * @param[in] size size of the msg
 * @return the operation status, RHINO_SUCCESS is OK, others is error
 */
kstat_t krhino_buf_queue_send(kbuf_queue_t *queue, void *msg, size_t size);
/**
 * This function will receive a msg from the queue
 * @param[in] queue pointer to the queue
 * @param[in] ticks ticks to wait before receiving the msg
 * @param[out] msg pointer to the buf to save the msg
 * @param[out] size size of the received msg
 * @return the operation status, RHINO_SUCCESS is OK, others is error
 */
kstat_t krhino_buf_queue_recv(kbuf_queue_t *queue, tick_t ticks, void *msg, size_t *size);
为了讲清楚buf_queue所需的内存分配,这里选择分析动态分配的buf_queue:这样用户不需要对kbuf_queue_t结构做任何处理,可以直接使用。
先了解一下kbuf_queue_t结构:
/* Control block of a buffer queue (message queue). */
typedef struct {
blk_obj_t blk_obj; // block object: records tasks blocked on this queue (same role as in the mutex chapter)
void *buf; // start address of the message buffer
k_ringbuf_t ringbuf; // ring-buffer descriptor; most relevant when the queue is created as RINGBUF_TYPE_DYN
size_t max_msg_size; // maximum allowed length of a single message
size_t cur_num; // number of messages currently stored in the buffer
size_t peak_num; // high-water mark: largest message count the buffer has ever held
size_t min_free_buf_size; // low-water mark: smallest free space the buffer has ever had
#if (RHINO_CONFIG_KOBJ_LIST > 0)
klist_t buf_queue_item; // node linked into g_kobj_list, for object inspection/management
#endif
#if (RHINO_CONFIG_USER_SPACE > 0)
uint32_t key;
#endif
uint8_t mm_alloc_flag; // how buf was obtained: dynamic (kernel heap) or user-provided static memory
} kbuf_queue_t;
typedef struct { // ring-buffer descriptor
uint8_t *buf; // start of the ring storage; fixed for the lifetime of the buffer
uint8_t *end; // one past the last byte of the ring storage; fixed for the lifetime of the buffer
uint8_t *head; // start of the oldest stored message; moves as messages are popped
uint8_t *tail; // end of the newest stored message, i.e. where the next message is written; moves as messages are pushed
size_t freesize; // bytes of free space currently left in the ring
size_t type; // usage mode: RINGBUF_TYPE_FIX = fixed-length messages, RINGBUF_TYPE_DYN = variable-length messages
size_t blk_size; // per-message length; only meaningful for RINGBUF_TYPE_FIX
} k_ringbuf_t;
由于RINGBUF_TYPE_FIX类型的定长消息处理相对较容易,所以我们主要研究RINGBUF_TYPE_DYN类型的不定长消息。其实看完上边的注释,buf_queue的大概使用规则应该就清楚了,接下来我们来看下具体的buf_queue的实现原理:
/**
 * This function will create a dyn-queue: both the kbuf_queue_t control
 * block and the message buffer are allocated by the kernel.
 * @param[out] queue   receives the pointer to the created queue
 * @param[in]  name    pointer to the queue name
 * @param[in]  size    length of the message buffer in bytes
 * @param[in]  max_msg maximum allowed length of a single msg
 * @return the operation status, RHINO_SUCCESS is OK, others is error
 */
kstat_t krhino_buf_queue_dyn_create(kbuf_queue_t **queue, const name_t *name,
size_t size, size_t max_msg)
{
    /* a dyn-queue always uses variable-length messages (RINGBUF_TYPE_DYN);
     * all allocation and setup is delegated to the internal helper */
    kstat_t ret = buf_queue_dyn_create(queue, name, size, max_msg,
                                       RINGBUF_TYPE_DYN);

    return ret;
}
/**
 * Allocate and initialize a buf-queue whose control block and message
 * buffer both come from the kernel heap.
 * @param[out] queue   receives the pointer to the newly created queue
 * @param[in]  name    pointer to the queue name
 * @param[in]  size    total length of the message buffer in bytes
 * @param[in]  max_msg maximum allowed length of a single message
 * @param[in]  type    ring-buffer type (RINGBUF_TYPE_FIX or RINGBUF_TYPE_DYN)
 * @return RHINO_SUCCESS on success, otherwise an error code
 */
static kstat_t buf_queue_dyn_create(kbuf_queue_t **queue, const name_t *name,
size_t size, size_t max_msg, uint8_t type)
{
    kstat_t       stat;
    kbuf_queue_t *queue_obj;

    NULL_PARA_CHK(queue);

    if (size == 0u) {
        return RHINO_BUF_QUEUE_SIZE_ZERO;
    }

    /* allocate the kbuf_queue_t control block from the kernel heap */
    queue_obj = krhino_mm_alloc(sizeof(kbuf_queue_t));
    if (queue_obj == NULL) {
        return RHINO_NO_MEM;
    }

    /* allocate the message buffer of `size` bytes */
    queue_obj->buf = krhino_mm_alloc(size);
    if (queue_obj->buf == NULL) {
        krhino_mm_free(queue_obj);
        return RHINO_NO_MEM;
    }

    /* initialize the queue.
     * Fix: forward the caller-supplied ring-buffer `type` instead of
     * hard-coding RINGBUF_TYPE_DYN, so the `type` parameter is honored
     * (existing RINGBUF_TYPE_DYN callers are unaffected). */
    stat = buf_queue_create(queue_obj, name, queue_obj->buf, size, max_msg,
                            K_OBJ_DYN_ALLOC, type);
    if (stat != RHINO_SUCCESS) {
        /* creation failed: release everything allocated above */
        krhino_mm_free(queue_obj->buf);
        krhino_mm_free(queue_obj);
        return stat;
    }

    *queue = queue_obj;

    return RHINO_SUCCESS;
}
/**
 * Common initialization shared by the static and dynamic creation paths.
 * @param[in] queue         queue control block to initialize
 * @param[in] name          queue name
 * @param[in] buf           message buffer memory
 * @param[in] size          length of buf in bytes
 * @param[in] max_msg       maximum length of a single message
 * @param[in] mm_alloc_flag how buf was obtained (K_OBJ_DYN_ALLOC or static)
 * @param[in] type          ring-buffer type (RINGBUF_TYPE_FIX / RINGBUF_TYPE_DYN)
 * @return RHINO_SUCCESS on success, otherwise an error code
 */
static kstat_t buf_queue_create(kbuf_queue_t *queue, const name_t *name, void *buf, size_t size,
size_t max_msg, uint8_t mm_alloc_flag, size_t type)
{
#if (RHINO_CONFIG_KOBJ_LIST > 0)
CPSR_ALLOC();
#endif
NULL_PARA_CHK(queue);
NULL_PARA_CHK(buf);
NULL_PARA_CHK(name);
if (max_msg == 0u) {
return RHINO_INV_PARAM;
}
if (size == 0u) {
return RHINO_BUF_QUEUE_SIZE_ZERO;
}
memset(queue, 0, sizeof(kbuf_queue_t));
/* init the queue blocked list */
klist_init(&queue->blk_obj.blk_list);
queue->buf = buf;
queue->cur_num = 0u; // no messages stored yet
queue->peak_num = 0u; // high-water mark of stored messages starts at 0
queue->max_msg_size = max_msg; // maximum allowed length of one message
queue->blk_obj.name = name;
queue->blk_obj.blk_policy = BLK_POLICY_PRI; // blocked tasks are woken by priority (the other policy is FIFO)
queue->mm_alloc_flag = mm_alloc_flag;
#if (RHINO_CONFIG_TASK_DEL > 0)
queue->blk_obj.cancel = 1u;
#endif
#if (RHINO_CONFIG_KOBJ_LIST > 0)
RHINO_CRITICAL_ENTER();
/* link into the global kernel-object list for inspection/management */
klist_insert(&(g_kobj_list.buf_queue_head), &queue->buf_queue_item);
RHINO_CRITICAL_EXIT();
#endif
queue->blk_obj.obj_type = RHINO_BUF_QUEUE_OBJ_TYPE;
/* set up the ring buffer over buf; afterwards the whole buffer is free */
ringbuf_init(&(queue->ringbuf), buf, size, type, max_msg);
queue->min_free_buf_size = queue->ringbuf.freesize;
TRACE_BUF_QUEUE_CREATE(krhino_cur_task_get(), queue);
return RHINO_SUCCESS;
}
/**
 * Initialize a ring-buffer descriptor over caller-provided storage.
 * @param[in] p_ringbuf  descriptor to initialize
 * @param[in] buf        start of the backing storage
 * @param[in] len        length of the backing storage in bytes
 * @param[in] type       RINGBUF_TYPE_FIX or RINGBUF_TYPE_DYN
 * @param[in] block_size per-message length (meaningful only for RINGBUF_TYPE_FIX)
 * @return RHINO_SUCCESS
 */
kstat_t ringbuf_init(k_ringbuf_t *p_ringbuf, void *buf, size_t len, size_t type,
size_t block_size)
{
    uint8_t *base = (uint8_t *)buf;

    /* record the fixed geometry of the ring */
    p_ringbuf->buf      = buf;
    p_ringbuf->end      = base + len;
    p_ringbuf->type     = type;
    p_ringbuf->blk_size = block_size; /* used only by the RINGBUF_TYPE_FIX path */

    /* start out empty: head == tail == buf, freesize == len */
    ringbuf_reset(p_ringbuf);

    return RHINO_SUCCESS;
}
/**
 * Empty the ring buffer: head and tail both return to the start of the
 * storage and the whole capacity becomes free again.
 * @param[in] p_ringbuf ring-buffer descriptor to reset
 * @return RHINO_SUCCESS
 */
kstat_t ringbuf_reset(k_ringbuf_t *p_ringbuf)
{
    uint8_t *base = p_ringbuf->buf;

    p_ringbuf->head     = base;
    p_ringbuf->tail     = base;
    p_ringbuf->freesize = (size_t)(p_ringbuf->end - base);

    return RHINO_SUCCESS;
}
初始化完成后, buf_queue指向的ringbuf状态如下图:
接着看重要的buf_queue_send接口:
/**
 * Internal send: deliver a message to the queue. If a task is already
 * blocked waiting in receive, the message is copied straight into that
 * task's buffer and the task is woken; otherwise the message is pushed
 * into the ring buffer.
 * @param[in] queue    pointer to the queue
 * @param[in] msg      pointer to the message to send
 * @param[in] msg_size length of the message in bytes
 * @return the operation status, RHINO_SUCCESS is OK, others is error
 */
static kstat_t buf_queue_send(kbuf_queue_t *queue, void *msg, size_t msg_size)
{
CPSR_ALLOC();
klist_t *head;
ktask_t *task;
kstat_t err;
uint8_t cur_cpu_num;
RHINO_CRITICAL_ENTER();
// make sure the blk_obj type really is RHINO_BUF_QUEUE_OBJ_TYPE
if (queue->blk_obj.obj_type != RHINO_BUF_QUEUE_OBJ_TYPE) {
RHINO_CRITICAL_EXIT();
return RHINO_KOBJ_TYPE_ERR;
}
cur_cpu_num = cpu_cur_get();
(void)cur_cpu_num;
// message longer than max_msg_size: reject
if (msg_size > queue->max_msg_size) {
TRACE_BUF_QUEUE_MAX(g_active_task[cur_cpu_num], queue, msg, msg_size);
RHINO_CRITICAL_EXIT();
return RHINO_BUF_QUEUE_MSG_SIZE_OVERFLOW;
}
// zero-length message: reject as well
if (msg_size == 0) {
RHINO_CRITICAL_EXIT();
return RHINO_INV_PARAM;
}
head = &queue->blk_obj.blk_list;
// if no task is blocked on this queue (nobody is waiting in receive), buffer the message
/* buf queue is not full here, if there is no blocked receive task */
if (is_klist_empty(head)) {
// push the message into the ring buffer
err = ringbuf_push(&(queue->ringbuf), msg, msg_size);
if (err != RHINO_SUCCESS) {
RHINO_CRITICAL_EXIT();
if (err == RHINO_RINGBUF_FULL) {
err = RHINO_BUF_QUEUE_FULL;
}
return err;
}
// one more message stored in the queue
queue->cur_num++;
// update the high-water mark queue->peak_num
if (queue->peak_num < queue->cur_num) {
queue->peak_num = queue->cur_num;
}
// update the low-water mark queue->min_free_buf_size
if (queue->min_free_buf_size > queue->ringbuf.freesize) {
queue->min_free_buf_size = queue->ringbuf.freesize;
}
TRACE_BUF_QUEUE_POST(g_active_task[cur_cpu_num], queue, msg, msg_size);
RHINO_CRITICAL_EXIT();
return RHINO_SUCCESS;
}
// reaching here means some task is blocked on this queue (i.e. waiting in receive)
task = krhino_list_entry(head->next, ktask_t, task_list);
// copy the message straight into the highest-priority blocked task's task->msg buffer
memcpy(task->msg, msg, msg_size);
task->bq_msg_size = msg_size;
// wake that task (as analyzed in the mutex chapter: it goes back to the ready queue; the scheduler picks by priority at the next scheduling point)
pend_task_wakeup(task);
TRACE_BUF_QUEUE_TASK_WAKE(g_active_task[cur_cpu_num], task, queue);
RHINO_CRITICAL_EXIT_SCHED();
return RHINO_SUCCESS;
}
从这里看,send流程相当简单,反而复杂的过程都在ringbuf的操作上:
/**
 * Push one message into the ring buffer.
 * FIX rings copy exactly blk_size bytes. DYN rings first store a
 * sizeof(size_t)-byte length header and then the message body; both the
 * header and the body may be split across the physical end of the buffer.
 * @param[in] p_ringbuf ring-buffer descriptor
 * @param[in] data      message to copy in
 * @param[in] len       message length in bytes (ignored for FIX rings)
 * @return RHINO_SUCCESS, RHINO_RINGBUF_FULL or RHINO_INV_PARAM
 */
kstat_t ringbuf_push(k_ringbuf_t *p_ringbuf, void *data, size_t len)
{
size_t len_bytes = 0;
size_t split_len = 0;
uint8_t c_len[RING_BUF_LEN] = {0};
// RINGBUF_TYPE_FIX: every push carries a fixed-length message
if (p_ringbuf->type == RINGBUF_TYPE_FIX) {
// not enough free space: bail out
if (p_ringbuf->freesize < p_ringbuf->blk_size) {
return RHINO_RINGBUF_FULL;
}
if (p_ringbuf->tail == p_ringbuf->end) {
p_ringbuf->tail = p_ringbuf->buf;
}
// copy the message into the buffer; advance tail and shrink freesize by one blk_size
memcpy(p_ringbuf->tail, data, p_ringbuf->blk_size);
p_ringbuf->tail += p_ringbuf->blk_size;
p_ringbuf->freesize -= p_ringbuf->blk_size;
} else {
// RINGBUF_TYPE_DYN: variable-length messages
if ((len == 0u) || (len >= (uint32_t) - 1)) {
return RHINO_INV_PARAM;
}
// RING_BUF_LEN is sizeof(size_t): the space used to record the message length
len_bytes = RING_BUF_LEN;
// check the free space can hold sizeof(size_t) + the message body
/* for dynamic length ringbuf */
if (p_ringbuf->freesize < (len_bytes + len)) {
return RHINO_RINGBUF_FULL;
}
// before the body, a sizeof(size_t)-byte header holding the message length is stored
memcpy(c_len, &len, RING_BUF_LEN);
// tail already at the physical end: wrap around to the start
if (p_ringbuf->tail == p_ringbuf->end) {
p_ringbuf->tail = p_ringbuf->buf;
}
// part 1: copy the sizeof(size_t)-byte length header (possibly split at the buffer end)
/* copy length data to buffer */
if (p_ringbuf->tail >= p_ringbuf->head &&
(split_len = p_ringbuf->end - p_ringbuf->tail) < len_bytes && split_len > 0) {
memcpy(p_ringbuf->tail, &c_len[0], split_len);
len_bytes -= split_len;
p_ringbuf->tail = p_ringbuf->buf;
p_ringbuf->freesize -= split_len;
} else {
split_len = 0;
}
if (len_bytes > 0) {
memcpy(p_ringbuf->tail, &c_len[split_len], len_bytes);
p_ringbuf->freesize -= len_bytes;
p_ringbuf->tail += len_bytes;
}
/* copy data to ringbuf, if break by buffer end, split data and copy to buffer head*/
split_len = 0;
if (p_ringbuf->tail == p_ringbuf->end) {
p_ringbuf->tail = p_ringbuf->buf;
}
// part 2: copy the message body (again possibly split at the buffer end)
if (p_ringbuf->tail >= p_ringbuf->head &&
((split_len = p_ringbuf->end - p_ringbuf->tail) < len) &&
split_len > 0) {
memcpy(p_ringbuf->tail, data, split_len);
data = (uint8_t *)data + split_len;
len -= split_len;
p_ringbuf->tail = p_ringbuf->buf;
p_ringbuf->freesize -= split_len;
}
memcpy(p_ringbuf->tail, data, len);
p_ringbuf->tail += len;
p_ringbuf->freesize -= len;
}
return RHINO_SUCCESS;
}
当ringbuf申请为RINGBUF_TYPE_DYN类型时,表示发送的message长度可以动态变化,因此必须把每条message的长度也存放到buf上,如下图所示。第一张图是连续send两个message、又没有其他任务去receive时,两个message在buf中的堆放形式。第二张图是当有一个任务receive掉一个message后,p_ringbuf->head会向后移动,把第一个被取出的message所占的空间空出来。
上面的图已经预先揭示了一些krhino_buf_queue_recv将要对ringbuf做的事情,我们来看看:
/**
 * Receive a message from the queue. If a message is available it is
 * returned immediately; otherwise the calling task blocks for up to
 * `ticks` (or returns at once with RHINO_NO_PEND_WAIT when ticks is
 * RHINO_NO_WAIT).
 * @param[in]  queue pointer to the queue
 * @param[in]  ticks ticks to wait for a msg
 * @param[out] msg   buffer that receives the msg
 * @param[out] size  receives the length of the msg (set to 0 on failure)
 * @return the operation status, RHINO_SUCCESS is OK, others is error
 */
kstat_t krhino_buf_queue_recv(kbuf_queue_t *queue, tick_t ticks, void *msg, size_t *size)
{
CPSR_ALLOC();
kstat_t ret;
uint8_t cur_cpu_num;
NULL_PARA_CHK(queue);
NULL_PARA_CHK(msg);
NULL_PARA_CHK(size);
RHINO_CRITICAL_ENTER();
cur_cpu_num = cpu_cur_get();
TASK_CANCEL_CHK(queue);
// interrupt context must not block: only RHINO_NO_WAIT is allowed from an ISR
if ((g_intrpt_nested_level[cur_cpu_num] > 0u) && (ticks != RHINO_NO_WAIT)) {
RHINO_CRITICAL_EXIT();
return RHINO_NOT_CALLED_BY_INTRPT;
}
// make sure blk_obj really is a buf-queue object
if (queue->blk_obj.obj_type != RHINO_BUF_QUEUE_OBJ_TYPE) {
RHINO_CRITICAL_EXIT();
return RHINO_KOBJ_TYPE_ERR;
}
// messages are available: take one right away
if (queue->cur_num > 0u) {
// pop one message from the ring buffer
ringbuf_pop(&(queue->ringbuf), msg, size);
// one fewer message stored
queue->cur_num --;
RHINO_CRITICAL_EXIT();
return RHINO_SUCCESS;
}
// the queue is currently empty
if (ticks == RHINO_NO_WAIT) {
*size = 0u;
RHINO_CRITICAL_EXIT();
return RHINO_NO_PEND_WAIT;
}
// blocking is impossible while the scheduler is locked
if (g_sched_lock[cur_cpu_num] > 0u) {
*size = 0u;
RHINO_CRITICAL_EXIT();
return RHINO_SCHED_DISABLE;
}
// point the running task's msg at the caller's receive buffer (a future sender copies into it directly)
g_active_task[cur_cpu_num]->msg = msg;
/* as analyzed in the mutex chapter: remove the current task from g_ready_queue and put it on the queue's blk_list */
pend_to_blk_obj((blk_obj_t *)queue, g_active_task[cur_cpu_num], ticks);
TRACE_BUF_QUEUE_GET_BLK(g_active_task[cur_cpu_num], queue, ticks);
/* actually reschedule; covered in the scheduling chapter */
RHINO_CRITICAL_EXIT_SCHED();
/* execution resumes here once the block ends: a message arrived, the ticks timeout expired, or some other wakeup reason */
RHINO_CPU_INTRPT_DISABLE();
cur_cpu_num = cpu_cur_get();
/* read back how the pend ended, same as in the mutex chapter */
ret = pend_state_end_proc(g_active_task[cur_cpu_num], (blk_obj_t *)queue);
switch (ret) {
case RHINO_SUCCESS:
*size = g_active_task[cur_cpu_num]->bq_msg_size;
break;
default:
*size = 0u;
break;
}
RHINO_CPU_INTRPT_ENABLE();
return ret;
}
krhino_buf_queue_recv至此就分析完了,程序退出后,用户要么已经获取到一个msg,要么执行超时或者其他原因退出,这个会在ret里返回结果。
最后我们再看下krhino_buf_queue_dyn_del:
/**
 * Delete a dynamically created buf-queue: wake every task still blocked
 * on it, unlink it from the kernel object list and free both the message
 * buffer and the control block.
 * @param[in] queue pointer to the queue
 * @return the operation status, RHINO_SUCCESS is OK, others is error
 */
kstat_t krhino_buf_queue_dyn_del(kbuf_queue_t *queue)
{
CPSR_ALLOC();
klist_t *head;
NULL_PARA_CHK(queue);
// enter the critical section (interrupts disabled)
RHINO_CRITICAL_ENTER();
// deletion must not be called from interrupt context
INTRPT_NESTED_LEVEL_CHK();
// make sure the object really is a buf-queue
if (queue->blk_obj.obj_type != RHINO_BUF_QUEUE_OBJ_TYPE) {
RHINO_CRITICAL_EXIT();
return RHINO_KOBJ_TYPE_ERR;
}
// only queues that were created dynamically may be deleted here
if (queue->mm_alloc_flag != K_OBJ_DYN_ALLOC) {
RHINO_CRITICAL_EXIT();
return RHINO_KOBJ_DEL_ERR;
}
// list of tasks still blocked on this queue (callers of krhino_buf_queue_recv)
head = &queue->blk_obj.blk_list;
queue->blk_obj.obj_type = RHINO_OBJ_TYPE_NONE;
// unblock every waiting task one by one; they go back to the ready queue
while (!is_klist_empty(head)) {
pend_task_rm(krhino_list_entry(head->next, ktask_t, task_list));
}
#if (RHINO_CONFIG_KOBJ_LIST > 0)
// remove this buf-queue from the global object-management list
klist_rm(&queue->buf_queue_item);
#endif
// reset the ring-buffer bookkeeping
ringbuf_reset(&queue->ringbuf);
// free the buffer and the queue control block (after leaving the critical section)
RHINO_CRITICAL_EXIT_SCHED();
krhino_mm_free(queue->buf);
krhino_mm_free(queue);
return RHINO_SUCCESS;
}
以上,我们就分析完了buf_queue的主要接口。可以看出,整个运作机制其实比mutex更为简单些,因为不用考虑到优先级反转的问题,另外,很多其他概念也与mutex有一定的相似性。相比起来比mutex的逻辑更加容易理解。