一个操作系统中,进程的同步机制很多,这篇先介绍使用比较广泛的mutex,因为有些后续的内容也会用到mutex,所以先介绍。
Alios-Things中的mutex结构体如下:
typedef struct mutex_s {
blk_obj_t blk_obj; /* blocking object: bookkeeping for tasks blocked on this mutex */
ktask_t *mutex_task; /* mutex owner task */
struct mutex_s *mutex_list; /* task mutex list */
mutex_nested_t owner_nested; /* nesting count for recursive locking by the owner */
#if (RHINO_CONFIG_KOBJ_LIST > 0)
klist_t mutex_item; /* hook node on the global mutex-object management list */
#endif
uint8_t mm_alloc_flag; /* how the mutex memory was allocated (user-provided vs dynamic) */
} kmutex_t;
typedef struct blk_obj {
klist_t blk_list; /* list linking the tasks blocked on this object */
const name_t *name;
blk_policy_t blk_policy; /* blocking policy: BLK_POLICY_PRI = queue by priority, BLK_POLICY_FIFO = first-in first-out */
kobj_type_t obj_type; /* kernel object type; RHINO_MUTEX_OBJ_TYPE for a mutex */
#if (RHINO_CONFIG_USER_SPACE > 0)
klist_t obj_list;
#endif
#if (RHINO_CONFIG_TASK_DEL > 0)
uint8_t cancel; /* set to 1 to mark that the task blocked here has been cancelled */
#endif
} blk_obj_t;
与mutex相关的,提供给用户使用的函数接口如下:
/**
* This function will create a mutex
* @param[in] mutex pointer to the mutex(the space is provided by user)
* @param[in] name name of the mutex
* @return the operation status, RHINO_SUCCESS is OK, others is error
*/
kstat_t krhino_mutex_create(kmutex_t *mutex, const name_t *name);
/**
* This function will delete a mutex
* @param[in] mutex pointer to the mutex
* @return the operation status, RHINO_SUCCESS is OK, others is error
*/
kstat_t krhino_mutex_del(kmutex_t *mutex);
#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
/**
* This function will create a dyn mutex
* @param[out] mutex pointer to the mutex pointer (the space is allocated dynamically by the kernel)
* @param[in] name name of the mutex
* @return the operation status, RHINO_SUCCESS is OK, others is error
*/
kstat_t krhino_mutex_dyn_create(kmutex_t **mutex, const name_t *name);
/**
* This function will delete a dyn mutex
* @param[in] mutex pointer to the mutex
* @return the operation status, RHINO_SUCCESS is OK, others is error
*/
kstat_t krhino_mutex_dyn_del(kmutex_t *mutex);
#endif
/**
* This function will lock mutex
* @param[in] mutex pointer to the mutex
* @param[in] ticks ticks to be wait for before lock
* @return the operation status, RHINO_SUCCESS is OK, others is error
*/
kstat_t krhino_mutex_lock(kmutex_t *mutex, tick_t ticks);
/**
* This function will unlock a mutex
* @param[in] mutex pointer to the mutex
* @return the operation status, RHINO_SUCCESS is OK, others is error
*/
kstat_t krhino_mutex_unlock(kmutex_t *mutex);
这其中krhino_mutex_dyn_create/krhino_mutex_dyn_del和krhino_mutex_create/krhino_mutex_del区别在于mutex的内存分配是动态分配,还是由用户自己提供分配好,我觉得一般使用肯定选前者动态分配来得方便。
所以我们需要分析的就如下几个函数:
- kstat_t krhino_mutex_dyn_create(kmutex_t **mutex, const name_t *name);
- kstat_t krhino_mutex_dyn_del(kmutex_t *mutex);
- kstat_t krhino_mutex_lock(kmutex_t *mutex, tick_t ticks);
- kstat_t krhino_mutex_unlock(kmutex_t *mutex);
先看kstat_t krhino_mutex_dyn_create(kmutex_t **mutex, const name_t *name);
/**
 * Create a mutex whose kmutex_t storage is allocated from the kernel heap.
 *
 * @param[out] mutex receives a pointer to the newly allocated mutex
 * @param[in]  name  name of the mutex
 * @return RHINO_SUCCESS on success; RHINO_NULL_PTR for a NULL out-parameter,
 *         RHINO_NO_MEM if allocation fails, or a mutex_create() error code
 */
kstat_t krhino_mutex_dyn_create(kmutex_t **mutex, const name_t *name)
{
    kstat_t   stat;
    kmutex_t *mutex_obj;

    /* NULL_PARA_CHK returns RHINO_NULL_PTR on a NULL argument, so the
       explicit "if (mutex == NULL)" test the original carried in addition
       to it was redundant and has been dropped */
    NULL_PARA_CHK(mutex);

    /* allocate the kmutex_t object dynamically */
    mutex_obj = krhino_mm_alloc(sizeof(kmutex_t));
    if (mutex_obj == NULL) {
        return RHINO_NO_MEM;
    }

    /* initialise it, tagging the memory as dynamically allocated so that
       krhino_mutex_dyn_del() knows it must free it */
    stat = mutex_create(mutex_obj, name, K_OBJ_DYN_ALLOC);
    if (stat != RHINO_SUCCESS) {
        /* initialisation failed: give the memory back, do not touch *mutex */
        krhino_mm_free(mutex_obj);
        return stat;
    }

    *mutex = mutex_obj;

    return stat;
}
/*
 * Common initialisation for a kmutex_t, used by both the static and the
 * dynamic create paths. mm_alloc_flag records how the memory was obtained
 * so the matching delete path knows whether to free it.
 */
kstat_t mutex_create(kmutex_t *mutex, const name_t *name, uint8_t mm_alloc_flag)
{
#if (RHINO_CONFIG_KOBJ_LIST > 0)
CPSR_ALLOC();
#endif
NULL_PARA_CHK(mutex);
NULL_PARA_CHK(name);
memset(mutex, 0, sizeof(kmutex_t));
/* init the blocked-task list (empty: nobody waits on a fresh mutex) */
klist_init(&mutex->blk_obj.blk_list);
mutex->blk_obj.blk_policy = BLK_POLICY_PRI; /* waiters are queued by priority */
mutex->blk_obj.name = name;
mutex->mutex_task = NULL; /* no owner yet */
mutex->mutex_list = NULL;
mutex->mm_alloc_flag = mm_alloc_flag;
#if (RHINO_CONFIG_TASK_DEL > 0)
mutex->blk_obj.cancel = 0u;
#endif
#if (RHINO_CONFIG_KOBJ_LIST > 0)
/* hook the mutex onto the global kernel-object list, under interrupt lock */
RHINO_CRITICAL_ENTER();
klist_insert(&(g_kobj_list.mutex_head), &mutex->mutex_item);
RHINO_CRITICAL_EXIT();
#endif
mutex->blk_obj.obj_type = RHINO_MUTEX_OBJ_TYPE;
TRACE_MUTEX_CREATE(krhino_cur_task_get(), mutex, name);
return RHINO_SUCCESS;
}
mutex_create就是mutex的一些基本信息的初始化,基本都能一眼看懂。
接下来我们重点看kstat_t krhino_mutex_lock(kmutex_t *mutex, tick_t ticks);
/*
 * Lock a mutex, blocking for at most 'ticks' ticks if it is already owned.
 * Recursive locking by the current owner is supported via owner_nested.
 */
kstat_t krhino_mutex_lock(kmutex_t *mutex, tick_t ticks)
{
CPSR_ALLOC();
kstat_t ret;
ktask_t *mutex_task;
uint8_t cur_cpu_num;
NULL_PARA_CHK(mutex);
/* scheduler not started yet: nothing can contend, just succeed */
if (g_sys_stat == RHINO_STOPPED) {
return RHINO_SUCCESS;
}
/* enter critical section (interrupts disabled) */
RHINO_CRITICAL_ENTER();
/* current CPU number */
cur_cpu_num = cpu_cur_get();
TASK_CANCEL_CHK(mutex);
/* locking from interrupt context is not allowed; return immediately if nested in an ISR */
INTRPT_NESTED_LEVEL_CHK();
/* verify the object really is a mutex */
if (mutex->blk_obj.obj_type != RHINO_MUTEX_OBJ_TYPE) {
RHINO_CRITICAL_EXIT();
return RHINO_KOBJ_TYPE_ERR;
}
/* if the task that calls lock is the one that already holds the mutex,
this is recursive (nested) locking */
/* if the same task get the same mutex again, it causes mutex owner nested */
if (g_active_task[cur_cpu_num] == mutex->mutex_task) {
/* owner_nested has hit the max value of mutex_nested_t ((mutex_nested_t)-1):
nesting overflow */
if (mutex->owner_nested == (mutex_nested_t)-1) {
/* fatal error here, system must be stoped here */
k_err_proc(RHINO_MUTEX_NESTED_OVF);
RHINO_CRITICAL_EXIT();
return RHINO_MUTEX_NESTED_OVF;
} else {
/* bump the nesting count: the same task locked the same mutex again,
and must unlock the same number of times before it is released */
mutex->owner_nested++;
}
RHINO_CRITICAL_EXIT();
return RHINO_MUTEX_OWNER_NESTED;
}
mutex_task = mutex->mutex_task;
/* mutex_task == NULL means the mutex is currently free */
if (mutex_task == NULL) {
/* get lock */
mutex->mutex_task = g_active_task[cur_cpu_num];
#if (RHINO_CONFIG_MUTEX_INHERIT > 0)
/* chain this mutex in front of the owner's existing mutex_list ... */
mutex->mutex_list = g_active_task[cur_cpu_num]->mutex_list;
/* ... so every mutex held by the task can be found from task->mutex_list */
g_active_task[cur_cpu_num]->mutex_list = mutex;
#endif
/* first acquisition: nesting count starts at 1 */
mutex->owner_nested = 1u;
TRACE_MUTEX_GET(g_active_task[cur_cpu_num], mutex, ticks);
RHINO_CRITICAL_EXIT();
/* mutex acquired, done */
return RHINO_SUCCESS;
}
/* reaching here means the mutex is owned by another task */
/* ticks == RHINO_NO_WAIT: the caller does not want to block */
/* can't get mutex, and return immediately if wait_option is RHINO_NO_WAIT */
if (ticks == RHINO_NO_WAIT) {
RHINO_CRITICAL_EXIT();
return RHINO_NO_PEND_WAIT;
}
/* system is locked so task can not be blocked just return immediately */
/* g_sched_lock[cur_cpu_num] > 0 means scheduling is disabled on this CPU,
so the task may not block */
if (g_sched_lock[cur_cpu_num] > 0u) {
RHINO_CRITICAL_EXIT();
return RHINO_SCHED_DISABLE;
}
#if (RHINO_CONFIG_MUTEX_INHERIT > 0)
/* if current task is a higher prio task and block on the mutex
prio inverse condition happened, prio inherit method is used here */
/* priority inheritance: if the task about to block has a higher priority
(smaller number) than the owner, raise the owner's priority so it cannot
be preempted by middle-priority tasks (see the discussion on priority
inversion at the end of this article) */
if (g_active_task[cur_cpu_num]->prio < mutex_task->prio) {
task_pri_change(mutex_task, g_active_task[cur_cpu_num]->prio);
TRACE_TASK_PRI_INV(g_active_task[cur_cpu_num], mutex_task);
}
#endif
/* remove the current task from g_ready_queue and queue it on the mutex's blk_list */
/* any way block the current task */
pend_to_blk_obj((blk_obj_t *)mutex, g_active_task[cur_cpu_num], ticks);
TRACE_MUTEX_GET_BLK(g_active_task[cur_cpu_num], mutex, ticks);
/* actually reschedule; covered in the task-scheduling chapter */
RHINO_CRITICAL_EXIT_SCHED();
/* back here: the task was woken up — either the owner released the mutex
and this task acquired it, or the wait timed out after 'ticks', or some
other event turned the task runnable again */
RHINO_CPU_INTRPT_DISABLE();
/* work out why we woke up and translate it into a return status */
/* so the task is waked up, need know which reason cause wake up */
ret = pend_state_end_proc(g_active_task[cpu_cur_get()], (blk_obj_t *)mutex);
RHINO_CPU_INTRPT_ENABLE();
return ret;
}
/*
 * Block 'task' on 'blk_obj': remove it from the ready queue, queue it on the
 * object's blocked list, and optionally arm a wake-up timeout.
 */
void pend_to_blk_obj(blk_obj_t *blk_obj, ktask_t *task, tick_t timeout)
{
/* task need to remember which object is blocked on */
task->blk_obj = blk_obj;
/* unless waiting forever, put the task on the tick list with its timeout */
if (timeout != RHINO_WAIT_FOREVER) {
tick_list_insert(task, timeout);
}
/* mark the task as pending */
task->task_state = K_PEND;
/* remove from the ready list */
ready_list_rm(&g_ready_queue, task);
/* BLK_POLICY_FIFO: append at the tail of the blocked list;
BLK_POLICY_PRI: insert sorted by priority, so the highest-priority
waiter is the first to be woken when the object is released */
if (blk_obj->blk_policy == BLK_POLICY_FIFO) {
/* add to the end of blocked objet list */
klist_insert(&blk_obj->blk_list, &task->task_list);
} else {
/* add to the prio sorted block list */
pend_list_add(&blk_obj->blk_list, task);
}
}
/*
 * Translate the reason a task was woken from a pend into a return status.
 */
kstat_t pend_state_end_proc(ktask_t *task, blk_obj_t *blk_obj)
{
kstat_t status;
(void)blk_obj;
switch (task->blk_state) {
/* BLK_FINISH: woken because the blocking object was granted (e.g. the
mutex was released to this task) — success */
case BLK_FINISH:
status = RHINO_SUCCESS;
break;
/* BLK_ABORT: the pend was aborted (e.g. the task was cancelled via
krhino_task_cancel and woken to run its deletion path) —
returns RHINO_BLK_ABORT */
case BLK_ABORT:
status = RHINO_BLK_ABORT;
break;
/* BLK_TIMEOUT: the wait on the object timed out — returns RHINO_BLK_TIMEOUT */
case BLK_TIMEOUT:
status = RHINO_BLK_TIMEOUT;
break;
case BLK_DEL:
/* BLK_DEL: woken because the blocking object itself was deleted —
returns RHINO_BLK_DEL */
status = RHINO_BLK_DEL;
break;
default:
k_err_proc(RHINO_BLK_INV_STATE);
status = RHINO_BLK_INV_STATE;
break;
}
/* the remainder only refines the status to RHINO_TASK_CANCELED based on the
task's cancel flags; it does not change the main outcome otherwise */
#if (RHINO_CONFIG_TASK_DEL > 0)
if (blk_obj == 0) {
if (task->cancel == 1u) {
status = RHINO_TASK_CANCELED;
}
return status;
}
if ((task->cancel == 1u) && (blk_obj->cancel == 1u)) {
status = RHINO_TASK_CANCELED;
}
#endif
return status;
}
krhino_mutex_lock大概流程就这样分析完了,再看krhino_mutex_unlock:
/*
 * Unlock a mutex. Only the owner may unlock; nested locks are counted down,
 * and when the count reaches zero the first blocked waiter (highest priority
 * under BLK_POLICY_PRI), if any, is woken and becomes the new owner.
 */
kstat_t krhino_mutex_unlock(kmutex_t *mutex)
{
CPSR_ALLOC();
klist_t *blk_list_head;
ktask_t *task;
uint8_t cur_cpu_num;
NULL_PARA_CHK(mutex);
/* scheduler not started yet: just succeed */
if (g_sys_stat == RHINO_STOPPED) {
return RHINO_SUCCESS;
}
RHINO_CRITICAL_ENTER();
/* unlocking from interrupt context is not allowed; return immediately if in an ISR */
INTRPT_NESTED_LEVEL_CHK();
/* verify the object really is a mutex */
if (mutex->blk_obj.obj_type != RHINO_MUTEX_OBJ_TYPE) {
RHINO_CRITICAL_EXIT();
return RHINO_KOBJ_TYPE_ERR;
}
/* current CPU number */
cur_cpu_num = cpu_cur_get();
/* only the task that holds the mutex may release it */
/* mutex must be released by itself */
if (g_active_task[cur_cpu_num] != mutex->mutex_task) {
RHINO_CRITICAL_EXIT();
return RHINO_MUTEX_NOT_RELEASED_BY_OWNER;
}
/* checks done: decrement the nesting count */
mutex->owner_nested--;
/* still nested: the lock is not actually released yet */
if (mutex->owner_nested > 0u) {
RHINO_CRITICAL_EXIT();
return RHINO_MUTEX_OWNER_NESTED;
}
/* despite its name, mutex_release does not hand over the lock: it only
resets the releasing task to a suitable priority (undoing any priority
inheritance); analysed in detail below */
mutex_release(g_active_task[cur_cpu_num], mutex);
/* look for tasks blocked on this mutex */
blk_list_head = &mutex->blk_obj.blk_list;
/* nobody waiting: clear the owner and we are done */
/* if no block task on this list just return */
if (is_klist_empty(blk_list_head)) {
/* No wait task */
mutex->mutex_task = NULL;
TRACE_MUTEX_RELEASE_SUCCESS(g_active_task[cur_cpu_num], mutex);
RHINO_CRITICAL_EXIT();
return RHINO_SUCCESS;
}
/* take the first task on the blocked list */
/* there must have task blocked on this mutex object */
task = krhino_list_entry(blk_list_head->next, ktask_t, task_list);
/* pend_task_wakeup only moves the task to the ready queue; which task runs
next is decided by the scheduler at the next scheduling point */
/* wake up the occupy task, which is the highst prio task on the list */
pend_task_wakeup(task);
TRACE_MUTEX_TASK_WAKE(g_active_task[cur_cpu_num], task, mutex);
/* hand the mutex over to the woken task */
/* change mutex get task */
mutex->mutex_task = task;
#if (RHINO_CONFIG_MUTEX_INHERIT > 0)
/* link this mutex at the head of the new owner's mutex_list */
mutex->mutex_list = task->mutex_list;
task->mutex_list = mutex;
#endif
/* new owner starts with a nesting count of 1 */
mutex->owner_nested = 1u;
/* actually reschedule; covered in the scheduling chapter */
RHINO_CRITICAL_EXIT_SCHED();
return RHINO_SUCCESS;
}
/*
 * Despite the name, this does not release the lock itself: it recomputes a
 * suitable priority for 'task' now that 'mutex_rel' is being released,
 * undoing (or re-applying) priority inheritance as needed.
 */
static void mutex_release(ktask_t *task, kmutex_t *mutex_rel)
{
uint8_t new_pri;
/* pick a priority that keeps 'task' at least as high as the highest-priority
task still blocked on any other mutex on task->mutex_list; this is what
prevents priority inversion — see mutex_pri_look */
/* find suitable task prio */
new_pri = mutex_pri_look(task, mutex_rel);
if (new_pri != task->prio) {
/* change prio */
/* resetting the priority here serves two purposes:
1. if the task was boosted while holding this mutex, it drops back now;
2. if higher-priority tasks are still blocked on other mutexes the task
holds, it is raised to their level, avoiding priority inversion */
/* task_pri_change is long but conceptually simple: set task's prio to new_pri */
task_pri_change(task, new_pri);
TRACE_MUTEX_RELEASE(g_active_task[cpu_cur_get()], task, new_pri);
}
}
/*
 * Compute the priority 'task' should run at once 'mutex_rel' is released:
 * the highest (numerically smallest) of the task's base priority and the
 * priorities of the first waiter on every other mutex the task still holds.
 * Side effect: 'mutex_rel' is unlinked from task->mutex_list.
 */
uint8_t mutex_pri_look(ktask_t *task, kmutex_t *mutex_rel)
{
#if (RHINO_CONFIG_MUTEX_INHERIT > 0)
kmutex_t *mutex_tmp;
kmutex_t **prev;
uint8_t new_pri;
uint8_t pri;
ktask_t *first_blk_task;
klist_t *blk_list_head;
/* start from the task's original (base) priority */
/* the base prio of task */
new_pri = task->b_prio;
/* pri tracks the priority of the current candidate waiter;
prev walks the task's list of held mutexes */
/* the highest prio in mutex which is locked */
pri = new_pri;
prev = &task->mutex_list;
/* walk every mutex the task holds */
while ((mutex_tmp = *prev) != NULL) {
/* the mutex being released is unlinked from the list and skipped */
if (mutex_tmp == mutex_rel) {
/* delete itself from list and make task->mutex_list point to next */
/* *prev is the link that points at mutex_tmp; redirect it to the
next mutex on the list */
*prev = mutex_tmp->mutex_list;
continue;
}
/* inspect the first task blocked on this held mutex */
blk_list_head = &mutex_tmp->blk_obj.blk_list;
if (!is_klist_empty(blk_list_head)) {
/* non-empty: take the priority of the first blocked task */
first_blk_task = krhino_list_entry(blk_list_head->next, ktask_t, task_list);
pri = first_blk_task->prio;
}
/* keep the numerically smaller value — higher priority wins
(NOTE(review): when a mutex has no waiters, pri still holds the value
from an earlier iteration; this appears harmless because new_pri has
already absorbed that value — confirm against upstream) */
if (new_pri > pri) {
new_pri = pri;
}
/* advance to the next held mutex, until NULL ends the list */
prev = &mutex_tmp->mutex_list;
}
return new_pri;
#else
return task->b_prio;
#endif
}
/*
 * Set 'task' to priority 'new_pri', fixing up whichever queue the task sits
 * on. If the task is itself blocked on a mutex, the change may need to be
 * propagated to that mutex's owner; the do/while loop walks this priority
 * inheritance chain until no further task needs adjusting (task == NULL).
 */
kstat_t task_pri_change(ktask_t *task, uint8_t new_pri)
{
uint8_t old_pri;
kmutex_t *mutex_tmp;
ktask_t *mutex_task;
do {
if (task->prio != new_pri) {
switch (task->task_state) {
case K_RDY:
ready_list_rm(&g_ready_queue, task);
task->prio = new_pri;
/* ready task: re-insert into the ready queue at the new priority —
at the head if it is the task currently running on this CPU,
otherwise at the tail */
if (task == g_active_task[cpu_cur_get()]) {
ready_list_add_head(&g_ready_queue, task);
} else {
ready_list_add_tail(&g_ready_queue, task);
}
task = NULL;
break;
/* K_SLEEP / K_SUSPENDED / K_SLEEP_SUSPENDED: the task is on no
priority-sorted queue, so just store the new priority */
case K_SLEEP:
case K_SUSPENDED:
case K_SLEEP_SUSPENDED:
/* set new task prio */
task->prio = new_pri;
task = NULL;
break;
/* K_PEND / K_PEND_SUSPENDED: the task is blocked; the interesting
case is when the blocking object is a mutex */
case K_PEND:
case K_PEND_SUSPENDED:
/* remember the task's previous priority */
old_pri = task->prio;
/* apply the new priority */
task->prio = new_pri;
/* re-sort the task within its pend (blocked) list */
pend_list_reorder(task);
/* if the blocking object is a mutex */
if (task->blk_obj->obj_type == RHINO_MUTEX_OBJ_TYPE) {
/* mutex_tmp is the mutex this task is blocked on */
mutex_tmp = (kmutex_t *)(task->blk_obj);
/* mutex_task is the task that owns that mutex */
mutex_task = mutex_tmp->mutex_task;
/* owner priority is lower than the waiter's new priority
(larger number = lower priority) */
if (mutex_task->prio > task->prio) {
/* since the highest prio of the lock wait task
became higher, raise the lock get task prio
higher */
/* iterate again with task = owner so the owner
inherits the raised priority too */
task = mutex_task;
} else if (mutex_task->prio == old_pri) {
/* the owner may have been running at the waiter's old
priority (inherited earlier); recompute a suitable
priority for the owner from all the mutexes it still
holds — mutex_pri_look (analysed above) returns a
priority no lower than that of any task blocked on
the owner's held mutexes */
/* find suitable tcb prio */
new_pri = mutex_pri_look(mutex_task, 0);
/* propagate only if the owner's priority must change */
if (new_pri != mutex_task->prio) {
/* Change prio of lock get task */
/* owner's priority needs adjusting: loop on it */
task = mutex_task;
} else {
/* owner's priority is already correct: done */
task = NULL;
}
} else {
task = NULL;
}
} else {
task = NULL;
}
break;
default:
k_err_proc(RHINO_INV_TASK_STATE);
return RHINO_INV_TASK_STATE;
}
} else {
task = NULL;
}
} while (task != NULL);
return RHINO_SUCCESS;
}
/*
 * Re-sort a blocked task within its pend list after a priority change.
 * FIFO-policy lists keep arrival order, so only BLK_POLICY_PRI lists
 * need to be re-ordered.
 */
void pend_list_reorder(ktask_t *task)
{
    if (task->blk_obj->blk_policy != BLK_POLICY_PRI) {
        return;
    }

    /* unlink the task, then re-insert it at the position that matches
       its (possibly new) priority */
    klist_rm(&task->task_list);
    pend_list_add(&task->blk_obj->blk_list, task);
}
最后我们再来分析一下krhino_mutex_dyn_del,这个接口是用来销毁mutex的:
/**
 * Delete a mutex created by krhino_mutex_dyn_create() and free its memory.
 * Any tasks blocked on the mutex are woken (their pend ends with BLK_DEL).
 *
 * @param[in] mutex pointer to the mutex
 * @return RHINO_SUCCESS on success; RHINO_NULL_PTR, RHINO_KOBJ_TYPE_ERR or
 *         RHINO_KOBJ_DEL_ERR on failure
 */
kstat_t krhino_mutex_dyn_del(kmutex_t *mutex)
{
    CPSR_ALLOC();
    klist_t *blk_list_head;

    /* NULL_PARA_CHK returns RHINO_NULL_PTR for a NULL mutex, so the explicit
       "if (mutex == NULL)" test the original carried before it was redundant
       and has been dropped */
    NULL_PARA_CHK(mutex);

    RHINO_CRITICAL_ENTER();
    /* deleting a kernel object from interrupt context is not allowed */
    INTRPT_NESTED_LEVEL_CHK();

    /* verify the object really is a mutex */
    if (mutex->blk_obj.obj_type != RHINO_MUTEX_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    /* only dynamically allocated mutexes may be freed here */
    if (mutex->mm_alloc_flag != K_OBJ_DYN_ALLOC) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_DEL_ERR;
    }

    /* the list of tasks blocked on this mutex */
    blk_list_head = &mutex->blk_obj.blk_list;

    mutex->blk_obj.obj_type = RHINO_OBJ_TYPE_NONE;

    /* if some task currently holds the mutex, give that task back a suitable
       priority (mutex_release was analysed above: it undoes any priority
       inheritance tied to this mutex) */
    if (mutex->mutex_task != NULL) {
        mutex_release(mutex->mutex_task, mutex);
    }

    /* wake every task blocked on this mutex; pend_task_rm moves each one off
       the blocked list and back to a schedulable state */
    /* all task blocked on this mutex is waken up */
    while (!is_klist_empty(blk_list_head)) {
        pend_task_rm(krhino_list_entry(blk_list_head->next, ktask_t, task_list));
    }

#if (RHINO_CONFIG_KOBJ_LIST > 0)
    /* unhook this mutex from the global mutex-object management list */
    klist_rm(&mutex->mutex_item);
#endif

    TRACE_MUTEX_DEL(g_active_task[cpu_cur_get()], mutex);

    /* actually reschedule; covered in the scheduling chapter */
    RHINO_CRITICAL_EXIT_SCHED();

    /* finally free the mutex memory itself */
    krhino_mm_free(mutex);

    return RHINO_SUCCESS;
}
/*
 * Forcibly end a task's pend (used when the object it is blocked on is
 * deleted): take it off the blocked list and make it schedulable again,
 * recording BLK_DEL as the wake-up reason.
 */
void pend_task_rm(ktask_t *task)
{
switch (task->task_state) {
/* plain pend: the task had blocked in krhino_buf_queue_recv,
krhino_mutex_lock or a similar blocking call */
case K_PEND:
/* take the task off the blocked list */
/* remove task on the block list because task is waken up */
klist_rm(&task->task_list);
/* back onto the ready queue, for the scheduler to pick up */
/*add to the ready list again*/
ready_list_add(&g_ready_queue, task);
/* and mark it ready */
task->task_state = K_RDY;
break;
/* the task was pending AND suspended */
case K_PEND_SUSPENDED:
/* take the task off the blocked list */
/* remove task on the block list because task is waken up */
klist_rm(&task->task_list);
/* it remains suspended — only the pend part ends */
task->task_state = K_SUSPENDED;
break;
default:
k_err_proc(RHINO_SYS_FATAL_ERR);
break;
}
/* drop the task from the tick (timeout) list: it no longer needs a
timeout wake-up */
/* remove task on the tick list because task is waken up */
tick_list_rm(task);
/* record why the pend ended: the blocking object was deleted */
task->blk_state = BLK_DEL;
/* task is nothing blocked on so reset it to NULL */
task->blk_obj = NULL;
}
至此,mutex相关的申请,删除,占用和释放四个主要接口,我们都分析完了。可以看到,mutex的处理,很大一部分逻辑都在处理规避优先级反转的问题,在下面科普一下mutex如何会造成优先级反转问题。
——————————————————————————————————————————————
mutex锁大概的逻辑基本就是这样,最后解释一下优先级反转:
-
优先级反转(Priority Inversion)
由于多进程共享资源,具有最高优先权的进程被低优先级进程阻塞,反而使具有中优先级的进程先于高优先级的进程执行,可能导致系统调度行为错误甚至崩溃。这就是所谓的优先级反转(Priority Inversion)。 -
产生原因
其实,优先级反转是在高优先级(假设为A)的任务要访问一个被低优先级任务(假设为C)占有的资源时,被阻塞.而此时又有优先级高于占有资源的任务(C)而低于被阻塞的任务(A)的优先级的任务(假设为B)时,于是,占有资源的任务就被挂起(占有的资源仍为它占有),因为占有资源的任务优先级很低,所以,它可能一直被另外的任务挂起.而它占有的资源也就一直不能释放,这样,引起任务A一直没办法执行.而比它优先低的任务却可以执行.所以,一个解决办法就是提高占有资源任务的优先级,让它正常执行,然后释放资源,以让任务A能正常获取资源而得以执行.
-
解决方案 ( 优先级继承 / 优先级天花板 )
目前解决优先级反转有许多种方法。其中普遍使用的有2种方法:一种被称作优先级继承(priority inheritance);另一种被称作优先级极限(priority ceilings)。
A. 优先级继承(priority inheritance)
优先级继承是指将低优先级任务的优先级提升到等待它所占有的资源的最高优先级任务的优先级.当高优先级任务由于等待资源而被阻塞时,此时资源的拥有者的优先级将会自动被提升.
B. 优先级天花板(priority ceilings)
优先级天花板是指将申请某资源的任务的优先级提升到可能访问该资源的所有任务中最高优先级任务的优先级.(这个优先级称为该资源的优先级天花板)
A 和B的区别:
优先级继承,只有当占有资源的低优先级的任务被阻塞时,才会提高占有资源任务的优先级,而优先级天花板,不论是否发生阻塞,都提升.
(以上优先级反转内容介绍摘录自 https://blog.csdn.net/Kendiv/article/details/1788966)
目前alios中mutex使用的是方案A,也就是优先级继承的方案。