3 线程调度scheduler
rtthread中对于多线程切换是通过优先级表搭配优先级组进行调度的,优先级表中存储所有线程的node,优先级组中存储线程的优先级;
优先级表通常有32个对象,每个优先级都是一个list_head节点,相同优先级的线程存储在相同优先级链表下;
rtos中高优先级的线程可以将低优先级的线程suspend,然后让芯片执行高优先级的线程;对于优先级相同的线程rtos通过时间片轮询执行;
线程调度函数比想象中简单的多,主要分为两个函数,一个是第一次执行调度的函数、一个是之后执行调度的函数;
3.1 rt_thread_ready_priority_group
线程就绪优先级组是一个32bits常数,每1bit对应一个优先级,通过对优先级组中最低位的判断可以知道当前线程的最高优先级;
优先级组用来配合优先级表进行系统调度;
3.1.1 优先级组定义
//scheduler.c
#if RT_THREAD_PRIORITY_MAX > 32
/* Maximum priority level, 256: the 32-bit group selects one byte of
 * rt_thread_ready_table, and each byte covers 8 priorities (32 * 8 = 256). */
rt_uint32_t rt_thread_ready_priority_group;
rt_uint8_t rt_thread_ready_table[32];
#else
/* Maximum priority level, 32 */
rt_uint32_t rt_thread_ready_priority_group; /* thread ready priority group: bit N set => a READY thread exists at priority N */
#endif
3.1.2 优先级组查询
如果对优先级组按bit判断取出最高优先级,rtthread觉得那样浪费时间,所以rtthread采用了一种以内存换效率的数组寻址优先级,搭配逻辑判断使用;
为什么不32bit全部采用数组寻址,可能全部采用数组寻址内存占用又太大划不来;
//scheduler.c 优先级组调用方式;
highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
to_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next, struct rt_thread, tlist);
//kservice.c
/* Find the first (least significant) set bit of a 32-bit word.
 *
 * Returns the 1-based bit position; 0 means "no bit set".  The +1 bias lets
 * 0 act as the empty marker, so callers subtract 1 to recover the priority
 * number (an empty group therefore yields -1).
 *
 * Instead of testing 32 bits one by one, the word is narrowed to its lowest
 * non-zero byte and that byte is resolved with one lookup in the 256-entry
 * __lowest_bit_bitmap table -- memory traded for speed. */
int __rt_ffs(int value)
{
    unsigned int v = (unsigned int)value;

    if (v == 0)
        return 0;

    if (v & 0x000000ff)
        return 1 + __lowest_bit_bitmap[v & 0xff];          /* bits [7:0]   */

    if (v & 0x0000ff00)
        return 9 + __lowest_bit_bitmap[(v >> 8) & 0xff];   /* bits [15:8]  */

    if (v & 0x00ff0000)
        return 17 + __lowest_bit_bitmap[(v >> 16) & 0xff]; /* bits [23:16] */

    return 25 + __lowest_bit_bitmap[(v >> 24) & 0xff];     /* bits [31:24] */
}
//乍一看还以为是什么复杂的东西,其实这个数组纯粹力气活:把0~255的所有取值依次穷举,在每个数值对应的数组位置存入该数值最低置位bit的位号(也就是该字节内的最高优先级);
//不要觉得这个数组是个力气活就不当回事,人家还有一个高精尖的名字叫“位图算法”;
//下面的优先级是所在字节的实际优先级
/* Bitmap lookup table: for every byte value 0..255, the entry holds the
 * position (0..7) of that value's least significant set bit.  Because lower
 * bit == numerically smaller priority == higher scheduling priority, this
 * resolves a ready-group byte to its highest-priority level in O(1).
 * Entry 0 is never used by __rt_ffs: a zero byte is rejected before the
 * lookup, so its value is a don't-care. */
const rt_uint8_t __lowest_bit_bitmap[] =
{
/* 00 */ 0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 10 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 20 */ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 30 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 40 */ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 50 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 60 */ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 70 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 80 */ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 90 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* A0 */ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* B0 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* C0 */ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* D0 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* E0 */ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* F0 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
3.2 rt_thread_priority_table
线程就绪优先级表是一个有32个数组对象的数组,每一个对应一个优先级的list_node,这些list_node上挂载的节点就像是排队一样先进先出等着依次调用;
rtthread中的系统调度就是优先级表搭配优先级组进行线程切换的过程,对优先级表的操作即系统调度;
3.2.1 优先级表定义
//scheduler.c
rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX]; /* priority table: one FIFO ready list per priority level */
struct rt_thread *rt_current_thread; /* pointer to the thread currently running on the CPU */
rt_uint8_t rt_current_priority; /* priority of the currently running thread */
rt_list_t rt_thread_defunct; /* list of defunct (exited) threads awaiting cleanup */
4 scheduler初始化
4.1 scheduler初始化
rtthread系统在进入main函数前的main补丁函数中初始化了系统;
//component.c rtthread_startup()中调用rt_system_scheduler_init();
//scheduler.c
/**
 * Initialize the system scheduler: empty every per-priority ready list,
 * clear the ready bitmap(s) and reset the current-thread bookkeeping.
 * Called once from rtthread_startup() before any thread exists.
 */
void rt_system_scheduler_init(void)
{
    register rt_base_t idx;

    /* scheduler starts unlocked */
    rt_scheduler_lock_nest = 0;

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("start scheduler: max priority 0x%02x\n",RT_THREAD_PRIORITY_MAX));

    /* every priority level begins with an empty ready list */
    for (idx = 0; idx < RT_THREAD_PRIORITY_MAX; idx++)
    {
        rt_list_init(&rt_thread_priority_table[idx]);
    }

    /* nothing is running yet: lowest priority, no current thread */
    rt_current_priority = RT_THREAD_PRIORITY_MAX - 1;
    rt_current_thread   = RT_NULL;

    /* no priority has a ready thread yet */
    rt_thread_ready_priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
    /* 256-level mode: the per-byte ready table starts cleared too */
    rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif

    /* defunct-thread list starts empty */
    rt_list_init(&rt_thread_defunct);
}
4.2 scheduler开始
//component.c rtthread_startup()结尾调用rt_system_scheduler_start();
//scheduler.c
/**
 * Start multithreading: pick the highest-priority READY thread and perform
 * the very first context switch to it.  There is no "from" context to save,
 * so rt_hw_context_switch_to() takes only the destination SP and this
 * function never returns.
 */
void rt_system_scheduler_start(void)
{
    register struct rt_thread *first_thread;
    register rt_ubase_t top_priority;

#if RT_THREAD_PRIORITY_MAX > 32
    register rt_ubase_t group_index;

    /* two-level lookup: group selects the byte, ready table selects the bit */
    group_index  = __rt_ffs(rt_thread_ready_priority_group) - 1;
    top_priority = (group_index << 3) + __rt_ffs(rt_thread_ready_table[group_index]) - 1;
#else
    /* lowest set bit == numerically smallest == highest priority */
    top_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#endif

    /* the head of that priority's ready FIFO runs first */
    first_thread = rt_list_entry(rt_thread_priority_table[top_priority].next,
                                 struct rt_thread,
                                 tlist);
    rt_current_thread = first_thread;

    /* hand the CPU over; never comes back */
    rt_hw_context_switch_to((rt_uint32_t)&first_thread->sp);
}
4.3 scheduler增删
为什么scheduler只有增删操作,而没有查改操作呢?因为list先进先出的排队属性,所以不需要查改操作;
//scheduler.c
/**
 * Put a thread on the ready list of its current priority and mark that
 * priority as ready in the bitmap(s).  Interrupts are disabled for the
 * whole update so the scheduler structures stay consistent.
 */
void rt_schedule_insert_thread(struct rt_thread *thread)
{
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    level = rt_hw_interrupt_disable();

    /* mark READY while preserving the non-state bits of stat */
    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);

    /* enqueue at the tail of the priority's FIFO (insert before the head) */
    rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                          &(thread->tlist));

#if RT_THREAD_PRIORITY_MAX <= 32
    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name, thread->current_priority));
#else
    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                 ("insert thread[%.*s], the priority: %d 0x%x %d\n",
                  RT_NAME_MAX,
                  thread->name,
                  thread->number,
                  thread->number_mask,
                  thread->high_mask));
#endif

#if RT_THREAD_PRIORITY_MAX > 32
    /* 256-level mode: set the bit inside the byte, then flag the byte below */
    rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif
    rt_thread_ready_priority_group |= thread->number_mask;

    rt_hw_interrupt_enable(level);
}
/**
 * Take a thread off its ready list.  When that list becomes empty, the
 * priority's bit is cleared from the ready group (and, in 256-level mode,
 * from the ready table first) so the scheduler stops scanning the level.
 */
void rt_schedule_remove_thread(struct rt_thread *thread)
{
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    level = rt_hw_interrupt_disable();

#if RT_THREAD_PRIORITY_MAX <= 32
    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name,
                                      thread->current_priority));
#else
    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                 ("remove thread[%.*s], the priority: %d 0x%x %d\n",
                  RT_NAME_MAX,
                  thread->name,
                  thread->number,
                  thread->number_mask,
                  thread->high_mask));
#endif

    /* unlink from the ready FIFO */
    rt_list_remove(&(thread->tlist));

    /* last thread at this priority gone -> drop the ready bit(s) */
    if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[thread->number] &= ~thread->high_mask;
        if (rt_thread_ready_table[thread->number] == 0)
        {
            rt_thread_ready_priority_group &= ~thread->number_mask;
        }
#else
        rt_thread_ready_priority_group &= ~thread->number_mask;
#endif
    }

    rt_hw_interrupt_enable(level);
}
4.4 rt_schedule( )系统切换;
通过优先级组取出最高就绪优先级highest_ready_priority,根据它在优先级表中取出to_thread,并更新rt_current_priority和rt_current_thread;最后启动rt_hw_context_switch进行上下文切换;
不理解rt_hw_context_switch_interrupt和rt_hw_context_switch是同一个汇编函数,那为什么还要判断哪个执行,先放着;
/**
 * Core scheduler routine: select the highest-priority READY thread and, if
 * it differs from the running one, context-switch to it.
 *
 * Interrupts stay disabled across the selection so the ready bitmaps and
 * lists cannot change underneath.  Does nothing (beyond re-enabling
 * interrupts) while the scheduler is locked (rt_scheduler_lock_nest != 0).
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *from_thread;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* check the scheduler is enabled or not */
    if (rt_scheduler_lock_nest == 0)
    {
        register rt_ubase_t highest_ready_priority;

#if RT_THREAD_PRIORITY_MAX <= 32
        /* lowest set bit == numerically smallest == highest priority */
        highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#else
        register rt_ubase_t number;

        /* 256-level mode: group selects the byte, ready table selects the bit */
        number = __rt_ffs(rt_thread_ready_priority_group) - 1;
        highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
#endif

        /* get switch to thread: head (oldest entry) of that priority's FIFO */
        to_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                                  struct rt_thread,
                                  tlist);

        /* if the destination thread is not the same as current thread */
        if (to_thread != rt_current_thread)
        {
            rt_current_priority = (rt_uint8_t)highest_ready_priority;
            from_thread = rt_current_thread;
            rt_current_thread = to_thread;

            RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));

            /* switch to new thread */
            RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                         ("[%d]switch to priority#%d "
                          "thread:%.*s(sp:0x%p), "
                          "from thread:%.*s(sp: 0x%p)\n",
                          rt_interrupt_nest, highest_ready_priority,
                          RT_NAME_MAX, to_thread->name, to_thread->sp,
                          RT_NAME_MAX, from_thread->name, from_thread->sp));

#ifdef RT_USING_OVERFLOW_CHECK
            _rt_scheduler_stack_check(to_thread);
#endif

            if (rt_interrupt_nest == 0)
            {
                extern void rt_thread_handle_sig(rt_bool_t clean_state);

                /* thread-level switch; execution resumes here when the
                 * "from" thread is scheduled again */
                rt_hw_context_switch((rt_uint32_t)&from_thread->sp,
                                     (rt_uint32_t)&to_thread->sp);

                /* enable interrupt */
                rt_hw_interrupt_enable(level);

#ifdef RT_USING_SIGNALS
                /* check signal status */
                rt_thread_handle_sig(RT_TRUE);
#endif
            }
            else
            {
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));

                /* called from ISR context: use the interrupt variant of the
                 * switch (NOTE(review): presumably deferred to interrupt
                 * exit on this port -- confirm against the port's asm) */
                rt_hw_context_switch_interrupt((rt_uint32_t)&from_thread->sp,
                                               (rt_uint32_t)&to_thread->sp);
                /* enable interrupt */
                rt_hw_interrupt_enable(level);
            }
        }
        else
        {
            /* already running the highest-priority thread: nothing to do */
            /* enable interrupt */
            rt_hw_interrupt_enable(level);
        }
    }
    else
    {
        /* scheduler locked: just restore the interrupt state */
        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }
}
4.5 关于链表插入取出的一次记录
//原理见2.1节,这个问题困扰了我相当长的时间,我决定给这个问题足够的关注度,下面两个小节都用来记录它;
//如果node都是插入到list_head之前,那么第一个插入的node在最前面,即table[list_head].next;
//如果node都是插入到list_head之后,那么第一个插入的node在最后面,即table[list_head].prev;
//这个优先级表是insert_before,所以取出的时候是.next;
rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]), &(thread->tlist));
to_thread = rt_list_entry(rt_thread_priority_table[rt_current_priority].next, struct rt_thread, tlist);
//这个容器列表是insert_after,所以取出的时候是.prev;目前没有取出的例子;
rt_list_insert_after(&(information->object_list), &(object->list));
//这个定时器链表虽然是insert_after,但是是升序排列插入,所以用.next取出第一个来比较;
rt_list_insert_after(row_head[RT_TIMER_SKIP_LIST_LEVEL - 1], &(timer->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
t = rt_list_entry(rt_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1].next, struct rt_timer, row[RT_TIMER_SKIP_LIST_LEVEL - 1]);
5 节点函数
当插入的list_node前后都有node时,插入list_node的代码顺序应该如图,如果是after则从后往前写代码,如果是before则从前往后写代码,完美;
5.1 第一次插入node
list_head和list_node在初始化的时候,都初始化成了自身地址;如果只有一个list_head,那么在插入第一个list_node的时候节点代码又是如何处理的呢?
以insert_after函数举例,步骤1的l->next->prev = l->prev = n;即list_head->prev = list_rear; list首可寻址尾;
步骤3的n->next = l->next = l; 即list_rear->next = list_head;list尾可寻址首;
这个首尾寻址十分巧妙嘞,既不用多余的代码处理第一次插入,又使得链表可以时间片轮询,一举两得;
#ifndef __RT_SERVICE_H__
#define __RT_SERVICE_H__
//rtservice.h
//Recover the address of the enclosing struct from a pointer to one of its members.
#define rt_container_of(ptr, type, member) ((type *)((char *)(ptr) - (unsigned long)(&((type *)0)->member)))
#define rt_list_entry(node, type, member) rt_container_of(node, type, member)
//Expanded form for readability; only the outermost parentheses and those around
//node could be dropped -- the remaining ones are required for macro safety:
//#define rt_list_entry(node, struct, member) (struct *)( (char *)node - (unsigned long)( &((struct *)0)->member ) )
/* Initialize a list head as an empty circular list: both links point back at
 * the head itself, so the first insertion needs no special-case code. */
rt_inline void rt_list_init(rt_list_t *l)
{
    l->next = l;
    l->prev = l;
}
/* Link node n immediately after node l.  The old successor of l is captured
 * first, so no link is lost while the four pointers are rewired. */
rt_inline void rt_list_insert_after(rt_list_t *l, rt_list_t *n)
{
    rt_list_t *succ = l->next;

    succ->prev = n;
    n->next = succ;
    n->prev = l;
    l->next = n;
}
/* Link node n immediately before node l.  The old predecessor of l is
 * captured first, mirroring rt_list_insert_after. */
rt_inline void rt_list_insert_before(rt_list_t *l, rt_list_t *n)
{
    rt_list_t *pred = l->prev;

    pred->next = n;
    n->prev = pred;
    n->next = l;
    l->prev = n;
}
/* Unlink node n: join its neighbours together, then reset n to a valid
 * single-node (empty) list so it can be safely re-inserted or tested. */
rt_inline void rt_list_remove(rt_list_t *n)
{
    rt_list_t *pred = n->prev;
    rt_list_t *succ = n->next;

    pred->next = succ;
    succ->prev = pred;

    n->next = n;
    n->prev = n;
}
/* A list is empty exactly when the head's next link loops back to the head
 * itself; rt_list_init sets both links that way, so checking one suffices. */
rt_inline int rt_list_isempty(const rt_list_t *l)
{
    return (l->next == l) ? 1 : 0;
}
#endif /* __RT_SERVICE_H__ */
6 小结
数据结构都有一个rt_object对象,这个rt_object对象中有list节点,rt_object对象初始化的时候顺便把list节点挂载到容器列表上了;
数据结构本身还有一个节点;比如线程的节点将线程挂载到优先级表,计时器的节点将本身挂载到计时器链表;
感觉为了系统化,导致我总是把随笔写的又臭又长,糅杂了一堆相关系列,不利于结构的清晰;以后还是一个系列知识点写一篇;