FreeRTOS Queue Send Source Code Analysis

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );
                xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    else if( xYieldRequired != pdFALSE ) /* This check exists because of priority inheritance (used to deal with priority inversion). */
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        /* The task was holding several mutexes, and one of the
                        other mutexes (not this one) had raised the task's
                        priority through priority inheritance.  No other task is
                        waiting on this queue, so when the last mutex held by the
                        task (this one) is given back, execution reaches this
                        branch. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

 

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }

        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
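For context, application code normally reaches xQueueGenericSend() through the xQueueSend()/xQueueSendToBack()/xQueueSendToFront()/xQueueOverwrite() wrapper macros, which only differ in the xCopyPosition value they pass. Below is a minimal task-level usage sketch; the queue handle, item type and task names are invented for illustration.

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

/* Hypothetical queue holding up to 8 uint32_t items. */
static QueueHandle_t xDemoQueue = NULL;

static void vSenderTask( void *pvParameters )
{
uint32_t ulValue = 0;

    ( void ) pvParameters;

    for( ;; )
    {
        /* xQueueSend() expands to xQueueGenericSend( ..., queueSEND_TO_BACK ).
        If the queue is full the task blocks for up to 100 ms; errQUEUE_FULL is
        returned if the queue is still full when the timeout expires. */
        if( xQueueSend( xDemoQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
        {
            /* The queue stayed full for the whole block time. */
        }

        ulValue++;
        vTaskDelay( pdMS_TO_TICKS( 10 ) );
    }
}

void vStartSenderDemo( void )
{
    xDemoQueue = xQueueCreate( 8, sizeof( uint32_t ) );
    configASSERT( xDemoQueue != NULL );
    ( void ) xTaskCreate( vSenderTask, "Sender", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, NULL );
}

With xTicksToWait equal to 0, xQueueGenericSend() returns errQUEUE_FULL on the first pass of its for( ;; ) loop; with a non-zero block time the calling task is placed on xTasksWaitingToSend and the call only fails once xTaskCheckForTimeOut() reports that the timeout has expired.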

 

 

xQueueGenericSendFromISR

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    /* (Queue set handling omitted in this excerpt.) */
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record
                            that a context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                /* If the ISR finds the transmit lock held while the queue is not
                full (or an overwrite is requested), the data is still copied into
                the queue and the transmit lock simply counts how many items were
                posted.  The lock only stops the ISR from touching the event and
                ready lists; it does not prevent the copy or overwrite itself. */
                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
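The interrupt-level path is normally reached through the xQueueSendFromISR() (or ...ToBack/ToFront FromISR) wrapper macros, which again just fix the xCopyPosition argument. A minimal ISR usage sketch follows; the handler name, queue and value are invented for illustration, and portYIELD_FROM_ISR() is assumed to be available on the port in use.

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

/* Hypothetical queue created elsewhere and filled from an interrupt. */
extern QueueHandle_t xDemoQueue;

void vDemoPeripheralISR( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;
uint32_t ulValue = 0x1234;   /* stand-in for a real peripheral register read */

    /* Never blocks: errQUEUE_FULL comes back immediately if there is no room.
    xHigherPriorityTaskWoken is set to pdTRUE if the post unblocked a task with
    a priority above the one the interrupt preempted. */
    if( xQueueSendFromISR( xDemoQueue, &ulValue, &xHigherPriorityTaskWoken ) != pdPASS )
    {
        /* The value was dropped because the queue was full. */
    }

    /* Request the context switch once, at the end of the ISR. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}

Deferring the yield to the end of the handler is exactly what the pxHigherPriorityTaskWoken flag is for: xQueueGenericSendFromISR() only records that a switch is needed, it never performs one itself.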

prvUnlockQueue

static void prvUnlockQueue( Queue_t * const pxQueue )
/* Note: the uxSchedulerSuspended flag is cleared in xTaskResumeAll(), not in
this unlock function.  Like its counterpart prvLockQueue(), this function only
operates on the transmit/receive lock counters. */
{
    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

    /* The lock counts contain the number of extra data items placed on or
    removed from the queue while the queue was locked.  When a queue is
    locked items can be added or removed, but the event lists cannot be
    updated. */
    taskENTER_CRITICAL();
    {
        int8_t cTxLock = pxQueue->cTxLock; /* Read the transmit lock count. */

        /* See if data was added to the queue while it was locked.  A count
        above queueLOCKED_UNMODIFIED means an ISR went through the send path
        while the queue was locked. */
        while( cTxLock > queueLOCKED_UNMODIFIED )
        {
            /* Data was posted while the queue was locked.  Are any tasks
            blocked waiting for data to become available? */
            #if ( configUSE_QUEUE_SETS == 1 )
            {
                /* (Queue set handling omitted in this excerpt.) */
            }
            #else /* configUSE_QUEUE_SETS */
            {
                /* Tasks that are removed from the event list will get added to
                the pending ready list as the scheduler is still suspended. */
                /* If a task was waiting to receive while the queue was locked,
                its item is removed from the event list.  If the scheduler were
                running, the task would also be moved out of the blocked lists
                and onto the ready list; with the scheduler suspended the item
                is placed on xPendingReadyList instead.  With tickless idle
                enabled, xNextTaskUnblockTime is refreshed as well, because the
                delayed list may have been modified. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority so record that
                        a context switch is required. */
                        vTaskMissedYield();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    break;
                }
            }
            #endif /* configUSE_QUEUE_SETS */

            /* Unlocking happens from task context, not from an ISR.  While the
            queue was locked, ISRs only counted the items they posted; the lock
            kept them away from the event lists.  The event-list work those ISRs
            deferred is carried out here by looping the required number of
            times.  If no task is waiting on the queue there is nothing to wake
            - the data has already been copied into the queue by the ISR - so
            the loop simply exits and the queue is unlocked; a task will fetch
            the data whenever it next reads the queue.  Note that
            xQueueGenericSend() suspends the scheduler before locking the queue,
            so tasks removed from the event list here go onto xPendingReadyList
            (the pending ready list) rather than directly onto the ready list. */
            --cTxLock;
        }

        pxQueue->cTxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();

    /* Do the same for the Rx lock. */
    taskENTER_CRITICAL();
    {
        int8_t cRxLock = pxQueue->cRxLock; /* Read the receive lock count. */

        while( cRxLock > queueLOCKED_UNMODIFIED )
        {
            /* The handling below mirrors the transmit lock case above. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    vTaskMissedYield();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                --cRxLock;
            }
            else
            {
                break;
            }
        }

        pxQueue->cRxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();
}
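For completeness, the lock side that prvUnlockQueue() undoes is the prvLockQueue() macro in the same file. It is reproduced here from memory as a sketch, so treat the exact layout as approximate; the point is that it marks both lock counters as "locked but unmodified" so that ISRs switch from touching the event lists to merely counting:

#define prvLockQueue( pxQueue )                            \
    taskENTER_CRITICAL();                                  \
    {                                                      \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
    }                                                      \
    taskEXIT_CRITICAL()

queueUNLOCKED is -1 and queueLOCKED_UNMODIFIED is 0, which is why prvUnlockQueue() treats any count above queueLOCKED_UNMODIFIED as the number of posts (or reads) that ISRs performed while the queue was locked.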

 

 

 

